Targets


default/pi-stock-service-monitor/0 (1/1 up)

http://10.42.0.174:3001/metrics
    State: up
    Labels: endpoint="pi-stock" instance="10.42.0.174:3001" job="pi-stock" namespace="default" pod="pi-stock-5d8dd7f4f7-qxqr8" service="pi-stock"
    Last scrape: 10.309s ago   Scrape duration: 12.08ms

kube-system/traefik-monitoring/0 (3/3 up)

http://10.42.0.183:8081/metrics
    State: up
    Labels: container="traefik" endpoint="metrics" instance="10.42.0.183:8081" job="kube-system/traefik-monitoring" namespace="kube-system" pod="traefik-4vlnq"
    Last scrape: 8.849s ago   Scrape duration: 6.341ms
http://10.42.1.218:8081/metrics
    State: up
    Labels: container="traefik" endpoint="metrics" instance="10.42.1.218:8081" job="kube-system/traefik-monitoring" namespace="kube-system" pod="traefik-mgcdm"
    Last scrape: 12.012s ago   Scrape duration: 12.69ms
http://10.42.2.51:8081/metrics
    State: up
    Labels: container="traefik" endpoint="metrics" instance="10.42.2.51:8081" job="kube-system/traefik-monitoring" namespace="kube-system" pod="traefik-s8p27"
    Last scrape: 807ms ago   Scrape duration: 6.951ms

monitoring/alertmanager/0 (1/1 up)

http://10.42.2.47:9093/metrics
    State: up
    Labels: endpoint="web" instance="10.42.2.47:9093" job="alertmanager-main" namespace="monitoring" pod="alertmanager-main-0" service="alertmanager-main"
    Last scrape: 5.206s ago   Scrape duration: 9.664ms

monitoring/arm-exporter/0 (3/3 up)

https://10.42.0.181:9243/metrics
    State: up
    Labels: endpoint="https" instance="main" job="arm-exporter" namespace="monitoring" pod="arm-exporter-gjg9w" service="arm-exporter"
    Last scrape: 15.749s ago   Scrape duration: 176.7ms
https://10.42.1.217:9243/metrics
    State: up
    Labels: endpoint="https" instance="node1" job="arm-exporter" namespace="monitoring" pod="arm-exporter-jzhqp" service="arm-exporter"
    Last scrape: 12.229s ago   Scrape duration: 9.84ms
https://10.42.2.50:9243/metrics
    State: up
    Labels: endpoint="https" instance="worker2" job="arm-exporter" namespace="monitoring" pod="arm-exporter-7w8n2" service="arm-exporter"
    Last scrape: 15.79s ago   Scrape duration: 595.4ms

monitoring/coredns/0 (2/2 up)

http://10.42.2.61:9153/metrics
    State: up
    Labels: endpoint="metrics" instance="10.42.2.61:9153" job="kube-dns" namespace="kube-system" pod="coredns-76c974cb66-x45nn" service="kube-dns"
    Last scrape: 8.737s ago   Scrape duration: 20.09ms
http://10.42.2.61:9153/metrics
    State: up
    Labels: endpoint="metrics" instance="10.42.2.61:9153" job="kube-dns" namespace="kube-system" pod="coredns-76c974cb66-x45nn" service="kube-dns-prometheus-discovery"
    Last scrape: 8.39s ago   Scrape duration: 18.83ms

monitoring/grafana/0 (1/1 up)

http://10.42.0.180:3000/metrics
    State: up
    Labels: endpoint="http" instance="10.42.0.180:3000" job="grafana" namespace="monitoring" pod="grafana-594fc7f587-v7wlr" service="grafana"
    Last scrape: 8.291s ago   Scrape duration: 8.22ms

monitoring/kube-apiserver/0 (1/1 up)

https://192.168.178.170:6443/metrics
    State: up
    Labels: endpoint="https" instance="192.168.178.170:6443" job="apiserver" namespace="default" service="kubernetes"
    Last scrape: 15.795s ago   Scrape duration: 891.1ms

monitoring/kube-controller-manager/0 (0/1 up)

http://192.168.178.170:10252/metrics
    State: down
    Labels: endpoint="http-metrics" instance="192.168.178.170:10252" job="kube-controller-manager" namespace="kube-system" service="kube-controller-manager-prometheus-discovery"
    Last scrape: 15.783s ago   Scrape duration: 1.565ms
    Error: Get "http://192.168.178.170:10252/metrics": dial tcp 192.168.178.170:10252: connect: connection refused

monitoring/kube-scheduler/0 (0/1 up)

http://192.168.178.170:10251/metrics
    State: down
    Labels: endpoint="http-metrics" instance="192.168.178.170:10251" job="kube-scheduler" namespace="kube-system" service="kube-scheduler-prometheus-discovery"
    Last scrape: 15.766s ago   Scrape duration: 1.108ms
    Error: Get "http://192.168.178.170:10251/metrics": dial tcp 192.168.178.170:10251: connect: connection refused

monitoring/kube-state-metrics/0 (1/1 up)

https://10.42.0.188:8443/metrics
    State: up
    Labels: instance="10.42.0.188:8443" job="kube-state-metrics"
    Last scrape: 2.178s ago   Scrape duration: 81ms

monitoring/kube-state-metrics/1 (1/1 up)

https://10.42.0.188:9443/metrics
    State: up
    Labels: endpoint="https-self" instance="10.42.0.188:9443" job="kube-state-metrics" namespace="monitoring" pod="kube-state-metrics-6cb6df5d4-hbsn7" service="kube-state-metrics"
    Last scrape: 3.997s ago   Scrape duration: 7.977ms

monitoring/kubelet/0 (3/3 up)

https://192.168.178.170:10250/metrics
    State: up
    Labels: endpoint="https-metrics" instance="192.168.178.170:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="main" service="kubelet"
    Last scrape: 4.218s ago   Scrape duration: 818.4ms
https://192.168.178.27:10250/metrics
    State: up
    Labels: endpoint="https-metrics" instance="192.168.178.27:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="worker2" service="kubelet"
    Last scrape: 4.031s ago   Scrape duration: 107.9ms
https://192.168.178.94:10250/metrics
    State: up
    Labels: endpoint="https-metrics" instance="192.168.178.94:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="node1" service="kubelet"
    Last scrape: 15.801s ago   Scrape duration: 112.8ms

monitoring/kubelet/1 (3/3 up)

https://192.168.178.170:10250/metrics/cadvisor
    State: up
    Labels: endpoint="https-metrics" instance="192.168.178.170:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="main" service="kubelet"
    Last scrape: 15.797s ago   Scrape duration: 664ms
https://192.168.178.27:10250/metrics/cadvisor
    State: up
    Labels: endpoint="https-metrics" instance="192.168.178.27:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="worker2" service="kubelet"
    Last scrape: 15.803s ago   Scrape duration: 118.5ms
https://192.168.178.94:10250/metrics/cadvisor
    State: up
    Labels: endpoint="https-metrics" instance="192.168.178.94:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="node1" service="kubelet"
    Last scrape: 15.809s ago   Scrape duration: 151.3ms

monitoring/node-exporter/0 (0/3 up)

https://192.168.178.170:9100/metrics
    State: down
    Labels: endpoint="https" instance="main" job="node-exporter" namespace="monitoring" pod="node-exporter-kgllz" service="node-exporter"
    Last scrape: 11.197s ago   Scrape duration: 1.071ms
    Error: Get "https://192.168.178.170:9100/metrics": dial tcp 192.168.178.170:9100: connect: connection refused
https://192.168.178.94:9100/metrics
    State: down
    Labels: endpoint="https" instance="node1" job="node-exporter" namespace="monitoring" pod="node-exporter-4xk97" service="node-exporter"
    Last scrape: 2.935s ago   Scrape duration: 1.334ms
    Error: Get "https://192.168.178.94:9100/metrics": dial tcp 192.168.178.94:9100: connect: connection refused
https://192.168.178.27:9100/metrics
    State: down
    Labels: endpoint="https" instance="worker2" job="node-exporter" namespace="monitoring" pod="node-exporter-t242n" service="node-exporter"
    Last scrape: 7.203s ago   Scrape duration: 1.435ms
    Error: Get "https://192.168.178.27:9100/metrics": dial tcp 192.168.178.27:9100: connect: connection refused

monitoring/prometheus-operator/0 (1/1 up)

https://10.42.0.166:8443/metrics
    State: up
    Labels: endpoint="https" instance="10.42.0.166:8443" job="prometheus-operator" namespace="monitoring" pod="prometheus-operator-67755f959-g8phz" service="prometheus-operator"
    Last scrape: 45.992s ago   Scrape duration: 9.243ms

monitoring/prometheus/0 (1/1 up)

http://10.42.0.186:9090/metrics
    State: up
    Labels: endpoint="web" instance="10.42.0.186:9090" job="prometheus-k8s" namespace="monitoring" pod="prometheus-k8s-0" service="prometheus-k8s"
    Last scrape: 15.777s ago   Scrape duration: 42.77ms