Targets

default/pi-stock-service-monitor/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.42.0.68:3001/metrics | up | endpoint="pi-stock" instance="10.42.0.68:3001" job="pi-stock" namespace="default" pod="pi-stock-5d8dd7f4f7-qxqr8" service="pi-stock" | 49.054s ago | 9.46ms |

kube-system/traefik-monitoring/0 (3/3 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.42.0.59:8081/metrics | up | container="traefik" endpoint="metrics" instance="10.42.0.59:8081" job="kube-system/traefik-monitoring" namespace="kube-system" pod="traefik-4vlnq" | 43.291s ago | 5.894ms |
http://10.42.1.207:8081/metrics | up | container="traefik" endpoint="metrics" instance="10.42.1.207:8081" job="kube-system/traefik-monitoring" namespace="kube-system" pod="traefik-mgcdm" | 28.766s ago | 44.55ms |
http://10.42.2.38:8081/metrics | up | container="traefik" endpoint="metrics" instance="10.42.2.38:8081" job="kube-system/traefik-monitoring" namespace="kube-system" pod="traefik-s8p27" | 55.421s ago | 9.795ms |

monitoring/alertmanager/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.42.2.36:9093/metrics | up | endpoint="web" instance="10.42.2.36:9093" job="alertmanager-main" namespace="monitoring" pod="alertmanager-main-0" service="alertmanager-main" | 21.434s ago | 8.944ms |

monitoring/arm-exporter/0 (3/3 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.42.0.56:9243/metrics | up | endpoint="https" instance="main" job="arm-exporter" namespace="monitoring" pod="arm-exporter-gjg9w" service="arm-exporter" | 28.264s ago | 6.697ms |
https://10.42.1.208:9243/metrics | up | endpoint="https" instance="node1" job="arm-exporter" namespace="monitoring" pod="arm-exporter-jzhqp" service="arm-exporter" | 28.015s ago | 6.627ms |
https://10.42.2.37:9243/metrics | up | endpoint="https" instance="worker2" job="arm-exporter" namespace="monitoring" pod="arm-exporter-7w8n2" service="arm-exporter" | 20.469s ago | 7.746ms |

monitoring/coredns/0 (2/2 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.42.0.74:9153/metrics | up | endpoint="metrics" instance="10.42.0.74:9153" job="kube-dns" namespace="kube-system" pod="coredns-7796b77cd4-mt5dq" service="kube-dns" | 13.567s ago | 11.41ms |
http://10.42.0.74:9153/metrics | up | endpoint="metrics" instance="10.42.0.74:9153" job="kube-dns" namespace="kube-system" pod="coredns-7796b77cd4-mt5dq" service="kube-dns-prometheus-discovery" | 4.006s ago | 12.58ms |

monitoring/grafana/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.42.0.80:3000/metrics | up | endpoint="http" instance="10.42.0.80:3000" job="grafana" namespace="monitoring" pod="grafana-594fc7f587-v7wlr" service="grafana" | 8.318s ago | 8.4ms |

monitoring/kube-apiserver/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.178.170:6443/metrics | up | endpoint="https" instance="192.168.178.170:6443" job="apiserver" namespace="default" service="kubernetes" | 4.741s ago | 783.9ms |

monitoring/kube-controller-manager/0 (0/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://192.168.178.170:10252/metrics | down | endpoint="http-metrics" instance="192.168.178.170:10252" job="kube-controller-manager" namespace="kube-system" service="kube-controller-manager-prometheus-discovery" | 10.585s ago | 1.164ms | Get "http://192.168.178.170:10252/metrics": dial tcp 192.168.178.170:10252: connect: connection refused
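
This is the only target that is down: Prometheus gets "connection refused" on 192.168.178.170:10252. The address and port are taken straight from the row above; the sketch below only reproduces the connectivity check from another machine and does not establish the cause (commonly the controller manager binding its metrics port to localhost only, but that is an assumption here):

    # Reproduce the connection check against the kube-controller-manager metrics
    # port (address and port taken from the failing target above).
    import socket

    ADDR, PORT = "192.168.178.170", 10252

    try:
        with socket.create_connection((ADDR, PORT), timeout=3):
            print(f"{ADDR}:{PORT} accepts connections")
    except OSError as exc:
        # Expected to print "Connection refused", matching the scrape error.
        print(f"{ADDR}:{PORT} unreachable: {exc}")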

monitoring/kube-scheduler/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://192.168.178.170:10251/metrics | up | endpoint="http-metrics" instance="192.168.178.170:10251" job="kube-scheduler" namespace="kube-system" service="kube-scheduler-prometheus-discovery" | 2.137s ago | 1.569s |

monitoring/kube-state-metrics/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.42.0.58:8443/metrics | up | instance="10.42.0.58:8443" job="kube-state-metrics" | 7.584s ago | 84.67ms |

monitoring/kube-state-metrics/1 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.42.0.58:9443/metrics | up | endpoint="https-self" instance="10.42.0.58:9443" job="kube-state-metrics" namespace="monitoring" pod="kube-state-metrics-6cb6df5d4-hbsn7" service="kube-state-metrics" | 1.753s ago | 156ms |

monitoring/kubelet/0 (3/3 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.178.170:10250/metrics | up | endpoint="https-metrics" instance="192.168.178.170:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="main" service="kubelet" | 12.423s ago | 703.5ms |
https://192.168.178.27:10250/metrics | up | endpoint="https-metrics" instance="192.168.178.27:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="worker2" service="kubelet" | 12.235s ago | 121.5ms |
https://192.168.178.94:10250/metrics | up | endpoint="https-metrics" instance="192.168.178.94:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="node1" service="kubelet" | 5.885s ago | 59.99ms |

monitoring/kubelet/1 (3/3 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.178.170:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.178.170:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="main" service="kubelet" | 3.032s ago | 721.6ms |
https://192.168.178.27:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.178.27:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="worker2" service="kubelet" | 7.408s ago | 84.52ms |
https://192.168.178.94:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.178.94:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="node1" service="kubelet" | 29.799s ago | 190.2ms |

monitoring/node-exporter/0 (3/3 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://192.168.178.170:9100/metrics | up | endpoint="https" instance="main" job="node-exporter" namespace="monitoring" pod="node-exporter-kgllz" service="node-exporter" | 4.402s ago | 714ms |
https://192.168.178.94:9100/metrics | up | endpoint="https" instance="node1" job="node-exporter" namespace="monitoring" pod="node-exporter-4xk97" service="node-exporter" | 11.157s ago | 462.8ms |
https://192.168.178.27:9100/metrics | up | endpoint="https" instance="worker2" job="node-exporter" namespace="monitoring" pod="node-exporter-t242n" service="node-exporter" | 408ms ago | 385.5ms |

monitoring/prometheus-operator/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
https://10.42.0.55:8443/metrics | up | endpoint="https" instance="10.42.0.55:8443" job="prometheus-operator" namespace="monitoring" pod="prometheus-operator-67755f959-g8phz" service="prometheus-operator" | 44.161s ago | 7.748ms |

monitoring/prometheus/0 (1/1 up)

Endpoint | State | Labels | Last Scrape | Scrape Duration | Error
http://10.42.0.54:9090/metrics | up | endpoint="web" instance="10.42.0.54:9090" job="prometheus-k8s" namespace="monitoring" pod="prometheus-k8s-0" service="prometheus-k8s" | 324ms ago | 34.57ms |