From 0c74421e7ce7a3a8a4c73b51623af1746a971ce0 Mon Sep 17 00:00:00 2001 From: Roboquat Date: Mon, 17 Feb 2025 00:18:14 +0000 Subject: [PATCH] [bot] [main] Automated dependencies update --- jsonnetfile.lock.json | 34 ++-- ...managerConfigCustomResourceDefinition.yaml | 2 +- ...0alertmanagerCustomResourceDefinition.yaml | 2 +- ...r-0podmonitorCustomResourceDefinition.yaml | 2 +- ...erator-0probeCustomResourceDefinition.yaml | 2 +- ...r-0prometheusCustomResourceDefinition.yaml | 27 ++- ...ometheusagentCustomResourceDefinition.yaml | 27 ++- ...rometheusruleCustomResourceDefinition.yaml | 2 +- ...0scrapeconfigCustomResourceDefinition.yaml | 2 +- ...ervicemonitorCustomResourceDefinition.yaml | 2 +- ...-0thanosrulerCustomResourceDefinition.yaml | 2 +- .../manifests/grafana/config.yaml | 2 +- .../grafana/dashboardDatasources.yaml | 2 +- .../grafana/dashboardDefinitions.yaml | 176 +++++++++--------- .../manifests/grafana/dashboardSources.yaml | 2 +- .../manifests/grafana/deployment.yaml | 12 +- .../manifests/grafana/prometheusRule.yaml | 2 +- .../manifests/grafana/service.yaml | 2 +- .../manifests/grafana/serviceAccount.yaml | 2 +- .../manifests/grafana/serviceMonitor.yaml | 2 +- .../kube-prometheus-rules/rules.yaml | 10 + .../.github/workflows/release.yaml | 23 +++ .../kubernetes-mixin/README.md | 23 +++ .../alerts/apps_alerts.libsonnet | 2 +- .../kubernetes-mixin/tests/tests.yaml | 59 ++++++ .../jsonnet/kube-prometheus/versions.json | 6 +- .../alertmanagerconfigs-crd.json | 2 +- .../alertmanagers-crd.json | 2 +- .../prometheus-operator/podmonitors-crd.json | 2 +- .../prometheus-operator/probes-crd.json | 2 +- .../prometheusagents-crd.json | 8 +- .../prometheus-operator/prometheuses-crd.json | 8 +- .../prometheusrules-crd.json | 2 +- .../scrapeconfigs-crd.json | 2 +- .../servicemonitors-crd.json | 2 +- .../prometheus-operator/thanosrulers-crd.json | 2 +- .../docs/node-mixin/alerts/alerts.libsonnet | 14 ++ .../prometheus-mixin/dashboards.libsonnet | 136 +++++++------- 38 files changed, 389 insertions(+), 222 deletions(-) create mode 100644 vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/release.yaml diff --git a/jsonnetfile.lock.json b/jsonnetfile.lock.json index 740fbfee..0c9b4087 100644 --- a/jsonnetfile.lock.json +++ b/jsonnetfile.lock.json @@ -18,7 +18,7 @@ "subdir": "contrib/mixin" } }, - "version": "9de211ddf876b0f6d16987b2ea72093aa8172fe6", + "version": "eb7607bd8b3665d14aa40d50435ae8c9002d620c", "sum": "XmXkOCriQIZmXwlIIFhqlJMa0e6qGWdxZD+ZDYaN0Po=" }, { @@ -28,7 +28,7 @@ "subdir": "operations/observability/mixins" } }, - "version": "fb89d41c767dd48e22316a4f4b44a0cda548a9d5", + "version": "04f590de85ceccc3312ef747f5f5982bc15dc3b7", "sum": "hVjQ0n6vsB+8/f0UJ3PKbV+DwvxgGMy6vhrwouaHIZk=", "name": "gitpod" }, @@ -89,7 +89,7 @@ "subdir": "grafana-builder" } }, - "version": "6f6821511f76cffdfaa3ccc40621c31493c12e49", + "version": "393630ca7ba9b25258c098f1fd4c81962e3ca046", "sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo=" }, { @@ -119,8 +119,8 @@ "subdir": "" } }, - "version": "96d585cafef95c462b1ae19b6aead456756de1b1", - "sum": "KRLU0CHmc0RovPaXaxALBSz5m11B4KE2jsZMprrO53s=" + "version": "4ff562d5e8145940cf355f62cf2308895c4dca81", + "sum": "kiL19fTbXOtNglsmT62kOzIf/Xpu+YwoiMPAApDXhkE=" }, { "source": { @@ -129,7 +129,7 @@ "subdir": "jsonnet/kube-state-metrics" } }, - "version": "2a95d4649b2fea55799032fb9c0b571c4ba7f776", + "version": "350a7c472e1801a2e13b9895ec8ef38876c96dd0", "sum": "3bioG7CfTfY9zeu5xU4yon6Zt3kYvNkyl492nOhQxnM=" }, { @@ -139,7 +139,7 @@ "subdir": 
"jsonnet/kube-state-metrics-mixin" } }, - "version": "2a95d4649b2fea55799032fb9c0b571c4ba7f776", + "version": "350a7c472e1801a2e13b9895ec8ef38876c96dd0", "sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c=" }, { @@ -149,8 +149,8 @@ "subdir": "jsonnet/kube-prometheus" } }, - "version": "e9e35af6cd0c3013397d056779d684a1d0185af8", - "sum": "bnG5C2tZ073wf7Lr9LZmOzOf+HM7EmH67yl4L7WPZlU=" + "version": "1eea946a1532f1e8cccfceea98d907bf3a10b1d9", + "sum": "17LhiwefVfoNDsF3DcFZw/UL4PMU7YpNNUaOdaYd1gE=" }, { "source": { @@ -159,7 +159,7 @@ "subdir": "jsonnet/mixin" } }, - "version": "b34cb97847c8e8df08fc7e7393a6831f94f52b13", + "version": "7deab71d6d5921eeaf8c79e3ae8e31efe63783a9", "sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=", "name": "prometheus-operator-mixin" }, @@ -170,8 +170,8 @@ "subdir": "jsonnet/prometheus-operator" } }, - "version": "b34cb97847c8e8df08fc7e7393a6831f94f52b13", - "sum": "SzECES1Sqax8WtD7faDXWEjFii0LLkcSkGqE5B3vlvQ=" + "version": "7deab71d6d5921eeaf8c79e3ae8e31efe63783a9", + "sum": "LctDdofQostvviE5y8vpRKWGGO1ZKO3dgJe7P9xifW0=" }, { "source": { @@ -191,8 +191,8 @@ "subdir": "docs/node-mixin" } }, - "version": "11365f97bef6cb0e6259d536a7e21c49e3f5c065", - "sum": "xYj6VYFT/eafsbleNlC+Z2VfLy1CndyYrJs9BcTmnX8=" + "version": "4d2912d49a77cebeb546d6a1d23d2166a96a88db", + "sum": "8dNyJ4vpnKVBbCFN9YLsugp1IjlZjDCwdKMjKi0KTG4=" }, { "source": { @@ -201,8 +201,8 @@ "subdir": "documentation/prometheus-mixin" } }, - "version": "585a21962267c424887fcf86513d528a41520604", - "sum": "CwaQpW66lHx+++sY2g4BgrUTEFZtlDnQzFjo0AlgfIg=", + "version": "a5ffa83be83be22e2ec9fd1d4765299d8d16119e", + "sum": "2c+wttfee9TwuQJZIkNV7Tekem74Qgc7iZ842P28rNw=", "name": "prometheus" }, { @@ -223,7 +223,7 @@ "subdir": "mixin" } }, - "version": "38f4c3c6a212d50eeef764d838de2519eee6409c", + "version": "346d18bb0f8011c63d7106de494cf3b9253161a1", "sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=", "name": "thanos-mixin" }, diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerConfigCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerConfigCustomResourceDefinition.yaml index 8adb910a..e8291b3d 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerConfigCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerConfigCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: alertmanagerconfigs.monitoring.coreos.com spec: diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerCustomResourceDefinition.yaml index 24c67f51..0c0e715c 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0alertmanagerCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: alertmanagers.monitoring.coreos.com spec: diff --git 
a/monitoring-satellite/manifests/crds/prometheusOperator-0podmonitorCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0podmonitorCustomResourceDefinition.yaml index af5f8d6c..1f8d0150 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0podmonitorCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0podmonitorCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: podmonitors.monitoring.coreos.com spec: diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0probeCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0probeCustomResourceDefinition.yaml index 7f365f26..400c7cc9 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0probeCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0probeCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: probes.monitoring.coreos.com spec: diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusCustomResourceDefinition.yaml index 8b4a1fda..26d76d35 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: prometheuses.monitoring.coreos.com spec: @@ -7037,6 +7037,21 @@ spec: description: Timeout for requests to the remote write endpoint. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string + roundRobinDNS: + description: |- + When enabled: + - The remote-write mechanism will resolve the hostname via DNS. + - It will randomly select one of the resolved IP addresses and connect to it. + + When disabled (default behavior): + - The Go standard library will handle hostname resolution. + - It will attempt connections to each resolved IP address sequentially. + + Note: The connection timeout applies to the entire resolution and connection process. + If disabled, the timeout is distributed across all connection attempts. + + It requires Prometheus >= v3.1.0. + type: boolean sendExemplars: description: |- Enables sending of exemplars over remote write. Note that @@ -8603,7 +8618,7 @@ spec: type: string shards: description: |- - Number of shards to distribute scraped targets onto. + Number of shards to distribute the scraped targets onto. `spec.replicas` multiplied by `spec.shards` is the total number of Pods being created. @@ -8613,11 +8628,11 @@ spec: Note that scaling down shards will not reshard data onto the remaining instances, it must be manually moved. 
Increasing shards will not reshard data either but it will continue to be available from the same - instances. To query globally, use Thanos sidecar and Thanos querier or - remote write data to a central location. - Alerting and recording rules + instances. To query globally, use either + * Thanos sidecar + querier for query federation and Thanos Ruler for rules. + * Remote-write to send metrics to a central location. - By default, the sharding is performed on: + By default, the sharding of targets is performed on: * The `__address__` target's metadata label for PodMonitor, ServiceMonitor and ScrapeConfig resources. * The `__param_target__` label for Probe resources. diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusagentCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusagentCustomResourceDefinition.yaml index 4de2464e..e20799e2 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusagentCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusagentCustomResourceDefinition.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: prometheusagents.monitoring.coreos.com spec: @@ -5568,6 +5568,21 @@ spec: description: Timeout for requests to the remote write endpoint. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string + roundRobinDNS: + description: |- + When enabled: + - The remote-write mechanism will resolve the hostname via DNS. + - It will randomly select one of the resolved IP addresses and connect to it. + + When disabled (default behavior): + - The Go standard library will handle hostname resolution. + - It will attempt connections to each resolved IP address sequentially. + + Note: The connection timeout applies to the entire resolution and connection process. + If disabled, the timeout is distributed across all connection attempts. + + It requires Prometheus >= v3.1.0. + type: boolean sendExemplars: description: |- Enables sending of exemplars over remote write. Note that @@ -6992,7 +7007,7 @@ spec: type: string shards: description: |- - Number of shards to distribute scraped targets onto. + Number of shards to distribute the scraped targets onto. `spec.replicas` multiplied by `spec.shards` is the total number of Pods being created. @@ -7002,11 +7017,11 @@ spec: Note that scaling down shards will not reshard data onto the remaining instances, it must be manually moved. Increasing shards will not reshard data either but it will continue to be available from the same - instances. To query globally, use Thanos sidecar and Thanos querier or - remote write data to a central location. - Alerting and recording rules + instances. To query globally, use either + * Thanos sidecar + querier for query federation and Thanos Ruler for rules. + * Remote-write to send metrics to a central location. - By default, the sharding is performed on: + By default, the sharding of targets is performed on: * The `__address__` target's metadata label for PodMonitor, ServiceMonitor and ScrapeConfig resources. * The `__param_target__` label for Probe resources. 
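Both the Prometheus and PrometheusAgent CRDs gain the `roundRobinDNS` remote-write field and the clarified `shards` wording above. A minimal sketch of how the new field would be set on a Prometheus resource — the resource name and remote-write URL are placeholders, not part of this patch:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s                    # placeholder name
  namespace: monitoring-satellite
spec:
  version: v3.1.0              # roundRobinDNS requires Prometheus >= v3.1.0
  replicas: 2
  shards: 2                    # replicas * shards = 4 Pods in total
  remoteWrite:
    - url: https://metrics.example.com/api/v1/write  # placeholder endpoint
      roundRobinDNS: true      # connect to a randomly chosen resolved IP instead of trying each sequentially
```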
diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusruleCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusruleCustomResourceDefinition.yaml index ccea7ad4..9f50853a 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusruleCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0prometheusruleCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: prometheusrules.monitoring.coreos.com spec: diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0scrapeconfigCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0scrapeconfigCustomResourceDefinition.yaml index 3a21801f..d0a751c5 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0scrapeconfigCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0scrapeconfigCustomResourceDefinition.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: scrapeconfigs.monitoring.coreos.com spec: diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0servicemonitorCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0servicemonitorCustomResourceDefinition.yaml index e40faffa..013a6517 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0servicemonitorCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0servicemonitorCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: servicemonitors.monitoring.coreos.com spec: diff --git a/monitoring-satellite/manifests/crds/prometheusOperator-0thanosrulerCustomResourceDefinition.yaml b/monitoring-satellite/manifests/crds/prometheusOperator-0thanosrulerCustomResourceDefinition.yaml index aa5a92ca..8ac382e2 100644 --- a/monitoring-satellite/manifests/crds/prometheusOperator-0thanosrulerCustomResourceDefinition.yaml +++ b/monitoring-satellite/manifests/crds/prometheusOperator-0thanosrulerCustomResourceDefinition.yaml @@ -3,7 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: argocd.argoproj.io/sync-options: Replace=true - controller-gen.kubebuilder.io/version: v0.17.1 + controller-gen.kubebuilder.io/version: v0.17.2 operator.prometheus.io/version: 0.80.0 name: thanosrulers.monitoring.coreos.com spec: diff --git a/monitoring-satellite/manifests/grafana/config.yaml b/monitoring-satellite/manifests/grafana/config.yaml index 28431fb7..ff209cf4 100644 --- a/monitoring-satellite/manifests/grafana/config.yaml +++ b/monitoring-satellite/manifests/grafana/config.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-config namespace: monitoring-satellite stringData: 
diff --git a/monitoring-satellite/manifests/grafana/dashboardDatasources.yaml b/monitoring-satellite/manifests/grafana/dashboardDatasources.yaml index 97cf324c..4c93e218 100644 --- a/monitoring-satellite/manifests/grafana/dashboardDatasources.yaml +++ b/monitoring-satellite/manifests/grafana/dashboardDatasources.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-datasources namespace: monitoring-satellite stringData: diff --git a/monitoring-satellite/manifests/grafana/dashboardDefinitions.yaml b/monitoring-satellite/manifests/grafana/dashboardDefinitions.yaml index 09c82afb..97c9c544 100644 --- a/monitoring-satellite/manifests/grafana/dashboardDefinitions.yaml +++ b/monitoring-satellite/manifests/grafana/dashboardDefinitions.yaml @@ -1915,7 +1915,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-admin-node namespace: monitoring-satellite - apiVersion: v1 @@ -3159,7 +3159,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-admin-workspace namespace: monitoring-satellite - apiVersion: v1 @@ -3667,7 +3667,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-applications namespace: monitoring-satellite - apiVersion: v1 @@ -4761,7 +4761,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-cluster-autoscaler-k3s namespace: monitoring-satellite - apiVersion: v1 @@ -5410,7 +5410,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-connect-server namespace: monitoring-satellite - apiVersion: v1 @@ -6307,7 +6307,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-grpc-client namespace: monitoring-satellite - apiVersion: v1 @@ -7187,7 +7187,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-grpc-server namespace: monitoring-satellite - apiVersion: v1 @@ -11325,7 +11325,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-node-resource-metrics namespace: monitoring-satellite - apiVersion: v1 @@ -12400,7 +12400,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - 
app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-nodes-overview namespace: monitoring-satellite - apiVersion: v1 @@ -14288,7 +14288,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-overview namespace: monitoring-satellite - apiVersion: v1 @@ -14741,7 +14741,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-slo-workspace-startuptime namespace: monitoring-satellite - apiVersion: v1 @@ -16356,7 +16356,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-redis namespace: monitoring-satellite - apiVersion: v1 @@ -21369,7 +21369,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-argocd namespace: monitoring-satellite - apiVersion: v1 @@ -21381,7 +21381,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-cardinality-management-overview namespace: monitoring-satellite - apiVersion: v1 @@ -21845,7 +21845,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-sh-example-overview namespace: monitoring-satellite - apiVersion: v1 @@ -23297,7 +23297,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-observability namespace: monitoring-satellite - apiVersion: v1 @@ -25421,7 +25421,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-blobserve namespace: monitoring-satellite - apiVersion: v1 @@ -25647,7 +25647,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-browser-overview namespace: monitoring-satellite - apiVersion: v1 @@ -27057,7 +27057,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-code-browser namespace: monitoring-satellite - apiVersion: v1 @@ -28298,7 +28298,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-ide-service namespace: 
monitoring-satellite - apiVersion: v1 @@ -28988,7 +28988,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-ide-startup-time namespace: monitoring-satellite - apiVersion: v1 @@ -29267,7 +29267,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-jb namespace: monitoring-satellite - apiVersion: v1 @@ -29870,7 +29870,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-local-ssh namespace: monitoring-satellite - apiVersion: v1 @@ -32221,7 +32221,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-openvsx-mirror namespace: monitoring-satellite - apiVersion: v1 @@ -34337,7 +34337,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-openvsx-proxy namespace: monitoring-satellite - apiVersion: v1 @@ -34815,7 +34815,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-ssh-gateway namespace: monitoring-satellite - apiVersion: v1 @@ -35446,7 +35446,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-supervisor namespace: monitoring-satellite - apiVersion: v1 @@ -35791,7 +35791,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-alertmanager-overview namespace: monitoring-satellite - apiVersion: v1 @@ -36673,7 +36673,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-apiserver namespace: monitoring-satellite - apiVersion: v1 @@ -38010,7 +38010,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-cert-manager namespace: monitoring-satellite - apiVersion: v1 @@ -38824,7 +38824,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-cluster-total namespace: monitoring-satellite - apiVersion: v1 @@ -39428,7 +39428,7 @@ items: app.kubernetes.io/component: grafana 
app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-controller-manager namespace: monitoring-satellite - apiVersion: v1 @@ -41015,7 +41015,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-cluster namespace: monitoring-satellite - apiVersion: v1 @@ -41656,7 +41656,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-multicluster namespace: monitoring-satellite - apiVersion: v1 @@ -43174,7 +43174,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-namespace namespace: monitoring-satellite - apiVersion: v1 @@ -43855,7 +43855,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-node namespace: monitoring-satellite - apiVersion: v1 @@ -45239,7 +45239,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-pod namespace: monitoring-satellite - apiVersion: v1 @@ -45926,7 +45926,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-windows-cluster namespace: monitoring-satellite - apiVersion: v1 @@ -46380,7 +46380,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-windows-namespace namespace: monitoring-satellite - apiVersion: v1 @@ -46889,7 +46889,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-windows-pod namespace: monitoring-satellite - apiVersion: v1 @@ -47955,7 +47955,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-workload namespace: monitoring-satellite - apiVersion: v1 @@ -49218,7 +49218,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-resources-workloads-namespace namespace: monitoring-satellite - apiVersion: v1 @@ -49634,7 +49634,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + 
app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-windows-cluster-rsrc-use namespace: monitoring-satellite - apiVersion: v1 @@ -50261,7 +50261,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-k8s-windows-node-rsrc-use namespace: monitoring-satellite - apiVersion: v1 @@ -51514,7 +51514,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-kubelet namespace: monitoring-satellite - apiVersion: v1 @@ -52152,7 +52152,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-namespace-by-pod namespace: monitoring-satellite - apiVersion: v1 @@ -52948,7 +52948,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-namespace-by-workload namespace: monitoring-satellite - apiVersion: v1 @@ -53549,7 +53549,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-node-cluster-rsrc-use namespace: monitoring-satellite - apiVersion: v1 @@ -54153,7 +54153,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-node-rsrc-use namespace: monitoring-satellite - apiVersion: v1 @@ -54876,7 +54876,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-nodes-aix namespace: monitoring-satellite - apiVersion: v1 @@ -55623,7 +55623,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-nodes-darwin namespace: monitoring-satellite - apiVersion: v1 @@ -56362,7 +56362,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-nodes namespace: monitoring-satellite - apiVersion: v1 @@ -56684,7 +56684,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-persistentvolumesusage namespace: monitoring-satellite - apiVersion: v1 @@ -57176,7 +57176,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-pod-total namespace: monitoring-satellite - apiVersion: v1 @@ -57235,7 +57235,7 @@ items: "expr": "(\n 
prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~\"$cluster\", instance=~\"$instance\"}\n-\n ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~\"$cluster\", instance=~\"$instance\", url=~\"$url\"} != 0)\n)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{cluster}}:{{instance}} {{remote_name}}:{{url}}" } ], "title": "Highest Timestamp In vs. Highest Timestamp Sent", @@ -57475,7 +57475,7 @@ items: "expr": "prometheus_remote_storage_shards_min{cluster=~\"$cluster\", instance=~\"$instance\", url=~\"$url\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{cluster}}:{{instance}} {{remote_name}}:{{url}}" } ], "title": "Min Shards", @@ -57977,7 +57977,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-prometheus-remote-write namespace: monitoring-satellite - apiVersion: v1 @@ -58816,7 +58816,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-prometheus namespace: monitoring-satellite - apiVersion: v1 @@ -59471,7 +59471,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-proxy namespace: monitoring-satellite - apiVersion: v1 @@ -60072,7 +60072,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-scheduler namespace: monitoring-satellite - apiVersion: v1 @@ -60654,7 +60654,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-workload-total namespace: monitoring-satellite - apiVersion: v1 @@ -62061,7 +62061,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-dashboard namespace: monitoring-satellite - apiVersion: v1 @@ -63466,7 +63466,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-db namespace: monitoring-satellite - apiVersion: v1 @@ -65328,7 +65328,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-proxy namespace: monitoring-satellite - apiVersion: v1 @@ -65935,7 +65935,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1
name: grafana-dashboard-gitpod-component-server-garbage-collector namespace: monitoring-satellite - apiVersion: v1 @@ -68458,7 +68458,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-server namespace: monitoring-satellite - apiVersion: v1 @@ -69326,7 +69326,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-usage namespace: monitoring-satellite - apiVersion: v1 @@ -71835,7 +71835,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-ws-manager-bridge namespace: monitoring-satellite - apiVersion: v1 @@ -72830,7 +72830,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-components-spicedb namespace: monitoring-satellite - apiVersion: v1 @@ -73753,7 +73753,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-meta-overview namespace: monitoring-satellite - apiVersion: v1 @@ -74298,7 +74298,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-meta-services namespace: monitoring-satellite - apiVersion: v1 @@ -75859,7 +75859,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-slo-login namespace: monitoring-satellite - apiVersion: v1 @@ -78729,7 +78729,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-agent-smith namespace: monitoring-satellite - apiVersion: v1 @@ -81297,7 +81297,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-content-service namespace: monitoring-satellite - apiVersion: v1 @@ -83922,7 +83922,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-image-builder namespace: monitoring-satellite - apiVersion: v1 @@ -86943,7 +86943,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-registry-facade namespace: 
monitoring-satellite - apiVersion: v1 @@ -90382,7 +90382,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-ws-daemon namespace: monitoring-satellite - apiVersion: v1 @@ -94737,7 +94737,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-ws-manager-mk2 namespace: monitoring-satellite - apiVersion: v1 @@ -96145,7 +96145,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-component-ws-proxy namespace: monitoring-satellite - apiVersion: v1 @@ -96582,7 +96582,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-network-limiting namespace: monitoring-satellite - apiVersion: v1 @@ -96885,7 +96885,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-node-ephemeral-storage namespace: monitoring-satellite - apiVersion: v1 @@ -97647,7 +97647,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-node-problem-detector namespace: monitoring-satellite - apiVersion: v1 @@ -97910,7 +97910,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-node-swap namespace: monitoring-satellite - apiVersion: v1 @@ -98586,7 +98586,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-psi namespace: monitoring-satellite - apiVersion: v1 @@ -100701,7 +100701,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-workspace-coredns namespace: monitoring-satellite - apiVersion: v1 @@ -101175,7 +101175,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-workspace-psi namespace: monitoring-satellite - apiVersion: v1 @@ -101784,7 +101784,7 @@ items: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-workspace-registry-facade-blobsource namespace: monitoring-satellite - apiVersion: v1 @@ -102586,7 +102586,7 @@ items: 
app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboard-gitpod-workspace-success-criteria namespace: monitoring-satellite kind: ConfigMapList diff --git a/monitoring-satellite/manifests/grafana/dashboardSources.yaml b/monitoring-satellite/manifests/grafana/dashboardSources.yaml index 02300dd2..8c78a9aa 100644 --- a/monitoring-satellite/manifests/grafana/dashboardSources.yaml +++ b/monitoring-satellite/manifests/grafana/dashboardSources.yaml @@ -72,6 +72,6 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana-dashboards namespace: monitoring-satellite diff --git a/monitoring-satellite/manifests/grafana/deployment.yaml b/monitoring-satellite/manifests/grafana/deployment.yaml index efab4805..cc459dce 100644 --- a/monitoring-satellite/manifests/grafana/deployment.yaml +++ b/monitoring-satellite/manifests/grafana/deployment.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana namespace: monitoring-satellite spec: @@ -18,14 +18,14 @@ spec: template: metadata: annotations: - checksum/grafana-config: 16cee7721ec95f18e7320c9f3ee11cc2 - checksum/grafana-dashboardproviders: f6cd8baff875d71fd6639752fa6f57fb - checksum/grafana-datasources: 7d70c5b7a7f67a4f0bb048abd4986f4e + checksum/grafana-config: 7c4027aded40f17977a93b04bdd5096f + checksum/grafana-dashboardproviders: 374de75110463475c05f1ec4c03983e0 + checksum/grafana-datasources: 4f3b2280f1021a244ba59b92ee0cadc8 labels: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 spec: automountServiceAccountToken: false containers: @@ -36,7 +36,7 @@ spec: value: Admin - name: GF_AUTH_DISABLE_LOGIN_FORM value: "true" - image: grafana/grafana:11.5.0 + image: grafana/grafana:11.5.1 name: grafana ports: - containerPort: 3000 diff --git a/monitoring-satellite/manifests/grafana/prometheusRule.yaml b/monitoring-satellite/manifests/grafana/prometheusRule.yaml index 73b4c148..d80be833 100644 --- a/monitoring-satellite/manifests/grafana/prometheusRule.yaml +++ b/monitoring-satellite/manifests/grafana/prometheusRule.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 prometheus: k8s role: alert-rules name: grafana-rules diff --git a/monitoring-satellite/manifests/grafana/service.yaml b/monitoring-satellite/manifests/grafana/service.yaml index 49061318..9109d8f2 100644 --- a/monitoring-satellite/manifests/grafana/service.yaml +++ b/monitoring-satellite/manifests/grafana/service.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana namespace: monitoring-satellite spec: diff --git a/monitoring-satellite/manifests/grafana/serviceAccount.yaml b/monitoring-satellite/manifests/grafana/serviceAccount.yaml index a627fdf7..7371e583 100644 
--- a/monitoring-satellite/manifests/grafana/serviceAccount.yaml +++ b/monitoring-satellite/manifests/grafana/serviceAccount.yaml @@ -6,6 +6,6 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana namespace: monitoring-satellite diff --git a/monitoring-satellite/manifests/grafana/serviceMonitor.yaml b/monitoring-satellite/manifests/grafana/serviceMonitor.yaml index 3004e90f..8d91884d 100644 --- a/monitoring-satellite/manifests/grafana/serviceMonitor.yaml +++ b/monitoring-satellite/manifests/grafana/serviceMonitor.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana app.kubernetes.io/part-of: kube-prometheus - app.kubernetes.io/version: 11.5.0 + app.kubernetes.io/version: 11.5.1 name: grafana namespace: monitoring-satellite spec: diff --git a/monitoring-satellite/manifests/kube-prometheus-rules/rules.yaml b/monitoring-satellite/manifests/kube-prometheus-rules/rules.yaml index a741a4cd..5dd46f50 100644 --- a/monitoring-satellite/manifests/kube-prometheus-rules/rules.yaml +++ b/monitoring-satellite/manifests/kube-prometheus-rules/rules.yaml @@ -1003,6 +1003,16 @@ spec: for: 5m labels: severity: warning + - alert: NodeSystemdServiceCrashlooping + annotations: + description: Systemd service {{ $labels.name }} has been restarted too many times at {{ $labels.instance }} for the last 15 minutes. Please check if the service is crash looping. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodesystemdservicecrashlooping + summary: Systemd service keeps restarting, possibly crash looping. + expr: | + increase(node_systemd_service_restart_total{job="node-exporter"}[5m]) > 2 + for: 15m + labels: + severity: warning - alert: NodeBondingDegraded annotations: description: Bonding interface {{ $labels.master }} on {{ $labels.instance }} is in degraded state due to one or more slave failures. diff --git a/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/release.yaml b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/release.yaml new file mode 100644 index 00000000..e8173346 --- /dev/null +++ b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/release.yaml @@ -0,0 +1,23 @@ +name: Release + +on: + push: + tags: + - "version-*" # Trigger the workflow on push events to version-* tags + +permissions: + contents: write + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - name: Create release on kubernetes-mixin + uses: softprops/action-gh-release@v2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref_name }} + repository: kubernetes-monitoring/kubernetes-mixin + generate_release_notes: true diff --git a/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/README.md b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/README.md index 07d1d1b6..8e1d88f0 100644 --- a/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/README.md +++ b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/README.md @@ -8,6 +8,8 @@ A set of Grafana dashboards and Prometheus alerts for Kubernetes. ## Releases +> Note: Releases up until `release-0.12` are maintained in their own branches. Changelogs are included in releases starting from [version-0.13.0](https://github.com/kubernetes-monitoring/kubernetes-mixin/releases/tag/version-0.13.0).
+ | Release branch | Kubernetes Compatibility | Prometheus Compatibility | Kube-state-metrics Compatibility | |----------------|--------------------------|--------------------------|----------------------------------| | release-0.1 | v1.13 and before | | | @@ -33,6 +35,27 @@ Warning: This compatibility matrix was initially created based on experience, we Warning: By default the expressions will generate *grafana 7.2+* compatible rules using the *$__rate_interval* variable for rate functions. If you need backward compatible rules please set *grafana72: false* in your *_config* +### Release steps + +Maintainers can trigger the [release workflow](.github/workflows/release.yaml) by pushing a git tag that matches the pattern: `version-*`. + +1. Check out the `master` branch and pull the latest changes. + + ```bash + git checkout master && git pull + ``` + +2. Create a tag for the version following semantic versioning and push it to trigger the release. + + ```bash + # replace MAJOR.MINOR.PATCH with e.g. 1.2.3 + tag=version-MAJOR.MINOR.PATCH; git tag $tag && git push origin $tag + ``` + +#### Decisions on backfilling releases + +We wanted to backfill changelogs for `release-0.1` through `release-0.12`, but a GitHub Action introduced in a newer commit cannot be used to trigger releases that generate changelogs for older commits. See #489 for the full discussion. + ## How to use This mixin is designed to be vendored into the repo with your infrastructure config. To do this, use [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler): diff --git a/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/apps_alerts.libsonnet b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/apps_alerts.libsonnet index 675f5aee..f575f874 100644 --- a/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/apps_alerts.libsonnet +++ b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/apps_alerts.libsonnet @@ -121,7 +121,7 @@ local utils = import '../lib/utils.libsonnet'; ( kube_statefulset_status_replicas_ready{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} != - kube_statefulset_status_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} + kube_statefulset_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} ) and ( changes(kube_statefulset_status_replicas_updated{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}[10m]) == diff --git a/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/tests/tests.yaml b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/tests/tests.yaml index 49a81c73..c8d6147d 100644 --- a/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/tests/tests.yaml +++ b/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/tests/tests.yaml @@ -1323,3 +1323,62 @@ tests: description: 'Cluster has overcommitted memory resource requests for Namespaces.' runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememoryquotaovercommit" summary: "Cluster has overcommitted memory resource requests."
+ +# Verify KubeStatefulSetReplicasMismatch fires when no replicas could be created +- interval: 1m + name: StatefulSet replicas not created + input_series: + - series: 'kube_statefulset_replicas{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '3x15' + - series: 'kube_statefulset_status_replicas{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '0x15' + - series: 'kube_statefulset_status_replicas_ready{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '0x15' + - series: 'kube_statefulset_status_replicas_updated{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '0x15' + alert_rule_test: + - eval_time: 14m + alertname: KubeStatefulSetReplicasMismatch + - eval_time: 15m + alertname: KubeStatefulSetReplicasMismatch + exp_alerts: + - exp_labels: + severity: "warning" + job: "kube-state-metrics" + cluster: "kubernetes" + namespace: "test" + statefulset: "sts" + exp_annotations: + description: "StatefulSet test/sts has not matched the expected number of replicas for longer than 15 minutes." + runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch" + summary: "StatefulSet has not matched the expected number of replicas." + +# Verify KubeStatefulSetReplicasMismatch fires when replicas could be created but are not ready +- interval: 1m + name: StatefulSet replicas created but not ready + input_series: + - series: 'kube_statefulset_replicas{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '3x15' + - series: 'kube_statefulset_status_replicas{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '3x15' + - series: 'kube_statefulset_status_replicas_ready{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '0x15' + - series: 'kube_statefulset_status_replicas_updated{cluster="kubernetes",namespace="test",job="kube-state-metrics",statefulset="sts"}' + values: '0x15' + alert_rule_test: + - eval_time: 14m + alertname: KubeStatefulSetReplicasMismatch + - eval_time: 15m + alertname: KubeStatefulSetReplicasMismatch + exp_alerts: + - exp_labels: + severity: "warning" + job: "kube-state-metrics" + cluster: "kubernetes" + namespace: "test" + statefulset: "sts" + exp_annotations: + description: "StatefulSet test/sts has not matched the expected number of replicas for longer than 15 minutes." + runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch" + summary: "StatefulSet has not matched the expected number of replicas."
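The two test cases above cover both failure modes of the revised KubeStatefulSetReplicasMismatch expression: replicas never created (status stays at 0, which the old comparison against `kube_statefulset_status_replicas` missed) and replicas created but never ready. A rough sketch of running such promtool unit tests locally — assuming the test file's `rule_files` entry points at the rendered alert rules, and with illustrative paths:

```bash
# Render the mixin's alerts, then run the promtool unit tests against them.
jsonnet -J vendor -e '(import "mixin.libsonnet").prometheusAlerts' > prometheus_alerts.yaml
promtool test rules tests/tests.yaml
```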
+ diff --git a/vendor/github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus/versions.json b/vendor/github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus/versions.json index 1ce2476e..70fc17b7 100644 --- a/vendor/github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus/versions.json +++ b/vendor/github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus/versions.json @@ -1,12 +1,12 @@ { "alertmanager": "0.28.0", "blackboxExporter": "0.25.0", - "grafana": "11.5.0", - "kubeStateMetrics": "2.14.0", + "grafana": "11.5.1", + "kubeStateMetrics": "2.15.0", "nodeExporter": "1.8.2", "prometheus": "3.1.0", "prometheusAdapter": "0.12.0", - "prometheusOperator": "0.79.2", + "prometheusOperator": "0.80.0", "kubeRbacProxy": "0.18.2", "configmapReload": "0.14.0", "pyrra": "0.6.4" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagerconfigs-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagerconfigs-crd.json index 671d0b9d..e6ddd0a0 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagerconfigs-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagerconfigs-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "alertmanagerconfigs.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagers-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagers-crd.json index b83fb007..92baddf6 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagers-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/alertmanagers-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "alertmanagers.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/podmonitors-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/podmonitors-crd.json index 6bde6dac..f45491ef 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/podmonitors-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/podmonitors-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "podmonitors.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/probes-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/probes-crd.json index 234c198c..79ec19e6 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/probes-crd.json +++ 
b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/probes-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "probes.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusagents-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusagents-crd.json index d98afb35..2bffff7c 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusagents-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusagents-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "prometheusagents.monitoring.coreos.com" @@ -4961,6 +4961,10 @@ "pattern": "^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$", "type": "string" }, + "roundRobinDNS": { + "description": "When enabled:\n - The remote-write mechanism will resolve the hostname via DNS.\n - It will randomly select one of the resolved IP addresses and connect to it.\n\nWhen disabled (default behavior):\n - The Go standard library will handle hostname resolution.\n - It will attempt connections to each resolved IP address sequentially.\n\nNote: The connection timeout applies to the entire resolution and connection process.\n If disabled, the timeout is distributed across all connection attempts.\n\nIt requires Prometheus >= v3.1.0.", + "type": "boolean" + }, "sendExemplars": { "description": "Enables sending of exemplars over remote write. Note that\nexemplar-storage itself must be enabled using the `spec.enableFeatures`\noption for exemplars to be scraped in the first place.\n\nIt requires Prometheus >= v2.27.0.", "type": "boolean" @@ -6173,7 +6177,7 @@ "type": "string" }, "shards": { - "description": "Number of shards to distribute scraped targets onto.\n\n`spec.replicas` multiplied by `spec.shards` is the total number of Pods\nbeing created.\n\nWhen not defined, the operator assumes only one shard.\n\nNote that scaling down shards will not reshard data onto the remaining\ninstances, it must be manually moved. Increasing shards will not reshard\ndata either but it will continue to be available from the same\ninstances. 
To query globally, use Thanos sidecar and Thanos querier or\nremote write data to a central location.\nAlerting and recording rules\n\nBy default, the sharding is performed on:\n* The `__address__` target's metadata label for PodMonitor,\nServiceMonitor and ScrapeConfig resources.\n* The `__param_target__` label for Probe resources.\n\nUsers can define their own sharding implementation by setting the\n`__tmp_hash` label during the target discovery with relabeling\nconfiguration (either in the monitoring resources or via scrape class).", + "description": "Number of shards to distribute the scraped targets onto.\n\n`spec.replicas` multiplied by `spec.shards` is the total number of Pods\nbeing created.\n\nWhen not defined, the operator assumes only one shard.\n\nNote that scaling down shards will not reshard data onto the remaining\ninstances, it must be manually moved. Increasing shards will not reshard\ndata either but it will continue to be available from the same\ninstances. To query globally, use either\n* Thanos sidecar + querier for query federation and Thanos Ruler for rules.\n* Remote-write to send metrics to a central location.\n\nBy default, the sharding of targets is performed on:\n* The `__address__` target's metadata label for PodMonitor,\nServiceMonitor and ScrapeConfig resources.\n* The `__param_target__` label for Probe resources.\n\nUsers can define their own sharding implementation by setting the\n`__tmp_hash` label during the target discovery with relabeling\nconfiguration (either in the monitoring resources or via scrape class).", "format": "int32", "type": "integer" }, diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheuses-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheuses-crd.json index f77e991e..aba6db81 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheuses-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheuses-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "prometheuses.monitoring.coreos.com" @@ -6340,6 +6340,10 @@ "pattern": "^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$", "type": "string" }, + "roundRobinDNS": { + "description": "When enabled:\n - The remote-write mechanism will resolve the hostname via DNS.\n - It will randomly select one of the resolved IP addresses and connect to it.\n\nWhen disabled (default behavior):\n - The Go standard library will handle hostname resolution.\n - It will attempt connections to each resolved IP address sequentially.\n\nNote: The connection timeout applies to the entire resolution and connection process.\n If disabled, the timeout is distributed across all connection attempts.\n\nIt requires Prometheus >= v3.1.0.", + "type": "boolean" + }, "sendExemplars": { "description": "Enables sending of exemplars over remote write. 
Note that\nexemplar-storage itself must be enabled using the `spec.enableFeatures`\noption for exemplars to be scraped in the first place.\n\nIt requires Prometheus >= v2.27.0.", "type": "boolean" @@ -7685,7 +7689,7 @@ "type": "string" }, "shards": { - "description": "Number of shards to distribute scraped targets onto.\n\n`spec.replicas` multiplied by `spec.shards` is the total number of Pods\nbeing created.\n\nWhen not defined, the operator assumes only one shard.\n\nNote that scaling down shards will not reshard data onto the remaining\ninstances, it must be manually moved. Increasing shards will not reshard\ndata either but it will continue to be available from the same\ninstances. To query globally, use Thanos sidecar and Thanos querier or\nremote write data to a central location.\nAlerting and recording rules\n\nBy default, the sharding is performed on:\n* The `__address__` target's metadata label for PodMonitor,\nServiceMonitor and ScrapeConfig resources.\n* The `__param_target__` label for Probe resources.\n\nUsers can define their own sharding implementation by setting the\n`__tmp_hash` label during the target discovery with relabeling\nconfiguration (either in the monitoring resources or via scrape class).", + "description": "Number of shards to distribute the scraped targets onto.\n\n`spec.replicas` multiplied by `spec.shards` is the total number of Pods\nbeing created.\n\nWhen not defined, the operator assumes only one shard.\n\nNote that scaling down shards will not reshard data onto the remaining\ninstances, it must be manually moved. Increasing shards will not reshard\ndata either but it will continue to be available from the same\ninstances. To query globally, use either\n* Thanos sidecar + querier for query federation and Thanos Ruler for rules.\n* Remote-write to send metrics to a central location.\n\nBy default, the sharding of targets is performed on:\n* The `__address__` target's metadata label for PodMonitor,\nServiceMonitor and ScrapeConfig resources.\n* The `__param_target__` label for Probe resources.\n\nUsers can define their own sharding implementation by setting the\n`__tmp_hash` label during the target discovery with relabeling\nconfiguration (either in the monitoring resources or via scrape class).", "format": "int32", "type": "integer" }, diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusrules-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusrules-crd.json index 5682f2ee..d13b989f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusrules-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheusrules-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "prometheusrules.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/scrapeconfigs-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/scrapeconfigs-crd.json index 6583f670..a8262dd1 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/scrapeconfigs-crd.json +++ 
b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/scrapeconfigs-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "scrapeconfigs.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/servicemonitors-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/servicemonitors-crd.json index 7a3b033b..b2e4eae2 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/servicemonitors-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/servicemonitors-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "servicemonitors.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/thanosrulers-crd.json b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/thanosrulers-crd.json index 61f150e2..c545ceae 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/thanosrulers-crd.json +++ b/vendor/github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/thanosrulers-crd.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.17.1", + "controller-gen.kubebuilder.io/version": "v0.17.2", "operator.prometheus.io/version": "0.80.0" }, "name": "thanosrulers.monitoring.coreos.com" diff --git a/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts/alerts.libsonnet b/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts/alerts.libsonnet index dc93c9a8..61d9dd2d 100644 --- a/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts/alerts.libsonnet +++ b/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts/alerts.libsonnet @@ -407,6 +407,20 @@ description: 'Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }}', }, }, + { + alert: 'NodeSystemdServiceCrashlooping', + expr: ||| + increase(node_systemd_service_restart_total{%(nodeExporterSelector)s}[5m]) > 2 + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Systemd service keeps restarting, possibly crash looping.', + description: 'Systemd service {{ $labels.name }} has been restarted too many times at {{ $labels.instance }} for the last 15 minutes.
Please check if service is crash looping.', + }, + }, { alert: 'NodeBondingDegraded', expr: ||| diff --git a/vendor/github.com/prometheus/prometheus/documentation/prometheus-mixin/dashboards.libsonnet b/vendor/github.com/prometheus/prometheus/documentation/prometheus-mixin/dashboards.libsonnet index 22b8c92e..50cbf72d 100644 --- a/vendor/github.com/prometheus/prometheus/documentation/prometheus-mixin/dashboards.libsonnet +++ b/vendor/github.com/prometheus/prometheus/documentation/prometheus-mixin/dashboards.libsonnet @@ -56,7 +56,7 @@ local row = panel.row; + variable.query.selectionOptions.withIncludeAll(true, '.+') + variable.query.selectionOptions.withMulti(true) + if showMultiCluster then - variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{cluster=~"$cluster"}') + variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config) else variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(prometheusSelector)s}' % $._config) ; @@ -70,7 +70,7 @@ local row = panel.row; + variable.query.selectionOptions.withIncludeAll(true, '.+') + variable.query.selectionOptions.withMulti(true) + if showMultiCluster then - variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster", job=~"$job"}') + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job"}' % $._config) else variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{job=~"$job"}') ; @@ -121,14 +121,14 @@ local row = panel.row; panel.table.queryOptions.withTargets([ prometheus.new( '$datasource', - 'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})' + 'count by (cluster, job, instance, version) (prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config ) + prometheus.withFormat('table') + prometheus.withInstant(true) + prometheus.withLegendFormat(''), prometheus.new( '$datasource', - 'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})' + 'max by (cluster, job, instance) (time() - process_start_time_seconds{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config ) + prometheus.withFormat('table') + prometheus.withInstant(true) @@ -163,10 +163,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3' + 'sum(rate(prometheus_target_sync_length_seconds_sum{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (%(clusterLabel)s, job, scrape_job, instance) * 1e3' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}:{{scrape_job}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -190,10 +190,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})' + 'sum by (%(clusterLabel)s, job, instance) 
(prometheus_sd_discovered_targets{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"})' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -216,10 +216,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' + 'rate(prometheus_target_interval_length_seconds_sum{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}} {{interval}} configured'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}} {{interval}} configured' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -243,34 +243,34 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('exceeded body size limit: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('exceeded body size limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('exceeded sample limit: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('exceeded sample limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('duplicate timestamp: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('duplicate timestamp: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) 
(rate(prometheus_target_scrapes_sample_out_of_bounds_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('out of bounds: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('out of bounds: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('out of order: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('out of order: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -318,10 +318,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' + 'rate(prometheus_tsdb_head_samples_appended_total{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -345,10 +345,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + 'prometheus_tsdb_head_series{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head series'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head series' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -372,10 +372,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + 'prometheus_tsdb_head_chunks{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head chunks'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head chunks' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -399,10 +399,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' + 'rate(prometheus_engine_query_duration_seconds_count{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -426,7 +426,7 @@ local row = panel.row; 
panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' + 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' % $._config ) + prometheus.withFormat('time_series') + prometheus.withLegendFormat('{{slice}}'), @@ -514,7 +514,7 @@ local row = panel.row; + variable.query.withDatasourceFromVariable(datasourceVariable) + variable.query.refresh.onTime() + variable.query.selectionOptions.withIncludeAll(true) - + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster"}') + + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config) ; local urlVariable = @@ -522,7 +522,7 @@ local row = panel.row; + variable.query.withDatasourceFromVariable(datasourceVariable) + variable.query.refresh.onTime() + variable.query.selectionOptions.withIncludeAll(true) - + variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}') + + variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config) ; local timestampComparison = @@ -534,15 +534,15 @@ local row = panel.row; '$datasource', ||| ( - prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"} + prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"} - - ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0) + ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} != 0) ) - ||| + ||| % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local timestampComparisonRate = @@ -554,15 +554,15 @@ local row = panel.row; '$datasource', ||| clamp_min( - rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) + rate(prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m]) - - ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) + ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) , 0) - ||| + ||| % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local samplesRate = @@ -574,16 +574,16 @@ local row = panel.row; '$datasource', ||| rate(
prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m]) + prometheus_remote_storage_samples_in_total{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m]) - - ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - - (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - ||| + (rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + ||| % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local currentShards = @@ -593,11 +593,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local maxShards = @@ -607,11 +607,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards_max{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local minShards = @@ -621,11 +621,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards_min{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local desiredShards = @@ -635,11 +635,11 @@ local row = panel.row; +
panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards_desired{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local shardsCapacity = @@ -649,11 +649,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shard_capacity{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local pendingSamples = @@ -663,11 +663,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_pending_samples{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local walSegment = @@ -679,11 +679,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_tsdb_wal_segment_current{cluster=~"$cluster", instance=~"$instance"}' + 'prometheus_tsdb_wal_segment_current{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}}' % $._config), ]); local queueSegment = @@ -695,11 +695,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}' + 'prometheus_wal_watcher_current_segment{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{consumer}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{consumer}}' % $._config), ]); local droppedSamples = @@ -710,11 +710,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", 
instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local failedSamples = @@ -725,11 +725,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_failed_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local retriedSamples = @@ -740,11 +740,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_retried_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local enqueueRetries = @@ -755,11 +755,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_enqueue_retries_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); dashboard.new('%(prefix)sRemote Write' % $._config.grafanaPrometheus)