diff --git a/Jenkinsfile b/Jenkinsfile index 1d313d8bf..10e66bcb2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -22,6 +22,7 @@ pipeline { sh 'helm repo add nfs https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner' sh "helm repo add airflow https://airflow-helm.github.io/charts" sh "helm repo add grafana https://grafana.github.io/helm-charts" + sh "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts" } } } diff --git a/charts/molgenis-prometheus/Chart.lock b/charts/molgenis-prometheus/Chart.lock new file mode 100644 index 000000000..856fd8a51 --- /dev/null +++ b/charts/molgenis-prometheus/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: prometheus + repository: https://prometheus-community.github.io/helm-charts + version: 18.4.0 +- name: prometheus-blackbox-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 7.5.0 +digest: sha256:7584eabe09a9c5543fd693aac7a796c05f5767308770bc3e11f53981314c1a33 +generated: "2023-02-17T14:42:04.557024+01:00" diff --git a/charts/molgenis-prometheus/Chart.yaml b/charts/molgenis-prometheus/Chart.yaml index 72a843804..54e06a518 100755 --- a/charts/molgenis-prometheus/Chart.yaml +++ b/charts/molgenis-prometheus/Chart.yaml @@ -1,9 +1,16 @@ -apiVersion: v1 +apiVersion: v2 name: molgenis-prometheus home: https://prometheus.io/ -version: 1.3.21 -appVersion: 2.13.1 +version: 2.0.0 +appVersion: 2.39.1 description: Molgenis installation for the Prometheus chart. 
+dependencies: + - name: prometheus + version: 18.4.0 + repository: https://prometheus-community.github.io/helm-charts + - name: prometheus-blackbox-exporter + version: 7.5.0 + repository: https://prometheus-community.github.io/helm-charts sources: - https://github.com/molgenis/molgenis-ops-helm.git icon: https://raw.githubusercontent.com/molgenis/molgenis-ops-helm/master/charts/molgenis-prometheus/catalogIcon-molgenis-prometheus.png diff --git a/charts/molgenis-prometheus/charts/prometheus-18.4.0.tgz b/charts/molgenis-prometheus/charts/prometheus-18.4.0.tgz new file mode 100644 index 000000000..0d959a15f Binary files /dev/null and b/charts/molgenis-prometheus/charts/prometheus-18.4.0.tgz differ diff --git a/charts/molgenis-prometheus/charts/prometheus-9.7.5.tgz b/charts/molgenis-prometheus/charts/prometheus-9.7.5.tgz deleted file mode 100644 index 50e047cdd..000000000 Binary files a/charts/molgenis-prometheus/charts/prometheus-9.7.5.tgz and /dev/null differ diff --git a/charts/molgenis-prometheus/charts/prometheus-blackbox-exporter-3.0.0.tgz b/charts/molgenis-prometheus/charts/prometheus-blackbox-exporter-3.0.0.tgz deleted file mode 100644 index 8dd6ccf32..000000000 Binary files a/charts/molgenis-prometheus/charts/prometheus-blackbox-exporter-3.0.0.tgz and /dev/null differ diff --git a/charts/molgenis-prometheus/charts/prometheus-blackbox-exporter-7.5.0.tgz b/charts/molgenis-prometheus/charts/prometheus-blackbox-exporter-7.5.0.tgz new file mode 100644 index 000000000..be2a56fe8 Binary files /dev/null and b/charts/molgenis-prometheus/charts/prometheus-blackbox-exporter-7.5.0.tgz differ diff --git a/charts/molgenis-prometheus/questions.yml b/charts/molgenis-prometheus/questions.yml index c915ad924..312e1334d 100644 --- a/charts/molgenis-prometheus/questions.yml +++ b/charts/molgenis-prometheus/questions.yml @@ -1,30 +1,19 @@ questions: - - variable: 'environment' + - variable: "secret.githubtoken" required: true - label: environment - default: "development" - type: 
enum - options: - - production - - development - description: "Cluster" - show_subquestion_if: production - subquestions: - - variable: "secret.githubtoken" - required: true - label: "Github token" - description: "Github token for MOLGENIS deployments. Only if cluster is set to production. Otherwise set xxxx" - type: string - - variable: "secret.serverlistuser" - required: true - label: "Serverlist username" - description: "Username for the serverlist server to request token to do the api calls." - type: string - - variable: "secret.serverlistpassword" - required: true - label: "Serverlist password" - description: "Password from the username for the serverlist server to request token to do the api calls." - type: string + label: "Github token" + description: "Github token for MOLGENIS deployments. Only if cluster is set to production. Otherwise set xxxx" + type: string + - variable: "secret.serverlistuser" + required: true + label: "Serverlist username" + description: "Username for the serverlist server to request token to do the api calls." + type: string + - variable: "secret.serverlistpassword" + required: true + label: "Serverlist password" + description: "Password from the username for the serverlist server to request token to do the api calls." + type: string - variable: 'prometheus.alertmanagerFiles.alertmanager\.yml.global.slack_api_url' required: true default: "https://hooks.slack.com/services/..." 
diff --git a/charts/molgenis-prometheus/requirements.lock b/charts/molgenis-prometheus/requirements.lock deleted file mode 100644 index de826c1a2..000000000 --- a/charts/molgenis-prometheus/requirements.lock +++ /dev/null @@ -1,9 +0,0 @@ -dependencies: -- name: prometheus - repository: https://charts.helm.sh/stable - version: 9.7.5 -- name: prometheus-blackbox-exporter - repository: https://charts.helm.sh/stable - version: 3.0.0 -digest: sha256:819594c2ba31a6ca13643c0d1ea8704313667c57d756ea8b289e4cb9d47a5f7e -generated: "2021-01-07T13:54:14.05772+01:00" diff --git a/charts/molgenis-prometheus/requirements.yaml b/charts/molgenis-prometheus/requirements.yaml deleted file mode 100644 index 32a94afad..000000000 --- a/charts/molgenis-prometheus/requirements.yaml +++ /dev/null @@ -1,7 +0,0 @@ -dependencies: - - name: prometheus - version: 9.7.5 - repository: https://charts.helm.sh/stable - - name: prometheus-blackbox-exporter - version: 3.0.0 - repository: https://charts.helm.sh/stable \ No newline at end of file diff --git a/charts/molgenis-prometheus/scripts/serverlistRequester.py b/charts/molgenis-prometheus/scripts/serverlistRequester.py index 16e096d54..436037ad0 100755 --- a/charts/molgenis-prometheus/scripts/serverlistRequester.py +++ b/charts/molgenis-prometheus/scripts/serverlistRequester.py @@ -90,7 +90,7 @@ def iterateServerlistWeb(): def writeToFile(): with open('node-targets.yml', 'w') as outputFile: outputFile.write("\n".join(str(item) for item in node_exporter_targets)) - with open('blackbox-targets.yml', 'w') as outputFile: + with open('website-targets.yml', 'w') as outputFile: outputFile.write("\n".join(str(item) for item in blackbox_exporter_urls)) def closeConnection(): diff --git a/charts/molgenis-prometheus/templates/getTargets-cronjob.yaml b/charts/molgenis-prometheus/templates/getTargets-cronjob.yaml index 3e82dd620..1221ce6ee 100644 --- a/charts/molgenis-prometheus/templates/getTargets-cronjob.yaml +++ 
b/charts/molgenis-prometheus/templates/getTargets-cronjob.yaml @@ -1,5 +1,4 @@ -{{- if eq .Values.environment "production" -}} -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: molgenis-prometheus-cronjob @@ -49,8 +48,7 @@ spec: token=$(cat /run/secrets/kubernetes.io/serviceaccount/token) && wget {{ .Values.secret.githubtoken }}@raw.githubusercontent.com/molgenis/molgenis-ops-helm/master/charts/molgenis-prometheus/scripts/serverlistRequester.py && python serverlistRequester.py $(echo $serverlistuser | base64 -d ) $(echo $serverlistpassword | base64 -d ) && - kubectl --token=$token create configmap targets-configmap --from-file node-targets.yml --from-file blackbox-targets.yml -o yaml --dry-run | kubectl --token=$token replace -f - && + kubectl --token=$token create configmap targets-configmap --from-file node-targets.yml --from-file website-targets.yml -o yaml --dry-run=client | kubectl --token=$token replace -f - && rm -rf /tmp/*"] serviceAccountName: update-configmap-bot restartPolicy: OnFailure -{{- end -}} diff --git a/charts/molgenis-prometheus/templates/serverfiles-configmap.yaml b/charts/molgenis-prometheus/templates/serverfiles-configmap.yaml index 0007c3e4e..74c6575f0 100644 --- a/charts/molgenis-prometheus/templates/serverfiles-configmap.yaml +++ b/charts/molgenis-prometheus/templates/serverfiles-configmap.yaml @@ -1,11 +1,10 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Release.Name }}-serverfiles-configmap + name: {{ .Release.Name }}-serverfiles-configmap data: - alerts: | + alerting_rules.yml: | groups: - {{ if eq .Values.environment "production" -}} - name: SiteMonitoring rules: - alert: UnreachableURL @@ -19,17 +18,8 @@ data: description: {{ printf "\" Website: {{$labels.instance}} \\n ID: {{ $labels.id }} \\n project: {{$labels.project}} \\n Type: {{$labels.type}} \\n \"" }} - name: virtualmachines rules: - - alert: InstanceProdDownOldInfra - expr: up{job="node-targets", type="production"} == 0 - for: 5m - labels: - 
severity: CRITICAL - frequency: 15m - annotations: - summary: {{ printf "\"Node_exporter production instance UNREACHABLE on old infra\"" }} - description: {{ printf "\" Instance: {{$labels.instance}} \\n ID: {{ $labels.id }} \\n project: {{ $labels.project }} \\n type: {{$labels.type}} \\n \"" }} - alert: InstanceProdDownNewInfra - expr: up{job="openstack23"} == 0 + expr: up{job="merlin"} == 0 for: 5m labels: severity: CRITICAL @@ -55,35 +45,14 @@ data: annotations: summary: {{ printf "\"Out of disk space (instance {{ $labels.instance }}) \"" }} description: {{ printf "\" Disk is almost full (< 10 percent left)\\n VALUE = {{ $value }}\\n Project: {{ $labels.id }}/{{ $labels.project }}\\n Type: {{ $labels.type }}\\n \\n \"" }} - {{ end }} - {{ if eq .Values.environment "development" }} - - name: nodes - rules: - - alert: NodeIsDown - expr: up{job="kubernetes-nodes"} == 0 - labels: - severity: ERROR - annotations: - summary: {{ printf "\"Node DOWN on rancher cluster\"" }} - description: {{ printf "\" Node {{$labels.instance}} is down\\n \"" }} - - name: pods - rules: - - alert: PodIsDown - expr: up == 0 - labels: - severity: WARN - annotations: - summary: {{ printf "\"Pod down on rancher cluster\"" }} - description: {{ printf "\" Pod is down\\n Instance: {{$labels.instance}} \\n Namespace: {{ $labels.kubernetes_namespace }} \\n \"" }} - {{ end }} - rules: | + + recording_rules.yml: | {} prometheus.yml: | rule_files: - - /etc/config/rules - - /etc/config/alerts + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml scrape_configs: - {{ if eq .Values.environment "production" -}} - job_name: 'prometheus-rancher-prod' static_configs: - targets: ['localhost:9090'] @@ -100,7 +69,7 @@ data: module: [http_2xx] file_sd_configs: - files: - - '/etc/configProm/blackbox-targets.yml' + - '/etc/targets/website-targets.yml' relabel_configs: - source_labels: [__address__] target_label: __param_target @@ -121,289 +90,6 @@ data: static_configs: - targets: - 
195.169.22.41:9090 - {{ end -}} - {{ if eq .Values.environment "development" -}} - - job_name: elasticsearch - static_configs: - # - targets: [':9114'] - - job_name: molgenis - metrics_path: /api/metrics/prometheus - static_configs: - # - targets: [''] - - job_name: node - static_configs: - # - targets: [':9100'] - - job_name: postgres - static_configs: - # - targets: [':9187'] - - job_name: jenkins - metrics_path: '/prometheus/' - scheme: https - static_configs: - - targets: ['jenkins.dev.molgenis.org:443'] - - job_name: prometheus-rancher-dev - static_configs: - - targets: - - localhost:9090 - # A scrape configuration for running Prometheus on a Kubernetes cluster. - # This uses separate scrape configs for cluster components (i.e. API server, node) - # and services to allow each to use different authentication configs. - # - # Kubernetes labels will be added as Prometheus labels on metrics via the - # `labelmap` relabeling action. - # Scrape config for API servers. - # - # Kubernetes exposes API servers as endpoints to the default/kubernetes - # service so this uses `endpoints` role and uses relabelling to only keep - # the endpoints associated with the default/kubernetes service using the - # default named port `https`. This works for single API server deployments as - # well as HA API server deployments. - - job_name: 'kubernetes-apiservers' - kubernetes_sd_configs: - - role: endpoints - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . 
- tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - # Keep only the default/kubernetes service endpoints for the https port. This - # will add targets for each API server which Kubernetes adds an endpoint to - # the default/kubernetes service. - relabel_configs: - - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] - action: keep - regex: default;kubernetes;https - - job_name: 'kubernetes-nodes' - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. 
- # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/${1}/proxy/metrics - - job_name: 'kubernetes-nodes-cadvisor' - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. 
- # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - kubernetes_sd_configs: - - role: node - # This configuration will work only on kubelet 1.7.3+ - # As the scrape endpoints for cAdvisor have changed - # if you are using older version you need to change the replacement to - # replacement: /api/v1/nodes/${1}:4194/proxy/metrics - # more info here https://github.com/coreos/prometheus-operator/issues/633 - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - # Only for Kubernetes ^1.7.3. - # See: https://github.com/prometheus/prometheus/issues/2916 - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor - metric_relabel_configs: - - action: replace - source_labels: [id] - regex: '^/machine\.slice/machine-rkt\\x2d([^\\]+)\\.+/([^/]+)\.service$' - target_label: rkt_container_name - replacement: '${2}-${1}' - - action: replace - source_labels: [id] - regex: '^/system\.slice/(.+)\.service$' - target_label: systemd_service_name - replacement: '${1}' - # Scrape config for service endpoints. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. 
- - job_name: 'kubernetes-service-endpoints' - kubernetes_sd_configs: - - role: endpoints - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: instance - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name - - job_name: 'prometheus-pushgateway' - honor_labels: true - kubernetes_sd_configs: - - role: service - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: pushgateway - # Example scrape config for probing services via the Blackbox Exporter. 
- # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/probe`: Only probe services that have a value of `true` - # - job_name: 'kubernetes-services' - # metrics_path: /probe - # params: - # module: [http_2xx] - # kubernetes_sd_configs: - # - role: service - # relabel_configs: - # - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - # action: keep - # regex: true - # - source_labels: [__address__] - # target_label: __param_target - # - target_label: __address__ - # replacement: blackbox - # - source_labels: [__param_target] - # target_label: instance - # - action: labelmap - # regex: __meta_kubernetes_service_label_(.+) - # - source_labels: [__meta_kubernetes_namespace] - # target_label: kubernetes_namespace - # - source_labels: [__meta_kubernetes_service_name] - # target_label: kubernetes_name - - job_name: kubernetes-services - kubernetes_sd_configs: - - role: service - metrics_path: /metrics - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: - - __meta_kubernetes_service_label_molgenis_org_environment - action: replace - target_label: environment - regex: (.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__address__] - action: replace - target_label: __param_target - - source_labels: [__param_target] - action: replace - target_label: instance - # Example scrape config for pods - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` - # * `prometheus.io/path`: If the metrics path is not `/metrics` override 
this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. - - job_name: 'kubernetes-pods' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name - {{ end }} alerting: alertmanagers: - kubernetes_sd_configs: diff --git a/charts/molgenis-prometheus/templates/targets-configmap.yaml b/charts/molgenis-prometheus/templates/targets-configmap.yaml index 0171d3e62..04baabcfc 100644 --- a/charts/molgenis-prometheus/templates/targets-configmap.yaml +++ b/charts/molgenis-prometheus/templates/targets-configmap.yaml @@ -6,7 +6,7 @@ data: {{ if eq .Values.environment "production" -}} node-targets.yml: | #get replaced with master branch server via cronjob - blackbox-targets.yml: | + website-targets.yml: | #get replaced with 2.0 branch servers via cronjob extra-targets.yml: | #get replaced with 1.0 branch servers via cronjob diff --git a/charts/molgenis-prometheus/templates/update-configmap-role.yaml b/charts/molgenis-prometheus/templates/update-configmap-role.yaml index a1501b31e..1260b4c18 100644 --- a/charts/molgenis-prometheus/templates/update-configmap-role.yaml +++ b/charts/molgenis-prometheus/templates/update-configmap-role.yaml @@ -5,5 +5,5 @@ metadata: rules: - apiGroups: [""] # "" indicates the core API group resources: ["configmaps"] - 
resourceName: ["targets-configmap"] + resourceNames: ["targets-configmap"] verbs: ["create", "delete", "patch", "update"] \ No newline at end of file diff --git a/charts/molgenis-prometheus/values.yaml b/charts/molgenis-prometheus/values.yaml index 326c73511..d0f057f46 100644 --- a/charts/molgenis-prometheus/values.yaml +++ b/charts/molgenis-prometheus/values.yaml @@ -9,34 +9,32 @@ prometheus: requests: cpu: 10m memory: 32Mi - alertmanagerFiles: - alertmanager.yml: - global: - slack_api_url: '' - receivers: - - name: default-receiver - slack_configs: - - channel: '#alerts' - send_resolved: true - text: "{{ range .Alerts }}{{ .Annotations.summary }}\n{{ .Annotations.description }}\n{{ end }}" - route: - group_wait: 10s - group_interval: 5m - receiver: default-receiver - repeat_interval: 3h - routes: - - receiver: default-receiver - match: - frequency: daily - continue: false - repeat_interval: 24h - - receiver: default-receiver - match: - frequency: 15m - continue: false - repeat_interval: 15m + global: + slack_api_url: '' + receivers: + - name: default-receiver + slack_configs: + - channel: '#alerts' + send_resolved: true + text: "{{ range .Alerts }}{{ .Annotations.summary }}\n{{ .Annotations.description }}\n{{ end }}" + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + routes: + - receiver: default-receiver + match: + frequency: daily + continue: false + repeat_interval: 24h + - receiver: default-receiver + match: + frequency: 15m + continue: false + repeat_interval: 15m - kubeStateMetrics: + kube-state-metrics: enabled: true nodeSelector: deployPod: "true" @@ -48,19 +46,10 @@ prometheus: cpu: 20m memory: 128Mi - nodeExporter: + prometheus-node-exporter: enabled: false - nodeSelector: - deployPod: "true" - resources: - limits: - cpu: 200m - memory: 50Mi - requests: - cpu: 100m - memory: 30Mi - pushgateway: + prometheus-pushgateway: nodeSelector: deployPod: "true" resources: @@ -81,10 +70,10 @@ prometheus: requests: 
cpu: 1 memory: 6Gi - configMapOverrideName: "serverfiles-configmap" + configMapOverrideName: "serverfiles-configmap" extraConfigmapMounts: - name: targets-configmap - mountPath: /etc/configProm + mountPath: /etc/targets subPath: "" configMap: targets-configmap readOnly: true @@ -99,7 +88,7 @@ prometheus: memory: 64Mi extraConfigmapMounts: - name: targets-configmap - mountPath: /etc/configProm + mountPath: /etc/targets subPath: "" configMap: targets-configmap readOnly: true @@ -131,4 +120,3 @@ secret: githubtoken: "xxxx" serverlistuser: "xxxx" serverlistpassword: "xxxx" -environment: "development"