From 682da050381fa057e61fec21efc42a0a8537fd2f Mon Sep 17 00:00:00 2001 From: Benjamin Ludwig Date: Fri, 29 Nov 2024 11:59:21 +0100 Subject: [PATCH 001/224] [Octavia] Bump utils to enable secrets injector in osprofiler --- openstack/octavia/Chart.lock | 6 +++--- openstack/octavia/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/openstack/octavia/Chart.lock b/openstack/octavia/Chart.lock index 761a5b8011c..adc90a451ca 100644 --- a/openstack/octavia/Chart.lock +++ b/openstack/octavia/Chart.lock @@ -13,12 +13,12 @@ dependencies: version: 0.11.1 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.18.3 + version: 0.19.7 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 -digest: sha256:d97844ddbeb370cf844250e47a8207d2048ecd28516a4c4f1313adbdb745ca4a -generated: "2024-09-13T17:52:44.208638386+02:00" +digest: sha256:8b764e30a05b01f227c6825be05c39f916ad22137a19ede4bd70cf2ab4de185e +generated: "2024-11-29T11:48:17.569776618+01:00" diff --git a/openstack/octavia/Chart.yaml b/openstack/octavia/Chart.yaml index 47688ffd9d8..29a875187f5 100644 --- a/openstack/octavia/Chart.yaml +++ b/openstack/octavia/Chart.yaml @@ -26,7 +26,7 @@ dependencies: version: 0.11.1 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.18.3 + version: 0.19.7 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 From bf08f39c2500a7e1542755d1070f76a43ea016f7 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Wed, 18 Dec 2024 15:48:58 +0100 Subject: [PATCH 002/224] [opensearch-logs] adding audit user for audit logs --- .../templates/config/_internal_users.yml.tpl | 6 ++++++ .../templates/config/_roles.yml.tpl | 20 +++++++++++++++++++ .../templates/config/_roles_mapping.yml.tpl | 5 +++++ 3 files changed, 31 insertions(+) diff --git a/system/opensearch-logs/templates/config/_internal_users.yml.tpl b/system/opensearch-logs/templates/config/_internal_users.yml.tpl index 501f7da3d8d..08ce9b9af41 100644 --- a/system/opensearch-logs/templates/config/_internal_users.yml.tpl +++ b/system/opensearch-logs/templates/config/_internal_users.yml.tpl @@ -38,6 +38,12 @@ otel: backend_roles: - "otel" +audit: + hash: "{{ .Values.users.audit.nohash }}" + reserved: true + backend_roles: + - "audit" + otellogs: hash: "{{ .Values.users.otellogs.nohash }}" reserved: true diff --git a/system/opensearch-logs/templates/config/_roles.yml.tpl b/system/opensearch-logs/templates/config/_roles.yml.tpl index b94137542a1..0c8d6688199 100644 --- a/system/opensearch-logs/templates/config/_roles.yml.tpl +++ b/system/opensearch-logs/templates/config/_roles.yml.tpl @@ -249,6 +249,26 @@ compute: - "indices:admin/create" - "indices:data/write/bulk*" - "indices:data/write/index" +audit: + reserved: false + cluster_permissions: + - "cluster_monitor" + - "cluster_composite_ops" + - "cluster:admin/ingest/pipeline/put" + - "cluster:admin/ingest/pipeline/get" + - "indices:admin/template/get" + - "cluster_manage_index_templates" + - "cluster:admin/opensearch/ml/predict" + index_permissions: + - index_patterns: + - "audit-*" + allowed_actions: + - "indices:admin/template/get" + - "indices:admin/template/put" + - "indices:admin/mapping/put" + - "indices:admin/create" + - "indices:data/write/bulk*" + - "indices:data/write/index" otel: reserved: false cluster_permissions: diff --git 
a/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl b/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl
index f7177dd9b0a..4e25c9be659 100644
--- a/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl
+++ b/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl
@@ -32,6 +32,11 @@ greenhouse:
   users:
     - "greenhouse"
 
+audit:
+  reserved: false
+  users:
+    - "audit"
+
 jump:
   reserved: false
   users:

From 21f98fcc60a04c774d7b3f477c6ec92dfbf5d852 Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Wed, 18 Dec 2024 16:40:23 +0200
Subject: [PATCH 003/224] [pxc-operator] Don't set unnecessary prometheus.io/scrape annotation

---
 system/percona-xtradb-cluster-operator/Chart.yaml | 2 +-
 system/percona-xtradb-cluster-operator/values.yaml | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/system/percona-xtradb-cluster-operator/Chart.yaml b/system/percona-xtradb-cluster-operator/Chart.yaml
index 7950bbc53d9..402539f8e90 100644
--- a/system/percona-xtradb-cluster-operator/Chart.yaml
+++ b/system/percona-xtradb-cluster-operator/Chart.yaml
@@ -4,7 +4,7 @@ name: percona-xtradb-cluster-operator
 description: A Helm chart to install Percona XtraDB Cluster Operator.
 home: "https://github.com/sapcc/helm-charts/tree/master/system/percona-xtradb-cluster-operator"
 type: application
-version: 0.2.2
+version: 0.2.3
 appVersion: "1.15.1"
 kubeVersion: ">=1.26.0-0"
 maintainers:
diff --git a/system/percona-xtradb-cluster-operator/values.yaml b/system/percona-xtradb-cluster-operator/values.yaml
index e16004db633..c84a3eade72 100644
--- a/system/percona-xtradb-cluster-operator/values.yaml
+++ b/system/percona-xtradb-cluster-operator/values.yaml
@@ -93,7 +93,7 @@ kube-state-metrics:
       registry: keppel.global.cloud.sap/ccloud-registry-k8s-io-mirror
       repository: kube-state-metrics/kube-state-metrics
   rbac:
-    create: true # NOTE: need to create a custom role instead with access only to custom resources
+    create: true # only a limited role is created, because all collectors are disabled, except custom-resource-state
     extraRules:
       - apiGroups: ["pxc.percona.com"]
         resources:
@@ -114,6 +114,7 @@ kube-state-metrics:
 
   selfMonitor:
     enabled: true
+    prometheusScrape: false # don't set prometheus.io/scrape annotation on kube-state-metrics service, because we use ServiceMonitor
 
   collectors: null
 

From 99590326a3a09279a646c7c2936485a08869396e Mon Sep 17 00:00:00 2001
From: Fabian Wiesel
Date: Thu, 19 Dec 2024 14:42:29 +0100
Subject: [PATCH 004/224] [ironic] Move image verification to agent

The ironic-python-agent gets some config values over the lookup API as
'config' in the JSON, and two of those values are unintuitively taken
from the conductor config section. So we have to set them on the API
node as well, in order to have the settings take hold in the agent.
With a current ironic-python-agent, we do not need to validate them in
the conductor itself.
---
 openstack/ironic/templates/etc/_ironic.conf.tpl | 12 ++++++++++++
 openstack/ironic/values.yaml | 1 +
 2 files changed, 13 insertions(+)

diff --git a/openstack/ironic/templates/etc/_ironic.conf.tpl b/openstack/ironic/templates/etc/_ironic.conf.tpl
index 9cccb40a258..899ffaa2a84 100644
--- a/openstack/ironic/templates/etc/_ironic.conf.tpl
+++ b/openstack/ironic/templates/etc/_ironic.conf.tpl
@@ -132,3 +132,15 @@ metrics_enabled = {{ if .Values.audit.metrics_enabled -}}True{{- else -}}False{{
 
 {{- include "osprofiler" . }}
 {{- include "ini_sections.cache" . }}
+
+
+{{- if or .Values.conductor.defaults.conductor.permitted_image_formats .Values.conductor.defaults.conductor.disable_deep_image_inspection }}
+
+[conductor]
+  {{- if .Values.conductor.defaults.conductor.disable_deep_image_inspection }}
+disable_deep_image_inspection = {{ .Values.conductor.defaults.conductor.disable_deep_image_inspection }}
+  {{- end }}
+  {{- if .Values.conductor.defaults.conductor.permitted_image_formats }}
+permitted_image_formats = {{ .Values.conductor.defaults.conductor.permitted_image_formats }}
+  {{- end }}
+{{- end }}
diff --git a/openstack/ironic/values.yaml b/openstack/ironic/values.yaml
index aaee1476c03..18551752cb7 100644
--- a/openstack/ironic/values.yaml
+++ b/openstack/ironic/values.yaml
@@ -209,6 +209,7 @@ conductor:
     redfish:
       swift_object_expiry_timeout: "5400"
     conductor:
+      conductor_always_validates_images: False # We only use the direct interface, so leave it to the agent
       permitted_image_formats: 'raw,qcow2,iso,vmdk'
 
 agent:

From 9556770ca662db46296a382291bfd68fe34d20eb Mon Sep 17 00:00:00 2001
From: IvoGoman
Date: Thu, 19 Dec 2024 15:34:23 +0100
Subject: [PATCH 005/224] feat(greenhouse-ccloud): add CAM JoinURL to teams
 (#7570)

---
 system/greenhouse-ccloud/Chart.yaml | 2 +-
 system/greenhouse-ccloud/ci/test-values.yaml | 4 ++++
 system/greenhouse-ccloud/templates/teams.yaml | 1 +
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/system/greenhouse-ccloud/Chart.yaml b/system/greenhouse-ccloud/Chart.yaml
index 82998fd3637..7a20615ead9 100644
--- a/system/greenhouse-ccloud/Chart.yaml
+++ b/system/greenhouse-ccloud/Chart.yaml
@@ -2,4 +2,4 @@ apiVersion: v2
 name: ccloud
 description: A Helm chart for the CCloud organization in Greenhouse.
 type: application
-version: 1.11.7
+version: 1.12.0
diff --git a/system/greenhouse-ccloud/ci/test-values.yaml b/system/greenhouse-ccloud/ci/test-values.yaml
index 7c438b90586..5282d7b5ecc 100644
--- a/system/greenhouse-ccloud/ci/test-values.yaml
+++ b/system/greenhouse-ccloud/ci/test-values.yaml
@@ -13,6 +13,10 @@ teams:
   team2:
     description: Team number 2
     mappedIdPGroup: team2
    isSupportGroup: false
+  team3:
+    description: Team number 3
+    mappedIdPGroup: team three
+    isSupportGroup: false
 
 teamRoleBindings:
   - teamRef: team1
diff --git a/system/greenhouse-ccloud/templates/teams.yaml b/system/greenhouse-ccloud/templates/teams.yaml
index a00c1f5ddda..c8c6eef5ae2 100644
--- a/system/greenhouse-ccloud/templates/teams.yaml
+++ b/system/greenhouse-ccloud/templates/teams.yaml
@@ -13,4 +13,5 @@
 spec:
   description: {{ $team.description | default (printf "Team %s" $teamName) }}
   mappedIdPGroup: {{ required "Team mappedGroupID is missing" $team.mappedIdPGroup | quote }}
+  joinUrl: "https://cam.int.sap/cam/ui/admin?item=request&profile={{$team.mappedIdPGroup | replace " " "%20"}}"
 {{- end }}

From 760c518aba36f4c41c5b34faf2d1770c52b45419 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?p=C3=BDrus?=
Date: Thu, 19 Dec 2024 16:06:11 +0100
Subject: [PATCH 006/224] [ceph] prepare upgrade to rook v1.16 (#7573)

* [ceph] prepare upgrade to rook v1.16

* [ceph] support multi-instance rgw (#7578)

* support multi-instance rgw

---------

Co-authored-by: Artem
---
 system/cc-ceph/Chart.lock | 8 +-
 system/cc-ceph/Chart.yaml | 8 +-
 .../templates/cephobjectstore-extra.yaml | 110 ++++++++++++++++++
 .../cephobjectstore-placement-pools.yaml | 2 -
 system/cc-ceph/templates/cephobjectstore.yaml | 18 +++
 .../cc-ceph/templates/certificate-extra.yaml | 25 ++++
 system/cc-ceph/templates/record-extra.yaml | 26 +++++
 system/cc-ceph/templates/service-extra.yaml | 25 
++++ system/cc-ceph/values.yaml | 28 ++++- 9 files changed, 239 insertions(+), 11 deletions(-) create mode 100644 system/cc-ceph/templates/cephobjectstore-extra.yaml create mode 100644 system/cc-ceph/templates/certificate-extra.yaml create mode 100644 system/cc-ceph/templates/record-extra.yaml create mode 100644 system/cc-ceph/templates/service-extra.yaml diff --git a/system/cc-ceph/Chart.lock b/system/cc-ceph/Chart.lock index 0731771e991..b3d632ad9b9 100644 --- a/system/cc-ceph/Chart.lock +++ b/system/cc-ceph/Chart.lock @@ -4,9 +4,9 @@ dependencies: version: 1.0.0 - name: rook-ceph repository: https://charts.rook.io/release - version: v1.15.0 + version: v1.16.0 - name: rook-crds repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.0.2-rook.1.15.0 -digest: sha256:d8d894ba706ae4a8216ecf28bb9561fafc3b88908f183ba0b6d4b77f296e92f8 -generated: "2024-09-12T10:38:14.973622-04:00" + version: 0.0.2-rook.1.16.0 +digest: sha256:d9a8ec1509dcec3a634aada46cbf9434897bfa7bf85bad9adb8c6af157aa08d4 +generated: "2024-12-18T11:34:58.26072907+01:00" diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 79568fb7d38..8cf0b10c477 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.1 -appVersion: "1.15.0" +version: 1.1.2 +appVersion: "1.16.0" dependencies: - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm @@ -11,7 +11,7 @@ dependencies: - name: rook-ceph # version update should be done in the rook-crds chart as well repository: https://charts.rook.io/release - version: 1.15.0 + version: 1.16.0 - name: rook-crds repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: '0.0.2-rook.1.15.0' + version: '0.0.2-rook.1.16.0' diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml new file mode 100644 index 00000000000..3772a5ad81a --- /dev/null +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -0,0 +1,110 @@ +{{- if .Values.objectstore.multiInstance.enabled }} +apiVersion: ceph.rook.io/v1 +kind: CephObjectRealm +metadata: + name: {{ .Values.objectstore.name }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectZoneGroup +metadata: + name: {{ .Values.objectstore.name }} + namespace: {{ .Release.Namespace }} +spec: + realm: {{ .Values.objectstore.name }} +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectZone +metadata: + name: {{ .Values.objectstore.name }} + namespace: {{ .Release.Namespace }} +spec: + zoneGroup: {{ .Values.objectstore.name }} +{{- if and .Values.rgwTargetPlacements.useRookCRD .Values.rgwTargetPlacements.placements }} + sharedPools: + poolPlacements: +{{- range $target := .Values.rgwTargetPlacements.placements }} + - name: {{ $target.name }} + metadataPoolName: {{ $target.name }}.rgw.buckets.index + dataPoolName: {{ $target.name }}.rgw.buckets.data + dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec + default: {{ $target.default | default false }} +{{- end }} +{{- else }} + metadataPool: {{ toYaml .Values.objectstore.metadataPool | nindent 4 }} + dataPool: {{ toYaml .Values.objectstore.dataPool | nindent 4 }} +{{- end }} +{{- range $instance := .Values.objectstore.multiInstance.extraInstances }} +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectStore +metadata: + name: {{ $instance.name }} + namespace: {{ $.Release.Namespace }} +spec: + zone: + 
name: {{ $.Values.objectstore.name }} + hosting: +{{- if gt (len $instance.gateway.dnsNames) 0 }} + advertiseEndpoint: + dnsName: {{ $instance.gateway.dnsNames | first }} + port: 443 + useTls: true + dnsNames: {{ toYaml $instance.gateway.dnsNames | nindent 8 }} +{{- end }} + gateway: + instances: {{ $instance.gateway.instances | default $.Values.objectstore.gateway.instances }} + {{- if or $instance.gateway.port $.Values.objectstore.gateway.port }} + port: {{ $instance.gateway.port | default $.Values.objectstore.gateway.port }} + {{- end }} + {{- if or $instance.gateway.securePort $.Values.objectstore.gateway.securePort }} + securePort: {{ $instance.gateway.securePort | default $.Values.objectstore.gateway.securePort }} + {{- end }} + placement: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.metal.cloud.sap/role + operator: In + values: + - {{ $.Values.osd.nodeRole }} + # since the CephCluster's network provider is "host", we need to isolate 80/443 port listeners from each other + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-rgw + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + sslCertificateRef: {{ $instance.gateway.sslCertificateRef | default $.Values.objectstore.gateway.sslCertificateRef }} + resources: {{ toYaml ( $instance.gateway.resources | default $.Values.objectstore.gateway.resources) | nindent 6 }} + preservePoolsOnDelete: true +{{- if and $.Values.objectstore.keystone.enabled }} +{{- with $.Values.objectstore.keystone }} + auth: + keystone: + acceptedRoles: +{{- range $_, $role := .accepted_roles }} + - {{ $role }} +{{- end }} + implicitTenants: {{ .implicit_tenants | quote }} + serviceUserSecretName: ceph-keystone-secret + tokenCacheSize: {{ .token_cache_size }} + url: {{ .url }} + protocols: +{{- if $instance.enabledAPIs }} + enableAPIs: {{ toYaml $instance.enabledAPIs | nindent 6 }} +{{- end }} + s3: + authUseKeystone: true + swift: + accountInUrl: {{ .swift_account_in_url }} + versioningEnabled: {{ .swift_versioning_enabled }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/cephobjectstore-placement-pools.yaml b/system/cc-ceph/templates/cephobjectstore-placement-pools.yaml index 44f18cf52c5..5eba4595b61 100644 --- a/system/cc-ceph/templates/cephobjectstore-placement-pools.yaml +++ b/system/cc-ceph/templates/cephobjectstore-placement-pools.yaml @@ -1,4 +1,3 @@ -{{- if .Values.rgwTargetPlacements.enabled }} {{- range $target := .Values.rgwTargetPlacements.placements }} --- apiVersion: ceph.rook.io/v1 @@ -80,7 +79,6 @@ spec: nodelete: {{ $.Values.pool.nodelete | quote }} nosizechange: {{ $.Values.pool.nosizechange | quote }} {{- end }} -{{- end }} {{- if .Values.rgwTargetPlacements.premiumPlacements }} {{- range $target := .Values.rgwTargetPlacements.premiumPlacements }} --- diff --git a/system/cc-ceph/templates/cephobjectstore.yaml b/system/cc-ceph/templates/cephobjectstore.yaml index a17a95da7c9..8f8b729dfa4 100644 --- a/system/cc-ceph/templates/cephobjectstore.yaml +++ b/system/cc-ceph/templates/cephobjectstore.yaml @@ -4,8 +4,23 @@ metadata: name: {{ .Values.objectstore.name }} namespace: {{ .Release.Namespace }} spec: +{{- if .Values.objectstore.multiInstance.enabled }} + zone: + name: {{ .Values.objectstore.name }} +{{- else if and .Values.rgwTargetPlacements.useRookCRD .Values.rgwTargetPlacements.placements 
}} + sharedPools: + poolPlacements: +{{- range $target := .Values.rgwTargetPlacements.placements }} + - name: {{ $target.name }} + metadataPoolName: {{ $target.name }}.rgw.buckets.index + dataPoolName: {{ $target.name }}.rgw.buckets.data + dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec + default: {{ $target.default | default false }} +{{- end }} +{{- else }} metadataPool: {{ toYaml .Values.objectstore.metadataPool | nindent 4 }} dataPool: {{ toYaml .Values.objectstore.dataPool | nindent 4 }} +{{- end }} hosting: {{- if gt (len .Values.objectstore.gateway.dnsNames) 0 }} advertiseEndpoint: @@ -60,6 +75,9 @@ spec: tokenCacheSize: {{ .token_cache_size }} url: {{ .url }} protocols: +{{- if $.Values.objectstore.enabledAPIs }} + enableAPIs: {{ toYaml $.Values.objectstore.enabledAPIs | nindent 6 }} +{{- end }} s3: authUseKeystone: true swift: diff --git a/system/cc-ceph/templates/certificate-extra.yaml b/system/cc-ceph/templates/certificate-extra.yaml new file mode 100644 index 00000000000..852c5168623 --- /dev/null +++ b/system/cc-ceph/templates/certificate-extra.yaml @@ -0,0 +1,25 @@ +{{- if .Values.objectstore.multiInstance.enabled }} +{{- range $instance := .Values.objectstore.multiInstance.extraInstances }} +{{- range $key, $record := $instance.gateway.dnsNames }} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ $record }} +spec: + dnsNames: + - "*.{{ $record }}" + - "{{ $record }}" + uris: + - rook-ceph-rgw-{{ $instance.name }}.rook-ceph.svc + issuerRef: + group: certmanager.cloud.sap + kind: ClusterIssuer + name: digicert-issuer + secretName: {{ $instance.gateway.sslCertificateRef }} + usages: + - digital signature + - key encipherment +{{- end }} +{{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/record-extra.yaml b/system/cc-ceph/templates/record-extra.yaml new file mode 100644 index 00000000000..ad2c07d5c81 --- /dev/null +++ b/system/cc-ceph/templates/record-extra.yaml @@ -0,0 +1,26 @@ +{{- if .Values.objectstore.multiInstance.enabled }} +{{- range $instance := .Values.objectstore.multiInstance.extraInstances }} +{{- range $key, $record := $instance.gateway.dnsNames }} +--- +apiVersion: disco.stable.sap.cc/v1 +kind: Record +metadata: + name: "{{ $record }}" +spec: + type: A + record: {{ $instance.service.externalIP }} + hosts: + - "{{ $record }}." +--- +apiVersion: disco.stable.sap.cc/v1 +kind: Record +metadata: + name: "{{ $record }}-wildcard" +spec: + type: CNAME + record: "{{ $record }}." + hosts: + - "*.{{ $record }}." 
+{{- end }} +{{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/service-extra.yaml b/system/cc-ceph/templates/service-extra.yaml new file mode 100644 index 00000000000..3259f479bfc --- /dev/null +++ b/system/cc-ceph/templates/service-extra.yaml @@ -0,0 +1,25 @@ +{{- if .Values.objectstore.multiInstance.enabled }} +{{- range $instance := .Values.objectstore.multiInstance.extraInstances }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $instance.service.name }} + namespace: {{ $.Release.Namespace }} +spec: + externalIPs: + - {{ $instance.service.externalIP }} + type: NodePort + sessionAffinity: None + externalTrafficPolicy: Local + ports: + - port: {{ $instance.service.port }} + targetPort: {{ $instance.service.port }} + protocol: TCP + name: rgw-ssl + selector: + app: {{ $instance.service.selector.app }} + rook_cluster: {{ $instance.service.selector.rook_cluster }} + rook_object_store: {{ $instance.name }} +{{- end }} +{{- end }} diff --git a/system/cc-ceph/values.yaml b/system/cc-ceph/values.yaml index 4fb53bd0a44..c03a65f36bf 100644 --- a/system/cc-ceph/values.yaml +++ b/system/cc-ceph/values.yaml @@ -90,6 +90,7 @@ dashboard: objectstore: enabled: true name: objectstore + enabledAPIs: [] # empty - all enabled. See: https://docs.ceph.com/en/reef/radosgw/config-ref/#confval-rgw_enable_apis gateway: instances: 6 port: 80 @@ -149,6 +150,30 @@ objectstore: password: XXX domain: XXX project: XXX + multiInstance: + enabled: false + extraInstances: + # can inherit/override all config options from objectstore: + # - name: objectstore-admin + # gateway: + # instances: 2 + # sslCertificateRef: "" + # dnsNames: + # - dns1-adm + # - dns2-adm + # resources: + # requests: + # cpu: 1 + # memory: 2Gi + # service: + # name: ceph-objectstore-admin-external + # port: 443 + # externalIP: "10.0.0.1" + # selector: + # app: rgw + # define other RGW instances here: + # - name: other-instance-name + prysm: enabled: true repository: @@ -158,7 +183,8 @@ objectstore: interval: "10" rgwTargetPlacements: - enabled: false + # enabled: false deprecate rgwTargetPlacements.enabled because it is true on all envs + useRookCRD: false # !!!WARNING set 'true' only for new clusters. Upgrade will not work now. defaultRgwPools: enabled: false # create default rgw pools, see: https://github.com/sapcc/helm-charts/issues/6670 From 7fde4d8c83213895b0ba4981b872a2c06f5a4318 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 20 Dec 2024 10:25:11 +0100 Subject: [PATCH 007/224] [opensearch-logs] testing new index pattern names --- .../config/_install-index-pattern.sh.tpl | 61 ++++++++++++++++--- 1 file changed, 52 insertions(+), 9 deletions(-) diff --git a/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl index a5e199bb084..016cc0d3f9a 100644 --- a/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl @@ -1,16 +1,59 @@ #!/bin/bash +export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} -# 0. Check for index policy -for i in $(curl -s -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") + + +# Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. 
+for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") do + #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries echo "using index $i from Opensearch-Logs" + export ALIAS_EXISTS=`curl -s -i -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases/${i}"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'` + if [[ "$ALIAS_EXISTS" -gt 0 ]] + then + echo "Alias and dashboard index pattern for index ${i} already exists. Nothing to do." + else + echo "setting OpenSearch dashboard index mapping for index $i" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER}"${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }" + fi + echo "Deleting old index pattern based on index-* format" + export DASHBOARD_PATTERN=`curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'` + if [[ "$DASHBOARD_PATTERN" -gt 0 ]] + then + echo "Old dashboard pattern exists for for index ${i}, it will be removed" + curl -s -XDELETE -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" + else + echo "No old dashboard pattern for index $i" + fi +done + +# Dashboard index pattern for all available aliases, which are not datastreams +for i in $(curl -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep -v "\-ds"|grep -v "^\."|awk '{ print $1 }'|uniq) + do + echo "using alias $i from Opensearch-Logs" + echo "Setting OpenSearch dashboard index mapping for alias $i" + curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" + if [ $? -eq 0 ] + then + echo "index pattern for alias ${i} already exists in Opensearch dashboard, nothing to do" + else + echo "INFO: creating index-pattern in Dashboards for datastream alias $i" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" + fi +done + + +# Dashboard index pattern for all available datastreams +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|uniq) + do + echo "using datastream $i from Opensearch-Logs" echo "setting OpenSearch dashboard index mapping for index $i" - curl --header "content-type: application/JSON" --fail -XGET -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" - if [ $? -eq 0 ] + curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" + if [ $? 
-eq 0 ] then - echo "index ${i} already exists in Opensearch dashboard" - else - echo "INFO: creating index-pattern in Dashboards for $i logs" - curl -XPOST --header "content-type: application/JSON" -u ${ADMIN_USER}:${ADMIN_PASSWORD} "https://${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}-*\", \"timeFieldName\": \"@timestamp\" } }" - fi + echo "index ${i} already exists in Opensearch dashboard" + else + echo "INFO: creating index-pattern in Dashboards for datastream alias $i" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" + fi done From f6851dfe1369180334230f781474d1ae8aa8a56c Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 20 Dec 2024 10:54:21 +0100 Subject: [PATCH 008/224] [opensearch-logs] remove unused config --- .../config/_install-dashboard-pattern.sh.tpl | 54 +++++++++++++---- .../config/_install-index-pattern.sh.tpl | 59 ------------------- .../install-dashboard-pattern-job.yaml | 1 - 3 files changed, 42 insertions(+), 72 deletions(-) delete mode 100644 system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 350074ecb1c..016cc0d3f9a 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -1,29 +1,59 @@ #!/bin/bash +export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} -# 0. Check for index policy -for i in $(curl -s -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") + + +# Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") do + #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries echo "using index $i from Opensearch-Logs" - echo "setting OpenSearch dashboard index mapping for index $i" - curl --header "content-type: application/JSON" --fail -XGET -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" - if [ $? -eq 0 ]; then - echo "index ${i} already exists in Opensearch dashboard" + export ALIAS_EXISTS=`curl -s -i -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases/${i}"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'` + if [[ "$ALIAS_EXISTS" -gt 0 ]] + then + echo "Alias and dashboard index pattern for index ${i} already exists. Nothing to do." 
+ else + echo "setting OpenSearch dashboard index mapping for index $i" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER}"${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }" + fi + echo "Deleting old index pattern based on index-* format" + export DASHBOARD_PATTERN=`curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'` + if [[ "$DASHBOARD_PATTERN" -gt 0 ]] + then + echo "Old dashboard pattern exists for for index ${i}, it will be removed" + curl -s -XDELETE -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" + else + echo "No old dashboard pattern for index $i" + fi +done + +# Dashboard index pattern for all available aliases, which are not datastreams +for i in $(curl -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep -v "\-ds"|grep -v "^\."|awk '{ print $1 }'|uniq) + do + echo "using alias $i from Opensearch-Logs" + echo "Setting OpenSearch dashboard index mapping for alias $i" + curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" + if [ $? -eq 0 ] + then + echo "index pattern for alias ${i} already exists in Opensearch dashboard, nothing to do" else - echo "INFO: creating index-pattern in Dashboards for $i logs" - curl -XPOST --header "content-type: application/JSON" -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}-*\", \"timeFieldName\": \"@timestamp\" } }" + echo "INFO: creating index-pattern in Dashboards for datastream alias $i" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" fi done + # Dashboard index pattern for all available datastreams -for i in $(curl -s -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|uniq) +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|uniq) do echo "using datastream $i from Opensearch-Logs" echo "setting OpenSearch dashboard index mapping for index $i" - curl --header "content-type: application/JSON" --fail -XGET -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" - if [ $? -eq 0 ]; then + curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" + if [ $? 
-eq 0 ] + then echo "index ${i} already exists in Opensearch dashboard" else echo "INFO: creating index-pattern in Dashboards for datastream alias $i" - curl -XPOST --header "content-type: application/JSON" -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" fi done diff --git a/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl deleted file mode 100644 index 016cc0d3f9a..00000000000 --- a/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} - - - -# Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. -for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") - do - #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries - echo "using index $i from Opensearch-Logs" - export ALIAS_EXISTS=`curl -s -i -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases/${i}"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'` - if [[ "$ALIAS_EXISTS" -gt 0 ]] - then - echo "Alias and dashboard index pattern for index ${i} already exists. Nothing to do." - else - echo "setting OpenSearch dashboard index mapping for index $i" - curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER}"${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }" - fi - echo "Deleting old index pattern based on index-* format" - export DASHBOARD_PATTERN=`curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'` - if [[ "$DASHBOARD_PATTERN" -gt 0 ]] - then - echo "Old dashboard pattern exists for for index ${i}, it will be removed" - curl -s -XDELETE -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" - else - echo "No old dashboard pattern for index $i" - fi -done - -# Dashboard index pattern for all available aliases, which are not datastreams -for i in $(curl -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep -v "\-ds"|grep -v "^\."|awk '{ print $1 }'|uniq) - do - echo "using alias $i from Opensearch-Logs" - echo "Setting OpenSearch dashboard index mapping for alias $i" - curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" - if [ $? 
-eq 0 ] - then - echo "index pattern for alias ${i} already exists in Opensearch dashboard, nothing to do" - else - echo "INFO: creating index-pattern in Dashboards for datastream alias $i" - curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" - fi -done - - -# Dashboard index pattern for all available datastreams -for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|uniq) - do - echo "using datastream $i from Opensearch-Logs" - echo "setting OpenSearch dashboard index mapping for index $i" - curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" - if [ $? -eq 0 ] - then - echo "index ${i} already exists in Opensearch dashboard" - else - echo "INFO: creating index-pattern in Dashboards for datastream alias $i" - curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" - fi -done diff --git a/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml index 3836eb76199..75a777aac35 100644 --- a/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml @@ -14,7 +14,6 @@ metadata: # job is considered part of the release. "helm.sh/hook": post-install,post-upgrade "helm.sh/hook-weight": "-5" - "helm.sh/hook-delete-policy": hook-succeeded spec: template: metadata: From 34622176e1e17ada64b07160f11e5ebb4ebda345 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 20 Dec 2024 11:01:27 +0100 Subject: [PATCH 009/224] [opensearch-logs] fix typo in dashboard pattern script --- .../templates/config/_install-dashboard-pattern.sh.tpl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 016cc0d3f9a..25209288216 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -1,8 +1,6 @@ #!/bin/bash export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} - - # Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") do @@ -14,7 +12,7 @@ for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk echo "Alias and dashboard index pattern for index ${i} already exists. Nothing to do." 
else echo "setting OpenSearch dashboard index mapping for index $i" - curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER}"${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }" fi echo "Deleting old index pattern based on index-* format" export DASHBOARD_PATTERN=`curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'` From 4ab75b383753cd0f1ad81ebc49e3b8f0e966ac35 Mon Sep 17 00:00:00 2001 From: D074096 Date: Fri, 20 Dec 2024 11:16:24 +0100 Subject: [PATCH 010/224] Update cc-gardener to v1.107.3 --- global/cc-gardener/Chart.lock | 6 +++--- global/cc-gardener/Chart.yaml | 6 +++--- global/cc-gardener/README.md | 4 ++-- global/cc-gardener/values.yaml | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/global/cc-gardener/Chart.lock b/global/cc-gardener/Chart.lock index 92f37b8fc8c..23c5ef29d4c 100644 --- a/global/cc-gardener/Chart.lock +++ b/global/cc-gardener/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.106.2 + version: v1.107.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:ffaebe29ba84a7c044a3385549be4b53edc0616dbc43becb21054aea5a1a44f0 -generated: "2024-12-12T13:41:45.025946+01:00" +digest: sha256:13ee0b33f9f1808405ed0cda29d914d1da765a1b3268788d427cf9f6dbfe1a21 +generated: "2024-12-20T11:15:14.811777+01:00" diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index 0dbc8c3f54d..55691a51e78 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,13 +2,13 @@ apiVersion: v2 name: cc-gardener description: Converged Cloud Gardener setup based on gardener-operator type: application -version: 0.2.0 -appVersion: "v1.106.2" +version: 0.3.0 +appVersion: "v1.107.3" home: https://github.com/gardener/gardener dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.106.2 + version: v1.107.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/global/cc-gardener/README.md b/global/cc-gardener/README.md index 13d5c823daf..d40c8469024 100644 --- a/global/cc-gardener/README.md +++ b/global/cc-gardener/README.md @@ -2,8 +2,8 @@ - setup operator CRDs ```sh -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.105.1/charts/gardener/operator/templates/crd-extensions.yaml -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.105.1/charts/gardener/operator/templates/crd-gardens.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.107.3/charts/gardener/operator/templates/crd-extensions.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.107.3/charts/gardener/operator/templates/crd-gardens.yaml k label crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud app.kubernetes.io/managed-by=Helm k annotate crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud 
meta.helm.sh/release-name=cc-gardener meta.helm.sh/release-namespace=garden ``` diff --git a/global/cc-gardener/values.yaml b/global/cc-gardener/values.yaml index 1808800f15e..2c74308e9be 100644 --- a/global/cc-gardener/values.yaml +++ b/global/cc-gardener/values.yaml @@ -4,7 +4,7 @@ global: operator: image: repository: keppel.global.cloud.sap/ccloud-europe-docker-pkg-dev-mirror/gardener-project/releases/gardener/operator - tag: v1.106.2 # also the gardener version, which will be used + tag: v1.107.3 # also the gardener version, which will be used config: featureGates: HVPA: false From decc22594b1bf8e3823727e5767e8e9199122c63 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Fri, 20 Dec 2024 10:22:12 +0200 Subject: [PATCH 011/224] [pxc-operator] Update pxc-operator and CRDs to v1.16.0 major release --- system/percona-xtradb-cluster-crds/Chart.yaml | 2 +- ...axtradbclusterbackups.pxc.percona.com.yaml | 5 +- ...xtradbclusterrestores.pxc.percona.com.yaml | 4 +- ...perconaxtradbclusters.pxc.percona.com.yaml | 209 +++++++++++++++++- .../Chart.lock | 6 +- .../Chart.yaml | 4 +- 6 files changed, 221 insertions(+), 9 deletions(-) diff --git a/system/percona-xtradb-cluster-crds/Chart.yaml b/system/percona-xtradb-cluster-crds/Chart.yaml index c57240e4bff..18f1de57c53 100644 --- a/system/percona-xtradb-cluster-crds/Chart.yaml +++ b/system/percona-xtradb-cluster-crds/Chart.yaml @@ -3,4 +3,4 @@ apiVersion: v2 name: percona-xtradb-cluster-crds description: A Helm chart containing Percona CRDs. type: application -version: 0.0.1-percona1.15.1 +version: 0.0.1-percona1.16.0 diff --git a/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterbackups.pxc.percona.com.yaml b/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterbackups.pxc.percona.com.yaml index e2d27de47e3..356c51767e9 100644 --- a/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterbackups.pxc.percona.com.yaml +++ b/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterbackups.pxc.percona.com.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: perconaxtradbclusterbackups.pxc.percona.com spec: group: pxc.percona.com @@ -56,6 +56,9 @@ spec: type: string spec: properties: + activeDeadlineSeconds: + format: int64 + type: integer containerOptions: properties: args: diff --git a/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterrestores.pxc.percona.com.yaml b/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterrestores.pxc.percona.com.yaml index 890518a0b3a..df99da2b1c4 100644 --- a/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterrestores.pxc.percona.com.yaml +++ b/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusterrestores.pxc.percona.com.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: perconaxtradbclusterrestores.pxc.percona.com spec: group: pxc.percona.com @@ -325,6 +325,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object diff --git a/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusters.pxc.percona.com.yaml b/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusters.pxc.percona.com.yaml index 5f66b1e6077..6d55eba329b 100644 --- 
a/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusters.pxc.percona.com.yaml +++ b/system/percona-xtradb-cluster-crds/crds/perconaxtradbclusters.pxc.percona.com.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: perconaxtradbclusters.pxc.percona.com spec: group: pxc.percona.com @@ -325,6 +325,9 @@ spec: type: boolean backup: properties: + activeDeadlineSeconds: + format: int64 + type: integer allowParallel: type: boolean annotations: @@ -358,6 +361,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1053,6 +1058,8 @@ spec: runAsUser: format: int64 type: integer + seLinuxChangePolicy: + type: string seLinuxOptions: properties: level: @@ -1079,6 +1086,8 @@ spec: type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -1113,6 +1122,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2101,6 +2112,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2209,6 +2221,8 @@ spec: runAsUser: format: int64 type: integer + seLinuxChangePolicy: + type: string seLinuxOptions: properties: level: @@ -2235,6 +2249,8 @@ spec: type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -2284,6 +2300,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2372,6 +2389,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2585,6 +2604,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2635,10 +2656,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -2998,6 +3021,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -3011,6 +3041,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -3259,6 +3290,7 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: @@ -3266,6 +3298,7 @@ spec: type: array x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean @@ -3277,6 +3310,7 @@ spec: type: object x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -3285,6 +3319,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -3302,6 +3337,7 @@ spec: sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -3629,6 +3665,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -3735,6 +3772,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -3816,6 +3854,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -3933,6 +3973,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -4251,6 +4292,78 @@ spec: type: array initContainer: properties: + containerSecurityContext: + properties: + allowPrivilegeEscalation: + type: 
boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object image: type: string resources: @@ -4260,6 +4373,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4380,6 +4495,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4502,6 +4619,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5259,6 +5378,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5367,6 +5487,8 @@ spec: runAsUser: format: int64 type: integer + seLinuxChangePolicy: + type: string seLinuxOptions: properties: level: @@ -5393,6 +5515,8 @@ spec: type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -5442,6 +5566,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5522,6 +5647,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5735,6 +5862,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5785,10 +5914,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -6148,6 +6279,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -6161,6 +6299,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -6409,6 +6548,7 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: @@ -6416,6 +6556,7 @@ spec: type: array x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean @@ -6427,6 +6568,7 @@ spec: type: object x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -6435,6 +6577,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -6452,6 +6595,7 @@ spec: sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -6779,6 +6923,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -6885,6 +7030,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -6966,6 +7112,8 @@ spec: properties: name: type: string + request: + type: string required: - name 
type: object @@ -7083,6 +7231,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -8119,6 +8268,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -8227,6 +8377,8 @@ spec: runAsUser: format: int64 type: integer + seLinuxChangePolicy: + type: string seLinuxOptions: properties: level: @@ -8253,6 +8405,8 @@ spec: type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -8302,6 +8456,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -8415,6 +8570,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -8628,6 +8785,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -8678,10 +8837,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -9041,6 +9202,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -9054,6 +9222,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -9302,6 +9471,7 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: @@ -9309,6 +9479,7 @@ spec: type: array x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean @@ -9320,6 +9491,7 @@ spec: type: object x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -9328,6 +9500,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -9345,6 +9518,7 @@ spec: sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -9672,6 +9846,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -9778,6 +9953,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -9859,6 +10035,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -9976,6 +10154,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -10332,6 +10511,34 @@ spec: versionServiceEndpoint: type: string type: object + users: + items: + properties: + dbs: + items: + type: string + type: array + grants: + items: + type: string + type: array + hosts: + items: + type: string + type: array + name: + type: string + passwordSecretRef: + properties: + key: + type: string + name: + type: string + type: object + withGrantOption: + type: boolean + type: object + type: array vaultSecretName: type: string type: object diff --git a/system/percona-xtradb-cluster-operator/Chart.lock b/system/percona-xtradb-cluster-operator/Chart.lock index 55fa76ef09b..fe9f113b7ba 100644 --- a/system/percona-xtradb-cluster-operator/Chart.lock +++ b/system/percona-xtradb-cluster-operator/Chart.lock @@ -1,7 +1,7 @@ dependencies: - name: pxc-operator repository: https://percona.github.io/percona-helm-charts/ - version: 1.15.1 + version: 1.16.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 @@ -11,5 +11,5 @@ dependencies: - name: kube-state-metrics repository: https://prometheus-community.github.io/helm-charts version: 5.27.0 -digest: 
sha256:a8bfa4d2f15b126812ed173a2046f37feb9c0de86253c91b3f75c3350db3f21f -generated: "2024-12-17T12:59:23.701796+02:00" +digest: sha256:6be0b9a595266078f58be0e459f3165d29fcf6082604ba7745b80f22e34df935 +generated: "2024-12-20T10:20:46.691862+02:00" diff --git a/system/percona-xtradb-cluster-operator/Chart.yaml b/system/percona-xtradb-cluster-operator/Chart.yaml index 402539f8e90..3ed48d750bf 100644 --- a/system/percona-xtradb-cluster-operator/Chart.yaml +++ b/system/percona-xtradb-cluster-operator/Chart.yaml @@ -5,7 +5,7 @@ description: A Helm chart to install Percona XtraDB Cluster Operator. home: "https://github.com/sapcc/helm-charts/tree/master/system/percona-xtradb-cluster-operator" type: application version: 0.2.3 -appVersion: "1.15.1" +appVersion: "1.16.0" kubeVersion: ">=1.26.0-0" maintainers: - name: Birk Bohne @@ -19,7 +19,7 @@ sources: - https://github.com/percona/percona-helm-charts/tree/main dependencies: - name: pxc-operator - version: 1.15.1 + version: 1.16.0 repository: https://percona.github.io/percona-helm-charts/ - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From 5ac07f4366bbc4cb0800a0513166d8f96b6e48d5 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 20 Dec 2024 11:24:36 +0100 Subject: [PATCH 012/224] [opensearch-logs] fix alias creation for indexes with more than one dash --- .../templates/config/_install-dashboard-pattern.sh.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 25209288216..4ea9d4f00b9 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -2,7 +2,7 @@ export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} # Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. -for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|uniq|tr -d '0-9.'|awk '{print substr($0,1,length($0)-1)}'|uniq) do #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries echo "using index $i from Opensearch-Logs" From f4dfa414b9c5ad504dbf01acd73b1a204318c38e Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 20 Dec 2024 11:39:25 +0100 Subject: [PATCH 013/224] [opensearch-logs] remove date from aliases --- .../templates/config/_install-dashboard-pattern.sh.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 4ea9d4f00b9..3a9bd33f194 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -2,7 +2,7 @@ export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} # Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. 
-for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|uniq|tr -d '0-9.'|awk '{print substr($0,1,length($0)-1)}'|uniq) +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|sed 's/-[0-9].*\.[0-9].*\.[0-9].*$//'|uniq) do #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries echo "using index $i from Opensearch-Logs" From e884452ee1a29d922795ad9fb2e4955ff0ca9b0f Mon Sep 17 00:00:00 2001 From: D074096 Date: Fri, 20 Dec 2024 11:43:15 +0100 Subject: [PATCH 014/224] Update cc-gardener to v1.108.1 --- global/cc-gardener/Chart.lock | 6 +++--- global/cc-gardener/Chart.yaml | 6 +++--- global/cc-gardener/README.md | 4 ++-- global/cc-gardener/templates/garden.yaml | 2 +- global/cc-gardener/values.yaml | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/global/cc-gardener/Chart.lock b/global/cc-gardener/Chart.lock index 23c5ef29d4c..11323836d42 100644 --- a/global/cc-gardener/Chart.lock +++ b/global/cc-gardener/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.107.3 + version: v1.108.1 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:13ee0b33f9f1808405ed0cda29d914d1da765a1b3268788d427cf9f6dbfe1a21 -generated: "2024-12-20T11:15:14.811777+01:00" +digest: sha256:3cdbd08cd8cf76fecec985ad316c968301ced6074cbde6f1c460840ae5a1afd4 +generated: "2024-12-20T11:38:20.60181+01:00" diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index 55691a51e78..19b8f673b09 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,13 +2,13 @@ apiVersion: v2 name: cc-gardener description: Converged Cloud Gardener setup based on gardener-operator type: application -version: 0.3.0 -appVersion: "v1.107.3" +version: 0.4.0 +appVersion: "v1.108.1" home: https://github.com/gardener/gardener dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.107.3 + version: v1.108.1 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/global/cc-gardener/README.md b/global/cc-gardener/README.md index d40c8469024..5802b199bb6 100644 --- a/global/cc-gardener/README.md +++ b/global/cc-gardener/README.md @@ -2,8 +2,8 @@ - setup operator CRDs ```sh -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.107.3/charts/gardener/operator/templates/crd-extensions.yaml -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.107.3/charts/gardener/operator/templates/crd-gardens.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.108.1/charts/gardener/operator/templates/crd-extensions.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.108.1/charts/gardener/operator/templates/crd-gardens.yaml k label crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud app.kubernetes.io/managed-by=Helm k annotate crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud meta.helm.sh/release-name=cc-gardener meta.helm.sh/release-namespace=garden ``` diff --git a/global/cc-gardener/templates/garden.yaml 
b/global/cc-gardener/templates/garden.yaml index ae095792f7f..3e8fe947b6d 100644 --- a/global/cc-gardener/templates/garden.yaml +++ b/global/cc-gardener/templates/garden.yaml @@ -37,7 +37,7 @@ spec: virtualCluster: dns: domains: - - virtual-garden.{{ required ".Values.global.cluster missing" .Values.global.cluster }}.{{ required ".Values.global.region missing" .Values.global.region }}.cloud.sap + - name: virtual-garden.{{ required ".Values.global.cluster missing" .Values.global.cluster }}.{{ required ".Values.global.region missing" .Values.global.region }}.cloud.sap etcd: main: backup: diff --git a/global/cc-gardener/values.yaml b/global/cc-gardener/values.yaml index 2c74308e9be..29168a93b3a 100644 --- a/global/cc-gardener/values.yaml +++ b/global/cc-gardener/values.yaml @@ -4,7 +4,7 @@ global: operator: image: repository: keppel.global.cloud.sap/ccloud-europe-docker-pkg-dev-mirror/gardener-project/releases/gardener/operator - tag: v1.107.3 # also the gardener version, which will be used + tag: v1.108.1 # also the gardener version, which will be used config: featureGates: HVPA: false From 1dfddb3cf25deef56a5328aa74cd8e0a727178b6 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 20 Dec 2024 11:43:44 +0100 Subject: [PATCH 015/224] [opensearch-logs] remove index from alias --- .../templates/config/_install-dashboard-pattern.sh.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 3a9bd33f194..899538c3026 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -2,7 +2,7 @@ export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} # Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. 
-for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|sed 's/-[0-9].*\.[0-9].*\.[0-9].*$//'|uniq) +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|sed 's/-[0-9].*\.[0-9].*\.[0-9].*$//'|uniq|grep -v index) do #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries echo "using index $i from Opensearch-Logs" From 8b4b3de0cc2674401ed24a4aa52bdde1d5fc7c3f Mon Sep 17 00:00:00 2001 From: D074096 Date: Fri, 20 Dec 2024 11:52:50 +0100 Subject: [PATCH 016/224] Fix garden DNS in cc-gardener --- global/cc-gardener/Chart.yaml | 2 +- global/cc-gardener/templates/garden.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index 19b8f673b09..6dc95dd926d 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-gardener description: Converged Cloud Gardener setup based on gardener-operator type: application -version: 0.4.0 +version: 0.4.1 appVersion: "v1.108.1" home: https://github.com/gardener/gardener dependencies: diff --git a/global/cc-gardener/templates/garden.yaml b/global/cc-gardener/templates/garden.yaml index 3e8fe947b6d..be3702394d6 100644 --- a/global/cc-gardener/templates/garden.yaml +++ b/global/cc-gardener/templates/garden.yaml @@ -21,7 +21,7 @@ spec: runtimeCluster: ingress: domains: - - runtime-garden.{{ required ".Values.global.cluster missing" .Values.global.cluster }}.{{ required ".Values.global.region missing" .Values.global.region }}.cloud.sap + - name: runtime-garden.{{ required ".Values.global.cluster missing" .Values.global.cluster }}.{{ required ".Values.global.region missing" .Values.global.region }}.cloud.sap controller: kind: nginx networking: From 0bf86b19999937ec2358b1b41faa01e60c1533b0 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Fri, 20 Dec 2024 12:30:59 +0200 Subject: [PATCH 017/224] [pxc-operator] Bump chart version and update CODEOWNERS * Add version override function to be able to set imageTag in values * Use ccloud repository instead of mirror by default * Bump chart version to 0.3.0 * Update CODEOWNERS * Bump linkerd-support dependency --- .github/CODEOWNERS | 6 +++--- system/percona-xtradb-cluster-operator/Chart.lock | 6 +++--- system/percona-xtradb-cluster-operator/Chart.yaml | 4 ++-- .../templates/_helpers.tpl | 13 +++++++++++++ system/percona-xtradb-cluster-operator/values.yaml | 2 +- 5 files changed, 22 insertions(+), 9 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index cccf4db4b47..bed6095f1eb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -21,7 +21,7 @@ /common/prometheus-pushgateway @viennaa # old /common/prometheus-server @viennaa @richardtief @Kuckkuck @IvoGoman @timojohlo /common/prometheus-server-pre7 @viennaa @richardtief @Kuckkuck @IvoGoman @timojohlo -/common/pxc-db @s10 @businessbean @bashar-alkhateeb +/common/pxc-db @s10 @businessbean @galkindmitrii /common/rabbitmq @galkindmitrii @fwiesel @Carthaca @bashar-alkhateeb @dusandordevicsap @businessbean /common/rabbitmq-cluster @galkindmitrii @notandy @defo89 @businessbean /common/redis* @majewsky @SuperSandro2000 @VoigtS @Nuckal777 @@ -266,8 +266,8 @@ /system/prometheus-operator @viennaa @richardtief /system/provider-kubernikus 
@defo89 /system/provider-metal3 @defo89 @Nuckal777 -/system/percona-xtradb-cluster-crds @s10 @businessbean @bashar-alkhateeb -/system/percona-xtradb-cluster-operator @s10 @businessbean @bashar-alkhateeb +/system/percona-xtradb-cluster-crds @s10 @businessbean @galkindmitrii +/system/percona-xtradb-cluster-operator @s10 @businessbean @galkindmitrii /system/rabbitmq-operator @galkindmitrii /system/runtime-extension-maintenance-controller @Nuckal777 @defo89 @majewsky @SuperSandro2000 @VoigtS /system/secrets-injector @Nuckal777 @majewsky @SuperSandro2000 @VoigtS diff --git a/system/percona-xtradb-cluster-operator/Chart.lock b/system/percona-xtradb-cluster-operator/Chart.lock index fe9f113b7ba..a21f21bbbe1 100644 --- a/system/percona-xtradb-cluster-operator/Chart.lock +++ b/system/percona-xtradb-cluster-operator/Chart.lock @@ -7,9 +7,9 @@ dependencies: version: 1.0.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.0 + version: 1.1.0 - name: kube-state-metrics repository: https://prometheus-community.github.io/helm-charts version: 5.27.0 -digest: sha256:6be0b9a595266078f58be0e459f3165d29fcf6082604ba7745b80f22e34df935 -generated: "2024-12-20T10:20:46.691862+02:00" +digest: sha256:178152c2db0f6266d3722718e06dea813c2798442244b4c0a095e22c4164794b +generated: "2024-12-20T12:29:55.128909+02:00" diff --git a/system/percona-xtradb-cluster-operator/Chart.yaml b/system/percona-xtradb-cluster-operator/Chart.yaml index 3ed48d750bf..2982a809a95 100644 --- a/system/percona-xtradb-cluster-operator/Chart.yaml +++ b/system/percona-xtradb-cluster-operator/Chart.yaml @@ -4,7 +4,7 @@ name: percona-xtradb-cluster-operator description: A Helm chart to install Percona XtraDB Cluster Operator. home: "https://github.com/sapcc/helm-charts/tree/master/system/percona-xtradb-cluster-operator" type: application -version: 0.2.3 +version: 0.3.0 appVersion: "1.16.0" kubeVersion: ">=1.26.0-0" maintainers: @@ -26,7 +26,7 @@ dependencies: version: 1.0.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.0 + version: 1.1.0 - name: kube-state-metrics repository: https://prometheus-community.github.io/helm-charts version: 5.27.0 diff --git a/system/percona-xtradb-cluster-operator/templates/_helpers.tpl b/system/percona-xtradb-cluster-operator/templates/_helpers.tpl index c1aafc379ae..4e564554380 100644 --- a/system/percona-xtradb-cluster-operator/templates/_helpers.tpl +++ b/system/percona-xtradb-cluster-operator/templates/_helpers.tpl @@ -6,3 +6,16 @@ app.kubernetes.io/version: {{ .Chart.AppVersion }} app.kubernetes.io/managed-by: "helm" helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }} {{- end }} + +{{/* +Override function returns image URI according to parameters set +*/}} +{{- define "pxc-operator.image" -}} +{{- if .Values.image }} +{{- .Values.image }} +{{- else if .Values.imageTag }} +{{- printf "%s:%s" .Values.operatorImageRepository .Values.imageTag }} +{{- else }} +{{- printf "%s:%s" .Values.operatorImageRepository .Chart.AppVersion }} +{{- end }} +{{- end -}} diff --git a/system/percona-xtradb-cluster-operator/values.yaml b/system/percona-xtradb-cluster-operator/values.yaml index c84a3eade72..85e6d4fdcd0 100644 --- a/system/percona-xtradb-cluster-operator/values.yaml +++ b/system/percona-xtradb-cluster-operator/values.yaml @@ -7,7 +7,7 @@ pxc-operator: replicaCount: 1 - operatorImageRepository: keppel.global.cloud.sap/ccloud-dockerhub-mirror/percona/percona-xtradb-cluster-operator + operatorImageRepository: 
keppel.global.cloud.sap/ccloud/percona-xtradb-cluster-operator imagePullPolicy: IfNotPresent image: "" From 9c8b4bc754b31dfd4ffe0cc03bd01f2b13b362d4 Mon Sep 17 00:00:00 2001 From: D074096 Date: Fri, 20 Dec 2024 12:10:01 +0100 Subject: [PATCH 018/224] Update cc-gardener to v1.109.0 --- global/cc-gardener/Chart.lock | 6 +++--- global/cc-gardener/Chart.yaml | 6 +++--- global/cc-gardener/README.md | 4 ++-- global/cc-gardener/values.yaml | 5 +---- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/global/cc-gardener/Chart.lock b/global/cc-gardener/Chart.lock index 11323836d42..570a56bb945 100644 --- a/global/cc-gardener/Chart.lock +++ b/global/cc-gardener/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.108.1 + version: v1.109.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:3cdbd08cd8cf76fecec985ad316c968301ced6074cbde6f1c460840ae5a1afd4 -generated: "2024-12-20T11:38:20.60181+01:00" +digest: sha256:fde98e3025485600e064e63f9aef4769851f277fa2f1651904b6b5cba6460e87 +generated: "2024-12-20T12:09:05.735008+01:00" diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index 6dc95dd926d..a86e3e4366c 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,13 +2,13 @@ apiVersion: v2 name: cc-gardener description: Converged Cloud Gardener setup based on gardener-operator type: application -version: 0.4.1 -appVersion: "v1.108.1" +version: 0.5.0 +appVersion: "v1.109.0" home: https://github.com/gardener/gardener dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.108.1 + version: v1.109.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/global/cc-gardener/README.md b/global/cc-gardener/README.md index 5802b199bb6..52971fe105e 100644 --- a/global/cc-gardener/README.md +++ b/global/cc-gardener/README.md @@ -2,8 +2,8 @@ - setup operator CRDs ```sh -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.108.1/charts/gardener/operator/templates/crd-extensions.yaml -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.108.1/charts/gardener/operator/templates/crd-gardens.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.109.0/charts/gardener/operator/templates/crd-extensions.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.109.0/charts/gardener/operator/templates/crd-gardens.yaml k label crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud app.kubernetes.io/managed-by=Helm k annotate crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud meta.helm.sh/release-name=cc-gardener meta.helm.sh/release-namespace=garden ``` diff --git a/global/cc-gardener/values.yaml b/global/cc-gardener/values.yaml index 29168a93b3a..0ac331c08e9 100644 --- a/global/cc-gardener/values.yaml +++ b/global/cc-gardener/values.yaml @@ -4,10 +4,7 @@ global: operator: image: repository: keppel.global.cloud.sap/ccloud-europe-docker-pkg-dev-mirror/gardener-project/releases/gardener/operator - tag: v1.108.1 # also the gardener version, which will be used - config: - featureGates: - HVPA: false + tag: v1.109.0 # also the gardener version, which will be used garden: name: garden # externalIP: "" From 8088ea702655534f41221d0e635879e7ab05935f Mon Sep 17 00:00:00 2001 
From: Vlad Gusev Date: Fri, 20 Dec 2024 11:14:09 +0200 Subject: [PATCH 019/224] [pxc-db] Update PXC CRD to 1.16.0 * Update operator image from 1.15.1 to 1.16.0 * Update pxc image from 8.0.36 to 8.0.39 * Use custom build image for cluster init containers * Update default haproxy image from 2.8.5 to 2.8.11 * Use 1.16.0-pxc8.0-backup-pxb8.0.35 for backup and init jobs * Set backup jobs activeDeadlineSeconds to 1 hour (default is infinite) * Increase backoffLimit from 1 to 2 --- common/pxc-db/Chart.yaml | 4 ++-- common/pxc-db/templates/cluster.yaml | 5 ++++- common/pxc-db/values.yaml | 26 ++++++++++++++------------ 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index 529769a13c7..b76a55058ac 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,13 +16,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.22 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.15.1" +appVersion: "1.16.0" dependencies: - name: owner-info diff --git a/common/pxc-db/templates/cluster.yaml b/common/pxc-db/templates/cluster.yaml index 8836457f8d7..4505c3e6cde 100644 --- a/common/pxc-db/templates/cluster.yaml +++ b/common/pxc-db/templates/cluster.yaml @@ -30,7 +30,7 @@ spec: {{- if $.Values.initContainer.image.override }} image: {{ $.Values.initContainer.image.override }} {{- else }} - image: {{ required ".Values.global.dockerHubMirrorAlternateRegion is missing" .Values.global.dockerHubMirrorAlternateRegion }}/{{ $.Values.initContainer.image.name }}:{{ $.Values.initContainer.image.tag }} + image: {{ required ".Values.global.registryAlternateRegion is missing" .Values.global.registryAlternateRegion }}/{{ $.Values.initContainer.image.name }}:{{ $.Values.initContainer.image.tag }} {{- end }} {{- if .Values.initContainer.resources }} resources: @@ -251,6 +251,9 @@ spec: {{- if $backup.backoffLimit }} backoffLimit: {{ $backup.backoffLimit }} {{- end }} + {{- if $backup.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ $backup.activeDeadlineSeconds }} + {{- end }} pitr: {{- if not $backup.pitr.enabled }} enabled: false diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml index ce23a7dd8c1..0942c5cc6f2 100644 --- a/common/pxc-db/values.yaml +++ b/common/pxc-db/values.yaml @@ -1,3 +1,4 @@ +--- # Default values for percona-xtradb-cluster. # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
@@ -72,8 +73,8 @@ pause: false initContainer: image: - name: percona/percona-xtradb-cluster-operator - tag: 1.15.1 + name: percona-xtradb-cluster-operator + tag: 1.16.0 override: null resources: requests: @@ -87,7 +88,7 @@ pxc: size: 3 image: name: percona/percona-xtradb-cluster - tag: 8.0.36-28.1 + tag: 8.0.39-30.1 override: null imagePullPolicy: IfNotPresent annotations: {} @@ -131,17 +132,17 @@ pxc: wsrep_retry_autocommit: 3 pxc_strict_mode: MASTER # default is ENFORCING binlog_format: ROW - binlog_expire_logs_seconds: 345600 # default 30 days -> 4 days - sync_binlog: 1 # default value for PXC + binlog_expire_logs_seconds: 345600 # default 30 days -> 4 days + sync_binlog: 1 # default value for PXC net_read_timeout: 30 net_write_timeout: 60 connect_timeout: 30 wait_timeout: 3800 interactive_timeout: 1800 - innodb_lock_wait_timeout: 30 # default 50 seconds -> 30 seconds + innodb_lock_wait_timeout: 30 # default 50 seconds -> 30 seconds max_connections: 1024 - max_connect_errors: "4294967295" # to avoid failed connections because of loadbalancer health checks - innodb_flush_log_at_trx_commit: 1 # for better performance set (2): write at commit, flush once per second + max_connect_errors: "4294967295" # to avoid failed connections because of loadbalancer health checks + innodb_flush_log_at_trx_commit: 1 # for better performance set (2): write at commit, flush once per second innodb_flush_method: O_DIRECT innodb_file_per_table: 1 innodb_autoinc_lock_mode: 2 @@ -228,7 +229,7 @@ haproxy: size: 2 image: name: percona/haproxy - tag: 2.8.5 + tag: 2.8.11 override: null imagePullPolicy: Always annotations: {} @@ -325,7 +326,7 @@ haproxy: initdb: image: name: percona/percona-xtradb-cluster-operator - tag: 1.15.1-pxc8.0-backup-pxb8.0.35 + tag: 1.16.0-pxc8.0-backup-pxb8.0.35 backup: enabled: false @@ -333,10 +334,11 @@ backup: labels: {} image: name: percona/percona-xtradb-cluster-operator - tag: 1.15.1-pxc8.0-backup-pxb8.0.35 + tag: 1.16.0-pxc8.0-backup-pxb8.0.35 override: null imagePullPolicy: Always - backoffLimit: 1 + backoffLimit: 2 + activeDeadlineSeconds: 3600 priority_class: "critical-infrastructure" resources: requests: {} From a1c9da6f774b495b4fd459de39009ca963b04f2c Mon Sep 17 00:00:00 2001 From: Nuckal777 Date: Fri, 20 Dec 2024 13:28:55 +0100 Subject: [PATCH 020/224] Update cc-gardener to v1.110.1 --- global/cc-gardener/Chart.lock | 6 +++--- global/cc-gardener/Chart.yaml | 6 +++--- global/cc-gardener/README.md | 4 ++-- global/cc-gardener/values.yaml | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/global/cc-gardener/Chart.lock b/global/cc-gardener/Chart.lock index 570a56bb945..edd5f889ae1 100644 --- a/global/cc-gardener/Chart.lock +++ b/global/cc-gardener/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.109.0 + version: v1.110.1 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:fde98e3025485600e064e63f9aef4769851f277fa2f1651904b6b5cba6460e87 -generated: "2024-12-20T12:09:05.735008+01:00" +digest: sha256:d3c3aeb024c4a6bcd4c5e1392505792e4a9896c63002e2e861229276671038d0 +generated: "2024-12-20T13:27:20.6187+01:00" diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index a86e3e4366c..1a6138f6c96 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,13 +2,13 @@ apiVersion: v2 name: cc-gardener description: Converged Cloud Gardener setup based on gardener-operator 
type: application -version: 0.5.0 -appVersion: "v1.109.0" +version: 0.6.0 +appVersion: "v1.110.1" home: https://github.com/gardener/gardener dependencies: - name: operator repository: oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener - version: v1.109.0 + version: v1.110.1 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/global/cc-gardener/README.md b/global/cc-gardener/README.md index 52971fe105e..94c753659e2 100644 --- a/global/cc-gardener/README.md +++ b/global/cc-gardener/README.md @@ -2,8 +2,8 @@ - setup operator CRDs ```sh -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.109.0/charts/gardener/operator/templates/crd-extensions.yaml -k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.109.0/charts/gardener/operator/templates/crd-gardens.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.110.1/charts/gardener/operator/templates/crd-extensions.yaml +k apply -f https://raw.githubusercontent.com/gardener/gardener/refs/tags/v1.110.1/charts/gardener/operator/templates/crd-gardens.yaml k label crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud app.kubernetes.io/managed-by=Helm k annotate crd gardens.operator.gardener.cloud extensions.operator.gardener.cloud meta.helm.sh/release-name=cc-gardener meta.helm.sh/release-namespace=garden ``` diff --git a/global/cc-gardener/values.yaml b/global/cc-gardener/values.yaml index 0ac331c08e9..be987b2d310 100644 --- a/global/cc-gardener/values.yaml +++ b/global/cc-gardener/values.yaml @@ -4,7 +4,7 @@ global: operator: image: repository: keppel.global.cloud.sap/ccloud-europe-docker-pkg-dev-mirror/gardener-project/releases/gardener/operator - tag: v1.109.0 # also the gardener version, which will be used + tag: v1.110.1 # also the gardener version, which will be used garden: name: garden # externalIP: "" From 40ee7c9176deccc83235169831f74f32a9f7c6ea Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Fri, 20 Dec 2024 14:20:18 +0200 Subject: [PATCH 021/224] [designate] bump pxc-db chart * Update pxc-db chart for designate - PXC 8.0.36 to 8.0.39 - HAProxy 2.8.5 to 2.8.11 - Backup improvements and fixes * Don't use mysql_native_password for designate user - It's no longer needed with proxysql 2.7.1 --- openstack/designate/Chart.lock | 6 +++--- openstack/designate/Chart.yaml | 4 ++-- openstack/designate/values.yaml | 1 - 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/openstack/designate/Chart.lock b/openstack/designate/Chart.lock index 5314e4f4470..5a0bf68f922 100644 --- a/openstack/designate/Chart.lock +++ b/openstack/designate/Chart.lock @@ -4,7 +4,7 @@ dependencies: version: 1.1.9 - name: pxc-db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.1.22 + version: 0.2.0 - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.15.2 @@ -29,5 +29,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:846a31a104e64691eb61d27224e2b559207054bdd972603c35557cb85fd66810 -generated: "2024-12-18T13:33:16.963527+02:00" +digest: sha256:07e3fe91666e0333a6d4cf70f2badd53461a4b0737de76156e87e1c854e7e396 +generated: "2024-12-20T14:18:27.9057+02:00" diff --git a/openstack/designate/Chart.yaml b/openstack/designate/Chart.yaml index d63c0d5387a..eea8777cee7 100644 --- a/openstack/designate/Chart.yaml +++ b/openstack/designate/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 description: A Helm 
chart for Kubernetes to deploy Openstack Designate (DNSaaS) name: designate -version: 0.4.5 +version: 0.4.6 appVersion: "xena" dependencies: - condition: percona_cluster.enabled @@ -13,7 +13,7 @@ dependencies: name: pxc-db alias: pxc_db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.1.22 + version: 0.2.0 - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm diff --git a/openstack/designate/values.yaml b/openstack/designate/values.yaml index 4d9e8688fb2..3b4b6eaf9e8 100644 --- a/openstack/designate/values.yaml +++ b/openstack/designate/values.yaml @@ -148,7 +148,6 @@ pxc_db: users: designate: name: designate - auth_plugin: 'mysql_native_password' grants: - "ALL PRIVILEGES on designate.*" pxc: From 2710898d282af59861254ae4828b4776d9e8ba4a Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Mon, 23 Dec 2024 05:12:59 +0000 Subject: [PATCH 022/224] global/ccloud-hedgedoc: run helm dep up --- global/ccloud-hedgedoc/Chart.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/global/ccloud-hedgedoc/Chart.lock b/global/ccloud-hedgedoc/Chart.lock index 2e6339360a2..d0ab6b34703 100644 --- a/global/ccloud-hedgedoc/Chart.lock +++ b/global/ccloud-hedgedoc/Chart.lock @@ -7,12 +7,12 @@ dependencies: version: 1.2.5 - name: pgbackup repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.7 + version: 1.1.8 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:15aadf9dae7e4a465203e4b2055db236ae3b7f1826eec2afa1b3876af5be8548 -generated: "2024-12-02T05:06:00.33797619Z" +digest: sha256:68c6dfb3add8a77d3289bbd2642cf458875bec207fba9428e489c1ed129ab756 +generated: "2024-12-23T05:12:57.284085638Z" From 197c1c7ba46d5b30e6a3f9cdea4c2bca4c0649a5 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Mon, 23 Dec 2024 05:31:26 +0000 Subject: [PATCH 023/224] openstack/elektra: run helm dep up --- openstack/elektra/Chart.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/openstack/elektra/Chart.lock b/openstack/elektra/Chart.lock index 4c451607379..9e60fe2d7cf 100644 --- a/openstack/elektra/Chart.lock +++ b/openstack/elektra/Chart.lock @@ -4,7 +4,7 @@ dependencies: version: 1.2.5 - name: pgbackup repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.7 + version: 1.1.8 - name: pgmetrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.2 @@ -14,5 +14,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:1989774bfd2db1ad8e1ee5f2b940199f2a5cce6d0d1129555bf77a7f5de77264 -generated: "2024-12-02T05:15:34.604582609Z" +digest: sha256:586f0ef648aac2129d4d6e46956c559f1f2df682b022d7a390c9d22180cec9ed +generated: "2024-12-23T05:31:24.773396672Z" From 848addbb26a6ebc69578c7c9421fc947726545c5 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 23 Dec 2024 09:00:46 +0100 Subject: [PATCH 024/224] [opensearch-logs] mv dashboard/alias script to cron --- .../config/_install-dashboard-pattern.sh.tpl | 9 ++------- ... 
=> cron-install-dashboard-pattern-job.yaml} | 17 ++++-------------
 2 files changed, 6 insertions(+), 20 deletions(-)
 rename system/opensearch-logs/templates/{install-dashboard-pattern-job.yaml => cron-install-dashboard-pattern-job.yaml} (75%)

diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl
index 899538c3026..228e8ba5e34 100644
--- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl
+++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl
@@ -7,13 +7,8 @@ for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk
   #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries
   echo "using index $i from Opensearch-Logs"
   export ALIAS_EXISTS=`curl -s -i -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases/${i}"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'`
-  if [[ "$ALIAS_EXISTS" -gt 0 ]]
-  then
-    echo "Alias and dashboard index pattern for index ${i} already exists. Nothing to do."
-  else
-    echo "setting OpenSearch dashboard index mapping for index $i"
-    curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }"
-  fi
+  echo "Creating or updating alias $i, because alias setting is only valid for indexes, which were created before the alias creation timestamp"
+  curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }"
   echo "Deleting old index pattern based on index-* format"
   export DASHBOARD_PATTERN=`curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'`
   if [[ "$DASHBOARD_PATTERN" -gt 0 ]]
diff --git a/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml
similarity index 75%
rename from system/opensearch-logs/templates/install-dashboard-pattern-job.yaml
rename to system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml
index 75a777aac35..beadbfdedcf 100644
--- a/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml
+++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml
@@ -1,5 +1,5 @@
 apiVersion: batch/v1
-kind: Job
+kind: CronJob
 metadata:
   name: "install-dashboard-pattern"
   labels:
@@ -9,20 +9,11 @@ metadata:
     helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
     ccloud/service: logs
     ccloud/support-group: observability
-  annotations:
-    # This is what defines this resource as a hook. Without this line, the
-    # job is considered part of the release.
- "helm.sh/hook": post-install,post-upgrade - "helm.sh/hook-weight": "-5" spec: - template: - metadata: - name: "install-dashboard-pattern" - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + schedule: "30 0,6,12,18 * * *" + jobTemplate: spec: + backoffLimit: 3 restartPolicy: Never containers: - name: install-dashboard-pattern From 93eac60fc66af90c51297bbc36e6e5c706cc3f9d Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 23 Dec 2024 09:07:13 +0100 Subject: [PATCH 025/224] [opensearch-logs] fix formatting --- .../cron-install-dashboard-pattern-job.yaml | 61 ++++++++++--------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml index beadbfdedcf..2ba4e9f41b2 100644 --- a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -9,37 +9,40 @@ metadata: helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" ccloud/service: logs ccloud/support-group: observability +spec.jobTemplate.spec.template.spec.restartPolicy spec: schedule: "30 0,6,12,18 * * *" jobTemplate: spec: backoffLimit: 3 - restartPolicy: Never - containers: - - name: install-dashboard-pattern - image: "{{ .Values.global.registry }}/unified-kubernetes-toolbox:latest" - command: ["/bin/bash", "/scripts/install-dashboard-pattern.sh"] - env: - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: cron-secrets - key: ADMIN_USER - - name: ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: cron-secrets - key: ADMIN_PASSWORD - - name: CLUSTER_HOST - value: "https://opensearch-logs-client.{{ .Values.global.clusterType }}.{{ .Values.global.region }}.{{ .Values.global.tld }}:{{ .Values.httpPort }}" - - name: DASHBOARD_HOST - value: "https://logs.{{ .Values.global.region }}.{{ .Values.global.tld }}" - volumeMounts: - - mountPath: /scripts/install-dashboard-pattern.sh - name: security-config - subPath: install-dashboard-pattern.sh - volumes: - - name: security-config - secret: - defaultMode: 420 - secretName: security-config + template: + spec: + restartPolicy: Never + containers: + - name: install-dashboard-pattern + image: "{{ .Values.global.registry }}/unified-kubernetes-toolbox:latest" + command: ["/bin/bash", "/scripts/install-dashboard-pattern.sh"] + env: + - name: ADMIN_USER + valueFrom: + secretKeyRef: + name: cron-secrets + key: ADMIN_USER + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: cron-secrets + key: ADMIN_PASSWORD + - name: CLUSTER_HOST + value: "https://opensearch-logs-client.{{ .Values.global.clusterType }}.{{ .Values.global.region }}.{{ .Values.global.tld }}:{{ .Values.httpPort }}" + - name: DASHBOARD_HOST + value: "https://logs.{{ .Values.global.region }}.{{ .Values.global.tld }}" + volumeMounts: + - mountPath: /scripts/install-dashboard-pattern.sh + name: security-config + subPath: install-dashboard-pattern.sh + volumes: + - name: security-config + secret: + defaultMode: 420 + secretName: security-config From b74b958df77db51826dba7c3835fa5a06d7268ac Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 23 Dec 2024 09:09:39 +0100 Subject: [PATCH 026/224] [opensearch-logs] fix formatting --- .../templates/cron-install-dashboard-pattern-job.yaml | 1 - 1 file changed, 1 deletion(-) diff --git 
a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml index 2ba4e9f41b2..f52e372c4f4 100644 --- a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -9,7 +9,6 @@ metadata: helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" ccloud/service: logs ccloud/support-group: observability -spec.jobTemplate.spec.template.spec.restartPolicy spec: schedule: "30 0,6,12,18 * * *" jobTemplate: From 73d23d1bb87aee3caa105a0a69fac42631ee04a5 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 23 Dec 2024 09:24:21 +0100 Subject: [PATCH 027/224] [opensearch-logs] testing cron --- .../templates/cron-install-dashboard-pattern-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml index f52e372c4f4..c6db9ec4294 100644 --- a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -10,7 +10,7 @@ metadata: ccloud/service: logs ccloud/support-group: observability spec: - schedule: "30 0,6,12,18 * * *" + schedule: "30 0,6,8,12,18 * * *" jobTemplate: spec: backoffLimit: 3 From 44d1c1e1f38f660c93976b52a15951d9a593581f Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 23 Dec 2024 09:42:49 +0100 Subject: [PATCH 028/224] [opensearch-logs] fix alias creation for indexes with more than one dash --- .../templates/config/_install-dashboard-pattern.sh.tpl | 2 +- .../templates/cron-install-dashboard-pattern-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 228e8ba5e34..103d3d681f6 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -2,7 +2,7 @@ export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} # Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes. 
-for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|sed 's/-[0-9].*\.[0-9].*\.[0-9].*$//'|uniq|grep -v index) +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|sed 's/-[0-9].*\.[0-9].*\.[0-9].*$//'|uniq|grep -v index|grep -v "alerts-other"|grep -v deployments|grep -v maillog|grep -v ss4o|grep -v sample) do #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries echo "using index $i from Opensearch-Logs" diff --git a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml index c6db9ec4294..493ea446bde 100644 --- a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -10,7 +10,7 @@ metadata: ccloud/service: logs ccloud/support-group: observability spec: - schedule: "30 0,6,8,12,18 * * *" + schedule: "30,50 0,6,8,12,18 * * *" jobTemplate: spec: backoffLimit: 3 From 43f28155c1e8d6f1702667f0e8e01dc4ed24d72d Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 23 Dec 2024 10:45:23 +0100 Subject: [PATCH 029/224] [opensearch-logs] remove test time from cron --- .../templates/cron-install-dashboard-pattern-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml index 493ea446bde..c6db9ec4294 100644 --- a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -10,7 +10,7 @@ metadata: ccloud/service: logs ccloud/support-group: observability spec: - schedule: "30,50 0,6,8,12,18 * * *" + schedule: "30 0,6,8,12,18 * * *" jobTemplate: spec: backoffLimit: 3 From 47933f77be2e72e54cd1cf6ddc2e3875841aeb11 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Wed, 18 Dec 2024 15:48:58 +0100 Subject: [PATCH 030/224] [opensearch-logs] adding audit user for audit logs, job for alias and dashboard patterns --- .../config/_install-dashboard-pattern.sh.tpl | 47 ++++++++++++---- .../config/_install-index-pattern.sh.tpl | 16 ------ .../templates/config/_internal_users.yml.tpl | 6 ++ .../templates/config/_roles.yml.tpl | 20 +++++++ .../templates/config/_roles_mapping.yml.tpl | 5 ++ .../cron-install-dashboard-pattern-job.yaml | 47 ++++++++++++++++ .../install-dashboard-pattern-job.yaml | 55 ------------------- 7 files changed, 113 insertions(+), 83 deletions(-) delete mode 100644 system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl create mode 100644 system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml delete mode 100644 system/opensearch-logs/templates/install-dashboard-pattern-job.yaml diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 350074ecb1c..103d3d681f6 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -1,29 +1,52 @@ #!/bin/bash +export BASIC_AUTH_HEADER=${ADMIN_USER}:${ADMIN_PASSWORD} -# 
0. Check for index policy
-for i in $(curl -s -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index")
+# Creating aliases for all indexes, because logstash-* is also selecting datastreams besides the logstash-2024... indexes.
+for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|grep -v "^\."|sort|sed 's/-[0-9].*\.[0-9].*\.[0-9].*$//'|uniq|grep -v index|grep -v "alerts-other"|grep -v deployments|grep -v maillog|grep -v ss4o|grep -v sample)
   do
+  #Creating an alias for all standard indexes, which are not datastreams to mitigate the issue with indexes, where for example storage-* is selecting the index and also the datastream, which shows up in dashboards as duplicate entries
   echo "using index $i from Opensearch-Logs"
-  echo "setting OpenSearch dashboard index mapping for index $i"
-  curl --header "content-type: application/JSON" --fail -XGET -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"
-  if [ $? -eq 0 ]; then
-    echo "index ${i} already exists in Opensearch dashboard"
+  export ALIAS_EXISTS=`curl -s -i -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases/${i}"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'`
+  echo "Creating or updating alias $i, because alias setting is only valid for indexes, which were created before the alias creation timestamp"
+  curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_aliases" -H "osd-xsrf: true" -d "{ \"actions\": [ { \"add\": { \"index\": \"${i}-2*\", \"alias\": \"${i}\" } } ] }"
+  echo "Deleting old index pattern based on index-* format"
+  export DASHBOARD_PATTERN=`curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"|grep "content-length"|awk -F: '{ print $2 }'|tr -d '[:space:]'`
+  if [[ "$DASHBOARD_PATTERN" -gt 0 ]]
+  then
+    echo "Old dashboard pattern exists for index ${i}, it will be removed"
+    curl -s -XDELETE -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*"
   else
-    echo "INFO: creating index-pattern in Dashboards for $i logs"
-    curl -XPOST --header "content-type: application/JSON" -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}-*\", \"timeFieldName\": \"@timestamp\" } }"
+    echo "No old dashboard pattern for index $i"
   fi
 done
 
+# Dashboard index pattern for all available aliases, which are not datastreams
+for i in $(curl -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep -v "\-ds"|grep -v "^\."|awk '{ print $1 }'|uniq)
+  do
+  echo "using alias $i from Opensearch-Logs"
+  echo "Setting OpenSearch dashboard index mapping for alias $i"
+  curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}"
+  if [ $?
-eq 0 ] + then + echo "index pattern for alias ${i} already exists in Opensearch dashboard, nothing to do" else - echo "INFO: creating index-pattern in Dashboards for $i logs" - curl -XPOST --header "content-type: application/JSON" -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}-*\", \"timeFieldName\": \"@timestamp\" } }" + echo "INFO: creating index-pattern in Dashboards for datastream alias $i" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" fi done + # Dashboard index pattern for all available datastreams -for i in $(curl -s -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|uniq) +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|uniq) do echo "using datastream $i from Opensearch-Logs" echo "setting OpenSearch dashboard index mapping for index $i" - curl --header "content-type: application/JSON" --fail -XGET -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" - if [ $? -eq 0 ]; then + curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" + if [ $? -eq 0 ] + then echo "index ${i} already exists in Opensearch dashboard" else echo "INFO: creating index-pattern in Dashboards for datastream alias $i" - curl -XPOST --header "content-type: application/JSON" -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" + curl -s -XPOST --header "content-type: application/JSON" -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}\", \"timeFieldName\": \"@timestamp\" } }" fi done diff --git a/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl deleted file mode 100644 index a5e199bb084..00000000000 --- a/system/opensearch-logs/templates/config/_install-index-pattern.sh.tpl +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# 0. Check for index policy -for i in $(curl -s -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${CLUSTER_HOST}/_cat/indices?v"|awk '{ print $3 }'|awk -F- '{ print $1 }'|sort|uniq|grep -v "\."|grep -v "index") - do - echo "using index $i from Opensearch-Logs" - echo "setting OpenSearch dashboard index mapping for index $i" - curl --header "content-type: application/JSON" --fail -XGET -u ${ADMIN_USER}:${ADMIN_PASSWORD} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" - if [ $? 
-eq 0 ] - then - echo "index ${i} already exists in Opensearch dashboard" - else - echo "INFO: creating index-pattern in Dashboards for $i logs" - curl -XPOST --header "content-type: application/JSON" -u ${ADMIN_USER}:${ADMIN_PASSWORD} "https://${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}-*" -H "osd-xsrf: true" -d "{ \"attributes\": { \"title\": \"${i}-*\", \"timeFieldName\": \"@timestamp\" } }" - fi -done diff --git a/system/opensearch-logs/templates/config/_internal_users.yml.tpl b/system/opensearch-logs/templates/config/_internal_users.yml.tpl index 501f7da3d8d..08ce9b9af41 100644 --- a/system/opensearch-logs/templates/config/_internal_users.yml.tpl +++ b/system/opensearch-logs/templates/config/_internal_users.yml.tpl @@ -38,6 +38,12 @@ otel: backend_roles: - "otel" +audit: + hash: "{{ .Values.users.audit.nohash }}" + reserved: true + backend_roles: + - "audit" + otellogs: hash: "{{ .Values.users.otellogs.nohash }}" reserved: true diff --git a/system/opensearch-logs/templates/config/_roles.yml.tpl b/system/opensearch-logs/templates/config/_roles.yml.tpl index b94137542a1..0c8d6688199 100644 --- a/system/opensearch-logs/templates/config/_roles.yml.tpl +++ b/system/opensearch-logs/templates/config/_roles.yml.tpl @@ -249,6 +249,26 @@ compute: - "indices:admin/create" - "indices:data/write/bulk*" - "indices:data/write/index" +audit: + reserved: false + cluster_permissions: + - "cluster_monitor" + - "cluster_composite_ops" + - "cluster:admin/ingest/pipeline/put" + - "cluster:admin/ingest/pipeline/get" + - "indices:admin/template/get" + - "cluster_manage_index_templates" + - "cluster:admin/opensearch/ml/predict" + index_permissions: + - index_patterns: + - "audit-*" + allowed_actions: + - "indices:admin/template/get" + - "indices:admin/template/put" + - "indices:admin/mapping/put" + - "indices:admin/create" + - "indices:data/write/bulk*" + - "indices:data/write/index" otel: reserved: false cluster_permissions: diff --git a/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl b/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl index f7177dd9b0a..4e25c9be659 100644 --- a/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl +++ b/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl @@ -32,6 +32,11 @@ greenhouse: users: - "greenhouse" +audit: + reserved: false + users: + - "audit" + jump: reserved: false users: diff --git a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml new file mode 100644 index 00000000000..c6db9ec4294 --- /dev/null +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -0,0 +1,47 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: "install-dashboard-pattern" + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + ccloud/service: logs + ccloud/support-group: observability +spec: + schedule: "30 0,6,8,12,18 * * *" + jobTemplate: + spec: + backoffLimit: 3 + template: + spec: + restartPolicy: Never + containers: + - name: install-dashboard-pattern + image: "{{ .Values.global.registry }}/unified-kubernetes-toolbox:latest" + command: ["/bin/bash", "/scripts/install-dashboard-pattern.sh"] + env: + - name: ADMIN_USER + valueFrom: + secretKeyRef: + name: cron-secrets + key: ADMIN_USER + - name: ADMIN_PASSWORD + 
valueFrom: + secretKeyRef: + name: cron-secrets + key: ADMIN_PASSWORD + - name: CLUSTER_HOST + value: "https://opensearch-logs-client.{{ .Values.global.clusterType }}.{{ .Values.global.region }}.{{ .Values.global.tld }}:{{ .Values.httpPort }}" + - name: DASHBOARD_HOST + value: "https://logs.{{ .Values.global.region }}.{{ .Values.global.tld }}" + volumeMounts: + - mountPath: /scripts/install-dashboard-pattern.sh + name: security-config + subPath: install-dashboard-pattern.sh + volumes: + - name: security-config + secret: + defaultMode: 420 + secretName: security-config diff --git a/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml deleted file mode 100644 index 3836eb76199..00000000000 --- a/system/opensearch-logs/templates/install-dashboard-pattern-job.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: "install-dashboard-pattern" - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - app.kubernetes.io/version: {{ .Chart.AppVersion }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - ccloud/service: logs - ccloud/support-group: observability - annotations: - # This is what defines this resource as a hook. Without this line, the - # job is considered part of the release. - "helm.sh/hook": post-install,post-upgrade - "helm.sh/hook-weight": "-5" - "helm.sh/hook-delete-policy": hook-succeeded -spec: - template: - metadata: - name: "install-dashboard-pattern" - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - spec: - restartPolicy: Never - containers: - - name: install-dashboard-pattern - image: "{{ .Values.global.registry }}/unified-kubernetes-toolbox:latest" - command: ["/bin/bash", "/scripts/install-dashboard-pattern.sh"] - env: - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: cron-secrets - key: ADMIN_USER - - name: ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: cron-secrets - key: ADMIN_PASSWORD - - name: CLUSTER_HOST - value: "https://opensearch-logs-client.{{ .Values.global.clusterType }}.{{ .Values.global.region }}.{{ .Values.global.tld }}:{{ .Values.httpPort }}" - - name: DASHBOARD_HOST - value: "https://logs.{{ .Values.global.region }}.{{ .Values.global.tld }}" - volumeMounts: - - mountPath: /scripts/install-dashboard-pattern.sh - name: security-config - subPath: install-dashboard-pattern.sh - volumes: - - name: security-config - secret: - defaultMode: 420 - secretName: security-config From d3644bb762d49a45493c245dee6d8622ca122754 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Fri, 20 Dec 2024 17:52:05 +0200 Subject: [PATCH 031/224] [pxc-db] Add prometheus scrape annotations to backup jobs and deployments Add prometheus scrape labels to backup jobs and deployments This will enable metrics scraping for binlog-collector deployments --- common/pxc-db/Chart.yaml | 2 +- common/pxc-db/templates/cluster.yaml | 2 +- common/pxc-db/values.yaml | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index b76a55058ac..f8722e23ed8 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,7 +16,7 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.0 +version: 0.2.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/common/pxc-db/templates/cluster.yaml b/common/pxc-db/templates/cluster.yaml index 4505c3e6cde..6c2d8e3fd4f 100644 --- a/common/pxc-db/templates/cluster.yaml +++ b/common/pxc-db/templates/cluster.yaml @@ -274,7 +274,7 @@ spec: s3-backups-{{ $storage }}: type: s3 annotations: -{{ merge (include "pxc-db.linkerdPodAnnotations" $ | fromYaml) ($backup.annotations) | toYaml | indent 10 }} +{{ merge (include "pxc-db.linkerdPodAnnotations" $ | fromYaml) (include "pxc-db.metricsAnnotations" $ | fromYaml) ($backup.annotations) | toYaml | indent 10 }} labels: {{ merge (include "pxc-db.backupLabels" $ | fromYaml) ($backup.labels) | toYaml | indent 10 }} resources: diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml index 0942c5cc6f2..bc01511a4fe 100644 --- a/common/pxc-db/values.yaml +++ b/common/pxc-db/values.yaml @@ -345,7 +345,6 @@ backup: limits: {} pitr: enabled: false - annotations: {} storageName: s3-backups-binlogs timeBetweenUploads: 300 timeoutSeconds: 60 From aa1994f966ff958c917ddfeab4a61e94e405798f Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Mon, 23 Dec 2024 15:58:15 +0200 Subject: [PATCH 032/224] [pxc-db] Add kube-state-metrics-based alerts for PXC cluster Add kube-state-metrics-based alerts: * GaleraClusterResourceNotReady * GaleraClusterBackupNotSucceeded * GaleraClusterBackupMissing --- common/pxc-db/Chart.yaml | 2 +- common/pxc-db/templates/alerts.yaml | 3 ++ .../templates/alerts/_backup.alerts.tpl | 29 +++++++++++++++++++ .../pxc-db/templates/alerts/_pxc.alerts.tpl | 16 ++++++++++ 4 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 common/pxc-db/templates/alerts/_backup.alerts.tpl diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index f8722e23ed8..f3737cfb86f 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,7 +16,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.1 +version: 0.2.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/common/pxc-db/templates/alerts.yaml b/common/pxc-db/templates/alerts.yaml index dec13d38943..a9f532d233e 100644 --- a/common/pxc-db/templates/alerts.yaml +++ b/common/pxc-db/templates/alerts.yaml @@ -12,4 +12,7 @@ metadata: spec: groups: {{ include (print .Template.BasePath "/alerts/_pxc.alerts.tpl") . | indent 2 }} +{{- if .Values.backup.enabled }} +{{ include (print .Template.BasePath "/alerts/_backup.alerts.tpl") . | indent 2 }} +{{- end }} {{- end }} diff --git a/common/pxc-db/templates/alerts/_backup.alerts.tpl b/common/pxc-db/templates/alerts/_backup.alerts.tpl new file mode 100644 index 00000000000..a122eabbf03 --- /dev/null +++ b/common/pxc-db/templates/alerts/_backup.alerts.tpl @@ -0,0 +1,29 @@ +- name: pxc-backup.alerts + rules: + - alert: {{ include "pxc-db.alerts.service" . 
| camelcase }}GaleraClusterBackupNotSucceeded
+    expr: (kube_customresource_perconaxtradbclusterbackup_status{app_kubernetes_io_instance="{{ include "pxc-db.fullname" . }}",state="Succeeded"} != 1)
+    for: 10m
+    labels:
+      context: database
+      service: {{ include "pxc-db.alerts.service" . }}
+      severity: info
+      tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
+      playbook: ''
+      support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
+    annotations:
+      description: "{{ include "pxc-db.fullname" . }} cluster backup did not succeed."
+      summary: "{{ include "pxc-db.fullname" . }} cluster backup did not succeed."
+
+  - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBackupMissing
+    expr: (time() - max by (app_kubernetes_io_instance) (kube_customresource_perconaxtradbclusterbackup_completed{app_kubernetes_io_instance="{{ include "pxc-db.fullname" . }}") > 129600)
+    for: 30m
+    labels:
+      context: database
+      service: {{ include "pxc-db.alerts.service" . }}
+      severity: info
+      tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
+      playbook: ''
+      support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
+    annotations:
+      description: "{{ include "pxc-db.fullname" . }} cluster has not completed a new backup in the last 36 hours."
+      summary: "{{ include "pxc-db.fullname" . }} cluster has not completed a new backup in the last 36 hours."
diff --git a/common/pxc-db/templates/alerts/_pxc.alerts.tpl b/common/pxc-db/templates/alerts/_pxc.alerts.tpl
index 9cba2602c88..996fb6f542b 100644
--- a/common/pxc-db/templates/alerts/_pxc.alerts.tpl
+++ b/common/pxc-db/templates/alerts/_pxc.alerts.tpl
@@ -8,6 +8,7 @@
       service: {{ include "pxc-db.alerts.service" . }}
       severity: info
       tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
+      playbook: ''
       support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
     annotations:
       description: {{ include "pxc-db.fullname" . }} has too many connections open. Please check the service containers.
@@ -35,6 +36,7 @@
       service: {{ include "pxc-db.alerts.service" . }}
       severity: info
       tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
+      playbook: ''
       support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
     annotations:
       description: {{ include "pxc-db.fullname" . }} has queries waiting for lock more than 20 sec. Deadlock possible.
@@ -137,3 +139,17 @@
       annotations:
         description: "{{ include "pxc-db.fullname" . }} Galera cluster reports at least 1 node with 25% paused replication in the last 30 minutes"
         summary: "{{ include "pxc-db.fullname" . }} Galera cluster node replication paused"
+
+  - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterResourceNotReady
+    expr: (kube_customresource_perconaxtradbcluster_status{app="{{ include "pxc-db.fullname" . }}",state='ready'} != 1)
+    for: 10m
+    labels:
+      context: database
+      service: {{ include "pxc-db.alerts.service" . }}
+      severity: info
+      tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
+      playbook: ''
+      support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
+    annotations:
+      description: "{{ include "pxc-db.fullname" . }} cluster resource is not in ready state."
+      summary: "{{ include "pxc-db.fullname" . }} cluster resource is not in ready state."
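
The kube_customresource_* series these new rules query are not built-in kube-state-metrics metrics; they are produced by its custom-resource-state feature. As a rough illustration of where such series come from, below is a minimal sketch of a CustomResourceStateMetrics config that would emit similarly shaped metrics — the paths, state lists, and label mappings here are illustrative assumptions, not the chart's actual configuration:

    kind: CustomResourceStateMetrics
    spec:
      resources:
        - groupVersionKind:
            group: pxc.percona.com
            version: v1
            kind: PerconaXtraDBCluster
          labelsFromPath:
            app: [metadata, name]   # assumption: expose the CR name as the "app" label used above
          metrics:
            - name: perconaxtradbcluster_status
              help: State of the PerconaXtraDBCluster resource
              each:
                type: StateSet
                stateSet:
                  labelName: state
                  path: [status, state]
                  list: [ready, initializing, paused, error]   # illustrative state list
        - groupVersionKind:
            group: pxc.percona.com
            version: v1
            kind: PerconaXtraDBClusterBackup
          labelsFromPath:
            app_kubernetes_io_instance: [metadata, labels, app.kubernetes.io/instance]
          metrics:
            - name: perconaxtradbclusterbackup_status
              help: State of the PerconaXtraDBClusterBackup resource
              each:
                type: StateSet
                stateSet:
                  labelName: state
                  path: [status, state]
                  list: [Starting, Running, Succeeded, Failed]   # illustrative state list
            - name: perconaxtradbclusterbackup_completed
              help: Backup completion time as a Unix timestamp
              each:
                type: Gauge
                gauge:
                  path: [status, completed]   # RFC3339 timestamps are converted to epoch seconds

With kube-state-metrics' default kube_customresource metric-name prefix, a config of this shape yields the kube_customresource_perconaxtradbcluster_status and kube_customresource_perconaxtradbclusterbackup_* series selected above, and the GaleraClusterBackupMissing threshold of 129600 seconds then corresponds to the 36 hours named in its annotations.
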
From 7c02f036f74f64f041bc3015c21e753aac70b723 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Mon, 23 Dec 2024 16:06:41 +0200 Subject: [PATCH 033/224] [designate] bump pxc-db chart Update pxc-db chart from 0.2.0 to 0.2.2 Adds binlog-collector metrics and custom resource alerts --- openstack/designate/Chart.lock | 6 +++--- openstack/designate/Chart.yaml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openstack/designate/Chart.lock b/openstack/designate/Chart.lock index 5a0bf68f922..e8f7cf78469 100644 --- a/openstack/designate/Chart.lock +++ b/openstack/designate/Chart.lock @@ -4,7 +4,7 @@ dependencies: version: 1.1.9 - name: pxc-db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.2.0 + version: 0.2.2 - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.15.2 @@ -29,5 +29,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:07e3fe91666e0333a6d4cf70f2badd53461a4b0737de76156e87e1c854e7e396 -generated: "2024-12-20T14:18:27.9057+02:00" +digest: sha256:943eef603bbce1612e8c1abca86585326ca4a9d4ca35b46852d7629aaca10909 +generated: "2024-12-23T16:05:55.433486+02:00" diff --git a/openstack/designate/Chart.yaml b/openstack/designate/Chart.yaml index eea8777cee7..8be83c1a9d7 100644 --- a/openstack/designate/Chart.yaml +++ b/openstack/designate/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 description: A Helm chart for Kubernetes to deploy Openstack Designate (DNSaaS) name: designate -version: 0.4.6 +version: 0.4.7 appVersion: "xena" dependencies: - condition: percona_cluster.enabled @@ -13,7 +13,7 @@ dependencies: name: pxc-db alias: pxc_db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.2.0 + version: 0.2.2 - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From 7a2bef82f5220ba163e37da6a570722a2387936e Mon Sep 17 00:00:00 2001 From: Max Lendrich Date: Mon, 23 Dec 2024 16:51:07 +0100 Subject: [PATCH 034/224] [octobus-query-exporter] Try to avoid duplicate alerts after reboot --- .../vendor/octobus-query-exporter/alerts/scaleout/events.alerts | 1 + 1 file changed, 1 insertion(+) diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts index 4ef0fef8e77..9b1ef200e8f 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts @@ -22,6 +22,7 @@ vrops_hostsystem_runtime_connectionstate{state="connected"} and on(hostsystem) vrops_hostsystem_runtime_maintenancestate{state="notInMaintenance"} and on(hostsystem) elasticsearch_octobus_Mellanox_issue_hostsystem_doc_count + for: 3m labels: severity: critical service: compute From 0ae718e90c9b22abfa605d273bf848d6c938386f Mon Sep 17 00:00:00 2001 From: Max Lendrich Date: Mon, 23 Dec 2024 17:44:29 +0100 Subject: [PATCH 035/224] Use more specific log string as provided by vmware --- .../files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg 
b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg index 7429e58f547..b781428aea3 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg @@ -8,7 +8,7 @@ QueryJson = { "query": { "bool": { "must": [ - { "match_phrase": { "syslog_message": "command failed: IO was aborted" }}, + { "match_phrase": { "syslog_message": "Health: Miss counters detected" }}, { "match": { "syslog_message": "NMLX_ERR" }} ], "filter": [ From a1a1a9ae7ada89f0a93e39274ce77682c1d5b10d Mon Sep 17 00:00:00 2001 From: Maximilian Lendrich Date: Mon, 23 Dec 2024 17:54:58 +0100 Subject: [PATCH 036/224] Revert "[octobus-query-exporter] Try to avoid duplicate alerts after reboot" This reverts commit 7a2bef82f5220ba163e37da6a570722a2387936e. --- .../vendor/octobus-query-exporter/alerts/scaleout/events.alerts | 1 - 1 file changed, 1 deletion(-) diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts index 9b1ef200e8f..4ef0fef8e77 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/scaleout/events.alerts @@ -22,7 +22,6 @@ vrops_hostsystem_runtime_connectionstate{state="connected"} and on(hostsystem) vrops_hostsystem_runtime_maintenancestate{state="notInMaintenance"} and on(hostsystem) elasticsearch_octobus_Mellanox_issue_hostsystem_doc_count - for: 3m labels: severity: critical service: compute From a2e21c32f379beba7635f64126566bcabcb88290 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Mon, 23 Dec 2024 17:44:45 +0200 Subject: [PATCH 037/224] [pxc-db] Add binlog-collector alerts Add binlog-collector alerts based on the last successful event timestamp * GaleraClusterBinlogProcessingTooOld * GaleraClusterBinlogUploadTooOld --- common/pxc-db/Chart.yaml | 2 +- .../templates/alerts/_backup.alerts.tpl | 34 +++++++++++++++++-- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index f3737cfb86f..b4085a05e3b 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,7 +16,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.2 +version: 0.2.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/common/pxc-db/templates/alerts/_backup.alerts.tpl b/common/pxc-db/templates/alerts/_backup.alerts.tpl index a122eabbf03..d07702faf54 100644 --- a/common/pxc-db/templates/alerts/_backup.alerts.tpl +++ b/common/pxc-db/templates/alerts/_backup.alerts.tpl @@ -25,5 +25,35 @@ playbook: '' support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }} annotations: - description: "{{ include "pxc-db.fullname" . 
}} cluster has not completed a new backup in the last 36 hours."
-      summary: "{{ include "pxc-db.fullname" . }} cluster has not completed a new backup in the last 36 hours."
+      description: "{{ include "pxc-db.fullname" . }} cluster has not completed a new full backup in the last 36 hours."
+      summary: "{{ include "pxc-db.fullname" . }} cluster has not completed a new full backup in the last 36 hours."
+
+{{- if .Values.backup.pitr.enabled }}
+  - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBinlogProcessingTooOld
+    expr: (time() - pxc_binlog_collector_last_processing_timestamp > 1800)
+    for: 30m
+    labels:
+      context: database
+      service: {{ include "pxc-db.alerts.service" . }}
+      severity: info
+      tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
+      playbook: ''
+      support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
+    annotations:
+      description: "More than 30 minutes have passed since cluster {{ include "pxc-db.fullname" . }} last processed a binlog."
+      summary: "{{ include "pxc-db.fullname" . }} cluster binlog processing is too old."
+
+  - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBinlogUploadTooOld
+    expr: (time() - pxc_binlog_collector_last_upload_timestamp > 1800)
+    for: 30m
+    labels:
+      context: database
+      service: {{ include "pxc-db.alerts.service" . }}
+      severity: info
+      tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
+      playbook: ''
+      support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
+    annotations:
+      description: "More than 30 minutes have passed since cluster {{ include "pxc-db.fullname" . }} last uploaded a binlog."
+      summary: "{{ include "pxc-db.fullname" . }} cluster binlog upload is too old."
+{{- end }}

From 282a1cc25374a9f961c5ddec053c26d7c38c68cd Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Mon, 23 Dec 2024 18:34:35 +0200
Subject: [PATCH 038/224] [pxc-db] Fix typo in the backup alert rule

---
 common/pxc-db/Chart.yaml                          | 2 +-
 common/pxc-db/templates/alerts/_backup.alerts.tpl | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml
index b4085a05e3b..44f59b4d51c 100644
--- a/common/pxc-db/Chart.yaml
+++ b/common/pxc-db/Chart.yaml
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.3
+version: 0.2.4
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
diff --git a/common/pxc-db/templates/alerts/_backup.alerts.tpl b/common/pxc-db/templates/alerts/_backup.alerts.tpl
index d07702faf54..737091c34c6 100644
--- a/common/pxc-db/templates/alerts/_backup.alerts.tpl
+++ b/common/pxc-db/templates/alerts/_backup.alerts.tpl
@@ -15,7 +15,7 @@
       summary: "{{ include "pxc-db.fullname" . }} cluster backup did not succeed."

   - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBackupMissing
-    expr: (time() - max by (app_kubernetes_io_instance) (kube_customresource_perconaxtradbclusterbackup_completed{app_kubernetes_io_instance="{{ include "pxc-db.fullname" .
}}") > 129600) + expr: (time() - max by (app_kubernetes_io_instance) (kube_customresource_perconaxtradbclusterbackup_completed{app_kubernetes_io_instance="{{ include "pxc-db.fullname" . }}"}) > 129600) for: 30m labels: context: database From 0367da381339c3626e5e29b819c24db242780323 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Mon, 23 Dec 2024 21:49:57 +0200 Subject: [PATCH 039/224] [designate] bump pxc-db charts with alert fixes --- openstack/designate/Chart.lock | 6 +++--- openstack/designate/Chart.yaml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openstack/designate/Chart.lock b/openstack/designate/Chart.lock index e8f7cf78469..697c4ad5fed 100644 --- a/openstack/designate/Chart.lock +++ b/openstack/designate/Chart.lock @@ -4,7 +4,7 @@ dependencies: version: 1.1.9 - name: pxc-db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.2.2 + version: 0.2.4 - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.15.2 @@ -29,5 +29,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:943eef603bbce1612e8c1abca86585326ca4a9d4ca35b46852d7629aaca10909 -generated: "2024-12-23T16:05:55.433486+02:00" +digest: sha256:205b25f93f690e91f8e99353fa528101532ab5705436ed8c989a2f11ceea0e95 +generated: "2024-12-23T21:49:33.263766+02:00" diff --git a/openstack/designate/Chart.yaml b/openstack/designate/Chart.yaml index 8be83c1a9d7..24afd27be42 100644 --- a/openstack/designate/Chart.yaml +++ b/openstack/designate/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 description: A Helm chart for Kubernetes to deploy Openstack Designate (DNSaaS) name: designate -version: 0.4.7 +version: 0.4.8 appVersion: "xena" dependencies: - condition: percona_cluster.enabled @@ -13,7 +13,7 @@ dependencies: name: pxc-db alias: pxc_db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.2.2 + version: 0.2.4 - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From c8b4b0914275af2f5b7c084088ed0f453b1c8b24 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 24 Dec 2024 15:30:52 +0200 Subject: [PATCH 040/224] [cc-gardener] metal extension image values configurable --- global/cc-gardener/Chart.yaml | 2 +- .../managedresources/extension-metal.yaml | 16 +++++----------- global/cc-gardener/values.yaml | 13 +++++++++++++ 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index 1a6138f6c96..792a4e1e516 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-gardener description: Converged Cloud Gardener setup based on gardener-operator type: application -version: 0.6.0 +version: 0.6.1 appVersion: "v1.110.1" home: https://github.com/gardener/gardener dependencies: diff --git a/global/cc-gardener/managedresources/extension-metal.yaml b/global/cc-gardener/managedresources/extension-metal.yaml index 0da561cb63a..5098e2ac641 100644 --- a/global/cc-gardener/managedresources/extension-metal.yaml +++ b/global/cc-gardener/managedresources/extension-metal.yaml @@ -7,17 +7,11 @@ helm: rawChart: 
H4sIFAAAAAAA/ykAK2FIUjBjSE02THk5NWIzVjBkUzVpWlM5Nk9WVjZNV2xqYW5keVRRbz1IZWxtAOw9f3PbtpL9m58CQ7fTNFdSkiXLebzp3Lm2k3qaHxrbL72btueByJWEmgRYAFSiJvnuN/hBiqQoUXRSt3nPmMlEBLGLBbC7WOwu6DnmEVDgHryVQAVh1Es5W5IIuJeAxHHvdIG59Fc4ib+4Y+n3+/3xaKT/7/f79f/7h8PRF4PReHQ8Oj4a9Idf9AfHx8PRF6h/1w67lExIzL/of3Rf9cF9JgWn5DVwte4BWg4cnKbFozvw+64TgQg5SaWuOkE/QJygULEEmjGO5ALQM8tCSPMLmlj2QQVHORQnEKBWVnOWedd9f+D3nb96bv4dSrv8L3GcgfgYBdAi/8Pj4bgq/4f9w/6D/N9LIQmeQ+AgxCFlgkjGVwGaL0LuE9YjnNGQcfAiWPbaxRchiecBirEEIR2E0iyOJywm4SpAF7OXTE44CKDScTikMQnxKcuoDNDASfDbf1K8xCTG0xhszVXG5xCgo/5XCkCwjIcgAvTug7NMsSIZqGodBUjyDPQQTCPbp4MQQgmhJ3HM3kBknhEK00whTexjAoke83j0gjgIZWmEZQWDqXnBIgiQe5JJ5jpOApKTUEwYlwF60n/SdxaAY7koKgaOEzIqOYtj4EIhso9pjCkYxCGjYcY5UHm1oqEI0JGDkMETLiC83d2KyylgadpwoPDmgkrgSxxfQchoJAI07DsIETrjWEiehTLjuzp+w/gt8B0NyJwyDq9S4FhtBieUMonNtjDDsQDHiYhQC3JaGjj6+de8+ieYLhi7NXUG2eV6VS2KA/RqCfwNJxJQyJKUUaASaSYViMwQBYggQuDPfUQoUnypmI3QOQK6VPyaAJXCOdAgryGUjBcYA/TeOUAWW6B+IuQhszmFMcsib71mXoIpngM3rRAyhF6WhYTIRTb1Q5ZU5cRgqomGxdIqZLuAtXC5YoG9J0/+cXw0mrnVISQ4XBAKDYPYgnD/MXVG3TrQzhjXox+OB/gwgtroQ0G8iJPlXZZMd+RtYmgfxRZAQ+xSmTFD13HeGNY/ZXRG5krEBPAlUYpKqYvRaGirgJuaQf/wqO84BwdIgFTMLbSxZbXOt4b9Rchxqhh/ukICQHNNAnIBmXAODnIVpXo7OLCqMsdQwOqXhSa9srVWoSoNZukNYwJUnjJKIdQir9UEDkNIVa0EKq9XqZJjnGrNrhr1fhOM5vokb7Klxe+pUMPu64dpxoWaBa3AQIZWdQvJuN2sNNIYC/GyYln6RnxmWG8/uhFOcUjkKkCHR8+IrdQMpjYw4AG6zabAKUgQanXnIXhpZNstWZwl8D2hEaFzswH8hIl8yvhTwoUauMgS4A5CM8BKvT5TO5/eoRQfnBm9l5vHV2bNT8JQ7XqnHCrKMx+CGl1hBruug+zgYrC6fqMrpFffvMxFJzC2+B4WdLv9JyFJ9Zbeu1lAnAIXvkw7mYIt9t9gsGH/HQ6Hgwf77z7Ku3ceimBGKCBXKVIXeR8+OO22noIDGunWThlJjKcQCx+nqX8LK4NOP1TkTHVVwbEFhT58uLYFIjSMs6ig1EcWcAchm7B1AhWWAG1pYfvXPW2OglAhMQ1Bg/uXEAMW4Cud1EhZQZq2QAxlCKk3ZIYWWEw4zMhbvckdHo0DF/mvzdlLt/clnqMCIuWEyhlyvxL//ZWot1zvW7tQQCygCWFwZ4Q0Ko/7w4eHE/zfvnTR/8YeSHDq6cVfauvaY7l5vdVF0Ob/G42HNf1/PBgfP+j/+yhW+1SkunZsMrqv4ia8JTQKkLFnX+BUGZs4wlKfyo053qytmxnHAokUN6lSXW2UjFHMQYM6V+jfI0IjdVwcqdY5Oea0d1PlUnUSVP3sGnUV3b+qUrub/MdsPid0vqdTsM3/Nz6s+f8PR4fj8YP830f5VIJtOcIzPFKVaMNjJflVR5WC34RfPb5ZNsu4PR9ZzIVAz0gsgXslS0wBKIlWaH9+evH8+vzyV3uEQ0hbY7WSYi70uc2UF1iGi1qTEvrH23xDj+3xf9vrooMfYXVTpyNm8+L9RNOzSQBOiXULVAm+BF19c4Ylzuuu9ZH9z5qC7e4aOwk7GvybTMO+Dq1/pflYe75K435ceMX+1kNNWERmq72HWjQ8ZTQiSjWVGqrh6aPiTaLQgEDqYPR/a/hf/CXmv/gxm/+i1JXEhAIXv/iPtikPz/+P/7pR/3brmPeNK1AHFuSbgvyTKKrPBLq5yVXwTZLFkkigmMobEt3cIKYd/oz/Z6bm/zOb3h1CmU/Rozb99X5vyf5Uk3yf+/+d7L+OweAW++9wOD6q+//Go+GD/Xcf5VPZfwVv/KmHOdNLcYRTYuV5nglGlAaiGdfP+dgvmZoWQc3iXA5wnC7wQCMqpsAK+2nZHHVq52WLrx4fMSPcFjdpjpy8e4c4/J4RDhFyW/D7mwgQEQW820ZfE7wluRat6URVCbIbOWXAgg4dE+rU/+9px2lQAEV/NuzUqUcN061PA1J1Kaju14GujVBXJdhV8lvYLhSkb0H8ouV6YOUwWBu0bZjTt8nquvkUh7dZahohyf4XJ/GudoUwj2vjbu6iHOLKR1EJe23vtQK6tdu/Wun+jUqX/T+CNGarBGjHdNDd+/9gcHxY3/+H/XH/Yf+/j1LeNnGail5hBJwVq723FbDn3q9jYWWnczUZyCDCRX6RyJOc7Pv67m3AA+RKnoFbVatdjAyRQmgy4Ux2wA9EacXVc5IQGaC+yZGLSYhFRY2Wc9lMpwJi7Uu2GWjqyPJ8PzrGFoHkWMLcJqApM4TQ+T91Hlq+JdQz5koEVV+tt4F1Sl21sa40zXJZt5SXll1bLfUlUWXPAEKxEy0gvBVZUnImmwNYY2SgwmiPdIgSfelfWyr977GACZYL5O4Vm3K/0XNtwqsiS8p01TbjLaTuNH7vQOxeZJEZwjRar5jJ4fGrOTtloHUikE9YT+f6rMWjuZGRarchjL2GOEARzHAWyyKNCGiUMjVMQlHpdMwzKkkCW7pKdYqTW+bBdTZlpcONNSn7j1WhIN8wfkvohkUvWU6FV7iQAoRNJuh+8BEVHSHSbBqT0LNtOkNzssQS9gPfqgwl83Ace2LBmBTaSb6eAE+GqTcaDdeYW9TRk3zqc9VoVpMwTuTqdDP/yhMrISHx/mETuYpEN5v09HL33pEfPoz3Ju/Pa9tyTDGJzJVWNs1is9mklJhcDwP666zlijJgSYJptOY9D+2VEZ039qyS8GYkhu96IMNe84CscuiVjrp1NEUesZfgt946WdfjoB5IDOK7qp2f5+P6ZWi/luZbHq7qqZSK3L2jEvAe/ZhkZq8wHr7b4TdoAtQJ0B6xGdCeMCnQO2gzcH5T4nS9l2oGdfeZqMK3TYYxZjyWp1l76013awfbErPruE2Kd/cRGLg2ym2Sa87nHdeyBm31hqc2izJZlUxav5RGuwc+4HugMym4dWw2hb3kAq5M1mbiO3qPfmOEIvdbdxsu23cTojxVfguWmslV3T4rfdlX3pT
QyMNRxEGI74KtW+8ua2itEbZjW1+CqEMWqtLmtpYHXexg9l0ZFuiyrHLNTvD8/OTs/PLm/Pn56fXFq5c3L09enF9NTk7Pi5YI6XDBU86SoFSJ0IxAHF3CrFpr65V1FhRWr19w8F1t3Zzeixcnz85fn59ev7q8efX6/PKny4vrDVoD1NPX6kqpKr3G3JVdixSTJVAQYsLZFMpjXEiZPstTiPOS6vH2zKr9UX2ljbTWpVVFhAtQo/zh+npSekEokQTHZxDjVXElZdAvWnDAEelMq4Ja3QupR065A7HJhbmGMVqjhK4wYSZ1yvZRN9pclixkcYCuTyd1FxmvnpLzuczv0TR4xtYQ7xG1ht2g3+CAREXK+wtlrDUMuUjuyEuiGhqxaTdpPlaMtuWONRGzIUqldoqHXtF4VdwbW1NVmQ08mynOWAUlTohOqCQnGy9Q4f89yzih86twAVGmTu0XenO21edvIczKcQAzPG15X1WcBqVRYRkuzt+mStlWT945+C2stuYvFxnONShUhBwDdEE3Xpo7n/WuVGd75EmXASRLWczmqx8VjW41e3rBhNRMYiEM723Y/TXmCfPYVJm6vUNTebEnWXOpY3SYq6VOnLkfX3ant43Pd9De3b/dxf+bsigigmf6Mvg0i+awnyO4Lf/3aHRYz/87Pn64/3EvxXL8XKJHhMpGp+Y3aFBPAU71+XjtK56w6KzgjO81Z3xqp/HdfLkNblKRTVvH+tE+3M8m2NRF/vkUh3f5DkBr/u9xPf4zGA8e7v/fS/E8ryLZeo1xJheMkz+0F8G/faK363V2SJwJCfySxdBFyLuIL89iZQh4CKfkGWdZqq0Cb3vSsFMxglXT0FApzEPJ7dVQ0xMSy8y8qLprGuvKzY1vpPJ7/XoJfGrJURpR23pEmB9vlErRv9Lil/nUwOawt3l7N0dtEtOiorZKhPvY3UQeMsYjQstrvYlXK+QatpAD1obJxqg+so+8Xut/866qk7wYsHrQOproK8Re0+dlCkdf81psm2597dbT4VD9Y1os0FbpMABRQrSRzmFOdAyv3iAlJQ4uvSh59rfNTrFHitpjb0YojskfORfCUn8BQf0UEHKQBcMbK9i2sqEb87SO7G8890SIY7DotLtNlB+w8eyLssxxFsNGxdRcozb16xYbr35jU/MjZdH6R88kC3soySSWhM7tUbpyV8D2mQnJknz29JVLsn5rZ1obMGQv+bDpn77AqRa6xtVRkO2ocCaZms5d67wELkmI45RFeXOzsruRm4lSVlkdpZ3CuuVeQVkR57WIlES7rLLKSqskR+YhghiUUH3UvmLv3P9p2wuLwToB80ncQaFT5ASWNr4WekQ2/Q1CqfcwA1y9/v9pbOJPsP93sf+syHc2AVvsv8F4XL//dTgcPZz/7qU05v9aXv30R7iNDJJ9QtozzhLvDeNx5EnmGW8s+vrnd27uKHUD9/p04n7rqndusJ/D9cOvX3ejQIfVASLPJFR4EnOlRj0bS68QVqejGmb5tk55F1qKifbyI6rttHRGdYN3NV9bJajhBq5B6naahZRFntaoRc/rIJ+HY4JFUDLQ98vyuse0Lqnzma0Ov5hUzvj7pWWVAgHeZthhj9Bk1bNvPaSajTqFCj6l/N9B/1uTb/9toM3/N+xv5H8e948e9P99lF36P7dV/lJPnjKCdWSnStQ1u4Xi40l/9SR+xqWT/JtLAvqGQRcjsE3+j0eDuvwPH+7/30+x/n/4fTMdQxk7xSUq5GpucOuRgPziSP0wd2XqdZJgswLpcImli77okjluzqoeox6hSxyTyDNH2cLo0Lbdax2XPH+bYmrGrMPGlY/ItQyn1FaR2PBduRYEGxAKTYo5TkDaVElj3qSRJ0TUIfzQRf6XKb7TZ4Dbzn/D4/r3f9TTg/zfR6lFvNUS28/q1gTd3XRe9ZYD14r7a+u1mrDopPBa7X97dJniPY2H3JpvoLv6+V+jC5o+CVzKzzFOwHXGQfHKZCp//fjrIu6++SFhlH9MuPHG4CZF/hqFH6ZZ863BdrBKhkX+9eI7kGBA70KFhax/9y0/yxTpdU13i1Bxw7Z0v6gtJaL1u8wl9alILzf21+0+q8DsPZV2/R+x0J+zj+mj1f4bHNX//sOg//D9t3spvR66mpz9j/eUxHDK0hUn84W8hrcyQIf9wxG6Opmgq3PEOMJUP+DZjMQE2+9zY7rSl5MuOKOnjIOJq5JpJhkXTo78OQmBCvAulJlGZkSZTCcpDhfgHfp9x+n15iyYKzZUeMUCeSFyp1gs0JfPTi7Pzl+eX978cHL6483ZxWUvb1f+Hkcl4CYWtXAh8tGXj0Iske/3fL/3+vzy6uLVy2/sI7zFSRpDbxs6ZfHknwOY6K+3G6QX1S+rm8qfzFfUja2sBoYmOLzFc7B/M8PsrQJVxpulKbN/T8NWEjrXf1ojZJxDKEuXq1CFNictY3/QaA/loTyULuX/AwAA//8mYT44AGoAAA== values: image: - tag: sha-681e007 - imageVectorOverwrite: | - images: - - name: cloud-controller-manager - sourceRepository: github.com/ironcore-dev/cloud-provider-metal - repository: ghcr.io/ironcore-dev/metal-cloud-controller-manager - tag: sha-62079d8 - - name: machine-controller-manager-provider-metal - sourceRepository: github.com/ironcore-dev/machine-controller-manager-provider-metal - repository: ghcr.io/ironcore-dev/machine-controller-manager-provider-metal - tag: sha-1591c41 + repository: "{{required ".Values.extensions.metal.image.repository is missing" .Values.extensions.metal.imege.repository }}" + tag: "{{required ".Values.extensions.metal.image.tag is missing" .Values.extensions.metal.image.tag }}" + {{- if .Values.extensions.metal.imageVectorOverwrites }} + imageVectorOverwrite: "{{ .Values.extensions.metal.imageVectorOverwrites }}" + {{ end -}} --- apiVersion: core.gardener.cloud/v1beta1 kind: ControllerRegistration diff --git a/global/cc-gardener/values.yaml b/global/cc-gardener/values.yaml index be987b2d310..48263dce015 100644 
--- a/global/cc-gardener/values.yaml +++ b/global/cc-gardener/values.yaml @@ -54,6 +54,19 @@ extensions: zones: [] metal: enabled: false + image: + repository: ghcr.io/ironcore-dev/gardener-extension-provider-metal + tag: sha-681e007 + imageVectorOverwrites: | + images: + - name: cloud-controller-manager + sourceRepository: github.com/ironcore-dev/cloud-provider-metal + repository: ghcr.io/ironcore-dev/metal-cloud-controller-manager + tag: sha-62079d8 + - name: machine-controller-manager-provider-metal + sourceRepository: github.com/ironcore-dev/machine-controller-manager-provider-metal + repository: ghcr.io/ironcore-dev/machine-controller-manager-provider-metal + tag: sha-1591c41 gardenlet: enabled: false owner-info: From 3d3f01c7c62d9016a52b73ef7aa73fb3e32cdd01 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 24 Dec 2024 15:34:11 +0200 Subject: [PATCH 041/224] [cc-gardener] fix typo --- global/cc-gardener/managedresources/extension-metal.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/global/cc-gardener/managedresources/extension-metal.yaml b/global/cc-gardener/managedresources/extension-metal.yaml index 5098e2ac641..cc8b5e4e8b7 100644 --- a/global/cc-gardener/managedresources/extension-metal.yaml +++ b/global/cc-gardener/managedresources/extension-metal.yaml @@ -7,7 +7,7 @@ helm: rawChart: H4sIFAAAAAAA/ykAK2FIUjBjSE02THk5NWIzVjBkUzVpWlM5Nk9WVjZNV2xqYW5keVRRbz1IZWxtAOw9f3PbtpL9m58CQ7fTNFdSkiXLebzp3Lm2k3qaHxrbL72btueByJWEmgRYAFSiJvnuN/hBiqQoUXRSt3nPmMlEBLGLBbC7WOwu6DnmEVDgHryVQAVh1Es5W5IIuJeAxHHvdIG59Fc4ib+4Y+n3+/3xaKT/7/f79f/7h8PRF4PReHQ8Oj4a9Idf9AfHx8PRF6h/1w67lExIzL/of3Rf9cF9JgWn5DVwte4BWg4cnKbFozvw+64TgQg5SaWuOkE/QJygULEEmjGO5ALQM8tCSPMLmlj2QQVHORQnEKBWVnOWedd9f+D3nb96bv4dSrv8L3GcgfgYBdAi/8Pj4bgq/4f9w/6D/N9LIQmeQ+AgxCFlgkjGVwGaL0LuE9YjnNGQcfAiWPbaxRchiecBirEEIR2E0iyOJywm4SpAF7OXTE44CKDScTikMQnxKcuoDNDASfDbf1K8xCTG0xhszVXG5xCgo/5XCkCwjIcgAvTug7NMsSIZqGodBUjyDPQQTCPbp4MQQgmhJ3HM3kBknhEK00whTexjAoke83j0gjgIZWmEZQWDqXnBIgiQe5JJ5jpOApKTUEwYlwF60n/SdxaAY7koKgaOEzIqOYtj4EIhso9pjCkYxCGjYcY5UHm1oqEI0JGDkMETLiC83d2KyylgadpwoPDmgkrgSxxfQchoJAI07DsIETrjWEiehTLjuzp+w/gt8B0NyJwyDq9S4FhtBieUMonNtjDDsQDHiYhQC3JaGjj6+de8+ieYLhi7NXUG2eV6VS2KA/RqCfwNJxJQyJKUUaASaSYViMwQBYggQuDPfUQoUnypmI3QOQK6VPyaAJXCOdAgryGUjBcYA/TeOUAWW6B+IuQhszmFMcsib71mXoIpngM3rRAyhF6WhYTIRTb1Q5ZU5cRgqomGxdIqZLuAtXC5YoG9J0/+cXw0mrnVISQ4XBAKDYPYgnD/MXVG3TrQzhjXox+OB/gwgtroQ0G8iJPlXZZMd+RtYmgfxRZAQ+xSmTFD13HeGNY/ZXRG5krEBPAlUYpKqYvRaGirgJuaQf/wqO84BwdIgFTMLbSxZbXOt4b9Rchxqhh/ukICQHNNAnIBmXAODnIVpXo7OLCqMsdQwOqXhSa9srVWoSoNZukNYwJUnjJKIdQir9UEDkNIVa0EKq9XqZJjnGrNrhr1fhOM5vokb7Klxe+pUMPu64dpxoWaBa3AQIZWdQvJuN2sNNIYC/GyYln6RnxmWG8/uhFOcUjkKkCHR8+IrdQMpjYw4AG6zabAKUgQanXnIXhpZNstWZwl8D2hEaFzswH8hIl8yvhTwoUauMgS4A5CM8BKvT5TO5/eoRQfnBm9l5vHV2bNT8JQ7XqnHCrKMx+CGl1hBruug+zgYrC6fqMrpFffvMxFJzC2+B4WdLv9JyFJ9Zbeu1lAnAIXvkw7mYIt9t9gsGH/HQ6Hgwf77z7Ku3ceimBGKCBXKVIXeR8+OO22noIDGunWThlJjKcQCx+nqX8LK4NOP1TkTHVVwbEFhT58uLYFIjSMs6ig1EcWcAchm7B1AhWWAG1pYfvXPW2OglAhMQ1Bg/uXEAMW4Cud1EhZQZq2QAxlCKk3ZIYWWEw4zMhbvckdHo0DF/mvzdlLt/clnqMCIuWEyhlyvxL//ZWot1zvW7tQQCygCWFwZ4Q0Ko/7w4eHE/zfvnTR/8YeSHDq6cVfauvaY7l5vdVF0Ob/G42HNf1/PBgfP+j/+yhW+1SkunZsMrqv4ia8JTQKkLFnX+BUGZs4wlKfyo053qytmxnHAokUN6lSXW2UjFHMQYM6V+jfI0IjdVwcqdY5Oea0d1PlUnUSVP3sGnUV3b+qUrub/MdsPid0vqdTsM3/Nz6s+f8PR4fj8YP830f5VIJtOcIzPFKVaMNjJflVR5WC34RfPb5ZNsu4PR9ZzIVAz0gsgXslS0wBKIlWaH9+evH8+vzyV3uEQ0hbY7WSYi70uc2UF1iGi1qTEvrH23xDj+3xf9vrooMfYXVTpyNm8+L9RNOzSQBOiXULVAm+BF19c4Ylzuuu9ZH9z5qC7e4aOwk7GvybTMO+Dq1/pflYe75K435ceMX+1kNNWERmq72HWjQ8ZTQiSjWVGqrh6aPiTaLQgEDqYPR/a/hf/C
Xmv/gxm/+i1JXEhAIXv/iPtikPz/+P/7pR/3brmPeNK1AHFuSbgvyTKKrPBLq5yVXwTZLFkkigmMobEt3cIKYd/oz/Z6bm/zOb3h1CmU/Rozb99X5vyf5Uk3yf+/+d7L+OweAW++9wOD6q+//Go+GD/Xcf5VPZfwVv/KmHOdNLcYRTYuV5nglGlAaiGdfP+dgvmZoWQc3iXA5wnC7wQCMqpsAK+2nZHHVq52WLrx4fMSPcFjdpjpy8e4c4/J4RDhFyW/D7mwgQEQW820ZfE7wluRat6URVCbIbOWXAgg4dE+rU/+9px2lQAEV/NuzUqUcN061PA1J1Kaju14GujVBXJdhV8lvYLhSkb0H8ouV6YOUwWBu0bZjTt8nquvkUh7dZahohyf4XJ/GudoUwj2vjbu6iHOLKR1EJe23vtQK6tdu/Wun+jUqX/T+CNGarBGjHdNDd+/9gcHxY3/+H/XH/Yf+/j1LeNnGail5hBJwVq723FbDn3q9jYWWnczUZyCDCRX6RyJOc7Pv67m3AA+RKnoFbVatdjAyRQmgy4Ux2wA9EacXVc5IQGaC+yZGLSYhFRY2Wc9lMpwJi7Uu2GWjqyPJ8PzrGFoHkWMLcJqApM4TQ+T91Hlq+JdQz5koEVV+tt4F1Sl21sa40zXJZt5SXll1bLfUlUWXPAEKxEy0gvBVZUnImmwNYY2SgwmiPdIgSfelfWyr977GACZYL5O4Vm3K/0XNtwqsiS8p01TbjLaTuNH7vQOxeZJEZwjRar5jJ4fGrOTtloHUikE9YT+f6rMWjuZGRarchjL2GOEARzHAWyyKNCGiUMjVMQlHpdMwzKkkCW7pKdYqTW+bBdTZlpcONNSn7j1WhIN8wfkvohkUvWU6FV7iQAoRNJuh+8BEVHSHSbBqT0LNtOkNzssQS9gPfqgwl83Ace2LBmBTaSb6eAE+GqTcaDdeYW9TRk3zqc9VoVpMwTuTqdDP/yhMrISHx/mETuYpEN5v09HL33pEfPoz3Ju/Pa9tyTDGJzJVWNs1is9mklJhcDwP666zlijJgSYJptOY9D+2VEZ039qyS8GYkhu96IMNe84CscuiVjrp1NEUesZfgt946WdfjoB5IDOK7qp2f5+P6ZWi/luZbHq7qqZSK3L2jEvAe/ZhkZq8wHr7b4TdoAtQJ0B6xGdCeMCnQO2gzcH5T4nS9l2oGdfeZqMK3TYYxZjyWp1l76013awfbErPruE2Kd/cRGLg2ym2Sa87nHdeyBm31hqc2izJZlUxav5RGuwc+4HugMym4dWw2hb3kAq5M1mbiO3qPfmOEIvdbdxsu23cTojxVfguWmslV3T4rfdlX3pTQyMNRxEGI74KtW+8ua2itEbZjW1+CqEMWqtLmtpYHXexg9l0ZFuiyrHLNTvD8/OTs/PLm/Pn56fXFq5c3L09enF9NTk7Pi5YI6XDBU86SoFSJ0IxAHF3CrFpr65V1FhRWr19w8F1t3Zzeixcnz85fn59ev7q8efX6/PKny4vrDVoD1NPX6kqpKr3G3JVdixSTJVAQYsLZFMpjXEiZPstTiPOS6vH2zKr9UX2ljbTWpVVFhAtQo/zh+npSekEokQTHZxDjVXElZdAvWnDAEelMq4Ja3QupR065A7HJhbmGMVqjhK4wYSZ1yvZRN9pclixkcYCuTyd1FxmvnpLzuczv0TR4xtYQ7xG1ht2g3+CAREXK+wtlrDUMuUjuyEuiGhqxaTdpPlaMtuWONRGzIUqldoqHXtF4VdwbW1NVmQ08mynOWAUlTohOqCQnGy9Q4f89yzih86twAVGmTu0XenO21edvIczKcQAzPG15X1WcBqVRYRkuzt+mStlWT945+C2stuYvFxnONShUhBwDdEE3Xpo7n/WuVGd75EmXASRLWczmqx8VjW41e3rBhNRMYiEM723Y/TXmCfPYVJm6vUNTebEnWXOpY3SYq6VOnLkfX3ant43Pd9De3b/dxf+bsigigmf6Mvg0i+awnyO4Lf/3aHRYz/87Pn64/3EvxXL8XKJHhMpGp+Y3aFBPAU71+XjtK56w6KzgjO81Z3xqp/HdfLkNblKRTVvH+tE+3M8m2NRF/vkUh3f5DkBr/u9xPf4zGA8e7v/fS/E8ryLZeo1xJheMkz+0F8G/faK363V2SJwJCfySxdBFyLuIL89iZQh4CKfkGWdZqq0Cb3vSsFMxglXT0FApzEPJ7dVQ0xMSy8y8qLprGuvKzY1vpPJ7/XoJfGrJURpR23pEmB9vlErRv9Lil/nUwOawt3l7N0dtEtOiorZKhPvY3UQeMsYjQstrvYlXK+QatpAD1obJxqg+so+8Xut/866qk7wYsHrQOproK8Re0+dlCkdf81psm2597dbT4VD9Y1os0FbpMABRQrSRzmFOdAyv3iAlJQ4uvSh59rfNTrFHitpjb0YojskfORfCUn8BQf0UEHKQBcMbK9i2sqEb87SO7G8890SIY7DotLtNlB+w8eyLssxxFsNGxdRcozb16xYbr35jU/MjZdH6R88kC3soySSWhM7tUbpyV8D2mQnJknz29JVLsn5rZ1obMGQv+bDpn77AqRa6xtVRkO2ocCaZms5d67wELkmI45RFeXOzsruRm4lSVlkdpZ3CuuVeQVkR57WIlES7rLLKSqskR+YhghiUUH3UvmLv3P9p2wuLwToB80ncQaFT5ASWNr4WekQ2/Q1CqfcwA1y9/v9pbOJPsP93sf+syHc2AVvsv8F4XL//dTgcPZz/7qU05v9aXv30R7iNDJJ9QtozzhLvDeNx5EnmGW8s+vrnd27uKHUD9/p04n7rqndusJ/D9cOvX3ejQIfVASLPJFR4EnOlRj0bS68QVqejGmb5tk55F1qKifbyI6rttHRGdYN3NV9bJajhBq5B6naahZRFntaoRc/rIJ+HY4JFUDLQ98vyuse0Lqnzma0Ov5hUzvj7pWWVAgHeZthhj9Bk1bNvPaSajTqFCj6l/N9B/1uTb/9toM3/N+xv5H8e948e9P99lF36P7dV/lJPnjKCdWSnStQ1u4Xi40l/9SR+xqWT/JtLAvqGQRcjsE3+j0eDuvwPH+7/30+x/n/4fTMdQxk7xSUq5GpucOuRgPziSP0wd2XqdZJgswLpcImli77okjluzqoeox6hSxyTyDNH2cLo0Lbdax2XPH+bYmrGrMPGlY/ItQyn1FaR2PBduRYEGxAKTYo5TkDaVElj3qSRJ0TUIfzQRf6XKb7TZ4Dbzn/D4/r3f9TTg/zfR6lFvNUS28/q1gTd3XRe9ZYD14r7a+u1mrDopPBa7X97dJniPY2H3JpvoLv6+V+jC5o+CVzKzzFOwHXGQfHKZCp//fjrIu6++SFhlH9MuPHG4CZF/hqFH6ZZ863BdrBKhkX+9eI7kGBA70KFhax/9y0/yxTpdU13i1Bxw7Z0v6gtJaL1u8wl9alILzf21+0+q8DsPZV2/R+x0J+zj+mj1f4bHNX//sOg//D9t3spvR66mpz9j/eUxHDK0hUn84W8hrcyQIf9wxG6Opmgq3PEOMJUP+DZjMQE2+9zY7rSl
5MuOKOnjIOJq5JpJhkXTo78OQmBCvAulJlGZkSZTCcpDhfgHfp9x+n15iyYKzZUeMUCeSFyp1gs0JfPTi7Pzl+eX978cHL6483ZxWUvb1f+Hkcl4CYWtXAh8tGXj0Iske/3fL/3+vzy6uLVy2/sI7zFSRpDbxs6ZfHknwOY6K+3G6QX1S+rm8qfzFfUja2sBoYmOLzFc7B/M8PsrQJVxpulKbN/T8NWEjrXf1ojZJxDKEuXq1CFNictY3/QaA/loTyULuX/AwAA//8mYT44AGoAAA== values: image: - repository: "{{required ".Values.extensions.metal.image.repository is missing" .Values.extensions.metal.imege.repository }}" + repository: "{{required ".Values.extensions.metal.image.repository is missing" .Values.extensions.metal.image.repository }}" tag: "{{required ".Values.extensions.metal.image.tag is missing" .Values.extensions.metal.image.tag }}" {{- if .Values.extensions.metal.imageVectorOverwrites }} imageVectorOverwrite: "{{ .Values.extensions.metal.imageVectorOverwrites }}" From f1ee0013d20ff58025b15d4593ce6b4fe5b76d7c Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 24 Dec 2024 16:04:49 +0200 Subject: [PATCH 042/224] [cc-gardener] image overrides per values --- global/cc-gardener/Chart.yaml | 2 +- .../managedresources/extension-metal.yaml | 13 ++++++++++--- global/cc-gardener/values.yaml | 17 +++++++---------- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index 792a4e1e516..6f18572b236 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-gardener description: Converged Cloud Gardener setup based on gardener-operator type: application -version: 0.6.1 +version: 0.6.2 appVersion: "v1.110.1" home: https://github.com/gardener/gardener dependencies: diff --git a/global/cc-gardener/managedresources/extension-metal.yaml b/global/cc-gardener/managedresources/extension-metal.yaml index cc8b5e4e8b7..df94715780b 100644 --- a/global/cc-gardener/managedresources/extension-metal.yaml +++ b/global/cc-gardener/managedresources/extension-metal.yaml @@ -9,9 +9,16 @@ helm: image: repository: "{{required ".Values.extensions.metal.image.repository is missing" .Values.extensions.metal.image.repository }}" tag: "{{required ".Values.extensions.metal.image.tag is missing" .Values.extensions.metal.image.tag }}" - {{- if .Values.extensions.metal.imageVectorOverwrites }} - imageVectorOverwrite: "{{ .Values.extensions.metal.imageVectorOverwrites }}" - {{ end -}} + imageVectorOverwrite: | + images: + - name: cloud-controller-manager + sourceRepository: github.com/ironcore-dev/cloud-provider-metal + repository: "{{required ".Values.extensions.metal.imageVectorOverwrite.ccm.repository is missing" .Values.extensions.metal.imageVectorOverwrite.ccm.repository }}" + tag: "{{required ".Values.extensions.metal.imageVectorOverwrite.ccm.tag is missing" .Values.extensions.metal.imageVectorOverwrite.ccm.tag }}" + - name: machine-controller-manager-provider-metal + sourceRepository: github.com/ironcore-dev/machine-controller-manager-provider-metal + repository: "{{required ".Values.extensions.metal.imageVectorOverwrite.mcm.repository is missing" .Values.extensions.metal.imageVectorOverwrite.mcm.repository }}" + tag: "{{required ".Values.extensions.metal.imageVectorOverwrite.mcm.tag is missing" .Values.extensions.metal.imageVectorOverwrite.mcm.tag }}" --- apiVersion: core.gardener.cloud/v1beta1 kind: ControllerRegistration diff --git a/global/cc-gardener/values.yaml b/global/cc-gardener/values.yaml index 48263dce015..a17a6a0b19a 100644 --- a/global/cc-gardener/values.yaml +++ b/global/cc-gardener/values.yaml @@ -57,16 +57,13 @@ extensions: image: repository: 
ghcr.io/ironcore-dev/gardener-extension-provider-metal tag: sha-681e007 - imageVectorOverwrites: | - images: - - name: cloud-controller-manager - sourceRepository: github.com/ironcore-dev/cloud-provider-metal - repository: ghcr.io/ironcore-dev/metal-cloud-controller-manager - tag: sha-62079d8 - - name: machine-controller-manager-provider-metal - sourceRepository: github.com/ironcore-dev/machine-controller-manager-provider-metal - repository: ghcr.io/ironcore-dev/machine-controller-manager-provider-metal - tag: sha-1591c41 + imageVectorOverwrites: + ccm: + repository: ghcr.io/ironcore-dev/metal-cloud-controller-manager + tag: sha-62079d8 + mcm: + repository: ghcr.io/ironcore-dev/machine-controller-manager-provider-metal + tag: sha-1591c41 gardenlet: enabled: false owner-info: From ab82b1ea9deaa32cb442d5317d22d15b02e1e6cc Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 24 Dec 2024 16:07:41 +0200 Subject: [PATCH 043/224] [cc-gardener] fix typo in value --- global/cc-gardener/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/global/cc-gardener/values.yaml b/global/cc-gardener/values.yaml index a17a6a0b19a..f4889fd4714 100644 --- a/global/cc-gardener/values.yaml +++ b/global/cc-gardener/values.yaml @@ -57,7 +57,7 @@ extensions: image: repository: ghcr.io/ironcore-dev/gardener-extension-provider-metal tag: sha-681e007 - imageVectorOverwrites: + imageVectorOverwrite: ccm: repository: ghcr.io/ironcore-dev/metal-cloud-controller-manager tag: sha-62079d8 From a44a2b51c7280d99a5c32f3b157e8465ef636c63 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Wed, 25 Dec 2024 12:16:25 +0200 Subject: [PATCH 044/224] init ipam-operator-remote chart --- system/Makefile | 16 + system/ipam-operator-remote/.helmignore | 23 + system/ipam-operator-remote/Chart.yaml | 21 + .../managedresources/kustomize.yaml | 820 ++++++++++++++++++ .../templates/_helpers.tpl | 62 ++ .../templates/deployment.yaml | 61 ++ .../templates/managedresource.yaml | 24 + .../templates/manager-config.yaml | 9 + .../templates/remote-kubeconfig.yaml | 31 + system/ipam-operator-remote/values.yaml | 39 + .../kustomization.yaml | 7 + .../ipam-operator-remote/kustomization.yaml | 6 + .../ipam-operator-remote/managedresource.yaml | 24 + .../manager-remote-patch.yaml | 38 + .../remote-kubeconfig.yaml | 31 + 15 files changed, 1212 insertions(+) create mode 100644 system/ipam-operator-remote/.helmignore create mode 100644 system/ipam-operator-remote/Chart.yaml create mode 100644 system/ipam-operator-remote/managedresources/kustomize.yaml create mode 100644 system/ipam-operator-remote/templates/_helpers.tpl create mode 100644 system/ipam-operator-remote/templates/deployment.yaml create mode 100644 system/ipam-operator-remote/templates/managedresource.yaml create mode 100644 system/ipam-operator-remote/templates/manager-config.yaml create mode 100644 system/ipam-operator-remote/templates/remote-kubeconfig.yaml create mode 100644 system/ipam-operator-remote/values.yaml create mode 100644 system/kustomize/ipam-operator-managedresources/kustomization.yaml create mode 100644 system/kustomize/ipam-operator-remote/kustomization.yaml create mode 100644 system/kustomize/ipam-operator-remote/managedresource.yaml create mode 100644 system/kustomize/ipam-operator-remote/manager-remote-patch.yaml create mode 100644 system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml diff --git a/system/Makefile b/system/Makefile index 24d5dd823c0..44d0f940097 100644 --- a/system/Makefile +++ b/system/Makefile @@ -181,6 +181,22 @@ 
build-ipam-operator: @yq -i '.controllerManager.manager.image.tag="$(IPAM_VERSION)"' ipam-operator/values.yaml @yq -i '.version="0.0.2"' ipam-operator/Chart.yaml +build-ipam-operator-remote: + @rm -rf ipam-operator-remote + @cat kustomize/ipam-operator-remote/kustomization.yaml > kustomization.yaml + @kubectl kustomize | helmify -crd-dir ipam-operator-remote + @cp kustomize/ipam-operator-remote/remote-kubeconfig.yaml ipam-operator-remote/templates + @cp kustomize/ipam-operator-remote/managedresource.yaml ipam-operator-remote/templates + @mkdir ipam-operator-remote/managedresources + @kubectl kustomize kustomize/ipam-operator-managedresources > ipam-operator-remote/managedresources/kustomize.yaml + @yq -i '.controllerManager.manager.image.tag="$(IPAM_VERSION)"' ipam-operator-remote/values.yaml + @yq -i '.fullnameOverride="ipam-operator"' ipam-operator-remote/values.yaml + @yq -i '.remote.ca=""' ipam-operator-remote/values.yaml + @yq -i '.remote.server=""' ipam-operator-remote/values.yaml + @yq -i '.version="0.1.0"' ipam-operator-remote/Chart.yaml + @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' ipam-operator-remote/templates/deployment.yaml + @$(SED) -i 's/kind: Role/kind: ClusterRole/g' ipam-operator-remote/managedresources/kustomize.yaml + # chart name, source url, tag define build-chart @echo "Generating Helm chart for $(1) version $(3)" diff --git a/system/ipam-operator-remote/.helmignore b/system/ipam-operator-remote/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/system/ipam-operator-remote/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/system/ipam-operator-remote/Chart.yaml b/system/ipam-operator-remote/Chart.yaml new file mode 100644 index 00000000000..87ada3df7b6 --- /dev/null +++ b/system/ipam-operator-remote/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: ipam-operator-remote +description: A Helm chart for Kubernetes +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.0" diff --git a/system/ipam-operator-remote/managedresources/kustomize.yaml b/system/ipam-operator-remote/managedresources/kustomize.yaml new file mode 100644 index 00000000000..e4e22a2e68d --- /dev/null +++ b/system/ipam-operator-remote/managedresources/kustomize.yaml @@ -0,0 +1,820 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + controller-gen.kubebuilder.io/version: v0.14.0 + name: ips.ipam.metal.ironcore.dev +spec: + group: ipam.metal.ironcore.dev + names: + kind: IP + listKind: IPList + plural: ips + singular: ip + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: IP Address + jsonPath: .status.reserved + name: IP + type: string + - description: Subnet + jsonPath: .spec.subnet.name + name: Subnet + type: string + - description: Consumer Group + jsonPath: .spec.consumer.apiVersion + name: Consumer Group + type: string + - description: Consumer Kind + jsonPath: .spec.consumer.kind + name: Consumer Kind + type: string + - description: Consumer Name + jsonPath: .spec.consumer.name + name: Consumer Name + type: string + - description: Processing state + jsonPath: .status.state + name: State + type: string + - description: Message + jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: IP is the Schema for the ips API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IPSpec defines the desired state of IP + properties: + consumer: + description: Consumer refers to resource IP has been booked for + properties: + apiVersion: + description: APIVersion is resource's API group + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-./a-z0-9]*[a-z0-9])?$ + type: string + kind: + description: Kind is CRD Kind for lookup + maxLength: 63 + minLength: 1 + pattern: ^[A-Z]([-A-Za-z0-9]*[A-Za-z0-9])?$ + type: string + name: + description: Name is CRD Name for lookup + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - kind + - name + type: object + ip: + description: IP allows to set desired IP address explicitly + type: string + subnet: + description: SubnetName is referring to parent subnet that holds requested + IP + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - subnet + type: object + status: + description: IPStatus defines the observed state of IP + properties: + message: + description: Message contains error details if the one has occurred + type: string + reserved: + description: Reserved is a reserved IP + type: string + state: + description: State is a network creation request processing state + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: networkcounters.ipam.metal.ironcore.dev +spec: + group: ipam.metal.ironcore.dev + names: + kind: NetworkCounter + listKind: NetworkCounterList + plural: networkcounters + singular: networkcounter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NetworkCounter is the Schema for the networkcounters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NetworkCounterSpec stores the state of assigned IDs for network + type. + properties: + vacant: + description: Vacant is a list of unassigned network IDs. + items: + description: |- + NetworkIDInterval represents inclusive interval for network IDs. + Used to represent intervals of unassigned IDs. 
+                  properties:
+                    begin:
+                      description: Begin is a first available value in interval
+                      type: string
+                    end:
+                      description: End is a last available value in interval
+                      type: string
+                    exact:
+                      description: Exact represents a single value in interval
+                      type: string
+                  type: object
+                type: array
+            type: object
+          status:
+            description: NetworkCounterStatus defines the observed state of NetworkCounter
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: networks.ipam.metal.ironcore.dev
+spec:
+  group: ipam.metal.ironcore.dev
+  names:
+    kind: Network
+    listKind: NetworkList
+    plural: networks
+    singular: network
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Network Type
+      jsonPath: .spec.type
+      name: Type
+      type: string
+    - description: Reserved Network ID
+      jsonPath: .status.reserved
+      name: Reserved
+      type: string
+    - description: Total IPv4 address capacity in all ranges
+      jsonPath: .status.ipv4Capacity
+      name: IPv4 Capacity
+      type: string
+    - description: Total IPv6 address capacity in all ranges
+      jsonPath: .status.ipv6Capacity
+      name: IPv6 Capacity
+      type: string
+    - description: Description
+      jsonPath: .spec.description
+      name: Description
+      type: string
+    - description: Request state
+      jsonPath: .status.state
+      name: State
+      type: string
+    - description: Message about request processing results
+      jsonPath: .status.message
+      name: Message
+      type: string
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: Network is the Schema for the networks API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: NetworkSpec defines the desired state of Network
+            properties:
+              description:
+                description: Description contains a human readable description of
+                  network
+                type: string
+              id:
+                description: |-
+                  ID is a unique network identifier.
+                  For VXLAN it is a single 24 bit value. First 100 values are reserved.
+                  For GENEVE it is a single 24 bit value. First 100 values are reserved.
+                  For MPLS it is a set of 20 bit values. First 16 values are reserved.
+                  Represented with number encoded to string.
+                type: string
+              type:
+                description: NetworkType is the type of network the ID is assigned to.
+                enum:
+                - VXLAN
+                - GENEVE
+                - MPLS
+                type: string
+            type: object
+          status:
+            description: NetworkStatus defines the observed state of Network
+            properties:
+              ipv4Capacity:
+                anyOf:
+                - type: integer
+                - type: string
+                description: IPv4Capacity is a total address capacity of all IPv4
+                  CIDRs in Ranges
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              ipv4Ranges:
+                description: IPv4Ranges is a list of IPv4 ranges booked by child subnets
+                items:
+                  type: string
+                type: array
+              ipv6Capacity:
+                anyOf:
+                - type: integer
+                - type: string
+                description: IPv6Capacity is a total address capacity of all IPv6
+                  CIDRs in Ranges
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              ipv6Ranges:
+                description: IPv6Ranges is a list of IPv6 ranges booked by child subnets
+                items:
+                  type: string
+                type: array
+              message:
+                description: Message contains error details if the one has occurred
+                type: string
+              reserved:
+                description: Reserved is a reserved network ID
+                type: string
+              state:
+                description: State is a network creation request processing state
+                type: string
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: subnets.ipam.metal.ironcore.dev
+spec:
+  group: ipam.metal.ironcore.dev
+  names:
+    kind: Subnet
+    listKind: SubnetList
+    plural: subnets
+    singular: subnet
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Parent Subnet
+      jsonPath: .spec.parentSubnet.name
+      name: Parent Subnet
+      type: string
+    - description: Parent Network
+      jsonPath: .spec.network.name
+      name: Parent Network
+      type: string
+    - description: Reserved CIDR
+      jsonPath: .status.reserved
+      name: Reserved
+      type: string
+    - description: Address Type
+      jsonPath: .status.type
+      name: Address Type
+      type: string
+    - description: Locality
+      jsonPath: .status.locality
+      name: Locality
+      type: string
+    - description: Amount of ones in netmask
+      jsonPath: .status.prefixBits
+      name: Prefix Bits
+      type: string
+    - description: Capacity
+      jsonPath: .status.capacity
+      name: Capacity
+      type: string
+    - description: Capacity Left
+      jsonPath: .status.capacityLeft
+      name: Capacity Left
+      type: string
+    - description: Consumer Group
+      jsonPath: .spec.consumer.apiVersion
+      name: Consumer Group
+      type: string
+    - description: Consumer Kind
+      jsonPath: .spec.consumer.kind
+      name: Consumer Kind
+      type: string
+    - description: Consumer Name
+      jsonPath: .spec.consumer.name
+      name: Consumer Name
+      type: string
+    - description: State
+      jsonPath: .status.state
+      name: State
+      type: string
+    - description: Message
+      jsonPath: .status.message
+      name: Message
+      type: string
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: Subnet is the Schema for the subnets API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: SubnetSpec defines the desired state of Subnet
+            properties:
+              capacity:
+                anyOf:
+                - type: integer
+                - type: string
+                description: Capacity is a desired amount of addresses; will be ceiled
+                  to the closest power of 2.
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              cidr:
+                description: CIDR represents the IP Address Range
+                type: string
+              consumer:
+                description: Consumer refers to resource Subnet has been booked for
+                properties:
+                  apiVersion:
+                    description: APIVersion is resource's API group
+                    maxLength: 253
+                    minLength: 1
+                    pattern: ^[a-z0-9]([-./a-z0-9]*[a-z0-9])?$
+                    type: string
+                  kind:
+                    description: Kind is CRD Kind for lookup
+                    maxLength: 63
+                    minLength: 1
+                    pattern: ^[A-Z]([-A-Za-z0-9]*[A-Za-z0-9])?$
+                    type: string
+                  name:
+                    description: Name is CRD Name for lookup
+                    maxLength: 63
+                    minLength: 1
+                    pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+                    type: string
+                required:
+                - kind
+                - name
+                type: object
+              network:
+                description: NetworkName contains a reference (name) to the network
+                properties:
+                  name:
+                    default: ""
+                    description: |-
+                      Name of the referent.
+                      This field is effectively required, but due to backwards compatibility is
+                      allowed to be empty. Instances of this type with an empty value here are
+                      almost certainly wrong.
+                      TODO: Add other useful fields. apiVersion, kind, uid?
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+              parentSubnet:
+                description: ParentSubnetName contains a reference (name) to the parent
+                  subnet
+                properties:
+                  name:
+                    default: ""
+                    description: |-
+                      Name of the referent.
+                      This field is effectively required, but due to backwards compatibility is
+                      allowed to be empty. Instances of this type with an empty value here are
+                      almost certainly wrong.
+                      TODO: Add other useful fields. apiVersion, kind, uid?
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+              prefixBits:
+                description: PrefixBits is the number of one bits at the beginning
+                  of the netmask
+                maximum: 128
+                minimum: 0
+                type: integer
+              regions:
+                description: Regions represents the network service location
+                items:
+                  properties:
+                    availabilityZones:
+                      items:
+                        type: string
+                      minItems: 1
+                      type: array
+                    name:
+                      maxLength: 63
+                      minLength: 1
+                      pattern: ^[a-z0-9]([-./a-z0-9]*[a-z0-9])?$
+                      type: string
+                  required:
+                  - availabilityZones
+                  - name
+                  type: object
+                type: array
+            required:
+            - network
+            type: object
+          status:
+            description: SubnetStatus defines the observed state of Subnet
+            properties:
+              capacity:
+                anyOf:
+                - type: integer
+                - type: string
+                description: Capacity shows total capacity of CIDR
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              capacityLeft:
+                anyOf:
+                - type: integer
+                - type: string
+                description: CapacityLeft shows remaining capacity (excluding capacity
+                  of child subnets)
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              locality:
+                description: Locality represents subnet regional coverage
+                type: string
+              message:
+                description: Message contains an error string for the failed State
+                type: string
+              prefixBits:
+                description: PrefixBits is the number of one bits at the beginning
+                  of the netmask
+                type: integer
+              reserved:
+                description: Reserved is a CIDR that was reserved
+                type: string
+              state:
+                description: State represents the subnet processing state
+                type: string
+              type:
+                description: Type represents whether CIDR is an IPv4 or IPv6
+                type: string
+              vacant:
+                description: Vacant shows CIDR ranges available for booking
+                items:
+                  type: string
+                type: array
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ipam-operator-controller-manager
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: ipam-operator-leader-election-role
+  namespace: kube-system
+rules:
+- apiGroups:
+  - ""
+  - coordination.k8s.io
+  resources:
+  - configmaps
+  - leases
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: ipam-operator-manager-role
+rules:
+- apiGroups:
+  - '*'
+  resources:
+  - '*'
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - '*'
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+- apiGroups:
+  - ipam.metal.ironcore.dev
+  resources:
+  - ips
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ipam.metal.ironcore.dev
+  resources:
+  - ips/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - ipam.metal.ironcore.dev
+  resources:
+  - ips/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - ipam.metal.ironcore.dev
+  resources:
+  - networkcounters
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ipam.metal.ironcore.dev
+  resources:
+  - networkcounters/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - ipam.metal.ironcore.dev
+  resources:
+  - networkcounters/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - 
ipam.metal.ironcore.dev + resources: + - networks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ipam.metal.ironcore.dev + resources: + - networks/finalizers + verbs: + - update +- apiGroups: + - ipam.metal.ironcore.dev + resources: + - networks/status + verbs: + - get + - patch + - update +- apiGroups: + - ipam.metal.ironcore.dev + resources: + - subnets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ipam.metal.ironcore.dev + resources: + - subnets/finalizers + verbs: + - update +- apiGroups: + - ipam.metal.ironcore.dev + resources: + - subnets/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ipam-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ipam-operator-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ipam-operator-leader-election-rolebinding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ipam-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: ipam-operator-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ipam-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ipam-operator-manager-role +subjects: +- kind: ServiceAccount + name: ipam-operator-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ipam-operator-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ipam-operator-proxy-role +subjects: +- kind: ServiceAccount + name: ipam-operator-controller-manager + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + name: ipam-operator-controller-manager-metrics-service + namespace: kube-system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager diff --git a/system/ipam-operator-remote/templates/_helpers.tpl b/system/ipam-operator-remote/templates/_helpers.tpl new file mode 100644 index 00000000000..13dbaa3b348 --- /dev/null +++ b/system/ipam-operator-remote/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "ipam-operator-remote.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "ipam-operator-remote.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ipam-operator-remote.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ipam-operator-remote.labels" -}} +helm.sh/chart: {{ include "ipam-operator-remote.chart" . }} +{{ include "ipam-operator-remote.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ipam-operator-remote.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ipam-operator-remote.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ipam-operator-remote.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "ipam-operator-remote.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/system/ipam-operator-remote/templates/deployment.yaml b/system/ipam-operator-remote/templates/deployment.yaml new file mode 100644 index 00000000000..c6eeec5fc68 --- /dev/null +++ b/system/ipam-operator-remote/templates/deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ipam-operator-remote.fullname" . }}-controller-manager + labels: + control-plane: controller-manager + {{- include "ipam-operator-remote.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controllerManager.replicas }} + selector: + matchLabels: + control-plane: controller-manager + {{- include "ipam-operator-remote.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + control-plane: controller-manager + {{- include "ipam-operator-remote.selectorLabels" . 
| nindent 8 }} + spec: + containers: + - args: {{- toYaml .Values.controllerManager.manager.args | nindent 8 }} + command: + - /manager + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: {{ quote .Values.kubernetesClusterDomain }} + image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag + | default .Chart.AppVersion }} + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 8081 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: 8081 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {{- toYaml .Values.controllerManager.manager.resources | nindent 10 + }} + securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext + | nindent 10 }} + volumeMounts: + - mountPath: /kubeconfig + name: remote-kubeconfig + readOnly: true + securityContext: {{- toYaml .Values.controllerManager.podSecurityContext | nindent + 8 }} + serviceAccountName: default + terminationGracePeriodSeconds: 10 + volumes: + - name: remote-kubeconfig + secret: + secretName: ipam-operator-remote-kubeconfig \ No newline at end of file diff --git a/system/ipam-operator-remote/templates/managedresource.yaml b/system/ipam-operator-remote/templates/managedresource.yaml new file mode 100644 index 00000000000..be285e8f073 --- /dev/null +++ b/system/ipam-operator-remote/templates/managedresource.yaml @@ -0,0 +1,24 @@ +apiVersion: resources.gardener.cloud/v1alpha1 +kind: ManagedResource +metadata: + name: {{ include "ipam-operator-remote.fullname" . }}-resources +spec: + secretRefs: + - name: {{ include "ipam-operator-remote.fullname" . }}-resources +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ipam-operator-remote.fullname" . }}-resources +type: Opaque +data: + # Cannot use .Files.AsSecrets because it would create a map + # of "file: base64 data" instead of concatenating the data + # and encoding that + objects.yaml: |- + {{- $combined := "" }} + {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} + {{- $combined = print $combined ($.Files.Get $path) "\n"}} + {{- end }} + {{- $encoded := $combined | b64enc }} + {{ $encoded }} diff --git a/system/ipam-operator-remote/templates/manager-config.yaml b/system/ipam-operator-remote/templates/manager-config.yaml new file mode 100644 index 00000000000..09f39d32e74 --- /dev/null +++ b/system/ipam-operator-remote/templates/manager-config.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "ipam-operator-remote.fullname" . }}-manager-config + labels: + {{- include "ipam-operator-remote.labels" . 
| nindent 4 }} +data: + controller_manager_config.yaml: {{ .Values.managerConfig.controllerManagerConfigYaml + | toYaml | indent 1 }} \ No newline at end of file diff --git a/system/ipam-operator-remote/templates/remote-kubeconfig.yaml b/system/ipam-operator-remote/templates/remote-kubeconfig.yaml new file mode 100644 index 00000000000..17e2a714524 --- /dev/null +++ b/system/ipam-operator-remote/templates/remote-kubeconfig.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ipam-operator-remote-kubeconfig + labels: + resources.gardener.cloud/purpose: token-requestor + resources.gardener.cloud/class: shoot + annotations: + serviceaccount.resources.gardener.cloud/name: ipam-operator-controller-manager + serviceaccount.resources.gardener.cloud/namespace: kube-system +stringData: + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: {{ .Values.remote.ca }} + server: {{ .Values.remote.server }} + name: remote-cluster + contexts: + - context: + cluster: remote-cluster + user: ipam-operator-controller-manager + namespace: kube-system + name: remote-cluster + current-context: remote-cluster + kind: Config + preferences: {} + users: + - name: ipam-operator-controller-manager + user: + token: "" diff --git a/system/ipam-operator-remote/values.yaml b/system/ipam-operator-remote/values.yaml new file mode 100644 index 00000000000..1c0eded8276 --- /dev/null +++ b/system/ipam-operator-remote/values.yaml @@ -0,0 +1,39 @@ +controllerManager: + manager: + args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --kubeconfig=/kubeconfig/kubeconfig + containerSecurityContext: + allowPrivilegeEscalation: false + image: + repository: ironcore-dev/ipam + tag: 6faf501000c5d7ff9744a3c111ca5ecf3339c00c + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 100m + memory: 20Mi + podSecurityContext: + runAsNonRoot: true + replicas: 1 +kubernetesClusterDomain: cluster.local +managerConfig: + controllerManagerConfigYaml: |- + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + kind: ControllerManagerConfig + health: + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 + leaderElection: + leaderElect: true + resourceName: f42c18d5.ironcore.dev +fullnameOverride: ipam-operator +remote: + ca: "" + server: "" diff --git a/system/kustomize/ipam-operator-managedresources/kustomization.yaml b/system/kustomize/ipam-operator-managedresources/kustomization.yaml new file mode 100644 index 00000000000..8ee00e257c9 --- /dev/null +++ b/system/kustomize/ipam-operator-managedresources/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kube-system +namePrefix: ipam-operator- +resources: +- github.com/ironcore-dev/ipam//config/crd +- github.com/ironcore-dev/ipam//config/rbac diff --git a/system/kustomize/ipam-operator-remote/kustomization.yaml b/system/kustomize/ipam-operator-remote/kustomization.yaml new file mode 100644 index 00000000000..2713aeb6cc1 --- /dev/null +++ b/system/kustomize/ipam-operator-remote/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- https://github.com/ironcore-dev/ipam//config/manager +patches: +- path: kustomize/ipam-operator-remote/manager-remote-patch.yaml diff --git a/system/kustomize/ipam-operator-remote/managedresource.yaml b/system/kustomize/ipam-operator-remote/managedresource.yaml new file mode 100644 index 00000000000..be285e8f073 
--- /dev/null +++ b/system/kustomize/ipam-operator-remote/managedresource.yaml @@ -0,0 +1,24 @@ +apiVersion: resources.gardener.cloud/v1alpha1 +kind: ManagedResource +metadata: + name: {{ include "ipam-operator-remote.fullname" . }}-resources +spec: + secretRefs: + - name: {{ include "ipam-operator-remote.fullname" . }}-resources +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ipam-operator-remote.fullname" . }}-resources +type: Opaque +data: + # Cannot use .Files.AsSecrets because it would create a map + # of "file: base64 data" instead of concatenating the data + # and encoding that + objects.yaml: |- + {{- $combined := "" }} + {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} + {{- $combined = print $combined ($.Files.Get $path) "\n"}} + {{- end }} + {{- $encoded := $combined | b64enc }} + {{ $encoded }} diff --git a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml new file mode 100644 index 00000000000..f4fcc8aa9a6 --- /dev/null +++ b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --kubeconfig=/kubeconfig/kubeconfig + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 8081 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: 8081 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + volumeMounts: + - name: remote-kubeconfig + mountPath: /kubeconfig + readOnly: true + volumes: + - name: remote-kubeconfig + secret: + secretName: ipam-operator-remote-kubeconfig diff --git a/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml b/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml new file mode 100644 index 00000000000..17e2a714524 --- /dev/null +++ b/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ipam-operator-remote-kubeconfig + labels: + resources.gardener.cloud/purpose: token-requestor + resources.gardener.cloud/class: shoot + annotations: + serviceaccount.resources.gardener.cloud/name: ipam-operator-controller-manager + serviceaccount.resources.gardener.cloud/namespace: kube-system +stringData: + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: {{ .Values.remote.ca }} + server: {{ .Values.remote.server }} + name: remote-cluster + contexts: + - context: + cluster: remote-cluster + user: ipam-operator-controller-manager + namespace: kube-system + name: remote-cluster + current-context: remote-cluster + kind: Config + preferences: {} + users: + - name: ipam-operator-controller-manager + user: + token: "" From d3ea0a3ab68f948cddbd16734778758a5d287dd6 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Wed, 25 Dec 2024 12:52:27 +0200 Subject: [PATCH 045/224] [ipam-operator-remote] disable webhooks, change ports --- system/Makefile | 2 +- system/ipam-operator-remote/Chart.yaml | 2 +- .../ipam-operator-remote/templates/deployment.yaml | 7 +++++-- system/ipam-operator-remote/values.yaml | 6 ++++-- .../ipam-operator-remote/manager-remote-patch.yaml | 12 ++++++++---- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/system/Makefile b/system/Makefile 
index 44d0f940097..0457e0ac35b 100644 --- a/system/Makefile +++ b/system/Makefile @@ -193,7 +193,7 @@ build-ipam-operator-remote: @yq -i '.fullnameOverride="ipam-operator"' ipam-operator-remote/values.yaml @yq -i '.remote.ca=""' ipam-operator-remote/values.yaml @yq -i '.remote.server=""' ipam-operator-remote/values.yaml - @yq -i '.version="0.1.0"' ipam-operator-remote/Chart.yaml + @yq -i '.version="0.1.1"' ipam-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' ipam-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' ipam-operator-remote/managedresources/kustomize.yaml diff --git a/system/ipam-operator-remote/Chart.yaml b/system/ipam-operator-remote/Chart.yaml index 87ada3df7b6..63ef80a0cd8 100644 --- a/system/ipam-operator-remote/Chart.yaml +++ b/system/ipam-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.1.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/system/ipam-operator-remote/templates/deployment.yaml b/system/ipam-operator-remote/templates/deployment.yaml index c6eeec5fc68..b701178927f 100644 --- a/system/ipam-operator-remote/templates/deployment.yaml +++ b/system/ipam-operator-remote/templates/deployment.yaml @@ -22,6 +22,8 @@ spec: command: - /manager env: + - name: ENABLE_WEBHOOKS + value: {{ quote .Values.controllerManager.manager.env.enableWebhooks }} - name: KUBERNETES_CLUSTER_DOMAIN value: {{ quote .Values.kubernetesClusterDomain }} image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag @@ -30,7 +32,7 @@ spec: failureThreshold: 3 httpGet: path: /healthz - port: 8081 + port: 30081 scheme: HTTP initialDelaySeconds: 15 periodSeconds: 20 @@ -39,7 +41,7 @@ spec: failureThreshold: 3 httpGet: path: /readyz - port: 8081 + port: 30081 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 10 @@ -51,6 +53,7 @@ spec: - mountPath: /kubeconfig name: remote-kubeconfig readOnly: true + hostNetwork: true securityContext: {{- toYaml .Values.controllerManager.podSecurityContext | nindent 8 }} serviceAccountName: default diff --git a/system/ipam-operator-remote/values.yaml b/system/ipam-operator-remote/values.yaml index 1c0eded8276..309d53730a0 100644 --- a/system/ipam-operator-remote/values.yaml +++ b/system/ipam-operator-remote/values.yaml @@ -1,11 +1,13 @@ controllerManager: manager: args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 + - --health-probe-bind-address=:30081 + - --metrics-bind-address=127.0.0.1:30082 - --kubeconfig=/kubeconfig/kubeconfig containerSecurityContext: allowPrivilegeEscalation: false + env: + enableWebhooks: "false" image: repository: ironcore-dev/ipam tag: 6faf501000c5d7ff9744a3c111ca5ecf3339c00c diff --git a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml index f4fcc8aa9a6..fd51620fe7a 100644 --- a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml +++ 
b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml @@ -6,17 +6,18 @@ metadata: spec: template: spec: + hostNetwork: true containers: - name: manager args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 + - --health-probe-bind-address=:30081 + - --metrics-bind-address=127.0.0.1:30082 - --kubeconfig=/kubeconfig/kubeconfig livenessProbe: failureThreshold: 3 httpGet: path: /healthz - port: 8081 + port: 30081 scheme: HTTP initialDelaySeconds: 15 periodSeconds: 20 @@ -24,10 +25,13 @@ spec: failureThreshold: 3 httpGet: path: /readyz - port: 8081 + port: 30081 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 10 + env: + - name: ENABLE_WEBHOOKS + value: "false" volumeMounts: - name: remote-kubeconfig mountPath: /kubeconfig From b9dc4873549fdc09dabe9db27acb065c6dc9af62 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Thu, 26 Dec 2024 11:56:49 +0200 Subject: [PATCH 046/224] [greenhouse-extras-storage] bump absent-metrics-operator --- system/greenhouse-extras-storage/Chart.lock | 6 +++--- system/greenhouse-extras-storage/Chart.yaml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/system/greenhouse-extras-storage/Chart.lock b/system/greenhouse-extras-storage/Chart.lock index b184d15d220..58285499ba5 100644 --- a/system/greenhouse-extras-storage/Chart.lock +++ b/system/greenhouse-extras-storage/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: absent-metrics-operator repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.9.11 -digest: sha256:1fd232cd05ea202c20ea164b9ae903fa08673e85c32155c0685a9221367b5525 -generated: "2024-10-10T15:08:31.224865+02:00" + version: 1.0.2 +digest: sha256:ae589799690882a3b70fe191c2949e7d8d5d062a37a0919e694c4cfc22fa9427 +generated: "2024-12-26T11:56:17.206465+02:00" diff --git a/system/greenhouse-extras-storage/Chart.yaml b/system/greenhouse-extras-storage/Chart.yaml index 2edaa3f79f2..391c1ee8422 100644 --- a/system/greenhouse-extras-storage/Chart.yaml +++ b/system/greenhouse-extras-storage/Chart.yaml @@ -2,9 +2,9 @@ apiVersion: v2 name: greenhouse-extras-storage description: Stuff that doesn't come via Greenhouse type: application -version: 0.4.0 +version: 0.4.1 appVersion: "0.1.0" dependencies: - name: absent-metrics-operator repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.9.11 + version: 1.0.2 From aaca272ea826af27be7981bec97100da8cf9a2fc Mon Sep 17 00:00:00 2001 From: Mikhail Samoylov Date: Thu, 26 Dec 2024 18:22:45 +0400 Subject: [PATCH 047/224] [tempest] Enable neutron DNS integration tests (#7594) --- openstack/tempest/neutron-tempest/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openstack/tempest/neutron-tempest/values.yaml b/openstack/tempest/neutron-tempest/values.yaml index 126043c672b..bc7d64dc3b0 100644 --- a/openstack/tempest/neutron-tempest/values.yaml +++ b/openstack/tempest/neutron-tempest/values.yaml @@ -1,5 +1,5 @@ # You can override subchart values here, e.g. 
concurrency -run_pattern: neutron_tempest_plugin.api|tempest.api.network +run_pattern: neutron_tempest_plugin.api|tempest.api.network|neutron_tempest_plugin.scenario.test_dns_integration concurrency: 2 owner-info: helm-chart-url: 'https://github.com/sapcc/helm-charts/tree/master/openstack/tempest/neutron-tempest' From a975ecfd0bdfc76bd72aaf2e3328a97dac6f703e Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Fri, 27 Dec 2024 11:20:36 +0200 Subject: [PATCH 048/224] [pxc-operator] Update to 1.16.1 bugfix release --- system/percona-xtradb-cluster-crds/Chart.yaml | 2 +- system/percona-xtradb-cluster-operator/Chart.lock | 6 +++--- system/percona-xtradb-cluster-operator/Chart.yaml | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/system/percona-xtradb-cluster-crds/Chart.yaml b/system/percona-xtradb-cluster-crds/Chart.yaml index 18f1de57c53..17bada5f62e 100644 --- a/system/percona-xtradb-cluster-crds/Chart.yaml +++ b/system/percona-xtradb-cluster-crds/Chart.yaml @@ -3,4 +3,4 @@ apiVersion: v2 name: percona-xtradb-cluster-crds description: A Helm chart containing Percona CRDs. type: application -version: 0.0.1-percona1.16.0 +version: 0.0.1-percona1.16.1 diff --git a/system/percona-xtradb-cluster-operator/Chart.lock b/system/percona-xtradb-cluster-operator/Chart.lock index a21f21bbbe1..bdea549082f 100644 --- a/system/percona-xtradb-cluster-operator/Chart.lock +++ b/system/percona-xtradb-cluster-operator/Chart.lock @@ -1,7 +1,7 @@ dependencies: - name: pxc-operator repository: https://percona.github.io/percona-helm-charts/ - version: 1.16.0 + version: 1.16.1 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 @@ -11,5 +11,5 @@ dependencies: - name: kube-state-metrics repository: https://prometheus-community.github.io/helm-charts version: 5.27.0 -digest: sha256:178152c2db0f6266d3722718e06dea813c2798442244b4c0a095e22c4164794b -generated: "2024-12-20T12:29:55.128909+02:00" +digest: sha256:9cdca7190409a5433ca301713f855873805301e8327d4bc92e51e7e182645055 +generated: "2024-12-27T11:19:13.293876+02:00" diff --git a/system/percona-xtradb-cluster-operator/Chart.yaml b/system/percona-xtradb-cluster-operator/Chart.yaml index 2982a809a95..75d57b43b17 100644 --- a/system/percona-xtradb-cluster-operator/Chart.yaml +++ b/system/percona-xtradb-cluster-operator/Chart.yaml @@ -4,8 +4,8 @@ name: percona-xtradb-cluster-operator description: A Helm chart to install Percona XtraDB Cluster Operator. home: "https://github.com/sapcc/helm-charts/tree/master/system/percona-xtradb-cluster-operator" type: application -version: 0.3.0 -appVersion: "1.16.0" +version: 0.3.1 +appVersion: "1.16.1" kubeVersion: ">=1.26.0-0" maintainers: - name: Birk Bohne @@ -19,7 +19,7 @@ sources: - https://github.com/percona/percona-helm-charts/tree/main dependencies: - name: pxc-operator - version: 1.16.0 + version: 1.16.1 repository: https://percona.github.io/percona-helm-charts/ - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From 62c198ddeee7050ae425f0874d5e6d17ae7723b1 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Fri, 27 Dec 2024 11:33:42 +0200 Subject: [PATCH 049/224] [pxc-db] Update to 1.16.1 bugfix release --- common/pxc-db/Chart.yaml | 4 ++-- common/pxc-db/values.yaml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index 44f59b4d51c..4b3d81abab7 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,13 +16,13 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.4 +version: 0.2.5 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.16.0" +appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml index bc01511a4fe..fe6a5603e21 100644 --- a/common/pxc-db/values.yaml +++ b/common/pxc-db/values.yaml @@ -74,7 +74,7 @@ pause: false initContainer: image: name: percona-xtradb-cluster-operator - tag: 1.16.0 + tag: 1.16.1 override: null resources: requests: @@ -326,7 +326,7 @@ haproxy: initdb: image: name: percona/percona-xtradb-cluster-operator - tag: 1.16.0-pxc8.0-backup-pxb8.0.35 + tag: 1.16.1-pxc8.0-backup-pxb8.0.35 backup: enabled: false @@ -334,7 +334,7 @@ backup: labels: {} image: name: percona/percona-xtradb-cluster-operator - tag: 1.16.0-pxc8.0-backup-pxb8.0.35 + tag: 1.16.1-pxc8.0-backup-pxb8.0.35 override: null imagePullPolicy: Always backoffLimit: 2 From da457d94deb7cd1326e123e7d921b3df7e47877f Mon Sep 17 00:00:00 2001 From: Vassil Dimitrov Date: Mon, 23 Dec 2024 12:36:06 +0100 Subject: [PATCH 050/224] [unbound] don't use named ports No need for the named ports. Let's simplify things. We're mapping the unbound ports 1-to-1, i.e. service port 53 maps to unbound port 53, etc. --- system/unbound/templates/deployment.yaml | 9 --------- system/unbound/templates/service.yaml | 2 -- 2 files changed, 11 deletions(-) diff --git a/system/unbound/templates/deployment.yaml b/system/unbound/templates/deployment.yaml index b60953d8f79..38dd3abb960 100644 --- a/system/unbound/templates/deployment.yaml +++ b/system/unbound/templates/deployment.yaml @@ -46,15 +46,6 @@ spec: {{ toYaml .Values.resources.unbound | indent 10 }} securityContext: privileged: true - ports: -{{- range $.Values.unbound.externalPorts | required "externalPorts missing" }} - - name: dns-tcp-{{.}} - containerPort: {{.}} - protocol: TCP - - name: dns-udp-{{.}} - containerPort: {{.}} - protocol: UDP -{{- end }} volumeMounts: - name: unbound-conf mountPath: /etc/unbound diff --git a/system/unbound/templates/service.yaml b/system/unbound/templates/service.yaml index b7849027adb..10eda868d2f 100644 --- a/system/unbound/templates/service.yaml +++ b/system/unbound/templates/service.yaml @@ -19,11 +19,9 @@ spec: - name: dns-tcp-{{.}} protocol: TCP port: {{.}} - targetPort: dns-tcp-{{.}} - name: dns-udp-{{.}} protocol: UDP port: {{.}} - targetPort: dns-udp-{{.}} {{- end }} externalIPs: {{- required "A valid .Values.unbound.externalIPs required!" .Values.unbound.externalIPs | toYaml | nindent 2 }} From 270b82f71b27eb48af3e6381160c67d365d846d9 Mon Sep 17 00:00:00 2001 From: Vassil Dimitrov Date: Fri, 27 Dec 2024 13:15:39 +0100 Subject: [PATCH 051/224] [unbound] split the unbound services in two, UDP and TCP We can't reliably maintain the existing service if it's using the same port numbers for UDP and TCP. See [1]. Adding new ports would result in a borked service, potentially bringing the whole thing down. With one service per protocol the port numbers will be unique within the service, so we should be good. 
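For illustration, the template change below renders one Service per protocol. Assuming `.Values.unbound.name` is `unbound` and `externalPorts` is `[53]` (both chart values, not shown in this diff), the UDP half comes out roughly as:

apiVersion: v1
kind: Service
metadata:
  name: unbound-udp
  annotations:
    parrot.sap.cc/announce: 'true'
    service.alpha.kubernetes.io/reject-traffic-on-external-ip: "false"
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local
  selector:
    app: unbound
    type: dns
  ports:
  - name: dns-udp-53
    protocol: UDP
    port: 53
  # externalIPs rendered from .Values.unbound.externalIPs (omitted here)

with an identical `unbound-tcp` Service carrying the TCP port entries, so each Service stays single-protocol.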
[1] https://github.com/kubernetes/kubernetes/issues/39188 --- system/unbound/templates/service.yaml | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/system/unbound/templates/service.yaml b/system/unbound/templates/service.yaml index 10eda868d2f..9de8794b3f0 100644 --- a/system/unbound/templates/service.yaml +++ b/system/unbound/templates/service.yaml @@ -1,27 +1,28 @@ +{{- range tuple "udp" "tcp"}} +{{- $proto := . }} +--- apiVersion: v1 kind: Service metadata: - name: {{ .Values.unbound.name }} + name: {{ $.Values.unbound.name }}-{{ $proto }} annotations: prometheus.io/scrape: "true" - prometheus.io/port: "{{.Values.unbound.port_unbound_exporter}}" - prometheus.io/targets: {{ required ".Values.alerts.prometheus missing" .Values.alerts.prometheus | quote }} + prometheus.io/port: "{{$.Values.unbound.port_unbound_exporter}}" + prometheus.io/targets: {{ required "$.Values.alerts.prometheus missing" $.Values.alerts.prometheus | quote }} parrot.sap.cc/announce: 'true' service.alpha.kubernetes.io/reject-traffic-on-external-ip: "false" spec: type: LoadBalancer externalTrafficPolicy: Local selector: - app: {{ .Values.unbound.name }} + app: {{ $.Values.unbound.name }} type: dns - ports: -{{- range $.Values.unbound.externalPorts | required ".Values.unbound.externalPorts missing" }} - - name: dns-tcp-{{.}} - protocol: TCP - port: {{.}} - - name: dns-udp-{{.}} - protocol: UDP + ports: +{{- range $.Values.unbound.externalPorts | required "$.Values.unbound.externalPorts missing" }} + - name: dns-{{ $proto }}-{{.}} + protocol: {{ $proto | upper }} port: {{.}} {{- end }} externalIPs: - {{- required "A valid .Values.unbound.externalIPs required!" .Values.unbound.externalIPs | toYaml | nindent 2 }} + {{- required "A valid $.Values.unbound.externalIPs required!" $.Values.unbound.externalIPs | toYaml | nindent 2 }} +{{- end }} From 4d8477a97fea8fa4bd99fdb2ef7374b2d16cb3b1 Mon Sep 17 00:00:00 2001 From: Maximilian Lendrich Date: Fri, 27 Dec 2024 15:44:45 +0100 Subject: [PATCH 052/224] Revert "Update vmware_esxi_nic_mellanox_issue.cfg" This reverts commit 6c51fc192972d3bab061e9fef95f435ed262860e. 
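In effect, the time filter of the Mellanox-issue query goes back from the 15-minute window to the original one-hour lookback:

    { "range": { "@timestamp": { "gte": "now-1h" }}}

as the diff below shows.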
--- .../files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg index b781428aea3..ad2ef7714cb 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg @@ -13,7 +13,7 @@ QueryJson = { ], "filter": [ { "term": { "sap.cc.audit.source.keyword": "ESXi" }}, - { "range": { "@timestamp": { "gte": "now-15m" }}} + { "range": { "@timestamp": { "gte": "now-1h" }}} ] } } From 6e355a07f1891a3dd3e2b8f821e41f2b9c3df4a1 Mon Sep 17 00:00:00 2001 From: Max Lendrich Date: Fri, 27 Dec 2024 16:08:43 +0100 Subject: [PATCH 053/224] [octobus-query-exporter] Specify index --- .../files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg index ad2ef7714cb..9046ba53ae9 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/files/queries/vmware/vmware_esxi_nic_mellanox_issue.cfg @@ -1,4 +1,5 @@ [query_elasticsearch_octobus_Mellanox_issue] +QueryIndices = c0001_log* QueryOnMissing = drop QueryJson = { "size": 0, From 3fb04bdfe7879cd6e81374a027fc7da48f406c95 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Thu, 2 Jan 2025 11:22:52 +0100 Subject: [PATCH 054/224] [opensearch-logs] fix datastream list --- .../templates/config/_install-dashboard-pattern.sh.tpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 103d3d681f6..633d52b3a2a 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -37,11 +37,11 @@ done # Dashboard index pattern for all available datastreams -for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|uniq) +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep "\-ds"|awk '{ print $1 }'|sort|uniq) do echo "using datastream $i from Opensearch-Logs" echo "setting OpenSearch dashboard index mapping for index $i" - curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" + curl -s --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" if [ $? 
-eq 0 ] then echo "index ${i} already exists in Opensearch dashboard" From fdad6878344c243e617fd4bc69ed0a4e299c5dde Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Thu, 2 Jan 2025 12:33:07 +0100 Subject: [PATCH 055/224] [opensearch-logs] adding second data user --- .../templates/config/_internal_users.yml.tpl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/system/opensearch-logs/templates/config/_internal_users.yml.tpl b/system/opensearch-logs/templates/config/_internal_users.yml.tpl index 08ce9b9af41..d3ca294c168 100644 --- a/system/opensearch-logs/templates/config/_internal_users.yml.tpl +++ b/system/opensearch-logs/templates/config/_internal_users.yml.tpl @@ -14,6 +14,12 @@ data: backend_roles: - "data" +data2: + hash: "{{ .Values.users.data2.nohash }}" + reserved: true + backend_roles: + - "data" + greenhouse: hash: "{{ .Values.users.greenhouse.nohash }}" reserved: true From f730ef3b61c21f4fc7cc08ee576a5c18a92fe675 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Thu, 2 Jan 2025 13:52:31 +0100 Subject: [PATCH 056/224] [opensearch-logs] testing index pattern creation --- .../templates/config/_install-dashboard-pattern.sh.tpl | 4 ++-- .../templates/cron-install-dashboard-pattern-job.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl index 633d52b3a2a..14b3ae92aa6 100644 --- a/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl +++ b/system/opensearch-logs/templates/config/_install-dashboard-pattern.sh.tpl @@ -21,11 +21,11 @@ for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/indices?v"|awk done # Dashboard index pattern for all available aliases, which are not datastreams -for i in $(curl -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep -v "\-ds"|grep -v "^\."|awk '{ print $1 }'|uniq) +for i in $(curl -s -u ${BASIC_AUTH_HEADER} "${CLUSTER_HOST}/_cat/aliases?v"|grep -v "\-ds"|grep -v "^\."|awk '{ print $1 }'|sort|uniq) do echo "using alias $i from Opensearch-Logs" echo "Setting OpenSearch dashboard index mapping for alias $i" - curl -s --header "content-type: application/JSON" --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" + curl -s --fail -XGET -u ${BASIC_AUTH_HEADER} "${DASHBOARD_HOST}/api/saved_objects/index-pattern/${i}" if [ $? 
-eq 0 ] then echo "index pattern for alias ${i} already exists in Opensearch dashboard, nothing to do" diff --git a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml index c6db9ec4294..3b1cef92e3f 100644 --- a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -10,7 +10,7 @@ metadata: ccloud/service: logs ccloud/support-group: observability spec: - schedule: "30 0,6,8,12,18 * * *" + schedule: "30 0,6,8,12,13,18 * * *" jobTemplate: spec: backoffLimit: 3 From fb4d9e1f85bbaa92b3b2cee97d62bfe16194e09c Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Thu, 2 Jan 2025 14:34:02 +0100 Subject: [PATCH 057/224] [opensearch-logs] remove unused cron entry --- .../templates/cron-install-dashboard-pattern-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml index 3b1cef92e3f..c6db9ec4294 100644 --- a/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml +++ b/system/opensearch-logs/templates/cron-install-dashboard-pattern-job.yaml @@ -10,7 +10,7 @@ metadata: ccloud/service: logs ccloud/support-group: observability spec: - schedule: "30 0,6,8,12,13,18 * * *" + schedule: "30 0,6,8,12,18 * * *" jobTemplate: spec: backoffLimit: 3 From ec861b103e833afe6869ac5ab8ed70e17ef83937 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Thu, 2 Jan 2025 16:29:05 +0100 Subject: [PATCH 058/224] [opensearch-logs] second user for otel --- .../templates/config/_internal_users.yml.tpl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/system/opensearch-logs/templates/config/_internal_users.yml.tpl b/system/opensearch-logs/templates/config/_internal_users.yml.tpl index d3ca294c168..c50141825a9 100644 --- a/system/opensearch-logs/templates/config/_internal_users.yml.tpl +++ b/system/opensearch-logs/templates/config/_internal_users.yml.tpl @@ -44,6 +44,12 @@ otel: backend_roles: - "otel" +otel2: + hash: "{{ .Values.users.otel2.nohash }}" + reserved: true + backend_roles: + - "otel" + audit: hash: "{{ .Values.users.audit.nohash }}" reserved: true From b15a986fb9fb9c2172d42f5beeee70eb00dcebd4 Mon Sep 17 00:00:00 2001 From: Vassil Dimitrov Date: Fri, 3 Jan 2025 13:02:52 +0100 Subject: [PATCH 059/224] [unbound] moved the prometheus annotations to the deployment This is making sure the scrape targets are not discovered twice. We've also removed the "prometheus.io/port" annotation and relying on the metrics port definition. This way the prometheus service discovery will be ending up with the metric container. The monitors will have to be adjusted. --- system/unbound/templates/deployment.yaml | 2 ++ system/unbound/templates/service.yaml | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/system/unbound/templates/deployment.yaml b/system/unbound/templates/deployment.yaml index 38dd3abb960..4ff654c15b7 100644 --- a/system/unbound/templates/deployment.yaml +++ b/system/unbound/templates/deployment.yaml @@ -18,6 +18,8 @@ spec: type: dns annotations: checksum/unbound.config: {{ include "unbound/templates/config.yaml" . 
| sha256sum }} + prometheus.io/scrape: "true" + prometheus.io/targets: {{ required "$.Values.alerts.prometheus missing" $.Values.alerts.prometheus | quote }} spec: affinity: nodeAffinity: diff --git a/system/unbound/templates/service.yaml b/system/unbound/templates/service.yaml index 9de8794b3f0..2eb03a63cd0 100644 --- a/system/unbound/templates/service.yaml +++ b/system/unbound/templates/service.yaml @@ -6,9 +6,6 @@ kind: Service metadata: name: {{ $.Values.unbound.name }}-{{ $proto }} annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{$.Values.unbound.port_unbound_exporter}}" - prometheus.io/targets: {{ required "$.Values.alerts.prometheus missing" $.Values.alerts.prometheus | quote }} parrot.sap.cc/announce: 'true' service.alpha.kubernetes.io/reject-traffic-on-external-ip: "false" spec: From 106323481299edc871907e5022f971b1d5720a7c Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Mon, 6 Jan 2025 13:05:13 +0100 Subject: [PATCH 060/224] gatekeeper: update from 3.17.0 to 3.18.1 As usual, this updates the vendored copy that we need to patch in the `linkerd.io/inject: enabled` annotations. --- system/gatekeeper/Chart.lock | 6 +- system/gatekeeper/Chart.yaml | 2 +- .../vendor/gatekeeper-upstream/Chart.yaml | 4 +- .../vendor/gatekeeper-upstream/README.md | 28 +++++--- .../crds/config-customresourcedefinition.yaml | 34 +++++++++ ...figpodstatus-customresourcedefinition.yaml | 72 +++++++++++++++++++ ...intpodstatus-customresourcedefinition.yaml | 18 +++++ ...atepodstatus-customresourcedefinition.yaml | 11 +++ .../gatekeeper-audit-deployment.yaml | 16 ++++- ...ekeeper-controller-manager-deployment.yaml | 4 +- ...ontroller-manager-poddisruptionbudget.yaml | 4 +- .../gatekeeper-manager-role-clusterrole.yaml | 12 ++++ .../vendor/gatekeeper-upstream/values.yaml | 17 ++--- 13 files changed, 194 insertions(+), 34 deletions(-) create mode 100644 system/gatekeeper/vendor/gatekeeper-upstream/crds/configpodstatus-customresourcedefinition.yaml diff --git a/system/gatekeeper/Chart.lock b/system/gatekeeper/Chart.lock index a98955d2670..55f29d24b18 100644 --- a/system/gatekeeper/Chart.lock +++ b/system/gatekeeper/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: gatekeeper repository: file://vendor/gatekeeper-upstream - version: 3.17.0 + version: 3.18.1 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 -digest: sha256:38dce78a5f10e4b66aca70a10246d6b200464d7dc510548495202b6d01c89423 -generated: "2024-11-04T05:33:56.066005165Z" +digest: sha256:cfb3f5d55e060482fa357c191d0c364a4b87ced83b5b80bac7384f325a4e603e +generated: "2025-01-06T12:55:12.614113542+01:00" diff --git a/system/gatekeeper/Chart.yaml b/system/gatekeeper/Chart.yaml index c3c1b9c5069..fe69d678efe 100644 --- a/system/gatekeeper/Chart.yaml +++ b/system/gatekeeper/Chart.yaml @@ -7,7 +7,7 @@ version: 1.0.0 # please leave like this; this does not use Chartmuseum dependencies: - name: gatekeeper alias: gatekeeper-upstream - version: 3.17.0 + version: 3.18.1 # repository: https://open-policy-agent.github.io/gatekeeper/charts repository: file://vendor/gatekeeper-upstream # ^ We have to vendor to apply custom patches for linkerd support. 
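For reference, the linkerd patching mentioned above amounts to adding the inject annotation to the vendored pod templates — a sketch of the idea, not the literal patch carried in this repo:

spec:
  template:
    metadata:
      annotations:
        linkerd.io/inject: enabled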
diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml index d92d3825c14..33e59a9ea87 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v3.17.0 +appVersion: v3.18.1 description: A Helm chart for Gatekeeper home: https://github.com/open-policy-agent/gatekeeper icon: https://open-policy-agent.github.io/gatekeeper/website/img/logo.svg @@ -8,4 +8,4 @@ keywords: name: gatekeeper sources: - https://github.com/open-policy-agent/gatekeeper.git -version: 3.17.0 +version: 3.18.1 diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/README.md b/system/gatekeeper/vendor/gatekeeper-upstream/README.md index de8642868e3..ed144406f12 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/README.md +++ b/system/gatekeeper/vendor/gatekeeper-upstream/README.md @@ -74,7 +74,7 @@ information._ | postInstall.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post install hooks | `[]` | | postInstall.labelNamespace.extraAnnotations | Extra annotations added to the post install Job | `{}` | | postInstall.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0` | +| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.18.1` | | postInstall.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postInstall.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postInstall.labelNamespace.extraRules | Extra rules for the gatekeeper-update-namespace-label Role | `[]` | @@ -97,7 +97,7 @@ information._ | postUpgrade.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post upgrade hooks | `[]` | | postUpgrade.labelNamespace.extraAnnotations | Extra annotations added to the post upgrade Job | `{}` | | postUpgrade.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0` | +| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.18.1` | | postUpgrade.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postUpgrade.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postUpgrade.labelNamespace.priorityClassName | Priority class name for gatekeeper-update-namespace-label-post-upgrade Job | `` | @@ -107,10 +107,10 @@ information._ | postUpgrade.resources | The resource request/limits for the container image in postUpgrade hook jobs | `{}` | | postUpgrade.securityContext | Security context applied on the container | `{ "allowPrivilegeEscalation": false, "capabilities": "drop": [all], "readOnlyRootFilesystem": true, "runAsGroup": 999, "runAsNonRoot": true, "runAsUser": 1000 }` | | preInstall.crdRepository.image.repository | Image with kubectl to update the CRDs. If not set, the `image.crdRepository` is used instead. 
| `null` | -| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.17.0` | +| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.18.1` | | preUninstall.deleteWebhookConfigurations.enabled | Delete webhooks before gatekeeper itself is uninstalled | `false` | | preUninstall.deleteWebhookConfigurations.image.repository | Image with kubectl to delete the webhooks | `openpolicyagent/gatekeeper-crds` | -| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.17.0` | +| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.18.1` | | preUninstall.deleteWebhookConfigurations.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | preUninstall.deleteWebhookConfigurations.image.pullSecrets | Image pullSecrets | `[]` | | preUninstall.deleteWebhookConfigurations.extraRules | Extra rules for the gatekeeper-delete-webhook-configs Role | `[]` | @@ -132,7 +132,7 @@ information._ | auditInterval | The frequency with which audit is run | `60` | | constraintViolationsLimit | The maximum # of audit violations reported on a constraint | `20` | | auditFromCache | Take the roster of resources to audit from the audit cache | `false` | -| auditChunkSize | Chunk size for listing cluster resources for audit (alpha feature) | `500` | +| auditChunkSize | (alpha) Chunk size for listing cluster resources for audit | `500` | | auditMatchKindOnly | Only check resources of the kinds specified in all constraints defined in the cluster. | `false` | | disableAudit | Disable audit controller | `false` | | disableMutation | Disable mutation | `false` | @@ -164,18 +164,21 @@ information._ | mutatingWebhookTimeoutSeconds | The timeout for the mutating webhook in seconds | `3` | | mutatingWebhookCustomRules | Custom rules for selecting which API resources trigger the webhook. NOTE: If you change this, ensure all your constraints are still being enforced. | `{}` | | mutatingWebhookURL | Custom URL for Kubernetes API server to use to reach the mutating webhook pod. If not set, the default of connecting via the kubernetes service endpoint is used. | `null` | -| emitAdmissionEvents | Emit K8s events in configurable namespace for admission violations (alpha feature) | `false` | -| emitAuditEvents | Emit K8s events in configurable namespace for audit violations (alpha feature) | `false` | -| enableK8sNativeValidation | Enable the K8s Native Validating driver to allow constraint templates to use rules written in VAP-style CEL (beta feature) | `true` | -| defaultCreateVAPForTemplates | Create VAP resource for template containing VAP-style CEL source. Allowed values are false: do not create Validating Admission Policy unless generateVAP: true is set on constraint template explicitly, true: create Validating Admission Policy unless generateVAP: false is set on constraint template explicitly. (alpha feature) | `false` | -| defaultCreateVAPBindingForConstraints | Create VAPBinding resource for constraint of the template containing VAP-style CEL source. Allowed values are false: do not create Validating Admission Policy Binding, true: create Validating Admission Policy Binding. 
(alpha feature) | `false` | +| emitAdmissionEvents | (alpha) Emit K8s events in configurable namespace for admission violations | `false` | +| emitAuditEvents | (alpha) Emit K8s events in configurable namespace for audit violations | `false` | +| logStatsAdmission | (alpha) Log stats for admission webhook | `false` | +| logStatsAudit | (alpha) Log stats metrics for the audit run | `false` | +| enableK8sNativeValidation | Enable the K8s Native Validating driver to allow constraint templates to use rules written in VAP-style CEL | `true` | +| defaultCreateVAPForTemplates | (alpha) Create VAP resource for template containing VAP-style CEL source. Allowed values are false: do not create Validating Admission Policy unless generateVAP: true is set on constraint template explicitly, true: create Validating Admission Policy unless generateVAP: false is set on constraint template explicitly. | `false` | +| defaultCreateVAPBindingForConstraints | (alpha) Create VAPBinding resource for constraint of the template containing VAP-style CEL source. Allowed values are false: do not create Validating Admission Policy Binding, true: create Validating Admission Policy Binding. | `false` | +| defaultWaitForVAPBGeneration | (alpha) Wait time in seconds before generating a ValidatingAdmissionPolicyBinding after a constraint CRD is created. | `30` | | auditEventsInvolvedNamespace | Emit audit events for each violation in the involved objects namespace, the default (false) generates events in the namespace Gatekeeper is installed in. Audit events from cluster-scoped resources will continue to generate events in the namespace that Gatekeeper is installed in | `false` | | admissionEventsInvolvedNamespace | Emit admission events for each violation in the involved objects namespace, the default (false) generates events in the namespace Gatekeeper is installed in. 
Admission events from cluster-scoped resources will continue to generate events in the namespace that Gatekeeper is installed in | `false` | | logDenies | Log detailed info on each deny | `false` | | logLevel | Minimum log level | `INFO` | | image.pullPolicy | The image pull policy | `IfNotPresent` | | image.repository | Image repository | `openpolicyagent/gatekeeper` | -| image.release | The image release tag to use | Current release version: `v3.17.0` | +| image.release | The image release tag to use | Current release version: `v3.18.1` | | image.pullSecrets | Specify an array of imagePullSecrets | `[]` | | resources | The resource request/limits for the container image | limits: 1 CPU, 512Mi, requests: 100mCPU, 256Mi | | nodeSelector | The node selector to use for pod scheduling | `kubernetes.io/os: linux` | @@ -216,6 +219,9 @@ information._ | audit.readinessTimeout | Timeout in seconds for audit's readiness probe | `1` | | audit.livenessTimeout | Timeout in seconds for the audit's liveness probe | `1` | | audit.logLevel | The minimum log level for audit, takes precedence over `logLevel` when specified | `null` | +| audit.enablePubsub | (alpha) Enabled pubsub to publish messages | `false` | +| audit.connection | (alpha) Connection name for publishing audit violation messages | `audit-connection` | +| audit.channel | (alpha) Channel name for publishing audit violation messages | `audit-channel` | | replicas | The number of Gatekeeper replicas to deploy for the webhook | `3` | | podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | | podLabels | The labels to add to the Gatekeeper pods | `{}` | diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/crds/config-customresourcedefinition.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/crds/config-customresourcedefinition.yaml index 11a5d922789..8a5afdeb640 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/crds/config-customresourcedefinition.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/crds/config-customresourcedefinition.yaml @@ -112,7 +112,41 @@ spec: type: object status: description: ConfigStatus defines the observed state of Config. + properties: + byPod: + items: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. 
+ type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: array type: object type: object served: true storage: true + subresources: + status: {} diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/crds/configpodstatus-customresourcedefinition.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/crds/configpodstatus-customresourcedefinition.yaml new file mode 100644 index 00000000000..f351b718375 --- /dev/null +++ b/system/gatekeeper/vendor/gatekeeper-upstream/crds/configpodstatus-customresourcedefinition.yaml @@ -0,0 +1,72 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + gatekeeper.sh/system: "yes" + name: configpodstatuses.status.gatekeeper.sh +spec: + group: status.gatekeeper.sh + names: + kind: ConfigPodStatus + listKind: ConfigPodStatusList + plural: configpodstatuses + singular: configpodstatus + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: object + served: true + storage: true diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/crds/constraintpodstatus-customresourcedefinition.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/crds/constraintpodstatus-customresourcedefinition.yaml index 85942c0dbcc..9caedd58716 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/crds/constraintpodstatus-customresourcedefinition.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/crds/constraintpodstatus-customresourcedefinition.yaml @@ -50,6 +50,24 @@ spec: type: string enforced: type: boolean + enforcementPointsStatus: + items: + description: EnforcementPointStatus represents the status of a single enforcement point. 
+ properties: + enforcementPoint: + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + state: + type: string + required: + - enforcementPoint + - state + type: object + type: array errors: items: description: Error represents a single error caught while adding a constraint to engine. diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/crds/constrainttemplatepodstatus-customresourcedefinition.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/crds/constrainttemplatepodstatus-customresourcedefinition.yaml index 2d4bd1c8bf2..09b0b9c64e8 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/crds/constrainttemplatepodstatus-customresourcedefinition.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/crds/constrainttemplatepodstatus-customresourcedefinition.yaml @@ -73,6 +73,17 @@ spec: don't ONLY use UUIDs, this is an alias to string. Being a type captures intent and helps make sure that UIDs and names do not get conflated. type: string + vapGenerationStatus: + description: VAPGenerationStatus represents the status of VAP generation. + properties: + observedGeneration: + format: int64 + type: integer + state: + type: string + warning: + type: string + type: object type: object type: object served: true diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-audit-deployment.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-audit-deployment.yaml index 46084a527bb..4c334c95366 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-audit-deployment.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-audit-deployment.yaml @@ -4,6 +4,7 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: + {{- include "gatekeeper.commonLabels" . | nindent 4 }} app: '{{ template "gatekeeper.name" . }}' chart: '{{ template "gatekeeper.name" . 
}}' control-plane: audit-controller @@ -63,15 +64,21 @@ spec: - --validating-webhook-configuration-name={{ .Values.validatingWebhookName }} - --mutating-webhook-configuration-name={{ .Values.mutatingWebhookName }} - --audit-from-cache={{ .Values.auditFromCache }} - - --audit-chunk-size={{ .Values.auditChunkSize }} + {{ if hasKey .Values "auditChunkSize" }}- --audit-chunk-size={{ .Values.auditChunkSize }}{{- end }} - --audit-match-kind-only={{ .Values.auditMatchKindOnly }} - - --emit-audit-events={{ .Values.emitAuditEvents }} + {{ if hasKey .Values "emitAuditEvents" }}- --emit-audit-events={{ .Values.emitAuditEvents }}{{- end }} + {{ if hasKey .Values "logStatsAudit" }}- --log-stats-audit={{ .Values.logStatsAudit }}{{- end }} - --audit-events-involved-namespace={{ .Values.auditEventsInvolvedNamespace }} - --operation=audit - --operation=status - {{ if .Values.audit.enablePubsub}} + - --operation=generate + {{ if hasKey .Values.audit "enablePubsub" }} - --enable-pub-sub={{ .Values.audit.enablePubsub }} + {{- end }} + {{ if hasKey .Values.audit "connection" }} - --audit-connection={{ .Values.audit.connection }} + {{- end }} + {{ if hasKey .Values.audit "channel" }} - --audit-channel={{ .Values.audit.channel }} {{- end }} {{ if not .Values.disableMutation}}- --operation=mutation-status{{- end }} @@ -99,6 +106,9 @@ spec: {{- if hasKey .Values "defaultCreateVAPBindingForConstraints"}} - --default-create-vap-binding-for-constraints={{ .Values.defaultCreateVAPBindingForConstraints }} {{- end }} + {{ if hasKey .Values "defaultWaitForVAPBGeneration"}} + - --default-wait-for-vapb-generation={{ .Values.defaultWaitForVAPBGeneration }} + {{- end }} command: - /manager env: diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-deployment.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-deployment.yaml index 5be740a1bdd..432c68804fa 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-deployment.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-deployment.yaml @@ -3,6 +3,7 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: + {{- include "gatekeeper.commonLabels" . | nindent 4 }} app: '{{ template "gatekeeper.name" . }}' chart: '{{ template "gatekeeper.name" . 
}}' control-plane: controller-manager @@ -64,7 +65,8 @@ spec: - --prometheus-port={{ .Values.controllerManager.metricsPort }} - --logtostderr - --log-denies={{ .Values.logDenies }} - - --emit-admission-events={{ .Values.emitAdmissionEvents }} + {{ if hasKey .Values "emitAdmissionEvents" }}- --emit-admission-events={{ .Values.emitAdmissionEvents }}{{- end }} + {{ if hasKey .Values "logStatsAdmission" }}- --log-stats-admission={{ .Values.logStatsAdmission }}{{- end }} - --admission-events-involved-namespace={{ .Values.admissionEventsInvolvedNamespace }} - --log-level={{ (.Values.controllerManager.logLevel | empty | not) | ternary .Values.controllerManager.logLevel .Values.logLevel }} - --exempt-namespace={{ .Release.Namespace }} diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml index 609270a92de..140c55f8895 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml @@ -1,6 +1,6 @@ --- -{{- $v1 := .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}} -{{- $v1beta1 := .Capabilities.APIVersions.Has "policy/v1beta1/PodDisruptionBudget" -}} +{{ $v1 := .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}} +{{ $v1beta1 := .Capabilities.APIVersions.Has "policy/v1beta1/PodDisruptionBudget" -}} apiVersion: policy/v1{{- if and (not $v1) $v1beta1 -}}beta1{{- end }} kind: PodDisruptionBudget metadata: diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-manager-role-clusterrole.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-manager-role-clusterrole.yaml index a6306b3a285..591d36dc566 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-manager-role-clusterrole.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/templates/gatekeeper-manager-role-clusterrole.yaml @@ -63,6 +63,18 @@ rules: - patch - update - watch +- apiGroups: + - config.gatekeeper.sh + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - config.gatekeeper.sh resources: diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml index 06e0a34c511..c0d8df4cb74 100644 --- a/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml +++ b/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml @@ -39,8 +39,6 @@ auditChunkSize: 500 logLevel: INFO logDenies: false logMutations: false -emitAdmissionEvents: false -emitAuditEvents: false admissionEventsInvolvedNamespace: false auditEventsInvolvedNamespace: false resourceQuota: true @@ -49,14 +47,14 @@ enableK8sNativeValidation: true image: repository: openpolicyagent/gatekeeper crdRepository: openpolicyagent/gatekeeper-crds - release: v3.17.0 + release: v3.18.1 pullPolicy: IfNotPresent pullSecrets: [] preInstall: crdRepository: image: repository: null - tag: v3.17.0 + tag: v3.18.1 postUpgrade: labelNamespace: serviceAccount: @@ -65,7 +63,7 @@ postUpgrade: enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0 + tag: v3.18.1 pullPolicy: IfNotPresent pullSecrets: [] extraNamespaces: [] @@ -99,7 +97,7 @@ postInstall: extraRules: [] image: repository: openpolicyagent/gatekeeper-crds - 
tag: v3.17.0 + tag: v3.18.1 pullPolicy: IfNotPresent pullSecrets: [] extraNamespaces: [] @@ -143,7 +141,7 @@ preUninstall: enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0 + tag: v3.18.1 pullPolicy: IfNotPresent pullSecrets: [] priorityClassName: "" @@ -222,16 +220,13 @@ controllerManager: extraRules: [] networkPolicy: enabled: false - ingress: { } + ingress: [] # - from: # - ipBlock: # cidr: 0.0.0.0/0 audit: serviceAccount: name: gatekeeper-admin - enablePubsub: false - connection: audit-connection - channel: audit-channel hostNetwork: false dnsPolicy: ClusterFirst metricsPort: 8888 From 16be2550e22f09ef8d65a285cce607942c4b8777 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Thu, 2 Jan 2025 15:34:46 +0200 Subject: [PATCH 061/224] [pxc-db] Improve chart documentation --- common/pxc-db/Chart.yaml | 2 +- common/pxc-db/values.yaml | 164 ++++++++++++++++++++++++++++++-------- 2 files changed, 133 insertions(+), 33 deletions(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index 4b3d81abab7..9dd0619a3cd 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,7 +16,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.5 +version: 0.2.6 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml index fe6a5603e21..092927b8963 100644 --- a/common/pxc-db/values.yaml +++ b/common/pxc-db/values.yaml @@ -2,31 +2,58 @@ # Default values for percona-xtradb-cluster. # This is a YAML-formatted file. # Declare variables to be passed into your templates. + +# -- Enable linkerd annotations for services, pods and other cluster resources linkerd: enabled: true +# -- Enable owner_info injector chart dependency to allow installation as stand-alone chart owner_info: enabled: false +# -- Name of the cluster +# This is used as base name for all resources created by the chart +# Example: test +# This creates PerconaXtraDBCluster with name test-db name: null -# NOTE: don't use internal TLS by default, linkerd mesh is preferred +# -- Enable internal TLS +# https://docs.percona.com/percona-operator-for-mysql/pxc/TLS.html +# NOTE: We don't use internal TLS by default, because linkerd mesh is preferred tls: enabled: false +# -- Various configuration options to prevent users from configuring a cluster with unsafe parameters. 
+# Translates to https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html#unsafe-flags-section
+# Example:
+# unsafeFlags:
+#   pxcSize: false
+#   proxySize: false
+#   backupIfUnhealthy: false
 unsafeFlags: {}
-#unsafeFlags:
-#  pxcSize: false
-#  proxySize: false
-#  backupIfUnhealthy: false

-# Enable init.sql job
+### InitDB configuration
+# -- InitDB job options
+# -- Image used by the initdb job
+initdb:
+  image:
+    name: percona/percona-xtradb-cluster-operator
+    tag: 1.16.1-pxc8.0-backup-pxb8.0.35
+# -- Enable InitDB job that creates databases and users
+# Disabled by default
 initdb_job: null

-# Set character set and collation in init.sql
+# -- Set default character set and collation in init.sql
 character_set_server: "utf8mb4"
 collation_server: "utf8mb4_0900_ai_ci"
-
+# -- Enable the creation of a local-only root user `ccroot` without a password
+ccroot_user:
+  enabled: false
+# -- List of databases to create
+# Example:
+# databases:
+#   - test
 databases: {}
+# -- List of users to create
 users: {}
 # backup:
 #   name: backup
@@ -41,10 +68,14 @@ users: {}
 #   # used as credentials. It should be possible to change the latter,
 #   # without having to change the first.
 #   password: null # Causes users not to be created, and possibly even to get locked
-#   auth_plugin: 'mysql_native_password'
+#   auth_plugin: 'caching_sha2_password' # 'mysql_native_password' is deprecated
 #   grants:
 #   - ALL ON example.*
+### End of InitDB configuration

+# -- Default system-level Percona XtraDB Cluster users
+# All credentials are mandatory
+# See https://docs.percona.com/percona-operator-for-mysql/pxc/users.html#system-users
 system_users:
   root:
     password: null
@@ -59,18 +90,26 @@ system_users:
   replication:
     password: null

-ccroot_user:
-  enabled: false
-
+# -- Options to ignore injected labels and annotations
+# https://docs.percona.com/percona-operator-for-mysql/pxc/annotations.html#specifying-labels-and-annotations-ignored-by-the-operator
+# -- List of ignored annotations
 ignoreAnnotations: []
+# -- List of ignored labels
 ignoreLabels:
   - ccloud/service
   - ccloud/support-group

+# -- Additional labels to apply to PerconaXtraDBCluster resource
+# https://docs.percona.com/percona-operator-for-mysql/pxc/annotations.html
 annotations: {}

+# -- Pause/resume Percona XtraDB Cluster
+# https://docs.percona.com/percona-operator-for-mysql/pxc/pause.html
 pause: false

+# -- The initContainer section in the custom resource
+# Allows overriding the image used by the initContainer and configuring resource limits and requests
+# https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html#initcontainer-configuration-section
 initContainer:
   image:
     name: percona-xtradb-cluster-operator
@@ -84,6 +123,21 @@ initContainer:
       memory: 200M
       cpu: 200m

+# -- Configuration of the mysqld-exporter sidecar container
+# Provides Prometheus metrics for the Percona XtraDB Cluster
+metrics:
+  enabled: true
+  flags:
+    - collect.binlog_size
+    - collect.info_schema.processlist
+    - collect.info_schema.query_response_time
+    - collect.info_schema.innodb_tablespaces
+  image:
+    name: prom/mysqld-exporter
+    tag: v0.16.0
+  override: null
+
+# -- Percona XtraDB Cluster statefulset configuration
 pxc:
   size: 3
   image:
@@ -115,14 +169,20 @@ pxc:
     fsGroup: 1001
     supplementalGroups: [1001]
     fsGroupChangePolicy: "Always"
+  # -- Expose cluster pods as separate services
+  # https://docs.percona.com/percona-operator-for-mysql/pxc/expose.html#service-per-pod
   expose:
     enabled: false
    type: ClusterIP
    externalTrafficPolicy: Cluster
    internalTrafficPolicy: Cluster
    annotations:
      config.linkerd.io/opaque-ports: "3306,3307,3009,4444,4567,4568,33060,33062"
+  # -- Automatic crash recovery
+  # https://docs.percona.com/percona-operator-for-mysql/pxc/recovery.html#automatic-crash-recovery
   autoRecovery: true
+  # -- Custom my.cnf configuration
+  # https://docs.percona.com/percona-operator-for-mysql/pxc/options.html
   configuration:
     performance_schema: true
     options:
@@ -160,6 +220,8 @@ pxc:
       long_query_time: 5
       log_error_suppression_list: "MY-010055,MY-013360"
   priority_class: "critical-infrastructure"
+  # -- Advanced affinity configuration for PXC database pods
+  # https://docs.percona.com/percona-operator-for-mysql/pxc/constraints.html#simple-approach-use-topologykey-of-the-percona-operator-for-mysql
   affinity:
     advanced:
       nodeAffinity:
@@ -206,11 +268,13 @@ pxc:
             values:
              - pxc
          topologyKey: kubernetes.io/hostname
+  # -- Resource configuration for cluster pods
   resources:
     requests:
       memory: 1G
       cpu: 500m
     limits: {}
+  # -- Storage configuration for cluster nodes
   persistence:
     enabled: true
     ## percona data Persistent Volume Storage Class
@@ -224,6 +288,7 @@ pxc:
     accessMode: ReadWriteOnce
     size: 10Gi

+# -- HAProxy statefulset configuration
 haproxy:
   enabled: true
   size: 2
@@ -256,6 +321,9 @@ haproxy:
     fsGroup: 1001
     supplementalGroups: [1001]
   service:
+    # -- Primary HAProxy service configuration
+    # Exposes *only one* primary cluster member
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#haproxyexposeprimaryenabled
     primary:
       enabled: true
       type: ClusterIP
@@ -263,6 +331,9 @@ haproxy:
       internalTrafficPolicy: Cluster
       annotations:
         config.linkerd.io/opaque-ports: "3306,3307,3009,4444,4567,4568,33060,33062"
+    # -- Replicas HAProxy service configuration
+    # Exposes *all* cluster members
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#haproxyexposereplicasenabled
     replicas:
       enabled: true
       type: ClusterIP
@@ -271,6 +342,8 @@ haproxy:
       annotations:
         config.linkerd.io/opaque-ports: "3306,3307,3009,4444,4567,4568,33060,33062"
   priority_class: "critical-infrastructure"
+  # -- Advanced affinity configuration for HAProxy pods
+  # https://docs.percona.com/percona-operator-for-mysql/pxc/constraints.html#simple-approach-use-topologykey-of-the-percona-operator-for-mysql
   affinity:
     advanced:
       nodeAffinity:
@@ -317,17 +390,14 @@ haproxy:
             values:
              - haproxy
          topologyKey: kubernetes.io/hostname
+  # -- Resource configuration for HAProxy pods
   resources:
     requests:
       memory: 100M
       cpu: 500m
     limits: {}

-initdb:
-  image:
-    name: percona/percona-xtradb-cluster-operator
-    tag: 1.16.1-pxc8.0-backup-pxb8.0.35
-
+## - Scheduled backup configuration
 backup:
   enabled: false
   annotations: {}
@@ -337,62 +407,92 @@ backup:
     tag: 1.16.1-pxc8.0-backup-pxb8.0.35
   override: null
   imagePullPolicy: Always
+  # -- The number of retries to make a backup
   backoffLimit: 2
+  # -- The timeout value in seconds, after which the backup job will automatically fail.
   activeDeadlineSeconds: 3600
+  # -- Priority class for the backup job
   priority_class: "critical-infrastructure"
+  # -- Resource configuration for the backup job
   resources:
     requests: {}
     limits: {}
+  # -- PiTR configuration
+  # When enabled, the operator creates a PiTR binlog-collector deployment
   pitr:
     enabled: false
    storageName: s3-backups-binlogs
+    # -- Seconds between running the binlog uploader.
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backuppitrtimebetweenuploads
     timeBetweenUploads: 300
+    # -- Timeout in seconds for the binlog to be uploaded
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backuppitrtimeoutseconds
     timeoutSeconds: 60
+    # -- Priority class for the binlog-collector deployment
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupstoragesstorage-namepriorityclassname
     priority_class: "critical-infrastructure"
+    # -- Resource configuration for a PiTR binlog-collector deployment
     resources:
       requests: {}
       limits: {}
+  # -- S3 backup storage configuration
   s3:
+    # -- General configuration that is deep-copied into the custom resource
+    # config:
+    # -- S3 credential name
+    # Default value should stay the same as the secret name created by the chart
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupstoragesstorage-names3credentialssecret
     credentialsSecret: "pxc-db-{{ .Values.name }}-backup-s3"
+    # -- S3 region name
+    # Default value is being taken from global variables
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupstoragesstorage-names3region
     region: "{{ .Values.global.region }}"
+    # -- S3 API endpoint URL
+    # This option must be set explicitly
+    # Example: https://objectstore-3.REGION.DOMAIN
+    # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupstoragesstorage-names3endpointurl
     endpointUrl: null
+    # -- S3 credentials
+    # This map is being used by the chart k8s secret creation
     secrets:
       aws_access_key_id: null
      aws_secret_access_key: null
+  # -- S3 storages list
+  # Contains the bucket name and an enable/disable value for each type of storage
+  # The name of the storage adds a backup configuration section with the `s3-backups` prefix
+  # Example: s3-backups-daily
+  # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupstoragesstorage-names3bucket
   storages:
+    # -- Binlogs storage is being used by the binlog-collector (PiTR)
     binlogs:
       enabled: true
       bucket: "pxc-backup-{{ .Values.global.region }}/binlogs/{{.Values.name}}/"
+    # -- Daily backups storage is enabled by default
     daily:
       enabled: true
       bucket: "pxc-backup-{{ .Values.global.region }}/backups/{{ .Values.name }}/daily"
+    # -- Hourly backups storage is disabled by default
     hourly:
       enabled: false
       bucket: "pxc-backup-{{ .Values.global.region }}/backups/{{ .Values.name }}/hourly"
+    # -- Custom backup storage is supposed to be used for manual backups
     custom:
       enabled: false
       bucket: "pxc-backup-{{ .Values.global.region }}/backups/{{ .Values.name }}/custom"
+  # -- Example of the backup schedule configuration
   schedule:
     - name: "daily-backup"
+      # -- Scheduled time to make a backup, specified in the crontab format.
+      # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupscheduleschedule
       schedule: "0 0 * * *"
+      # -- The number of most recent backups to store. Older backups are automatically deleted.
+      # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupschedulekeep
       keep: 5
-      storageName: s3-backups
-
-# Prometheus metrics.
-metrics:
-  enabled: true
-  flags:
-    - collect.binlog_size
-    - collect.info_schema.processlist
-    - collect.info_schema.query_response_time
-    - collect.info_schema.innodb_tablespaces
-  image:
-    name: prom/mysqld-exporter
-    tag: v0.16.0
-  override: null
+      # -- The name of the storage for the backups configured in the storages subsection
+      storageName: s3-backups-daily

-# Default Prometheus alerts and rules.
+# -- Default Prometheus alerts and rules.
 alerts:
   enabled: true

From b437173a6ef75db5ea9ecba35a8b32dfb96fb17c Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Fri, 3 Jan 2025 17:02:57 +0200
Subject: [PATCH 062/224] [pxc-operator] Set correct target for the OperatorNotReady alerts

* Set correct target for the OperatorNotReady alerts

* Update maintainers list

Pod metrics from kube-state-metrics are present only in the kubernetes
prometheus
---
 system/percona-xtradb-cluster-operator/Chart.yaml |  3 +--
 .../templates/alerts.yaml | 8 ++++----
 .../alerts/{ => kubernetes}/_pxc-operator.alerts.tpl | 6 +++---
 system/percona-xtradb-cluster-operator/values.yaml | 2 +-
 4 files changed, 9 insertions(+), 10 deletions(-)
 rename system/percona-xtradb-cluster-operator/templates/alerts/{ => kubernetes}/_pxc-operator.alerts.tpl (76%)

diff --git a/system/percona-xtradb-cluster-operator/Chart.yaml b/system/percona-xtradb-cluster-operator/Chart.yaml
index 75d57b43b17..f8dab249a84 100644
--- a/system/percona-xtradb-cluster-operator/Chart.yaml
+++ b/system/percona-xtradb-cluster-operator/Chart.yaml
@@ -4,9 +4,8 @@ name: percona-xtradb-cluster-operator
 description: A Helm chart to install Percona XtraDB Cluster Operator.
 home: "https://github.com/sapcc/helm-charts/tree/master/system/percona-xtradb-cluster-operator"
 type: application
-version: 0.3.1
+version: 0.3.2
 appVersion: "1.16.1"
-kubeVersion: ">=1.26.0-0"
 maintainers:
   - name: Birk Bohne
     url: https://github.com/businessbean
diff --git a/system/percona-xtradb-cluster-operator/templates/alerts.yaml b/system/percona-xtradb-cluster-operator/templates/alerts.yaml
index d70ce890b06..353b96eac7b 100644
--- a/system/percona-xtradb-cluster-operator/templates/alerts.yaml
+++ b/system/percona-xtradb-cluster-operator/templates/alerts.yaml
@@ -2,12 +2,12 @@ apiVersion: monitoring.coreos.com/v1
 kind: PrometheusRule
 metadata:
-  name: percona-xtradb-cluster-operator-alerts
+  name: percona-xtradb-cluster-operator-kubernetes-alerts
   labels:
-    prometheus: openstack
-    app.kubernetes.io/component: percona-xtradb-cluster-operator-alerts
+    prometheus: kubernetes
+    app.kubernetes.io/component: percona-xtradb-cluster-operator-kubernetes-alerts
{{- include "percona-xtradb-cluster-operator.labels" . | indent 4}}
 spec:
   groups:
-{{ include (print .Template.BasePath "/alerts/_pxc-operator.alerts.tpl") . | indent 2 }}
+{{ include (print .Template.BasePath "/alerts/kubernetes/_pxc-operator.alerts.tpl") . | indent 2 }}
 {{- end }}
diff --git a/system/percona-xtradb-cluster-operator/templates/alerts/_pxc-operator.alerts.tpl b/system/percona-xtradb-cluster-operator/templates/alerts/kubernetes/_pxc-operator.alerts.tpl
similarity index 76%
rename from system/percona-xtradb-cluster-operator/templates/alerts/_pxc-operator.alerts.tpl
rename to system/percona-xtradb-cluster-operator/templates/alerts/kubernetes/_pxc-operator.alerts.tpl
index 86447c9cb5c..712d4c61d56 100644
--- a/system/percona-xtradb-cluster-operator/templates/alerts/_pxc-operator.alerts.tpl
+++ b/system/percona-xtradb-cluster-operator/templates/alerts/kubernetes/_pxc-operator.alerts.tpl
@@ -1,4 +1,4 @@
-- name: pxc-operator.alerts
+- name: pxc-operator-kubernetes.alerts
   rules:
     - alert: PerconaXtraDBClusterOperatorNotReady
       expr: (sum(kube_pod_status_ready_normalized{condition="true", pod=~"percona-xtradb-cluster-operator.*"}) < 1)
@@ -6,10 +6,10 @@
       labels:
         severity: info # New Alerts MUST be initially implemented Info.
         context: availability
-        service: percona-xtradb-cluster-operator
+        service: {{ index .Values "owner-info" "service" | quote }}
         tier: os
         playbook: "docs/support/playbook/database/percona_xtradb_cluster_operator_not_ready/"
-        support_group: network-api
+        support_group: {{ index .Values "owner-info" "support-group" | quote }}
       annotations:
         description: percona-xtradb-cluster-operator is not ready for 10 minutes.
         summary: percona-xtradb-cluster-operator is not ready for 10 minutes. Please check the pod.
diff --git a/system/percona-xtradb-cluster-operator/values.yaml b/system/percona-xtradb-cluster-operator/values.yaml
index 85e6d4fdcd0..55683630415 100644
--- a/system/percona-xtradb-cluster-operator/values.yaml
+++ b/system/percona-xtradb-cluster-operator/values.yaml
@@ -243,8 +243,8 @@ kube-state-metrics:
 owner-info:
   support-group: network-api
+  service: percona-xtradb-cluster-operator
   maintainers:
     - Vladislav Gusev
-    - Bashar Alkhateeb
     - Birk Bohne
   helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/system/percona-xtradb-cluster-operator

From 3ebdd572a249c0436580ede42b37ef540fd3dc41 Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Mon, 6 Jan 2025 14:12:00 +0200
Subject: [PATCH 063/224] [pxc-db] Move linkerd service annotations out of values

* Make linkerd service annotation optional

* Enable pxc service exposure by default, because it's required by
 openstack/utils
---
 common/pxc-db/Chart.yaml             |  2 +-
 common/pxc-db/templates/cluster.yaml | 23 ++++++++++++++++++-----
 common/pxc-db/values.yaml            | 11 ++++-------
 3 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml
index 9dd0619a3cd..cf16dcdbe2e 100644
--- a/common/pxc-db/Chart.yaml
+++ b/common/pxc-db/Chart.yaml
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.6
+version: 0.2.7
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
diff --git a/common/pxc-db/templates/cluster.yaml b/common/pxc-db/templates/cluster.yaml
index 6c2d8e3fd4f..6524d95a48f 100644
--- a/common/pxc-db/templates/cluster.yaml
+++ b/common/pxc-db/templates/cluster.yaml
@@ -73,10 +73,13 @@ spec:
{{ merge (include "pxc-db.linkerdPodAnnotations" $ | fromYaml) (include "pxc-db.metricsAnnotations" $ | fromYaml ) ($pxc.annotations) | toYaml | indent 6 }}
       labels:
{{ merge (include "pxc-db.appLabels" $ | fromYaml) ($pxc.labels) | toYaml | indent 6 }}
-    {{- if $pxc.expose }}
     expose:
-{{ tpl ($pxc.expose | toYaml) $ | indent 6 }}
-    {{- end }}
+      enabled: {{ $pxc.expose.enabled }}
+      type: {{ $pxc.expose.type }}
+      externalTrafficPolicy: {{ $pxc.expose.externalTrafficPolicy }}
+      internalTrafficPolicy: {{ $pxc.expose.internalTrafficPolicy }}
+      annotations:
+{{ merge (include "pxc-db.linkerdServiceAnnotations" $ | fromYaml) ($pxc.expose.annotations) | toYaml | indent 8 }}
     autoRecovery: {{ $pxc.autoRecovery }}
     readinessDelaySec: 15
     livenessDelaySec: 600
@@ -221,9 +224,19 @@ spec:
{{ tpl ($haproxy.podSecurityContext | toYaml) $ | indent 6 }}
     {{- end }}
     exposePrimary:
-{{ $haproxy.service.primary | toYaml | indent 6 }}
+      enabled: {{ $haproxy.service.primary.enabled }}
+      type: {{ $haproxy.service.primary.type }}
+      externalTrafficPolicy: {{ $haproxy.service.primary.externalTrafficPolicy }}
+      internalTrafficPolicy: {{ $haproxy.service.primary.internalTrafficPolicy }}
+      annotations:
+{{ merge (include "pxc-db.linkerdServiceAnnotations" $ | fromYaml) ($haproxy.service.primary.annotations) | toYaml | indent 8 }}
     exposeReplicas:
-{{ $haproxy.service.replicas | toYaml | indent 6 }}
+      enabled: {{ $haproxy.service.replicas.enabled }}
+      type: {{ $haproxy.service.replicas.type }}
+      externalTrafficPolicy: {{ $haproxy.service.replicas.externalTrafficPolicy }}
+      internalTrafficPolicy: {{ $haproxy.service.replicas.internalTrafficPolicy }}
+      annotations:
+{{ merge (include "pxc-db.linkerdServiceAnnotations" $ | fromYaml) ($haproxy.service.replicas.annotations) | toYaml | indent 8 }}
     resources:
       requests:
{{ tpl ($haproxy.resources.requests | toYaml) $ | indent 8 }}
diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml
index 092927b8963..2ee99f80207 100644
--- a/common/pxc-db/values.yaml
+++ b/common/pxc-db/values.yaml
@@ -172,12 +172,11 @@ pxc:
   # -- Expose cluster pods as separate services
   # https://docs.percona.com/percona-operator-for-mysql/pxc/expose.html#service-per-pod
   expose:
-    enabled: false
+    enabled: true
     type: ClusterIP
     externalTrafficPolicy: Cluster
     internalTrafficPolicy: Cluster
-    annotations:
-      config.linkerd.io/opaque-ports: "3306,3307,3009,4444,4567,4568,33060,33062"
+    annotations: {}
   # -- Automatic crash recovery
   # https://docs.percona.com/percona-operator-for-mysql/pxc/recovery.html#automatic-crash-recovery
   autoRecovery: true
@@ -329,18 +328,16 @@ haproxy:
     type: ClusterIP
     externalTrafficPolicy: Cluster
     internalTrafficPolicy: Cluster
-    annotations:
-      config.linkerd.io/opaque-ports: "3306,3307,3009,4444,4567,4568,33060,33062"
+    annotations: {}
   # -- Replicas HAProxy service configuration
   # Exposes *all* cluster members
   # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#haproxyexposereplicasenabled
   replicas:
     enabled: true
     type: ClusterIP
     externalTrafficPolicy: Cluster
     internalTrafficPolicy: Cluster
-    annotations:
-      config.linkerd.io/opaque-ports: "3306,3307,3009,4444,4567,4568,33060,33062"
+    annotations: {}
   priority_class: "critical-infrastructure"
   # -- Advanced affinity configuration for HAProxy pods
   #
https://docs.percona.com/percona-operator-for-mysql/pxc/constraints.html#simple-approach-use-topologykey-of-the-percona-operator-for-mysql From 22732657651c98af7e22b6256cf456860a7e45c1 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Mon, 6 Jan 2025 14:42:10 +0200 Subject: [PATCH 064/224] [pxc-db] Use template for endpointUrl s3 backups storage configuration Use template for endpointUrl s3 backups storage configuration This helps to avoid writing this common configuration for every service in each region --- common/pxc-db/Chart.yaml | 2 +- common/pxc-db/ci/test-values.yaml | 4 +--- common/pxc-db/values.yaml | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index cf16dcdbe2e..5e2f561205c 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,7 +16,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.7 +version: 0.2.8 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/common/pxc-db/ci/test-values.yaml b/common/pxc-db/ci/test-values.yaml index 610a1b75cdb..5567cca57d9 100644 --- a/common/pxc-db/ci/test-values.yaml +++ b/common/pxc-db/ci/test-values.yaml @@ -3,6 +3,7 @@ global: dbUser: admin dbPassword: secret! region: regionOne + tld: test.corp registryAlternateRegion: my.docker.registry dockerHubMirrorAlternateRegion: my.dockerhub.mirror @@ -11,9 +12,6 @@ name: test backup: enabled: true s3: - config: - region: regionOne - endpointUrl: http://s3.default.svc.cluster.local secrets: aws_access_key_id: 'super-secret' aws_secret_access_key: 'super-secret' diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml index 2ee99f80207..b8f8d8e41fa 100644 --- a/common/pxc-db/values.yaml +++ b/common/pxc-db/values.yaml @@ -449,7 +449,7 @@ backup: # This option must be set explicitly # Example: https://objectstore-3.REGION.DOMAIN # https://docs.percona.com/percona-operator-for-mysql/pxc/operator.html?#backupstoragesstorage-names3endpointurl - endpointUrl: null + endpointUrl: "https://objectstore-3.{{ .Values.global.region }}.{{ .Values.global.tld }}" # -- S3 credentials # This map is being used by the chart k8s secret creation secrets: From 59a3f02869dde71c639b6eae3ffe21c9efc29b73 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Mon, 6 Jan 2025 13:21:06 +0200 Subject: [PATCH 065/224] [pxc-db] Add app and name labels for backup jobs and pitr deployment * Add app and name labels for backup jobs and pitr deployment * Use correct label in backup alert expressions * Fix pxc_binlog alerts --- common/pxc-db/Chart.yaml | 2 +- common/pxc-db/templates/_helpers.tpl | 9 +++++++++ common/pxc-db/templates/alerts/_backup.alerts.tpl | 8 ++++---- common/pxc-db/templates/cluster.yaml | 4 ++-- 4 files changed, 16 insertions(+), 7 deletions(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index 5e2f561205c..c2fed60ead8 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,7 +16,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.8 +version: 0.2.9 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/common/pxc-db/templates/_helpers.tpl b/common/pxc-db/templates/_helpers.tpl index e6125e067c1..3870e609019 100644 --- a/common/pxc-db/templates/_helpers.tpl +++ b/common/pxc-db/templates/_helpers.tpl @@ -23,6 +23,14 @@ If release name contains chart name it will be used as a full name. {{- end }} {{- end }} +{{/* +Generate cluster custom resource name +Example: test-db +*/}} +{{- define "pxc-db.clusterName" -}} +{{ required ".Values.name is missing" .Values.name }}-db +{{- end -}} + {{/* Create chart name and version as used by the chart label. */}} @@ -74,6 +82,7 @@ Add owner-info if exists Backup jobs are created by operator and are not inherited from the parent cluster resource */}} {{- define "pxc-db.backupLabels" -}} +{{- include "pxc-db.appLabels" . }} {{- include "pxc-db.ownerLabels" . }} {{- end }} diff --git a/common/pxc-db/templates/alerts/_backup.alerts.tpl b/common/pxc-db/templates/alerts/_backup.alerts.tpl index 737091c34c6..a493da2de5f 100644 --- a/common/pxc-db/templates/alerts/_backup.alerts.tpl +++ b/common/pxc-db/templates/alerts/_backup.alerts.tpl @@ -1,7 +1,7 @@ - name: pxc-backup.alerts rules: - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBackupNotSucceeded - expr: (kube_customresource_perconaxtradbclusterbackup_status{app_kubernetes_io_instance="{{ include "pxc-db.fullname" . }}",state="Succeeded"} != 1) + expr: (kube_customresource_perconaxtradbclusterbackup_status{app_kubernetes_io_instance="{{ include "pxc-db.clusterName" . }}",state="Succeeded"} != 1) for: 10m labels: context: database @@ -15,7 +15,7 @@ summary: "{{ include "pxc-db.fullname" . }} cluster backup is not succeeded." - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBackupMissing - expr: (time() - max by (app_kubernetes_io_instance) (kube_customresource_perconaxtradbclusterbackup_completed{app_kubernetes_io_instance="{{ include "pxc-db.fullname" . }}"}) > 129600) + expr: (time() - max by (app_kubernetes_io_instance) (kube_customresource_perconaxtradbclusterbackup_completed{app_kubernetes_io_instance="{{ include "pxc-db.clusterName" . }}"}) > 129600) for: 30m labels: context: database @@ -30,7 +30,7 @@ {{- if .Values.backup.pitr.enabled }} - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBinlogProcessingTooOld - expr: (time() - pxc_binlog_collector_last_processing_timestamp > 1800) + expr: (time() - pxc_binlog_collector_last_processing_timestamp{app_kubernetes_io_instance="{{ include "pxc-db.clusterName" . }}"} > 1800) for: 30m labels: context: database @@ -44,7 +44,7 @@ summary: "{{ include "pxc-db.fullname" . }} cluster binlog processing is too old." - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterBinlogUploadTooOld - expr: (time() - pxc_binlog_collector_last_upload_timestamp > 1800) + expr: (time() - pxc_binlog_collector_last_upload_timestamp{app_kubernetes_io_instance="{{ include "pxc-db.clusterName" . 
}}"} > 1800) for: 30m labels: context: database diff --git a/common/pxc-db/templates/cluster.yaml b/common/pxc-db/templates/cluster.yaml index 6524d95a48f..328ee15ff28 100644 --- a/common/pxc-db/templates/cluster.yaml +++ b/common/pxc-db/templates/cluster.yaml @@ -3,7 +3,7 @@ apiVersion: pxc.percona.com/v1 kind: PerconaXtraDBCluster metadata: namespace: {{ $.Release.Namespace }} - name: {{ .Values.name }}-db + name: {{ include "pxc-db.clusterName" . }} finalizers: - percona.com/delete-pxc-pods-in-order {{- with .Values.annotations }} @@ -137,7 +137,7 @@ spec: - name: MYSQLD_EXPORTER_PASSWORD valueFrom: secretKeyRef: - name: internal-{{ .Values.name }}-db + name: internal-{{ include "pxc-db.clusterName" . }} key: monitor ports: - name: metrics From bafcb3d51af3dd2654f58abc33333471a72dcd9c Mon Sep 17 00:00:00 2001 From: Tommy Sauer Date: Mon, 6 Jan 2025 14:00:38 +0100 Subject: [PATCH 066/224] fix not existing metric (#7602) --- common/prometheus-server-pre7/CHANGELOG.md | 4 ++++ common/prometheus-server-pre7/Chart.yaml | 2 +- .../templates/alerts/_thanos-store.alerts.tpl | 4 ++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/common/prometheus-server-pre7/CHANGELOG.md b/common/prometheus-server-pre7/CHANGELOG.md index 9583a0452d7..20e1db3bebb 100644 --- a/common/prometheus-server-pre7/CHANGELOG.md +++ b/common/prometheus-server-pre7/CHANGELOG.md @@ -1,3 +1,7 @@ +## 6.6.2 + +* another fix for ThanosStoreSeriesGateLatencyHigh + ## 6.6.1 * fixed missing metric for ThanosStoreSeriesGateLatencyHigh alert diff --git a/common/prometheus-server-pre7/Chart.yaml b/common/prometheus-server-pre7/Chart.yaml index d537e058422..40bf295453a 100644 --- a/common/prometheus-server-pre7/Chart.yaml +++ b/common/prometheus-server-pre7/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Prometheus via operator. name: prometheus-server-pre7 -version: 6.7.1 +version: 6.7.2 appVersion: v2.54.1 icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png maintainers: diff --git a/common/prometheus-server-pre7/templates/alerts/_thanos-store.alerts.tpl b/common/prometheus-server-pre7/templates/alerts/_thanos-store.alerts.tpl index df874bcd33b..a9553914293 100644 --- a/common/prometheus-server-pre7/templates/alerts/_thanos-store.alerts.tpl +++ b/common/prometheus-server-pre7/templates/alerts/_thanos-store.alerts.tpl @@ -24,9 +24,9 @@ groups: - alert: ThanosStoreSeriesGateLatencyHigh expr: | ( - histogram_quantile(0.99, sum by (prometheus, le) (rate(thanos_bucket_store_series_gate_duration_seconds_bucket{job=~".*thanos.*store.*", prometheus="{{ include "prometheus.name" . }}"}[5m]))) > 2 + histogram_quantile(0.99, sum by (prometheus, le) (rate(thanos_bucket_store_series_gate_queries_duration_seconds_bucket{job=~".*thanos.*store.*", prometheus="{{ include "prometheus.name" . }}"}[5m]))) > 2 and - sum by (thanos) (rate(thanos_bucket_store_series_gate_queries_duration_seconds_bucket{job=~".*thanos.*store.*", thanos="{{ include "thanos.name" . }}"}[5m])) > 0 + sum by (prometheus) (rate(thanos_bucket_store_series_gate_queries_duration_seconds_count{job=~".*thanos.*store.*", prometheus="{{ include "prometheus.name" . 
}}"}[5m])) > 0 ) for: 10m labels: From c541af7b4626e809ab91b9ad256d50de89c7d26c Mon Sep 17 00:00:00 2001 From: Jan Knipper Date: Mon, 6 Jan 2025 14:34:29 +0100 Subject: [PATCH 067/224] Bump servicemesh/linkerd charts --- system/servicemesh-crds/Chart.lock | 6 +++--- system/servicemesh-crds/Chart.yaml | 6 +++--- system/servicemesh-crds/values.yaml | 5 ++++- system/servicemesh/Chart.lock | 8 ++++---- system/servicemesh/Chart.yaml | 8 ++++---- 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/system/servicemesh-crds/Chart.lock b/system/servicemesh-crds/Chart.lock index 173742f2e4f..771563cbda1 100644 --- a/system/servicemesh-crds/Chart.lock +++ b/system/servicemesh-crds/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: linkerd-crds repository: https://helm.linkerd.io/edge - version: 2024.8.2 + version: 2024.11.8 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 -digest: sha256:7d129d0770ad307f084394fd46b9f65c499340d3808af5bb0efc1479cc8d3683 -generated: "2024-08-22T15:19:22.485087908+02:00" +digest: sha256:7b9ad1664fc9b6f1638e4bba66871c89abe8d269532614d05d479ee6069ab603 +generated: "2025-01-06T11:57:54.080936835+01:00" diff --git a/system/servicemesh-crds/Chart.yaml b/system/servicemesh-crds/Chart.yaml index 528f1af21e6..81fb39f8d16 100644 --- a/system/servicemesh-crds/Chart.yaml +++ b/system/servicemesh-crds/Chart.yaml @@ -1,13 +1,13 @@ apiVersion: v2 description: Linkerd service-mesh for our control-plane name: servicemesh-crds -version: 2024.8.2 -appVersion: 2024.8.2 +version: 2024.11.8 +appVersion: 2024.11.8 home: https://github.com/sapcc/helm-charts/tree/master/system/servicemesh-crds dependencies: - name: linkerd-crds repository: https://helm.linkerd.io/edge - version: 2024.8.2 + version: 2024.11.8 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 diff --git a/system/servicemesh-crds/values.yaml b/system/servicemesh-crds/values.yaml index 199c6881e97..836154199c4 100644 --- a/system/servicemesh-crds/values.yaml +++ b/system/servicemesh-crds/values.yaml @@ -1,3 +1,6 @@ owner-info: support-group: containers - helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/system/servicemesh + service: linkerd + helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/system/servicemesh-crds + maintainers: + - Jan Knipper diff --git a/system/servicemesh/Chart.lock b/system/servicemesh/Chart.lock index 6464e2be01f..75d75bb4f39 100644 --- a/system/servicemesh/Chart.lock +++ b/system/servicemesh/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: linkerd-control-plane repository: https://helm.linkerd.io/edge - version: 2024.8.2 + version: 2024.11.8 - name: linkerd-viz repository: https://helm.linkerd.io/edge - version: 2024.8.2 + version: 2024.11.8 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 -digest: sha256:36f11fa54c1f06f9b1e74b65f634e0d80fad10943f5b84ff58d52d7a4dbf585e -generated: "2024-08-22T09:59:09.522428718+02:00" +digest: sha256:4a52d698698d8382ebadd50bfeeb506c17445ecf32a21cfdba1154f5d039d1ea +generated: "2025-01-06T11:48:31.3981799+01:00" diff --git a/system/servicemesh/Chart.yaml b/system/servicemesh/Chart.yaml index 271385937f7..c8c2b177b07 100644 --- a/system/servicemesh/Chart.yaml +++ b/system/servicemesh/Chart.yaml @@ -1,17 +1,17 @@ apiVersion: v2 description: Linkerd service-mesh for our control-plane name: servicemesh -version: 2024.8.4 -appVersion: 2024.8.2 +version: 2024.11.8 +appVersion: 2024.11.8 home: 
https://github.com/sapcc/helm-charts/tree/master/system/servicemesh
 dependencies:
   - name: linkerd-control-plane
     repository: https://helm.linkerd.io/edge
-    version: 2024.8.2
+    version: 2024.11.8
     condition: linkerd-control-plane.enabled
   - name: linkerd-viz
     repository: https://helm.linkerd.io/edge
-    version: 2024.8.2
+    version: 2024.11.8
     condition: linkerd-viz.enabled
   - name: owner-info
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm

From 2b23294958a33d3ae49a5c5cc7ca2b38ec20ce41 Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Mon, 6 Jan 2025 15:33:22 +0200
Subject: [PATCH 068/224] [designate][pxc-db] Bump chart dependency

Bump designate pxc-db chart dependency
Includes fixes in labels and alerts
---
 openstack/designate/Chart.lock | 6 +++---
 openstack/designate/Chart.yaml | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/openstack/designate/Chart.lock b/openstack/designate/Chart.lock
index 697c4ad5fed..beef2298a6e 100644
--- a/openstack/designate/Chart.lock
+++ b/openstack/designate/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
     version: 1.1.9
   - name: pxc-db
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-    version: 0.2.4
+    version: 0.2.9
   - name: mariadb
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
     version: 0.15.2
@@ -29,5 +29,5 @@ dependencies:
   - name: linkerd-support
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
     version: 1.1.0
-digest: sha256:205b25f93f690e91f8e99353fa528101532ab5705436ed8c989a2f11ceea0e95
-generated: "2024-12-23T21:49:33.263766+02:00"
+digest: sha256:24f3a3722d109558c030acc84e5003cf44d77890fc8714fd067eb307c2df4189
+generated: "2025-01-06T15:22:41.779968+02:00"
diff --git a/openstack/designate/Chart.yaml b/openstack/designate/Chart.yaml
index 24afd27be42..9520b3e6ca5 100644
--- a/openstack/designate/Chart.yaml
+++ b/openstack/designate/Chart.yaml
@@ -2,7 +2,7 @@

 description: A Helm chart for Kubernetes to deploy Openstack Designate (DNSaaS)
 name: designate
-version: 0.4.8
+version: 0.4.9
 appVersion: "xena"
 dependencies:
   - condition: percona_cluster.enabled
@@ -13,7 +13,7 @@ dependencies:
     name: pxc-db
     alias: pxc_db
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-    version: 0.2.4
+    version: 0.2.9
   - condition: mariadb.enabled
     name: mariadb
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm

From b0f3725909d958af363bc4e713c5c0fca6c5742b Mon Sep 17 00:00:00 2001
From: Dmitry Galkin
Date: Mon, 6 Jan 2025 15:41:50 +0100
Subject: [PATCH 069/224] [designate] add renamed policies for upgrade to 2024.1

---
 openstack/designate/templates/etc/_designate-policy.yaml.tpl | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/openstack/designate/templates/etc/_designate-policy.yaml.tpl b/openstack/designate/templates/etc/_designate-policy.yaml.tpl
index 7a0fb85a5b2..f6746016cf1 100644
--- a/openstack/designate/templates/etc/_designate-policy.yaml.tpl
+++ b/openstack/designate/templates/etc/_designate-policy.yaml.tpl
@@ -61,10 +61,12 @@ create_super_zone: rule:context_is_cloud_admin
 get_zones: rule:context_is_viewer
 get_zone: rule:context_is_viewer
 get_shared_zone: rule:context_is_viewer
+get_zone_share: rule:context_is_viewer
 get_zone_servers: rule:context_is_viewer
 find_zones: rule:context_is_viewer
 find_zone: rule:context_is_viewer
 find_shared_zones: rule:context_is_viewer
+find_zone_shares: rule:context_is_viewer
 update_zone: rule:context_is_master
 update_sub_zone: rule:context_is_master
 delete_zone: rule:context_is_master

From 1ef5101021f8b156284bd69557d2a281cf91e64f Mon Sep 17 00:00:00 2001
From: dhalimi
Date: Mon, 6
Jan 2025 11:45:24 -0500 Subject: [PATCH 070/224] new flag maxProxyRetries --- openstack/cronus/templates/cronus/_config.yaml.tpl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/openstack/cronus/templates/cronus/_config.yaml.tpl b/openstack/cronus/templates/cronus/_config.yaml.tpl index 171635f2554..42789f5ccae 100644 --- a/openstack/cronus/templates/cronus/_config.yaml.tpl +++ b/openstack/cronus/templates/cronus/_config.yaml.tpl @@ -50,6 +50,9 @@ cronus: {{- if .Values.config.retry.maxConnectionRetries }} maxConnectionRetries: {{ .Values.config.retry.maxConnectionRetries }} {{- end }} +{{- if .Values.config.retry.maxProxyRetries }} + maxProxyRetries: {{ .Values.config.retry.maxProxyRetries }} +{{- end }} {{- if .Values.config.retry.retryInterval }} retryInterval: {{ .Values.config.retry.retryInterval }} {{- end }} From 205c21cc854acff5930487f6e4f2ca2af4aecd72 Mon Sep 17 00:00:00 2001 From: dhalimi Date: Mon, 6 Jan 2025 11:50:23 -0500 Subject: [PATCH 071/224] Adding maxProxyRetries to values.yaml --- openstack/cronus/values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/openstack/cronus/values.yaml b/openstack/cronus/values.yaml index e8d0915c31d..070329f465c 100644 --- a/openstack/cronus/values.yaml +++ b/openstack/cronus/values.yaml @@ -594,6 +594,7 @@ poller: # service that handles received emails debug: false # debug HTTP requests retry: # retry settings for failed connections maxConnectionRetries: 5 # 5 retries per connection + maxProxyRetries: 2 # 2 retries per connection when in proxy mode retryInterval: 0.5s # 500ms to wait after each try, e.g. in total 2.5 seconds, used in HTTP retry connectionTimeout: 0.5s # 500ms to wait after each try, e.g. in total 2.5 seconds, used in SMTP net retry commandTimeout: 3s From 90d1eba91c5c62d45714510e0279f96bb5f52a16 Mon Sep 17 00:00:00 2001 From: Tommy Sauer Date: Mon, 6 Jan 2025 18:20:41 +0100 Subject: [PATCH 072/224] fix not working thanos alert (#7611) --- openstack/maia/Chart.lock | 6 +++--- openstack/maia/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/openstack/maia/Chart.lock b/openstack/maia/Chart.lock index 9b2662bae30..1530961c70e 100644 --- a/openstack/maia/Chart.lock +++ b/openstack/maia/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: prometheus-server-pre7 repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 6.7.0 + version: 6.7.2 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:de7c5ed4f5f5ea20db49974650ed18a9f23d5a2561f16c866de795f5d0701958 -generated: "2024-10-02T11:09:53.330264+02:00" +digest: sha256:9625037466f099b81729adf2eee0290ebf00b314dc3bf88c50923478f58e770b +generated: "2025-01-06T14:02:24.075654+01:00" diff --git a/openstack/maia/Chart.yaml b/openstack/maia/Chart.yaml index 09f1db155f8..71db45ece26 100644 --- a/openstack/maia/Chart.yaml +++ b/openstack/maia/Chart.yaml @@ -7,7 +7,7 @@ dependencies: alias: prometheus_server condition: prometheus_server.enabled repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 6.7.0 + version: 6.7.2 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 From 2524ff2552e6b0cbe2f740a8c1b0512fd518c552 Mon Sep 17 00:00:00 2001 From: Nuckal777 Date: Tue, 7 Jan 2025 09:38:23 +0100 Subject: [PATCH 073/224] Switch to internal auth endpoint in maintenance-controller chart --- system/maintenance-controller/Chart.yaml | 2 +- 
system/maintenance-controller/templates/ingress.yaml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/system/maintenance-controller/Chart.yaml b/system/maintenance-controller/Chart.yaml index 5e3b5223d99..f0556801dbc 100644 --- a/system/maintenance-controller/Chart.yaml +++ b/system/maintenance-controller/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: maintenance-controller description: A controller to manage node maintenance type: application -version: 1.0.6 +version: 1.0.7 appVersion: "1.7.0" home: https://github.com/sapcc/maintenance-controller dependencies: diff --git a/system/maintenance-controller/templates/ingress.yaml b/system/maintenance-controller/templates/ingress.yaml index 2e41be7183e..2d5f22315ee 100644 --- a/system/maintenance-controller/templates/ingress.yaml +++ b/system/maintenance-controller/templates/ingress.yaml @@ -20,10 +20,10 @@ metadata: kubernetes.io/tls-acme: "true" prometheus.io/probe: "true" {{- if .Values.ingress.oauthProxy }} - ingress.kubernetes.io/auth-url: "https://auth.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/auth" - nginx.ingress.kubernetes.io/auth-url: "https://auth.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/auth" - ingress.kubernetes.io/auth-signin: "https://auth.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/start" - nginx.ingress.kubernetes.io/auth-signin: "https://auth.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/start" + ingress.kubernetes.io/auth-url: "https://auth-internal.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/auth" + nginx.ingress.kubernetes.io/auth-url: "https://auth-internal.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/auth" + ingress.kubernetes.io/auth-signin: "https://auth-internal.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/start" + nginx.ingress.kubernetes.io/auth-signin: "https://auth-internal.{{ .Values.global.region}}.{{ .Values.global.tld}}/oauth2/start" ingress.kubernetes.io/auth-request-redirect: $escaped_request_uri nginx.ingress.kubernetes.io/auth-request-redirect: $escaped_request_uri ingress.kubernetes.io/auth-response-headers: "Authorization, X-Auth-Request-Email, X-Auth-Request-User, X-Forwarded-Access-Token" From 57470c5a3bcf00e3f334523aaaf76590032466d2 Mon Sep 17 00:00:00 2001 From: Erik Schubert Date: Tue, 7 Jan 2025 09:39:43 +0100 Subject: [PATCH 074/224] Mount ServiceAccount tokens of ironcore operators directly (#7617) * Mount ServiceAccount tokens of ironcore operators directly This enables token rotation within client-go. See: client-go/transport/round_trippers.go line 296. 
* Fix metal-token-dealer kubeconfig --- system/Makefile | 7 ++---- system/boot-operator-remote/Chart.yaml | 2 +- .../templates/deployment.yaml | 9 +++++++- .../templates/remote-kubeconfig.yaml | 23 +++---------------- system/boot-operator-remote/values.yaml | 8 +++---- .../manager-remote-patch.yaml | 11 +++++++-- .../remote-kubeconfig.yaml | 23 +++---------------- .../manager-remote-patch.yaml | 11 +++++++-- .../remote-kubeconfig.yaml | 23 +++---------------- system/metal-operator-remote/Chart.yaml | 2 +- .../managedresources/kustomize.yaml | 7 +++++- .../templates/deployment.yaml | 9 +++++++- .../templates/remote-kubeconfig.yaml | 23 +++---------------- system/metal-operator-remote/values.yaml | 6 ++--- 14 files changed, 62 insertions(+), 102 deletions(-) diff --git a/system/Makefile b/system/Makefile index 0457e0ac35b..f11382c063d 100644 --- a/system/Makefile +++ b/system/Makefile @@ -128,9 +128,8 @@ build-metal-operator-remote: @yq -i '.controllerManager.manager.image.tag="$(METAL_OPERATOR_VERSION)"' metal-operator-remote/values.yaml @yq -i '.fullnameOverride="metal-operator"' metal-operator-remote/values.yaml @yq -i '.remote.ca=""' metal-operator-remote/values.yaml - @yq -i '.remote.server=""' metal-operator-remote/values.yaml @echo 'macdb: {}' >> metal-operator-remote/values.yaml - @yq -i '.version="0.1.2"' metal-operator-remote/Chart.yaml + @yq -i '.version="0.2.0"' metal-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' metal-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' metal-operator-remote/managedresources/kustomize.yaml @@ -152,9 +151,7 @@ build-boot-operator-remote: @kubectl kustomize kustomize/boot-operator-managedresources > boot-operator-remote/managedresources/kustomize.yaml @yq -i '.controllerManager.manager.image.tag="$(BOOT_OPERATOR_VERSION)"' boot-operator-remote/values.yaml @yq -i '.fullnameOverride="boot-operator"' boot-operator-remote/values.yaml - @yq -i '.remote.ca=""' boot-operator-remote/values.yaml - @yq -i '.remote.server=""' boot-operator-remote/values.yaml - @yq -i '.version="0.1.2"' boot-operator-remote/Chart.yaml + @yq -i '.version="0.2.0"' boot-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' boot-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' boot-operator-remote/managedresources/kustomize.yaml diff --git a/system/boot-operator-remote/Chart.yaml b/system/boot-operator-remote/Chart.yaml index c5f7f5d7e41..2b316aec17e 100644 --- a/system/boot-operator-remote/Chart.yaml +++ b/system/boot-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.2 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
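For illustration, a minimal sketch of the pod wiring this change converges on. The secret name, key items, mount path, and KUBERNETES_SERVICE_HOST variable are taken from the templates below; the pod name and image are placeholders. Gardener's token-requestor keeps the `token` and `bundle.crt` keys of the secret up to date, and because the secret is projected onto the default service-account path, client-go re-reads the rotated token automatically while KUBERNETES_SERVICE_HOST points it at the remote API server.

apiVersion: v1
kind: Pod
metadata:
  name: manager-sketch                       # hypothetical pod name, for illustration only
spec:
  containers:
    - name: manager
      image: controller:example              # placeholder image
      env:
        - name: KUBERNETES_SERVICE_HOST
          value: apiserver-url               # assumed remote apiserver hostname
      volumeMounts:
        - name: remote-kubeconfig
          mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          readOnly: true
  volumes:
    - name: remote-kubeconfig
      secret:
        secretName: boot-operator-remote-kubeconfig
        items:
          - key: token
            path: token                      # rotating SA token read by client-go
          - key: bundle.crt
            path: ca.crt                     # CA bundle injected by Gardener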
diff --git a/system/boot-operator-remote/templates/deployment.yaml b/system/boot-operator-remote/templates/deployment.yaml index c7a7bdd4ec8..cdc764ca07f 100644 --- a/system/boot-operator-remote/templates/deployment.yaml +++ b/system/boot-operator-remote/templates/deployment.yaml @@ -27,6 +27,8 @@ spec: command: - /manager env: + - name: KUBERNETES_SERVICE_HOST + value: {{ quote .Values.controllerManager.manager.env.kubernetesServiceHost }} - name: KUBERNETES_CLUSTER_DOMAIN value: {{ quote .Values.kubernetesClusterDomain }} image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag @@ -53,7 +55,7 @@ spec: securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext | nindent 10 }} volumeMounts: - - mountPath: /kubeconfig + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: remote-kubeconfig readOnly: true hostNetwork: true @@ -64,4 +66,9 @@ spec: volumes: - name: remote-kubeconfig secret: + items: + - key: token + path: token + - key: bundle.crt + path: ca.crt secretName: boot-operator-remote-kubeconfig \ No newline at end of file diff --git a/system/boot-operator-remote/templates/remote-kubeconfig.yaml b/system/boot-operator-remote/templates/remote-kubeconfig.yaml index 0c5864797cf..d1bffd98417 100644 --- a/system/boot-operator-remote/templates/remote-kubeconfig.yaml +++ b/system/boot-operator-remote/templates/remote-kubeconfig.yaml @@ -8,24 +8,7 @@ metadata: annotations: serviceaccount.resources.gardener.cloud/name: boot-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system + serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true" stringData: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: {{ .Values.remote.ca }} - server: {{ .Values.remote.server }} - name: remote-cluster - contexts: - - context: - cluster: remote-cluster - user: boot-operator-controller-manager - namespace: kube-system - name: remote-cluster - current-context: remote-cluster - kind: Config - preferences: {} - users: - - name: boot-operator-controller-manager - user: - token: "" + token: "" + bundle.crt: "" diff --git a/system/boot-operator-remote/values.yaml b/system/boot-operator-remote/values.yaml index 98513f94ea3..15255d068c9 100644 --- a/system/boot-operator-remote/values.yaml +++ b/system/boot-operator-remote/values.yaml @@ -12,15 +12,16 @@ controllerManager: - --metrics-bind-address=127.0.0.1:8080 - --leader-elect - --controllers=httpbootconfig,ipxebootconfig,serverbootconfighttp,serverbootconfigpxe - - --kubeconfig=/kubeconfig/kubeconfig containerSecurityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL + env: + kubernetesServiceHost: apiserver-url image: repository: controller - tag: 9ad7325e76fc00b1b6e5b5535404d732539ecbce + tag: 4f2452ce450dc65342d68daf611edfb182585020 resources: limits: cpu: 500m @@ -33,6 +34,3 @@ controllerManager: replicas: 1 kubernetesClusterDomain: cluster.local fullnameOverride: boot-operator -remote: - ca: "" - server: "" diff --git a/system/kustomize/boot-operator-remote/manager-remote-patch.yaml b/system/kustomize/boot-operator-remote/manager-remote-patch.yaml index 2dbfa1dd0d5..b6087dc4dd9 100644 --- a/system/kustomize/boot-operator-remote/manager-remote-patch.yaml +++ b/system/kustomize/boot-operator-remote/manager-remote-patch.yaml @@ -14,7 +14,6 @@ spec: - --metrics-bind-address=127.0.0.1:8080 - --leader-elect - 
--controllers=httpbootconfig,ipxebootconfig,serverbootconfighttp,serverbootconfigpxe - - --kubeconfig=/kubeconfig/kubeconfig livenessProbe: httpGet: port: 8087 @@ -27,9 +26,17 @@ spec: protocol: TCP volumeMounts: - name: remote-kubeconfig - mountPath: /kubeconfig + mountPath: /var/run/secrets/kubernetes.io/serviceaccount readOnly: true + env: + - name: KUBERNETES_SERVICE_HOST + value: "apiserver-url" volumes: - name: remote-kubeconfig secret: secretName: boot-operator-remote-kubeconfig + items: + - key: token + path: token + - key: bundle.crt + path: ca.crt diff --git a/system/kustomize/boot-operator-remote/remote-kubeconfig.yaml b/system/kustomize/boot-operator-remote/remote-kubeconfig.yaml index 0c5864797cf..d1bffd98417 100644 --- a/system/kustomize/boot-operator-remote/remote-kubeconfig.yaml +++ b/system/kustomize/boot-operator-remote/remote-kubeconfig.yaml @@ -8,24 +8,7 @@ metadata: annotations: serviceaccount.resources.gardener.cloud/name: boot-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system + serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true" stringData: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: {{ .Values.remote.ca }} - server: {{ .Values.remote.server }} - name: remote-cluster - contexts: - - context: - cluster: remote-cluster - user: boot-operator-controller-manager - namespace: kube-system - name: remote-cluster - current-context: remote-cluster - kind: Config - preferences: {} - users: - - name: boot-operator-controller-manager - user: - token: "" + token: "" + bundle.crt: "" diff --git a/system/kustomize/metal-operator-remote/manager-remote-patch.yaml b/system/kustomize/metal-operator-remote/manager-remote-patch.yaml index b139bc002b8..91e16b26db3 100644 --- a/system/kustomize/metal-operator-remote/manager-remote-patch.yaml +++ b/system/kustomize/metal-operator-remote/manager-remote-patch.yaml @@ -15,18 +15,25 @@ spec: - --probe-os-image=ghcr.io/ironcore-dev/os-images/gardenlinux:1443.3 - --insecure=false - --registry-url=http://[2a10:afc0:e013:d002::]:30010 - - --kubeconfig=/kubeconfig/kubeconfig - --manager-namespace=metal-operator-system volumeMounts: - name: remote-kubeconfig - mountPath: /kubeconfig + mountPath: /var/run/secrets/kubernetes.io/serviceaccount readOnly: true - mountPath: /etc/macdb/ name: macdb + env: + - name: KUBERNETES_SERVICE_HOST + value: "apiserver-url" volumes: - name: remote-kubeconfig secret: secretName: metal-operator-remote-kubeconfig + items: + - key: token + path: token + - key: bundle.crt + path: ca.crt - name: macdb secret: secretName: macdb diff --git a/system/kustomize/metal-operator-remote/remote-kubeconfig.yaml b/system/kustomize/metal-operator-remote/remote-kubeconfig.yaml index c89c4cb82a1..6c842a5b870 100644 --- a/system/kustomize/metal-operator-remote/remote-kubeconfig.yaml +++ b/system/kustomize/metal-operator-remote/remote-kubeconfig.yaml @@ -8,24 +8,7 @@ metadata: annotations: serviceaccount.resources.gardener.cloud/name: metal-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system + serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true" stringData: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: {{ .Values.remote.ca }} - server: {{ .Values.remote.server }} - name: remote-cluster - contexts: - - context: - cluster: remote-cluster - user: metal-operator-controller-manager - namespace: kube-system - name: remote-cluster - current-context: 
remote-cluster - kind: Config - preferences: {} - users: - - name: metal-operator-controller-manager - user: - token: "" + token: "" + bundle.crt: "" diff --git a/system/metal-operator-remote/Chart.yaml b/system/metal-operator-remote/Chart.yaml index d699989afca..44693d37c0d 100644 --- a/system/metal-operator-remote/Chart.yaml +++ b/system/metal-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.2 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/system/metal-operator-remote/managedresources/kustomize.yaml b/system/metal-operator-remote/managedresources/kustomize.yaml index f1bd70c993b..a05d4a22413 100644 --- a/system/metal-operator-remote/managedresources/kustomize.yaml +++ b/system/metal-operator-remote/managedresources/kustomize.yaml @@ -991,8 +991,13 @@ spec: type: string type: object x-kubernetes-map-type: atomic + systemUUID: + description: SystemUUID is the unique identifier for the server. + type: string uuid: - description: UUID is the unique identifier for the server. + description: |- + UUID is the unique identifier for the server. + Deprecated in favor of systemUUID. type: string required: - uuid diff --git a/system/metal-operator-remote/templates/deployment.yaml b/system/metal-operator-remote/templates/deployment.yaml index 18852ad8dcc..8033d9cd313 100644 --- a/system/metal-operator-remote/templates/deployment.yaml +++ b/system/metal-operator-remote/templates/deployment.yaml @@ -27,6 +27,8 @@ spec: command: - /manager env: + - name: KUBERNETES_SERVICE_HOST + value: {{ quote .Values.controllerManager.manager.env.kubernetesServiceHost }} - name: KUBERNETES_CLUSTER_DOMAIN value: {{ quote .Values.kubernetesClusterDomain }} image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag @@ -49,7 +51,7 @@ spec: securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext | nindent 10 }} volumeMounts: - - mountPath: /kubeconfig + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: remote-kubeconfig readOnly: true - mountPath: /etc/macdb/ @@ -62,6 +64,11 @@ spec: volumes: - name: remote-kubeconfig secret: + items: + - key: token + path: token + - key: bundle.crt + path: ca.crt secretName: metal-operator-remote-kubeconfig - name: macdb secret: diff --git a/system/metal-operator-remote/templates/remote-kubeconfig.yaml b/system/metal-operator-remote/templates/remote-kubeconfig.yaml index c89c4cb82a1..6c842a5b870 100644 --- a/system/metal-operator-remote/templates/remote-kubeconfig.yaml +++ b/system/metal-operator-remote/templates/remote-kubeconfig.yaml @@ -8,24 +8,7 @@ metadata: annotations: serviceaccount.resources.gardener.cloud/name: metal-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system + serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true" stringData: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: {{ .Values.remote.ca }} - server: {{ .Values.remote.server }} - name: remote-cluster - contexts: - - 
context: - cluster: remote-cluster - user: metal-operator-controller-manager - namespace: kube-system - name: remote-cluster - current-context: remote-cluster - kind: Config - preferences: {} - users: - - name: metal-operator-controller-manager - user: - token: "" + token: "" + bundle.crt: "" diff --git a/system/metal-operator-remote/values.yaml b/system/metal-operator-remote/values.yaml index 18e9bd775f0..716efefb335 100644 --- a/system/metal-operator-remote/values.yaml +++ b/system/metal-operator-remote/values.yaml @@ -6,16 +6,17 @@ controllerManager: - --probe-os-image=ghcr.io/ironcore-dev/os-images/gardenlinux:1443.3 - --insecure=false - --registry-url=http://[2a10:afc0:e013:d002::]:30010 - - --kubeconfig=/kubeconfig/kubeconfig - --manager-namespace=metal-operator-system containerSecurityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL + env: + kubernetesServiceHost: apiserver-url image: repository: controller - tag: 05f9869ffd931027435514002d9b8f3ee9b009d3 + tag: 4dc7a47f2cf412966e6362aa5da5624845dadd46 resources: limits: cpu: 500m @@ -37,5 +38,4 @@ metalRegistryService: fullnameOverride: metal-operator remote: ca: "" - server: "" macdb: {} From 68649871c32d0eb6d307700b60767423d25ae12d Mon Sep 17 00:00:00 2001 From: Kevin Fischer <49786038+kevin-fischer@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:55:32 +0100 Subject: [PATCH 075/224] [prometheus-vmware-rules] Revise NVMeSwapDatastoreMissing alert (#7599) - Bump chart version to 1.0.9 - Fix typo --- prometheus-rules/prometheus-vmware-rules/Chart.yaml | 2 +- .../prometheus-vmware-rules/alerts/datastore.alerts | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/prometheus-rules/prometheus-vmware-rules/Chart.yaml b/prometheus-rules/prometheus-vmware-rules/Chart.yaml index 892fee06315..40bbf94b602 100644 --- a/prometheus-rules/prometheus-vmware-rules/Chart.yaml +++ b/prometheus-rules/prometheus-vmware-rules/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: A collection of Prometheus alert rules. 
name: prometheus-vmware-rules -version: 1.0.8 +version: 1.0.9 dependencies: - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm diff --git a/prometheus-rules/prometheus-vmware-rules/alerts/datastore.alerts b/prometheus-rules/prometheus-vmware-rules/alerts/datastore.alerts index 8bf02a7789c..4a16f2491c4 100644 --- a/prometheus-rules/prometheus-vmware-rules/alerts/datastore.alerts +++ b/prometheus-rules/prometheus-vmware-rules/alerts/datastore.alerts @@ -189,9 +189,11 @@ groups: - alert: NVMeSwapDatastoreMissing expr: > - vrops_hostsystem_hardware_model{vccluster=~"^productionbb\\d+$", hardware_model!~"^Cisco Systems Inc.+"} and on (hostsystem) vrops_hostsystem_runtime_maintenancestate{state!="inMaintenance"} + vrops_hostsystem_hardware_model{vccluster=~"^productionbb\\d+$", hardware_model!~"^Cisco Systems Inc.+"} + and on (hostsystem) vrops_hostsystem_runtime_maintenancestate{state!="inMaintenance"} unless on (hostsystem) (label_replace(label_join(vrops_datastore_summary_datastore_accessible{type="NVMe"}, "hostsystem", "", "datastore", "vcenter"), "hostsystem", "$1$2", "hostsystem", - "(.+)-swapvc-[a-z]-\\d+(.+)")) unless on (hostsystem) vrops_hostsystem_summary_custom_tag_nvme{summary_custom_tag_nvme="false"} + "(.+)-swapvc-[a-z]-\\d+(.+)")) + unless on (hostsystem) vrops_hostsystem_summary_custom_tag_nvme{summary_custom_tag_nvme="false"} for: 5m labels: severity: info @@ -203,7 +205,7 @@ groups: playbook: docs/support/playbook/datastore/NVMeSwapDatastoreMissing annotations: description: "NVMe swap datastore {{ $labels.datastore }} missing. ({{ $labels.vcenter }}, {{ $labels.datacenter }})" - summary: "NVMe swap datastore {{ $labels.datastore }} missing. ({{ $labels.vcenter }}, {{ $labels.datacenter }} + summary: "NVMe swap datastore {{ $labels.datastore }} missing. 
({{ $labels.vcenter }}, {{ $labels.datacenter }})" - alert: DatastoreDisconnectedWithVmsOnIt expr: > From 264f7d5e0134c9db31a917516064ee66fd52ddad Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 7 Jan 2025 11:58:01 +0200 Subject: [PATCH 076/224] [ipam-operator-remote] mount SA tokens directly --- system/Makefile | 4 +--- system/ipam-operator-remote/Chart.yaml | 2 +- .../templates/deployment.yaml | 7 +++++- .../templates/remote-kubeconfig.yaml | 22 ++----------------- system/ipam-operator-remote/values.yaml | 3 --- .../manager-remote-patch.yaml | 10 ++++++++- .../remote-kubeconfig.yaml | 22 ++----------------- 7 files changed, 21 insertions(+), 49 deletions(-) diff --git a/system/Makefile b/system/Makefile index f11382c063d..d792e684e32 100644 --- a/system/Makefile +++ b/system/Makefile @@ -188,9 +188,7 @@ build-ipam-operator-remote: @kubectl kustomize kustomize/ipam-operator-managedresources > ipam-operator-remote/managedresources/kustomize.yaml @yq -i '.controllerManager.manager.image.tag="$(IPAM_VERSION)"' ipam-operator-remote/values.yaml @yq -i '.fullnameOverride="ipam-operator"' ipam-operator-remote/values.yaml - @yq -i '.remote.ca=""' ipam-operator-remote/values.yaml - @yq -i '.remote.server=""' ipam-operator-remote/values.yaml - @yq -i '.version="0.1.1"' ipam-operator-remote/Chart.yaml + @yq -i '.version="0.1.2"' ipam-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' ipam-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' ipam-operator-remote/managedresources/kustomize.yaml diff --git a/system/ipam-operator-remote/Chart.yaml b/system/ipam-operator-remote/Chart.yaml index 63ef80a0cd8..901adc24cdf 100644 --- a/system/ipam-operator-remote/Chart.yaml +++ b/system/ipam-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.1 +version: 0.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
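The ipam-operator diffs that follow apply the same pattern. It works because client-go's in-cluster loader (rest.InClusterConfig) reads /var/run/secrets/kubernetes.io/serviceaccount/token and ca.crt and builds the API server URL from the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables. Mounting the remote credentials at that default path and overriding only the host is therefore enough to redirect the controller without any --kubeconfig flag; KUBERNETES_SERVICE_PORT keeps its kubelet-injected value, which assumes the remote API server listens on the same port. A minimal fragment of the override (the "apiserver-url" value is the placeholder used throughout these diffs):

    env:
    - name: KUBERNETES_SERVICE_HOST
      value: apiserver-url  # remote API server; port stays kubelet-injected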
diff --git a/system/ipam-operator-remote/templates/deployment.yaml b/system/ipam-operator-remote/templates/deployment.yaml index b701178927f..c1125a7c575 100644 --- a/system/ipam-operator-remote/templates/deployment.yaml +++ b/system/ipam-operator-remote/templates/deployment.yaml @@ -50,7 +50,7 @@ spec: securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext | nindent 10 }} volumeMounts: - - mountPath: /kubeconfig + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: remote-kubeconfig readOnly: true hostNetwork: true @@ -61,4 +61,9 @@ spec: volumes: - name: remote-kubeconfig secret: + items: + - key: token + path: token + - key: bundle.crt + path: ca.crt secretName: ipam-operator-remote-kubeconfig \ No newline at end of file diff --git a/system/ipam-operator-remote/templates/remote-kubeconfig.yaml b/system/ipam-operator-remote/templates/remote-kubeconfig.yaml index 17e2a714524..bcf43e4cabc 100644 --- a/system/ipam-operator-remote/templates/remote-kubeconfig.yaml +++ b/system/ipam-operator-remote/templates/remote-kubeconfig.yaml @@ -9,23 +9,5 @@ metadata: serviceaccount.resources.gardener.cloud/name: ipam-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system stringData: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: {{ .Values.remote.ca }} - server: {{ .Values.remote.server }} - name: remote-cluster - contexts: - - context: - cluster: remote-cluster - user: ipam-operator-controller-manager - namespace: kube-system - name: remote-cluster - current-context: remote-cluster - kind: Config - preferences: {} - users: - - name: ipam-operator-controller-manager - user: - token: "" + token: "" + bundle.crt: "" diff --git a/system/ipam-operator-remote/values.yaml b/system/ipam-operator-remote/values.yaml index 309d53730a0..adf4f1729b4 100644 --- a/system/ipam-operator-remote/values.yaml +++ b/system/ipam-operator-remote/values.yaml @@ -36,6 +36,3 @@ managerConfig: leaderElect: true resourceName: f42c18d5.ironcore.dev fullnameOverride: ipam-operator -remote: - ca: "" - server: "" diff --git a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml index fd51620fe7a..d3327605b86 100644 --- a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml +++ b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml @@ -34,9 +34,17 @@ spec: value: "false" volumeMounts: - name: remote-kubeconfig - mountPath: /kubeconfig + mountPath: /var/run/secrets/kubernetes.io/serviceaccount readOnly: true + env: + - name: KUBERNETES_SERVICE_HOST + value: "apiserver-url" volumes: - name: remote-kubeconfig secret: secretName: ipam-operator-remote-kubeconfig + items: + - key: token + path: token + - key: bundle.crt + path: ca.crt diff --git a/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml b/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml index 17e2a714524..bcf43e4cabc 100644 --- a/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml +++ b/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml @@ -9,23 +9,5 @@ metadata: serviceaccount.resources.gardener.cloud/name: ipam-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system stringData: - kubeconfig: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: {{ .Values.remote.ca }} - server: {{ .Values.remote.server }} - name: remote-cluster - contexts: - - context: - cluster: 
remote-cluster - user: ipam-operator-controller-manager - namespace: kube-system - name: remote-cluster - current-context: remote-cluster - kind: Config - preferences: {} - users: - - name: ipam-operator-controller-manager - user: - token: "" + token: "" + bundle.crt: "" From d0ae6741572fb14d8b6dc1c893d2a07da4bccff7 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 7 Jan 2025 12:04:59 +0200 Subject: [PATCH 077/224] [ipam-operator-remote] rm kubeconfig arg --- system/Makefile | 2 +- system/ipam-operator-remote/Chart.yaml | 2 +- system/ipam-operator-remote/values.yaml | 1 - system/kustomize/ipam-operator-remote/manager-remote-patch.yaml | 1 - 4 files changed, 2 insertions(+), 4 deletions(-) diff --git a/system/Makefile b/system/Makefile index d792e684e32..ec10d1a979d 100644 --- a/system/Makefile +++ b/system/Makefile @@ -188,7 +188,7 @@ build-ipam-operator-remote: @kubectl kustomize kustomize/ipam-operator-managedresources > ipam-operator-remote/managedresources/kustomize.yaml @yq -i '.controllerManager.manager.image.tag="$(IPAM_VERSION)"' ipam-operator-remote/values.yaml @yq -i '.fullnameOverride="ipam-operator"' ipam-operator-remote/values.yaml - @yq -i '.version="0.1.2"' ipam-operator-remote/Chart.yaml + @yq -i '.version="0.1.3"' ipam-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' ipam-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' ipam-operator-remote/managedresources/kustomize.yaml diff --git a/system/ipam-operator-remote/Chart.yaml b/system/ipam-operator-remote/Chart.yaml index 901adc24cdf..b3c13a3479b 100644 --- a/system/ipam-operator-remote/Chart.yaml +++ b/system/ipam-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.2 +version: 0.1.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
diff --git a/system/ipam-operator-remote/values.yaml b/system/ipam-operator-remote/values.yaml index adf4f1729b4..9904ffd501b 100644 --- a/system/ipam-operator-remote/values.yaml +++ b/system/ipam-operator-remote/values.yaml @@ -3,7 +3,6 @@ controllerManager: args: - --health-probe-bind-address=:30081 - --metrics-bind-address=127.0.0.1:30082 - - --kubeconfig=/kubeconfig/kubeconfig containerSecurityContext: allowPrivilegeEscalation: false env: diff --git a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml index d3327605b86..03e5b80c22e 100644 --- a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml +++ b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml @@ -12,7 +12,6 @@ spec: args: - --health-probe-bind-address=:30081 - --metrics-bind-address=127.0.0.1:30082 - - --kubeconfig=/kubeconfig/kubeconfig livenessProbe: failureThreshold: 3 httpGet: From 14c08afe4d888a6d7c359f248621a0d8cec4987c Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Tue, 7 Jan 2025 11:21:30 +0100 Subject: [PATCH 078/224] content-replication: bump statsd-exporter to 0.28.0 --- openstack/backup-replication/values.yaml | 2 +- openstack/content-repo/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/openstack/backup-replication/values.yaml b/openstack/backup-replication/values.yaml index 3bda4c2ef6b..e1d336d0e4a 100644 --- a/openstack/backup-replication/values.yaml +++ b/openstack/backup-replication/values.yaml @@ -19,7 +19,7 @@ swift_http_import: image_repository: null statsd: - exporter_image_version: 'v0.26.1' + exporter_image_version: 'v0.28.0' alerts: enabled: true diff --git a/openstack/content-repo/values.yaml b/openstack/content-repo/values.yaml index 8cc49ebf928..f94cae3e6ad 100644 --- a/openstack/content-repo/values.yaml +++ b/openstack/content-repo/values.yaml @@ -18,7 +18,7 @@ owner-info: image_version: DEFINED_IN_VALUES_FILE debug: false -image_version_auxiliary_statsd_exporter: 'v0.26.1' +image_version_auxiliary_statsd_exporter: 'v0.28.0' auth_url: DEFINED_IN_VALUES_FILE From a6edd7a84966d56037399838b4c3cdc8bc1420b7 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 7 Jan 2025 12:23:35 +0200 Subject: [PATCH 079/224] [ipam-operator-remote] inject ca bundle to secret --- system/Makefile | 2 +- system/ipam-operator-remote/Chart.yaml | 2 +- system/ipam-operator-remote/templates/remote-kubeconfig.yaml | 1 + system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/system/Makefile b/system/Makefile index ec10d1a979d..6e5e8ad8bf8 100644 --- a/system/Makefile +++ b/system/Makefile @@ -188,7 +188,7 @@ build-ipam-operator-remote: @kubectl kustomize kustomize/ipam-operator-managedresources > ipam-operator-remote/managedresources/kustomize.yaml @yq -i '.controllerManager.manager.image.tag="$(IPAM_VERSION)"' ipam-operator-remote/values.yaml @yq -i '.fullnameOverride="ipam-operator"' ipam-operator-remote/values.yaml - @yq -i '.version="0.1.3"' ipam-operator-remote/Chart.yaml + @yq -i '.version="0.1.4"' ipam-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' ipam-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' ipam-operator-remote/managedresources/kustomize.yaml diff --git a/system/ipam-operator-remote/Chart.yaml b/system/ipam-operator-remote/Chart.yaml index b3c13a3479b..66766980066 100644 --- 
a/system/ipam-operator-remote/Chart.yaml +++ b/system/ipam-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.3 +version: 0.1.4 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/system/ipam-operator-remote/templates/remote-kubeconfig.yaml b/system/ipam-operator-remote/templates/remote-kubeconfig.yaml index bcf43e4cabc..662e977bdcb 100644 --- a/system/ipam-operator-remote/templates/remote-kubeconfig.yaml +++ b/system/ipam-operator-remote/templates/remote-kubeconfig.yaml @@ -8,6 +8,7 @@ metadata: annotations: serviceaccount.resources.gardener.cloud/name: ipam-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system + serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true" stringData: token: "" bundle.crt: "" diff --git a/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml b/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml index bcf43e4cabc..662e977bdcb 100644 --- a/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml +++ b/system/kustomize/ipam-operator-remote/remote-kubeconfig.yaml @@ -8,6 +8,7 @@ metadata: annotations: serviceaccount.resources.gardener.cloud/name: ipam-operator-controller-manager serviceaccount.resources.gardener.cloud/namespace: kube-system + serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true" stringData: token: "" bundle.crt: "" From fae34f4330e6fccff09ec516eb1f7eb5fcb949a8 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Tue, 7 Jan 2025 12:29:41 +0200 Subject: [PATCH 080/224] [ipam-operator-remote] fix env --- system/Makefile | 2 +- system/ipam-operator-remote/Chart.yaml | 2 +- system/ipam-operator-remote/templates/deployment.yaml | 2 ++ system/ipam-operator-remote/values.yaml | 1 + .../kustomize/ipam-operator-remote/manager-remote-patch.yaml | 5 ++--- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/system/Makefile b/system/Makefile index 6e5e8ad8bf8..02943ba64e1 100644 --- a/system/Makefile +++ b/system/Makefile @@ -188,7 +188,7 @@ build-ipam-operator-remote: @kubectl kustomize kustomize/ipam-operator-managedresources > ipam-operator-remote/managedresources/kustomize.yaml @yq -i '.controllerManager.manager.image.tag="$(IPAM_VERSION)"' ipam-operator-remote/values.yaml @yq -i '.fullnameOverride="ipam-operator"' ipam-operator-remote/values.yaml - @yq -i '.version="0.1.4"' ipam-operator-remote/Chart.yaml + @yq -i '.version="0.1.5"' ipam-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' ipam-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' ipam-operator-remote/managedresources/kustomize.yaml diff --git a/system/ipam-operator-remote/Chart.yaml b/system/ipam-operator-remote/Chart.yaml index 66766980066..9346816a5a2 100644 --- a/system/ipam-operator-remote/Chart.yaml +++ b/system/ipam-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.4 +version: 0.1.5 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/system/ipam-operator-remote/templates/deployment.yaml b/system/ipam-operator-remote/templates/deployment.yaml index c1125a7c575..1376a3abb36 100644 --- a/system/ipam-operator-remote/templates/deployment.yaml +++ b/system/ipam-operator-remote/templates/deployment.yaml @@ -24,6 +24,8 @@ spec: env: - name: ENABLE_WEBHOOKS value: {{ quote .Values.controllerManager.manager.env.enableWebhooks }} + - name: KUBERNETES_SERVICE_HOST + value: {{ quote .Values.controllerManager.manager.env.kubernetesServiceHost }} - name: KUBERNETES_CLUSTER_DOMAIN value: {{ quote .Values.kubernetesClusterDomain }} image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag diff --git a/system/ipam-operator-remote/values.yaml b/system/ipam-operator-remote/values.yaml index 9904ffd501b..2c72c07bb11 100644 --- a/system/ipam-operator-remote/values.yaml +++ b/system/ipam-operator-remote/values.yaml @@ -7,6 +7,7 @@ controllerManager: allowPrivilegeEscalation: false env: enableWebhooks: "false" + kubernetesServiceHost: apiserver-url image: repository: ironcore-dev/ipam tag: 6faf501000c5d7ff9744a3c111ca5ecf3339c00c diff --git a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml index 03e5b80c22e..4da13c7de83 100644 --- a/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml +++ b/system/kustomize/ipam-operator-remote/manager-remote-patch.yaml @@ -31,13 +31,12 @@ spec: env: - name: ENABLE_WEBHOOKS value: "false" + - name: KUBERNETES_SERVICE_HOST + value: "apiserver-url" volumeMounts: - name: remote-kubeconfig mountPath: /var/run/secrets/kubernetes.io/serviceaccount readOnly: true - env: - - name: KUBERNETES_SERVICE_HOST - value: "apiserver-url" volumes: - name: remote-kubeconfig secret: From 6091752265b5a4666462ae16416f9b25704f8532 Mon Sep 17 00:00:00 2001 From: Jens Sandmann Date: Tue, 7 Jan 2025 13:10:15 +0100 Subject: [PATCH 081/224] [ironic] dependency updates mariadb: - return immutable deployment selector label - Update mysqld-exporter to 0.16.0 - version info added to labels - Update MariaDB to the 10.5.27 version - priorityClassName updated to new naming convention - fixes unnecessary quotes for sse_customer_key memcached: - [memcached] return immutable deployment selector label - Change openstack-service-critical to critical-infrastructure PriorityClass - memcached version bumped to 1.6.31-alpine3.20 - priorityClassName updated to new naming convention mysql_metrics: - Fix imageTag should be a string, not a number - Update to the latest 0.5.8 ts build rabbitmq: - return immutable deployment selector label - Change openstack-service-critical to critical-infrastructure PriorityClass - rabbitmq version bumped to 3.13.7-management - priorityClassName updated to new naming convention utils: - Configure Galera monitor in ProxySQL sidecars - Set default transaction and query timeouts - Use ProxySQL 2.7.1 as a default version --- openstack/ironic/Chart.lock | 16 ++++++++-------- openstack/ironic/Chart.yaml | 12 ++++++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git
a/openstack/ironic/Chart.lock b/openstack/ironic/Chart.lock index d99e8f1836a..bafe409f113 100644 --- a/openstack/ironic/Chart.lock +++ b/openstack/ironic/Chart.lock @@ -1,27 +1,27 @@ dependencies: - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.14.2 + version: 0.15.2 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.5.3 + version: 0.6.1 - name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.3.5 + version: 0.4.1 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 - name: rabbitmq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.11.1 + version: 0.12.1 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.18.3 + version: 0.21.0 - name: ironic-exporter repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.3 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.0 -digest: sha256:779c746792ddf33fb65ca2da90f6d0910f5eb41ddc3c1a0434595d5689753270 -generated: "2024-09-17T16:06:19.224770281+02:00" + version: 1.1.0 +digest: sha256:ffd6daea23296f101996ad5971d1dc7246209c1645f8a1c3cbd2ee91ccc86e89 +generated: "2025-01-07T13:09:51.01360416+01:00" diff --git a/openstack/ironic/Chart.yaml b/openstack/ironic/Chart.yaml index e823d86061d..e179317784c 100644 --- a/openstack/ironic/Chart.yaml +++ b/openstack/ironic/Chart.yaml @@ -7,26 +7,26 @@ dependencies: - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.14.2 + version: ~0.15.2 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.5.3 + version: ~0.6.1 - condition: mysql_metrics.enabled name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.3.5 + version: ~0.4.1 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: ~1.0.0 - name: rabbitmq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.11.1 + version: ~0.12.1 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.18.3 + version: ~0.21.0 - name: ironic-exporter repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: ~1.0.3 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~1.0.0 + version: ~1.1.0 From 354e3730cfc4d1dd5d37ec415fd844400ea4772d Mon Sep 17 00:00:00 2001 From: Dmitry Galkin Date: Tue, 7 Jan 2025 13:36:52 +0100 Subject: [PATCH 082/224] [designate] add new config sections in preparation for Dalmatian - not changing the current config yet --- .../templates/etc/_designate.conf.tpl | 69 ++++++++++++------- 1 file changed, 45 insertions(+), 24 deletions(-) diff --git a/openstack/designate/templates/etc/_designate.conf.tpl b/openstack/designate/templates/etc/_designate.conf.tpl index be32c7270de..7d0ea433f48 100644 --- a/openstack/designate/templates/etc/_designate.conf.tpl +++ b/openstack/designate/templates/etc/_designate.conf.tpl @@ -33,7 +33,7 @@ api_paste_config = /etc/designate/api-paste.ini network_api = neutron # Supported record types -#supported_record_type = A, AAAA, CNAME, MX, SRV, TXT, SPF, NS, PTR, SSHFP, SOA +#supported_record_type = A,AAAA,CNAME,MX,SRV,TXT,SPF,NS,PTR,SSHFP,SOA,NAPTR,CAA,CERT # Setting SOA defaults default_soa_refresh_min = 3500 @@ -322,14 +322,38 @@ workers = {{ .Values.producer_workers }} # RPC topic name for producer (string value) topic = producer -#------------------------ -# 
Deleted domains purging -#------------------------ -[producer_task:zone_purge] -# -# From designate.producer -# +[producer_task:delayed_notify] +# Run interval in seconds (integer value) +#interval = 5 + +# Default amount of results returned per page (integer value) +#per_page = 100 + +# How many zones to receive NOTIFY on each run (integer value) +#batch_size = 100 + +[producer_task:periodic_exists] +# Run interval in seconds (integer value) +#interval = 3600 + +# Default amount of results returned per page (integer value) +#per_page = 100 + +[producer_task:periodic_secondary_refresh] +# Run interval in seconds (integer value) +#interval = 3600 + +# Default amount of results returned per page (integer value) +#per_page = 100 +[producer_task:worker_periodic_recovery] +# Run interval in seconds (integer value) +#interval = 120 + +# Default amount of results returned per page (integer value) +#per_page = 100 + +[producer_task:zone_purge] # Run interval in seconds (integer value) interval = {{ .Values.zone_purge.interval }} @@ -343,16 +367,6 @@ time_threshold = {{ .Values.zone_purge.time_threshold }} # How many zones to be purged on each run (integer value) batch_size = {{ .Values.zone_purge.batch_size }} -#------------------------ -# Delayed zones NOTIFY -#------------------------ -[zone_manager_task:delayed_notify] -# How frequently to scan for zones pending NOTIFY, in seconds -#interval = 5 - -# How many zones to receive NOTIFY on each run -#batch_size = 100 - #----------------------- # Worker Service #----------------------- @@ -390,6 +404,19 @@ poll_delay = {{ .Values.worker_poll_delay }} # Whether to allow worker to send NOTIFYs. NOTIFY requests to mdns will noop notify = {{ .Values.worker_notify }} +# Timeout in seconds for XFR's. (integer value) +#xfr_timeout = 10 + +# The maximum number of times to retry fetching a zones serial. (integer value) +#serial_max_retries = 3 + +# The time to wait before retrying a zone serial request. (integer value) +#serial_retry_delay = 1 + +# Timeout in seconds before giving up on fetching a zones serial. 
(integer +# value) +#serial_timeout = 1 + # Whether to enforce worker to send messages over TCP all_tcp = {{ .Values.worker_all_tcp }} @@ -414,12 +441,6 @@ insecure = True [storage:sqlalchemy] mysql_sql_mode = TRADITIONAL -#connection_debug = 0 -#connection_trace = False -#sqlite_synchronous = True -#connection_recycle_time = 3600 -#max_retries = 10 - max_pool_size = {{ .Values.max_pool_size | default .Values.global.max_pool_size | default 100 }} max_overflow = {{ .Values.max_overflow | default .Values.global.max_overflow | default 50 }} From 1e32ade29750e7dd30c9878fd9b4c74ef0d91e41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Tue, 7 Jan 2025 13:54:59 +0100 Subject: [PATCH 083/224] Drop with limes auto quota grow useless OpenstackRepoObjectstoreQuota alert --- .../swift-utils/alerts/content-repo.alerts | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100644 openstack/swift-utils/alerts/content-repo.alerts diff --git a/openstack/swift-utils/alerts/content-repo.alerts b/openstack/swift-utils/alerts/content-repo.alerts deleted file mode 100644 index b625f5480c4..00000000000 --- a/openstack/swift-utils/alerts/content-repo.alerts +++ /dev/null @@ -1,18 +0,0 @@ -# vim: set ft=yaml: - -groups: -- name: openstack-swift-content-repo.alerts - rules: - - alert: OpenstackRepoObjectstoreQuota - expr: floor(limes_project_usage{service="object-store",resource="capacity",domain="ccadmin",project="master"}/limes_project_quota{service="object-store",domain="ccadmin",project="master"}*100) >= 95 - for: 1h - labels: - support_group: containers - tier: os - service: repo # NOTE: this cannot be deployed as part of the content-repo chart since scaleout does not have the requisite metrics - severity: warning - context: repo-swift-quota - meta: "Swift usage for project ccadmin/master reached 95%" - annotations: - description: Swift usage for project ccadmin/master reached 95%. Increase quota if possible, otherwise clean up immediately. 
- summary: Swift usage reached 95% in ccadmin/master From 0d3840bc7f5f018897b3b20543e1d276430fecff Mon Sep 17 00:00:00 2001 From: Dmitry Galkin Date: Tue, 7 Jan 2025 14:18:53 +0100 Subject: [PATCH 084/224] [designate] prepare policies for Dalmatian - also change default fallback rule to cloud_dns_admin --- .../designate/templates/etc/_designate-policy.yaml.tpl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/openstack/designate/templates/etc/_designate-policy.yaml.tpl b/openstack/designate/templates/etc/_designate-policy.yaml.tpl index f6746016cf1..fac98587e3c 100644 --- a/openstack/designate/templates/etc/_designate-policy.yaml.tpl +++ b/openstack/designate/templates/etc/_designate-policy.yaml.tpl @@ -31,7 +31,7 @@ context_is_master: rule:context_is_dns_support or rule:context_is_zonemaster or zone_primary_or_dns_ops: "('PRIMARY':%(zone_type)s and rule:context_is_dns_ops) or ('SECONDARY':%(zone_type)s and is_admin:True)" -default: rule:context_is_viewer +default: rule:context_is_cloud_admin all_tenants: rule:context_is_dns_support or rule:cloud_dns_viewer edit_managed_records: rule:context_is_master use_low_ttl: rule:context_is_dns_support @@ -56,6 +56,7 @@ share_zone: rule:context_is_master unshare_zone: rule:context_is_master create_zone: rule:context_is_dns_ops move_zone: rule:context_is_dns_ops +pool_move_zone: rule:context_is_dns_ops create_sub_zone: rule:context_is_zonemaster create_super_zone: rule:context_is_cloud_admin get_zones: rule:context_is_viewer @@ -63,10 +64,12 @@ get_zone: rule:context_is_viewer get_shared_zone: rule:context_is_viewer get_zone_share: rule:context_is_viewer get_zone_servers: rule:context_is_viewer +get_zone_ns_records: rule:context_is_viewer find_zones: rule:context_is_viewer find_zone: rule:context_is_viewer find_shared_zones: rule:context_is_viewer find_zone_shares: rule:context_is_viewer +find_project_zone_share: rule:context_is_viewer update_zone: rule:context_is_master update_sub_zone: rule:context_is_master delete_zone: rule:context_is_master @@ -136,6 +139,7 @@ update_record: rule:context_is_master delete_record: rule:context_is_master count_records: rule:context_is_viewer use_sudo: rule:context_is_dns_ops +hard_delete: rule:context_is_dns_ops create_blacklist: rule:context_is_dns_ops find_blacklist: rule:context_is_dns_support find_blacklists: rule:context_is_dns_support @@ -185,6 +189,7 @@ create_zone_export: rule:context_is_master find_zone_exports: rule:context_is_master get_zone_export: rule:context_is_master update_zone_export: rule:context_is_master +delete_zone_export: rule:context_is_master find_service_status: rule:admin find_service_statuses: rule:admin From 503a08c4539d32ececda2a2a0293101beca8d74b Mon Sep 17 00:00:00 2001 From: Birk Bohne Date: Mon, 16 Dec 2024 11:19:24 +0100 Subject: [PATCH 085/224] memcached version bumped to 1.6.33-alpine3.21 - chart version bumped Signed-off-by: Birk Bohne --- common/memcached/CHANGELOG.md | 5 +++++ common/memcached/Chart.yaml | 2 +- common/memcached/values.yaml | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/common/memcached/CHANGELOG.md b/common/memcached/CHANGELOG.md index 718b2572e66..9dc7de14d33 100644 --- a/common/memcached/CHANGELOG.md +++ b/common/memcached/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog +## v0.6.2 - 2024/12/16 + +* memcached [version](https://github.com/memcached/memcached/wiki/ReleaseNotes1633) bumped to `1.6.33-alpine3.21` +* chart version bumped + ## v0.6.1 - 2024/11/28 * `app` selector label returned, because deployment selector is
immutable * chart version bumped diff --git a/common/memcached/Chart.yaml b/common/memcached/Chart.yaml index d537309dc97..83a877fce78 100644 --- a/common/memcached/Chart.yaml +++ b/common/memcached/Chart.yaml @@ -1,7 +1,7 @@ --- apiVersion: v1 name: memcached -version: 0.6.1 +version: 0.6.2 description: Free & open source, high-performance, distributed memory object caching system. home: http://memcached.org/ sources: diff --git a/common/memcached/values.yaml b/common/memcached/values.yaml index 6ba752b0475..010a18a65cc 100644 --- a/common/memcached/values.yaml +++ b/common/memcached/values.yaml @@ -2,7 +2,7 @@ ## ref: https://hub.docker.com/r/library/memcached/tags/ ## image: library/memcached -imageTag: 1.6.31-alpine3.20 +imageTag: 1.6.33-alpine3.21 # set to true to use .Values.global.dockerHubMirrorAlternateRegion instead of .Values.global.dockerHubMirror use_alternate_registry: false From 78afa307ff12d938435a3d836d9b6b28f4444802 Mon Sep 17 00:00:00 2001 From: Martin Vossen Date: Tue, 7 Jan 2025 17:02:49 +0100 Subject: [PATCH 086/224] update owner-info (#7624) --- openstack/nannies/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openstack/nannies/values.yaml b/openstack/nannies/values.yaml index d9f43334d72..ccdd417ceb8 100644 --- a/openstack/nannies/values.yaml +++ b/openstack/nannies/values.yaml @@ -112,8 +112,8 @@ nova: enabled: false owner-info: - support-group: observability + support-group: compute-storage-api service: nannies maintainers: - - Thomas Graichen + - Johannes Kulik helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/openstack/nannies From a2f5f8122ecec95c037cbb10ed83b534c7f79a79 Mon Sep 17 00:00:00 2001 From: Nuckal777 Date: Tue, 7 Jan 2025 17:35:25 +0100 Subject: [PATCH 087/224] Update git-cert-shim --- global/git-cert-shim/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/global/git-cert-shim/Chart.yaml b/global/git-cert-shim/Chart.yaml index c63984afc17..f97639d9b16 100644 --- a/global/git-cert-shim/Chart.yaml +++ b/global/git-cert-shim/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 description: Issues certificates inside a Kubernetes cluster and then publishes them for use in services outside of Kubernetes name: git-cert-shim -version: 2.0.2 -appVersion: 2.1.2 +version: 2.0.3 +appVersion: 2.1.3 dependencies: - name: owner-info From 49eab1f70c3cd69af1d2b917a3d95dd867f5da55 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Tue, 7 Jan 2025 17:33:46 +0000 Subject: [PATCH 088/224] system/kube-system-kubernikus: run helm dep up --- system/kube-system-kubernikus/Chart.lock | 6 +++--- system/kube-system-kubernikus/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/system/kube-system-kubernikus/Chart.lock b/system/kube-system-kubernikus/Chart.lock index 4bbba1cfc31..3d3851e5f74 100644 --- a/system/kube-system-kubernikus/Chart.lock +++ b/system/kube-system-kubernikus/Chart.lock @@ -40,7 +40,7 @@ dependencies: version: 0.0.22 - name: maintenance-controller repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.6 + version: 1.0.7 - name: toolbox-prepull repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.7 @@ -53,5 +53,5 @@ dependencies: - name: reloader repository: oci://ghcr.io/stakater/charts version: 1.2.0 -digest: sha256:f5512eac004b19961f0a8b8815f7b598f406b1624ab7492de0bc2ca151bd0a09 -generated: "2024-12-10T13:54:42.848903+01:00" +digest: sha256:057c71a75b3f98e35c110c7e462723175e40dda3b8fb6d9fe2c8b101140aa028 +generated: 
"2025-01-07T17:33:37.279251128Z" diff --git a/system/kube-system-kubernikus/Chart.yaml b/system/kube-system-kubernikus/Chart.yaml index 706c8e362b2..94ea7e23acd 100644 --- a/system/kube-system-kubernikus/Chart.yaml +++ b/system/kube-system-kubernikus/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Kube-System relevant Service collection for Kubernikus control-plane clusters. name: kube-system-kubernikus -version: 3.5.30 +version: 3.5.31 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-kubernikus dependencies: - name: cc-rbac From 7d711964a98ea5701d04e7b1e9b7369e7be45674 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Tue, 7 Jan 2025 17:34:04 +0000 Subject: [PATCH 089/224] system/kube-system-scaleout: run helm dep up --- system/kube-system-scaleout/Chart.lock | 6 +++--- system/kube-system-scaleout/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/system/kube-system-scaleout/Chart.lock b/system/kube-system-scaleout/Chart.lock index d94522bdc47..8dbbbea2440 100644 --- a/system/kube-system-scaleout/Chart.lock +++ b/system/kube-system-scaleout/Chart.lock @@ -40,7 +40,7 @@ dependencies: version: 2.0.0 - name: maintenance-controller repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.6 + version: 1.0.7 - name: ldap-named-user repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 @@ -62,5 +62,5 @@ dependencies: - name: reloader repository: oci://ghcr.io/stakater/charts version: 1.2.0 -digest: sha256:a9bc1c5d6ee229ff803fe6a3a5e24af0c73f473fa8e0cedaa35c70dd6a0b0bd4 -generated: "2024-12-10T13:55:38.945626+01:00" +digest: sha256:0862fe5e0622dd60dbd7a90ce9b3a4e4886cfac4fa97c592b99b802b2dc0170a +generated: "2025-01-07T17:33:55.25116955Z" diff --git a/system/kube-system-scaleout/Chart.yaml b/system/kube-system-scaleout/Chart.yaml index 233ab97bd2c..4c4ed27a030 100644 --- a/system/kube-system-scaleout/Chart.yaml +++ b/system/kube-system-scaleout/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kube-System relevant Service collection for scaleout clusters. 
name: kube-system-scaleout -version: 5.7.40 +version: 5.7.41 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-scaleout dependencies: - name: cc-rbac From 0da13f3c1d89ab400884a45ac575177aa425b818 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Tue, 7 Jan 2025 17:34:09 +0000 Subject: [PATCH 090/224] system/kube-system-metal: run helm dep up --- system/kube-system-metal/Chart.lock | 6 +++--- system/kube-system-metal/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/system/kube-system-metal/Chart.lock b/system/kube-system-metal/Chart.lock index 45647dd7466..e31b81ee070 100644 --- a/system/kube-system-metal/Chart.lock +++ b/system/kube-system-metal/Chart.lock @@ -67,7 +67,7 @@ dependencies: version: 1.1.2 - name: maintenance-controller repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.6 + version: 1.0.7 - name: vpa-butler repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.3.9 @@ -101,5 +101,5 @@ dependencies: - name: reloader repository: oci://ghcr.io/stakater/charts version: 1.2.0 -digest: sha256:980224409a47ca113015f27ec4b5a5e896e82549603bc61224bd10ca57ddd8da -generated: "2024-12-16T15:40:31.853361427Z" +digest: sha256:855a768368cf258302b5fa4bfffd27b65b1420a7216c2a6ed59eedc1355e7cec +generated: "2025-01-07T17:33:56.778210003Z" diff --git a/system/kube-system-metal/Chart.yaml b/system/kube-system-metal/Chart.yaml index 9473a8966c1..08aa043cb2c 100644 --- a/system/kube-system-metal/Chart.yaml +++ b/system/kube-system-metal/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Kube-System relevant Service collection for metal clusters. name: kube-system-metal -version: 6.10.37 +version: 6.10.38 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-metal dependencies: - name: cc-rbac From b02f7ad760d2533b9a2569625f92ba14e0b4f491 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Tue, 7 Jan 2025 17:34:19 +0000 Subject: [PATCH 091/224] system/kube-system-virtual: run helm dep up --- system/kube-system-virtual/Chart.lock | 8 ++++---- system/kube-system-virtual/Chart.yaml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/system/kube-system-virtual/Chart.lock b/system/kube-system-virtual/Chart.lock index 9da2c2ed5a1..c87cf49766d 100644 --- a/system/kube-system-virtual/Chart.lock +++ b/system/kube-system-virtual/Chart.lock @@ -43,7 +43,7 @@ dependencies: version: 2.0.0 - name: maintenance-controller repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.6 + version: 1.0.7 - name: sysctl repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.0.9 @@ -55,7 +55,7 @@ dependencies: version: 1.0.0 - name: owner-label-injector repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.0.22 + version: 0.0.24 - name: velero repository: https://vmware-tanzu.github.io/helm-charts version: 5.0.2 @@ -68,5 +68,5 @@ dependencies: - name: reloader repository: oci://ghcr.io/stakater/charts version: 1.2.0 -digest: sha256:08358b958de302a4f11080c52d86b42f2b618790611165d76c374e8f9b85ea93 -generated: "2024-12-10T13:56:00.36094+01:00" +digest: sha256:c2a3dc3081470d3cb52a7abb9b479bdeb295f421e3b8775c27c11a648ddc8cd2 +generated: "2025-01-07T17:34:06.955127816Z" diff --git a/system/kube-system-virtual/Chart.yaml b/system/kube-system-virtual/Chart.yaml index eb715c97739..1d25d40233b 100644 --- a/system/kube-system-virtual/Chart.yaml +++ b/system/kube-system-virtual/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Kube-System relevant Service collection for virtual 
clusters. name: kube-system-virtual -version: 4.5.42 +version: 4.5.43 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-virtual dependencies: - name: cc-rbac From 23fef349a2131c21a9eed5cd31ccc3c684ee28a8 Mon Sep 17 00:00:00 2001 From: degricar <156818083+degricar@users.noreply.github.com> Date: Wed, 8 Jan 2025 10:09:05 +0100 Subject: [PATCH 092/224] Add sr 4s2t hv flavor (#7608) * Add sr 4s2t hv flavor we currently have only the bm flavor for our ThinkSystem SR850 V3. Adding the hv flavor --- openstack/ironic/templates/seed.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/openstack/ironic/templates/seed.yaml b/openstack/ironic/templates/seed.yaml index 0c92be52692..35fcc3a600e 100644 --- a/openstack/ironic/templates/seed.yaml +++ b/openstack/ironic/templates/seed.yaml @@ -472,6 +472,18 @@ spec: "resources:CUSTOM_BM_S4_C32_M4096_V0": "1" {{- tuple . "baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} + # New Sapphire Rapids 4S2T as Hypervisor in qa + - name: "hv_s4_c32_m2048_v0" + id: "2035" + vcpus: 128 + ram: 2097152 + disk: 447 + is_public: true + extra_specs: + "catalog:description": Hypervisor 4 sockets, 32 cores, 2TB RAM, Sapphire Rapids + "resources:CUSTOM_HV_S4_C32_M2048_V0": "1" + {{- tuple . "baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} + # New Sapphire Rapids 4S2T only for HD&A BM servers. # Therefore only as baremetal flavor and not as hypervisor flavor. - name: "bm_s4_c32_m2048_v0" From e3b16bde84da9df5f223d316c61c33d3b41ab0b3 Mon Sep 17 00:00:00 2001 From: degricar <156818083+degricar@users.noreply.github.com> Date: Wed, 8 Jan 2025 10:09:41 +0100 Subject: [PATCH 093/224] adding bm flavor for 8s8t (#7615) * adding bm flavor for 8s8t Lenovo SR950 V3 --- openstack/ironic/templates/seed.yaml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/openstack/ironic/templates/seed.yaml b/openstack/ironic/templates/seed.yaml index 35fcc3a600e..dff1276d500 100644 --- a/openstack/ironic/templates/seed.yaml +++ b/openstack/ironic/templates/seed.yaml @@ -509,7 +509,7 @@ spec: "resources:CUSTOM_HV_S1_C32_M512_V0": "1" {{- tuple . "baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} - # New Sapphire Rapids 8S8T servers + # New Sapphire Rapids 8S8T servers HV - name: "hv_s8_c32_m8192_v0" id: "2037" vcpus: 256 @@ -521,6 +521,18 @@ spec: "resources:CUSTOM_HV_S8_C32_M8192_V0": "1" {{- tuple . "baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} + # Sapphire Rapids 8S8T BM servers + - name: "bm_s8_c32_m8192_v0" + id: "22037" + vcpus: 256 + ram: 8388608 + disk: 480 + is_public: true + extra_specs: + "catalog:description": Baremetal 8 socket, 32 cores, 8TB RAM, Sapphire Rapids + "resources:CUSTOM_BM_S8_C32_M8192_V0": "1" + {{- tuple .
"baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} + # new Sapphire Rapid 8S8T in qa-de-1 with less RAM - name: "hv_s8_c32_m2048_v0" id: "2038" From c6777700cb8c72d1c2c4d691bb2a981f49615fb0 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Tue, 7 Jan 2025 16:51:59 +0200 Subject: [PATCH 094/224] [mysql_metrics] Update sql-exporter version to 0.5.9 Update sql-exporter version to 0.5.9 (2025-01-07) --- common/mysql_metrics/CHANGELOG.md | 17 +++++++++++++++++ common/mysql_metrics/Chart.yaml | 2 +- common/mysql_metrics/ci/test-values.yaml | 1 + common/mysql_metrics/values.yaml | 3 ++- 4 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 common/mysql_metrics/CHANGELOG.md diff --git a/common/mysql_metrics/CHANGELOG.md b/common/mysql_metrics/CHANGELOG.md new file mode 100644 index 00000000000..0ebd7bc76ea --- /dev/null +++ b/common/mysql_metrics/CHANGELOG.md @@ -0,0 +1,17 @@ +# Changelog + +## v0.4.2 - 2025/01/08 + +* Updated sql-exporter version to 0.5.9 (2025-01-07) +* Chart version bumped + +## v0.4.1 - 2024/11/27 + +* Fixed imageTag string value +* Chart version bumped + +## v0.4.0 - 2024/11/15 + +* Updated sql-exporter version to 0.5.8 build with latest vulnerability fixes (2024-11-14) +* Set kubectl.kubernetes.io/default-container annotation +* Chart version bumped diff --git a/common/mysql_metrics/Chart.yaml b/common/mysql_metrics/Chart.yaml index cb7aed318ff..60bd9cf4994 100644 --- a/common/mysql_metrics/Chart.yaml +++ b/common/mysql_metrics/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: A Helm chart for MySQL Metrics name: mysql_metrics -version: 0.4.1 +version: 0.4.2 dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm diff --git a/common/mysql_metrics/ci/test-values.yaml b/common/mysql_metrics/ci/test-values.yaml index 52afbb7a9e1..5c7e1c68724 100644 --- a/common/mysql_metrics/ci/test-values.yaml +++ b/common/mysql_metrics/ci/test-values.yaml @@ -1,3 +1,4 @@ +--- global: registry: test diff --git a/common/mysql_metrics/values.yaml b/common/mysql_metrics/values.yaml index d78852930cc..611e02d526d 100644 --- a/common/mysql_metrics/values.yaml +++ b/common/mysql_metrics/values.yaml @@ -1,3 +1,4 @@ +--- global: tld: cloud.sap region: local @@ -17,7 +18,7 @@ port_metrics: '9237' prometheus: openstack loglevel: info image: sql-exporter -imageTag: '20241114111033' +imageTag: '20250107141635' imagePullPolicy: IfNotPresent ## Affinity for pod assignment From c5102662624f4770db000d0ad87db103450ee708 Mon Sep 17 00:00:00 2001 From: dhalimi Date: Wed, 8 Jan 2025 07:55:02 -0500 Subject: [PATCH 095/224] error rate - increasing threshold --- .../alerts/global/infra-frontend/cronus.alerts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts b/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts index 0ebf6925b06..1597a51acea 100644 --- a/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts +++ b/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts @@ -28,7 +28,7 @@ groups: description: sending emails with internal relay fails summary: sending emails with internal relay fails - alert: SendingEmailsWithSESHighErrorRate - expr: (sum(sum_over_time(cronus_event_mails_sent_error_provider_rate_perminute{provider="aws"}[5m]) OR on() vector(0)) ) / (sum (sum_over_time(cronus_event_mails_sent_provider_rate_perminute{provider="aws"}[5m]) OR on() 
vector(0))) > 0.5 + expr: (sum(sum_over_time(cronus_event_mails_sent_error_provider_rate_perminute{provider="aws"}[5m]) OR on() vector(0)) ) / (sum (sum_over_time(cronus_event_mails_sent_provider_rate_perminute{provider="aws"}[5m]) OR on() vector(0))) > 0.8 for: 15m labels: service: email @@ -38,10 +38,10 @@ groups: kibana: "app/discover#/?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-5m,to:now))&_a=(columns:!(_source),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'logstash-*',key:kubernetes_labels_name,negate:!f,params:(query:cronus),type:phrase),query:(match_phrase:(kubernetes_labels_name:cronus)))),index:'logstash-*',interval:auto,query:(language:kuery,query:'%22email%20delivery%20result%20%231:%20%5C%22aws%5C%22%20250%22'),sort:!())" support_group: email annotations: - description: sending emails with aws ses relay high error rate greater than 50% - summary: sending emails with aws ses relay high error rate greater than 50% + description: sending emails with aws ses relay high error rate greater than 80% + summary: sending emails with aws ses relay high error rate greater than 80% - alert: SendingEmailsHighErrorRateByProjectProvider - expr: sum (sum_over_time(cronus_event_mails_sent_error_provider_rate_perminute[5m]) OR on() vector(0)) by (project_name, provider) / sum (sum_over_time(cronus_event_mails_sent_provider_rate_perminute[5m]) OR on() vector(0)) by (project_name, provider) > 0.5 + expr: sum (sum_over_time(cronus_event_mails_sent_error_provider_rate_perminute[5m]) OR on() vector(0)) by (project_name, provider) / sum (sum_over_time(cronus_event_mails_sent_provider_rate_perminute[5m]) OR on() vector(0)) by (project_name, provider) > 0.8 for: 15m labels: service: email @@ -51,8 +51,8 @@ groups: kibana: "app/discover#/?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-5m,to:now))&_a=(columns:!(_source),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'logstash-*',key:kubernetes_labels_name,negate:!f,params:(query:cronus),type:phrase),query:(match_phrase:(kubernetes_labels_name:cronus)))),index:'logstash-*',interval:auto,query:(language:kuery,query:'%22email%20delivery%20result%20%231:%20%5C%22aws%5C%22%20250%22'),sort:!())" support_group: email annotations: - description: "sending emails from project: {{ $labels.project_name }} relay: {{ $labels.provider }} high error rate greater than 50%" - summary: "sending emails from project: {{ $labels.project_name }} relay: {{ $labels.provider }} high error rate greater than 50%" + description: "sending emails from project: {{ $labels.project_name }} relay: {{ $labels.provider }} high error rate greater than 80%" + summary: "sending emails from project: {{ $labels.project_name }} relay: {{ $labels.provider }} high error rate greater than 80%" - alert: EmailHealthTest expr: avg (cronus_health_test) by (name) == 0 for: 30m From a1ecd75f4937db56ec8f518f24b9c5910e96d1c7 Mon Sep 17 00:00:00 2001 From: dhalimi Date: Wed, 8 Jan 2025 07:57:36 -0500 Subject: [PATCH 096/224] Update cronus.alerts --- .../alerts/global/infra-frontend/cronus.alerts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts b/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts index 1597a51acea..a6c8dc94e49 100644 --- a/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts +++ 
b/prometheus-exporters/cronus-exporter/alerts/global/infra-frontend/cronus.alerts @@ -29,7 +29,7 @@ groups: summary: sending emails with internal relay fails - alert: SendingEmailsWithSESHighErrorRate expr: (sum(sum_over_time(cronus_event_mails_sent_error_provider_rate_perminute{provider="aws"}[5m]) OR on() vector(0)) ) / (sum (sum_over_time(cronus_event_mails_sent_provider_rate_perminute{provider="aws"}[5m]) OR on() vector(0))) > 0.8 - for: 15m + for: 20m labels: service: email severity: warning @@ -42,7 +42,7 @@ groups: summary: sending emails with aws ses relay high error rate greater than 80% - alert: SendingEmailsHighErrorRateByProjectProvider expr: sum (sum_over_time(cronus_event_mails_sent_error_provider_rate_perminute[5m]) OR on() vector(0)) by (project_name, provider) / sum (sum_over_time(cronus_event_mails_sent_provider_rate_perminute[5m]) OR on() vector(0)) by (project_name, provider) > 0.8 - for: 15m + for: 20m labels: service: email severity: warning From df45ffca2c86228bbf6b494ae2c0b9e6f92157bd Mon Sep 17 00:00:00 2001 From: Martin Vossen Date: Wed, 8 Jan 2025 14:15:47 +0100 Subject: [PATCH 097/224] chore: bump image version (#7613) * chore: bump image version * bump chart version * add linkerd * update chart dependencies * Update prometheus-exporters/cloudprober/Chart.yaml Co-authored-by: Tommy Sauer * bump linkerd-support version --------- Co-authored-by: Tommy Sauer --- prometheus-exporters/cloudprober/Chart.lock | 7 +++++-- prometheus-exporters/cloudprober/Chart.yaml | 5 ++++- prometheus-exporters/cloudprober/values.yaml | 5 ++++- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/prometheus-exporters/cloudprober/Chart.lock b/prometheus-exporters/cloudprober/Chart.lock index 05ef445cc40..7f9f075e6b9 100644 --- a/prometheus-exporters/cloudprober/Chart.lock +++ b/prometheus-exporters/cloudprober/Chart.lock @@ -2,5 +2,8 @@ dependencies: - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 -digest: sha256:45a74346d8c73d1b61264fa06d5cb48d3dc38abcfbaa2900b0b918fb532b52ba -generated: "2024-08-02T18:34:49.347055+02:00" +- name: linkerd-support + repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm + version: 1.1.0 +digest: sha256:10f13ced9bba9456dfa8a950fe0f09acdcb8efee172880e3063fbc0b947148b2 +generated: "2025-01-08T13:27:50.210194+01:00" diff --git a/prometheus-exporters/cloudprober/Chart.yaml b/prometheus-exporters/cloudprober/Chart.yaml index 4ad0852e4c1..703c4314f41 100644 --- a/prometheus-exporters/cloudprober/Chart.yaml +++ b/prometheus-exporters/cloudprober/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: cloudprober -version: 0.12.2 +version: 0.13.0 description: https://github.com/cloudprober/cloudprober maintainers: - name: Martin Vossen @@ -8,3 +8,6 @@ dependencies: - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 + - name: linkerd-support + repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm + version: ~1 diff --git a/prometheus-exporters/cloudprober/values.yaml b/prometheus-exporters/cloudprober/values.yaml index 7c8f6471188..1a096944adf 100644 --- a/prometheus-exporters/cloudprober/values.yaml +++ b/prometheus-exporters/cloudprober/values.yaml @@ -2,6 +2,9 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
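A note on the alert expressions tuned by the two patches above: each side of the error/total ratio is wrapped in `OR on() vector(0)`, which substitutes a literal 0 whenever the underlying series is absent, so the ratio expression always evaluates. A minimal sketch of the same pattern, with hypothetical metric names that are not taken from this repository:

    - alert: HighErrorRate
      # errors / total; absent series are treated as 0 via the vector(0) fallback.
      # If both sides are 0 the division yields NaN, and NaN > 0.8 is false,
      # so the alert cannot fire on "no traffic at all".
      expr: (sum(rate(demo_errors_total[5m])) OR on() vector(0)) / (sum(rate(demo_requests_total[5m])) OR on() vector(0)) > 0.8
      for: 20m

With the new values, more than 80% of sends have to fail continuously for 20 minutes before either alert fires, trading alert latency for fewer false positives on short error bursts.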
From df45ffca2c86228bbf6b494ae2c0b9e6f92157bd Mon Sep 17 00:00:00 2001
From: Martin Vossen
Date: Wed, 8 Jan 2025 14:15:47 +0100
Subject: [PATCH 097/224] chore: bump image version (#7613)

* chore: bump image version
* bump chart version
* add linkerd
* update chart dependencies
* Update prometheus-exporters/cloudprober/Chart.yaml

Co-authored-by: Tommy Sauer

* bump linkerd-support version

---------

Co-authored-by: Tommy Sauer
---
 prometheus-exporters/cloudprober/Chart.lock  | 7 +++++--
 prometheus-exporters/cloudprober/Chart.yaml  | 5 ++++-
 prometheus-exporters/cloudprober/values.yaml | 5 ++++-
 3 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/prometheus-exporters/cloudprober/Chart.lock b/prometheus-exporters/cloudprober/Chart.lock
index 05ef445cc40..7f9f075e6b9 100644
--- a/prometheus-exporters/cloudprober/Chart.lock
+++ b/prometheus-exporters/cloudprober/Chart.lock
@@ -2,5 +2,8 @@ dependencies:
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:45a74346d8c73d1b61264fa06d5cb48d3dc38abcfbaa2900b0b918fb532b52ba
-generated: "2024-08-02T18:34:49.347055+02:00"
+- name: linkerd-support
+  repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
+  version: 1.1.0
+digest: sha256:10f13ced9bba9456dfa8a950fe0f09acdcb8efee172880e3063fbc0b947148b2
+generated: "2025-01-08T13:27:50.210194+01:00"
diff --git a/prometheus-exporters/cloudprober/Chart.yaml b/prometheus-exporters/cloudprober/Chart.yaml
index 4ad0852e4c1..703c4314f41 100644
--- a/prometheus-exporters/cloudprober/Chart.yaml
+++ b/prometheus-exporters/cloudprober/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
 name: cloudprober
-version: 0.12.2
+version: 0.13.0
 description: https://github.com/cloudprober/cloudprober
 maintainers:
   - name: Martin Vossen
@@ -8,3 +8,6 @@ dependencies:
   - name: owner-info
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
     version: 1.0.0
+  - name: linkerd-support
+    repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
+    version: ~1
diff --git a/prometheus-exporters/cloudprober/values.yaml b/prometheus-exporters/cloudprober/values.yaml
index 7c8f6471188..1a096944adf 100644
--- a/prometheus-exporters/cloudprober/values.yaml
+++ b/prometheus-exporters/cloudprober/values.yaml
@@ -2,6 +2,9 @@
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
 
+global:
+  linkerd_requested: true
+
 owner-info:
   maintainers:
     - Martin Vossen
@@ -13,7 +16,7 @@ enabled: false
 
 image:
   name: cloudprober/cloudprober
-  tag: v0.11.7
+  tag: v0.13.8
   pullPolicy: IfNotPresent
 
 replicaCount: 1

From bd7aac6b4f831f89d3b560c7d6f5573b601c0c83 Mon Sep 17 00:00:00 2001
From: Martin Vossen
Date: Wed, 8 Jan 2025 14:28:56 +0100
Subject: [PATCH 098/224] bump linkerd-support and owner-info version (#7626)

* bump linkerd-support and owner-info version
* bump minor chart version
---
 openstack/nannies/Chart.lock | 8 ++++----
 openstack/nannies/Chart.yaml | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/openstack/nannies/Chart.lock b/openstack/nannies/Chart.lock
index bc7a6f44386..317669df4bc 100644
--- a/openstack/nannies/Chart.lock
+++ b/openstack/nannies/Chart.lock
@@ -4,9 +4,9 @@ dependencies:
   version: 0.4.2
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.2.0
+  version: 1.0.0
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.1.3
+  version: 1.1.0
-digest: sha256:d9ef62ed459771f5c4f47dc03af7074f90f09937f4c122a906304544d21e66de
-generated: "2024-04-12T16:00:48.701711+02:00"
+digest: sha256:d26b2eb3eb2ede60c92e26194456b8abf1f0033ebe268e938483d19dcc450968
+generated: "2025-01-08T14:20:57.186635+01:00"
diff --git a/openstack/nannies/Chart.yaml b/openstack/nannies/Chart.yaml
index 3ac1274ad13..b96b262e99a 100644
--- a/openstack/nannies/Chart.yaml
+++ b/openstack/nannies/Chart.yaml
@@ -1,14 +1,14 @@
 apiVersion: v2
 description: nannies - taking care of leftovers and inconsistencies from daily operations
 name: nannies
-version: 0.2.1
+version: 0.2.2
 dependencies:
 - name: utils
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.4.2
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.2.0
+  version: 1.0.0
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.1.3
+  version: ~1

From 8fbcd09ba1e0e6d74b956d5b100daafdb87bd4f8 Mon Sep 17 00:00:00 2001
From: Dmitri Fedotov
Date: Wed, 8 Jan 2025 17:10:03 +0200
Subject: [PATCH 099/224] [cni-nanny] switch image to ghcr
---
 system/cni-nanny/Chart.yaml                | 2 +-
 system/cni-nanny/templates/deployment.yaml | 2 +-
 system/cni-nanny/values.yaml               | 3 ---
 3 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/system/cni-nanny/Chart.yaml b/system/cni-nanny/Chart.yaml
index 275c0e46bb9..9b04f6b9f10 100644
--- a/system/cni-nanny/Chart.yaml
+++ b/system/cni-nanny/Chart.yaml
@@ -2,4 +2,4 @@ apiVersion: v2
 name: cni-nanny
 description: A Helm chart for the CNI nanny.
 type: application
-version: 0.0.4
+version: 0.0.5
diff --git a/system/cni-nanny/templates/deployment.yaml b/system/cni-nanny/templates/deployment.yaml
index b8127b99b8d..252f7d0e379 100644
--- a/system/cni-nanny/templates/deployment.yaml
+++ b/system/cni-nanny/templates/deployment.yaml
@@ -48,7 +48,7 @@ spec:
 {{- end }}
         command:
         - /manager
-        image: "{{ required ".Values.global.registryAlternateRegion is missing" $.Values.global.registryAlternateRegion }}/{{ $.Values.images.manager.image }}:{{ $.Values.images.manager.tag }}"
+        image: "{{ required ".Values.global.ghcrIoMirrorAlternateRegion is missing" $.Values.global.ghcrIoMirrorAlternateRegion }}/{{ $.Values.images.manager.image }}:{{ $.Values.images.manager.tag }}"
         livenessProbe:
           httpGet:
             path: /healthz
diff --git a/system/cni-nanny/values.yaml b/system/cni-nanny/values.yaml
index a6d0f5b7f74..ecd88573c70 100644
--- a/system/cni-nanny/values.yaml
+++ b/system/cni-nanny/values.yaml
@@ -4,9 +4,6 @@ global:
   gcrIoMirrorAlternateRegion: test
 
 images:
-  proxy:
-    image:
-    tag:
   manager:
     image:
     tag:

From 13e8631db2d5de72923d606260acfc305c1a93b0 Mon Sep 17 00:00:00 2001
From: sapcc-bot
Date: Wed, 8 Jan 2025 15:26:51 +0000
Subject: [PATCH 100/224] system/calico-cni: run helm dep up
---
 system/calico-cni/Chart.lock | 6 +++---
 system/calico-cni/Chart.yaml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/system/calico-cni/Chart.lock b/system/calico-cni/Chart.lock
index deb0ea3b28b..c23752c5297 100644
--- a/system/calico-cni/Chart.lock
+++ b/system/calico-cni/Chart.lock
@@ -10,6 +10,6 @@ dependencies:
   version: 0.0.3
 - name: cni-nanny
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.0.4
+  version: 0.0.5
-digest: sha256:3f8334dcebcb3d7368f2eb6e4cb45689f0ac95de9ea08af647f9108799e853e4
-generated: "2024-11-27T11:00:37.742852927Z"
+digest: sha256:99f5364304046b9507639ac9852000d63640bc2786536039ae9ad4d6cec66425
+generated: "2025-01-08T15:26:49.534478811Z"
diff --git a/system/calico-cni/Chart.yaml b/system/calico-cni/Chart.yaml
index 8de0dab4447..7066a75099d 100644
--- a/system/calico-cni/Chart.yaml
+++ b/system/calico-cni/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: calico-cni
 description: A Helm chart for the all things CNI.
 type: application
-version: 1.0.13
+version: 1.0.14
 dependencies:
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm

From 2f0aea7008ad6e2813aa8ce0388d702e90d7dca1 Mon Sep 17 00:00:00 2001
From: Dmitry Galkin
Date: Thu, 9 Jan 2025 13:15:35 +0100
Subject: [PATCH 101/224] [designate] set dbType to mariadb by default
---
 openstack/designate/ci/test-values.yaml | 2 ++
 openstack/designate/values.yaml         | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/openstack/designate/ci/test-values.yaml b/openstack/designate/ci/test-values.yaml
index b3223e178bd..a6419a0bba2 100644
--- a/openstack/designate/ci/test-values.yaml
+++ b/openstack/designate/ci/test-values.yaml
@@ -20,6 +20,8 @@ image_version_designate: v1
 image_version_designate_tempest: v1
 
 db_name: designate
+dbType: "mariadb"
+
 max_pool_size: 100
 max_overflow: 100
 pool_id: testPoolUUID
diff --git a/openstack/designate/values.yaml b/openstack/designate/values.yaml
index 3b4b6eaf9e8..dd35c7c8cc5 100644
--- a/openstack/designate/values.yaml
+++ b/openstack/designate/values.yaml
@@ -9,6 +9,8 @@ global_setup: false
 tempest_enabled: false
 nanny_enabled: false
 
+dbType: "mariadb"
+
 sentry:
   enabled: true

From 826c374dc98e902bd2b68e70e37db0c7d73eb2ed Mon Sep 17 00:00:00 2001
From: Dmitry Galkin
Date: Thu, 9 Jan 2025 15:55:22 +0100
Subject: [PATCH 102/224] [unbound] take hec secrets from k8s secret - to
 support secrets-injector
---
 system/unbound/templates/deployment.yaml  | 17 +++++++++++++++--
 system/unbound/templates/hec-secrets.yaml | 10 ++++++++++
 2 files changed, 25 insertions(+), 2 deletions(-)
 create mode 100644 system/unbound/templates/hec-secrets.yaml

diff --git a/system/unbound/templates/deployment.yaml b/system/unbound/templates/deployment.yaml
index 38dd3abb960..7217c609fed 100644
--- a/system/unbound/templates/deployment.yaml
+++ b/system/unbound/templates/deployment.yaml
@@ -102,9 +102,9 @@ spec:
 {{- if .Values.unbound.dnstap.hec_splunk_url }}
 {{- if .Values.unbound.dnstap.hec_splunk_token }}
         - -H
-        - {{ .Values.unbound.dnstap.hec_splunk_url }}
+        - "$(HEC_SPLUNK_URL)"
         - -token
-        - {{ .Values.unbound.dnstap.hec_splunk_token }}
+        - "$(HEC_SPLUNK_TOKEN)"
 {{- if .Values.unbound.dnstap.hec_splunk_server_uuid }}
         - -server_uuid
         - {{ .Values.unbound.dnstap.hec_splunk_server_uuid }}
@@ -113,6 +113,19 @@ spec:
 {{- end }}
 {{- if .Values.unbound.dnstap.additional_cmdline_args }}
{{ toYaml .Values.unbound.dnstap.additional_cmdline_args | indent 10 }}
+{{- end }}
+{{- if and $.Values.unbound.dnstap.hec_splunk_url $.Values.unbound.dnstap.hec_splunk_token }}
+        env:
+        - name: HEC_SPLUNK_URL
+          valueFrom:
+            secretKeyRef:
+              name: {{.Release.Name}}-hec-secrets
+              key: hec_splunk_url
+        - name: HEC_SPLUNK_TOKEN
+          valueFrom:
+            secretKeyRef:
+              name: {{.Release.Name}}-hec-secrets
+              key: hec_splunk_token
 {{- end }}
         volumeMounts:
         - name: dnstap-socket
diff --git a/system/unbound/templates/hec-secrets.yaml b/system/unbound/templates/hec-secrets.yaml
new file mode 100644
index 00000000000..aaef94b04f3
--- /dev/null
+++ b/system/unbound/templates/hec-secrets.yaml
@@ -0,0 +1,10 @@
+{{- if and $.Values.unbound.dnstap.hec_splunk_url $.Values.unbound.dnstap.hec_splunk_token }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{.Release.Name}}-hec-secrets
+type: Opaque
+data:
+  hec_splunk_url: {{ .Values.unbound.dnstap.hec_splunk_url | b64enc }}
+  hec_splunk_token: {{ .Values.unbound.dnstap.hec_splunk_token | b64enc }}
+{{- end }}
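For context on the unbound change above: Kubernetes expands `$(NAME)` references in `command` and `args` from the environment variables declared on the same container, so moving the Splunk HEC URL and token into Secret-backed `env` entries keeps them out of the rendered args while the process still receives the literal values. A reduced, self-contained sketch of the mechanism (all names hypothetical, not taken from this chart):

    apiVersion: v1
    kind: Pod
    metadata:
      name: dnstap-forwarder-example
    spec:
      containers:
        - name: forwarder
          image: example.org/forwarder:latest  # hypothetical image
          # $(HEC_URL) and $(HEC_TOKEN) are substituted from env at container start
          args: ["-H", "$(HEC_URL)", "-token", "$(HEC_TOKEN)"]
          env:
            - name: HEC_URL
              valueFrom:
                secretKeyRef:
                  name: hec-secrets
                  key: hec_splunk_url
            - name: HEC_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hec-secrets
                  key: hec_splunk_token

One property worth knowing: a `$(VAR)` that cannot be resolved is passed through literally, so a typo in the variable name surfaces as a malformed flag value rather than an empty string.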
From 6c5d43847cdb90bd05f71bd4da53ab89c1abc2ed Mon Sep 17 00:00:00 2001
From: Walter Boring IV
Date: Thu, 9 Jan 2025 15:21:55 -0500
Subject: [PATCH 103/224] [cinder] Change default name of storage_profile

This patch updates the default name of the FCD storage profile from
vmware-fcd to cinder-fcd.
---
 .../cinder/templates/vcenter_datacenter_cinder_secret.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/openstack/cinder/templates/vcenter_datacenter_cinder_secret.yaml b/openstack/cinder/templates/vcenter_datacenter_cinder_secret.yaml
index 35de84861d6..0aec8695091 100644
--- a/openstack/cinder/templates/vcenter_datacenter_cinder_secret.yaml
+++ b/openstack/cinder/templates/vcenter_datacenter_cinder_secret.yaml
@@ -62,7 +62,7 @@ template: |
     [vmware_fcd]
     volume_driver = cinder.volume.drivers.vmware.fcd.VMwareVStorageObjectDriver
     volume_backend_name = vmware_fcd
-    vmware_storage_profile: vmware-fcd
+    vmware_storage_profile: cinder-fcd
     extra_capabilities = '{"vcenter-shard": "{= host.split(".")[0] =}", "quality_type": "premium"}'
     {% endfilter -%}
 {{- end }}

From ce390c5afd2d6156a4d46d5e408a5594749787a7 Mon Sep 17 00:00:00 2001
From: d032408
Date: Fri, 10 Jan 2025 10:08:18 +0100
Subject: [PATCH 104/224] [redfish-exporter] no longer distinguish between
 vPODs and bPODs
---
 .../templates/deployment.yaml                  | 12 -------
 .../templates/scrapeconfig-bb.yaml             |  2 +-
 .../templates/scrapeconfig-bm.yaml             | 34 -------------------
 .../redfish-exporter/templates/secrets.yaml    |  4 ---
 .../redfish-exporter/values.yaml               |  2 --
 5 files changed, 1 insertion(+), 53 deletions(-)
 delete mode 100644 prometheus-exporters/redfish-exporter/templates/scrapeconfig-bm.yaml

diff --git a/prometheus-exporters/redfish-exporter/templates/deployment.yaml b/prometheus-exporters/redfish-exporter/templates/deployment.yaml
index 59ddf680fc4..9c7bba9f41e 100644
--- a/prometheus-exporters/redfish-exporter/templates/deployment.yaml
+++ b/prometheus-exporters/redfish-exporter/templates/deployment.yaml
@@ -45,18 +45,6 @@ spec:
             value: "{{ required "listen_port needs to be defined in resfish_exporter values" $values.listen_port }}"
          - name: TIMEOUT
            value: "5"
-          {{- if $values.users.redfish_bm_username }}
-          - name: REDFISH_BM_USERNAME
-            valueFrom:
-              secretKeyRef:
-                name: {{ include "fullName" . }}
-                key: redfish_bm_username
-          - name: REDFISH_BM_PASSWORD
-            valueFrom:
-              secretKeyRef:
-                name: {{ include "fullName" . }}
-                key: redfish_bm_password
-          {{- end }}
          {{- if $values.users.redfish_cp_username }}
          - name: REDFISH_CP_USERNAME
            valueFrom:
diff --git a/prometheus-exporters/redfish-exporter/templates/scrapeconfig-bb.yaml b/prometheus-exporters/redfish-exporter/templates/scrapeconfig-bb.yaml
index 6f6a735e6d9..2ba4953f07e 100644
--- a/prometheus-exporters/redfish-exporter/templates/scrapeconfig-bb.yaml
+++ b/prometheus-exporters/redfish-exporter/templates/scrapeconfig-bb.yaml
@@ -14,7 +14,7 @@ spec:
   scrapeInterval: {{$values.scrapeInterval}}
   scrapeTimeout: {{$values.scrapeTimeout}}
   httpSDConfigs:
-    - url: {{ $values.httpSDConfigs.netbox_production_url }}/devices/?custom_labels=job=redfish-bb&target=mgmt_only&status=active&role=server&tenant=converged-cloud&platform=vmware-esxi&tag__n=no-redfish&region={{ .Values.global.region }}
+    - url: {{ $values.httpSDConfigs.netbox_production_url }}/devices/?custom_labels=job=redfish-bb&target=mgmt_only&status=active&role=server&tenant=converged-cloud&tag__n=no-redfish&region={{ .Values.global.region }}
       refreshInterval: {{ $values.httpSDConfigs.refreshInterval }}
  metricsPath: /health
  params:
diff --git a/prometheus-exporters/redfish-exporter/templates/scrapeconfig-bm.yaml b/prometheus-exporters/redfish-exporter/templates/scrapeconfig-bm.yaml
deleted file mode 100644
index 452fa645cd6..00000000000
--- a/prometheus-exporters/redfish-exporter/templates/scrapeconfig-bm.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-{{- $values := .Values.redfish_exporter -}}
-{{- if $values.enabled }}
-apiVersion: monitoring.coreos.com/v1alpha1
-kind: ScrapeConfig
-
-metadata:
-  name: 'redfish-bm'
-  namespace: {{ $values.namespace }}
-  labels:
-    prometheus: {{ required "$values.prometheus missing" $values.prometheus }}
-    app.kubernetes.io/name: 'redfish-bm'
-
-spec:
-  scrapeInterval: {{$values.scrapeInterval}}
-  scrapeTimeout: {{$values.scrapeTimeout}}
-  httpSDConfigs:
-    - url: {{ $values.httpSDConfigs.netbox_production_url }}/devices/?custom_labels=job=redfish-bm&target=mgmt_only&status=active&role=server&tenant=converged-cloud&platform=ironic&tag__n=no-redfish&region={{ .Values.global.region }}
-      refreshInterval: {{ $values.httpSDConfigs.refreshInterval }}
-  metricsPath: /health
-  params:
-    job: [redfish-bm]
-  relabelings:
-    - sourceLabels: [job]
-      regex: redfish-bm
-      action: keep
-    - sourceLabels: [__address__]
-      targetLabel: __param_target
-    - sourceLabels: [__param_target]
-      targetLabel: instance
-    - targetLabel: __address__
-      replacement: redfish-exporter:{{$values.listen_port}}
-    - regex: 'device_type|cluster.*|role|platform|status'
-      action: labeldrop
-{{- end }}
diff --git a/prometheus-exporters/redfish-exporter/templates/secrets.yaml b/prometheus-exporters/redfish-exporter/templates/secrets.yaml
index cbb2507f862..79d967c14a2 100644
--- a/prometheus-exporters/redfish-exporter/templates/secrets.yaml
+++ b/prometheus-exporters/redfish-exporter/templates/secrets.yaml
@@ -10,10 +10,6 @@ metadata:
     app: {{ include "fullName" . }}
 type: exporter
 data:
-  {{- if $values.users.redfish_bm_username }}
-  redfish_bm_username: {{ required "redfish_bm_username needs to be defined in users values" $values.users.redfish_bm_username | b64enc | quote }}
-  redfish_bm_password: {{ required "redfish_bm_username needs to be defined in users values" $values.users.redfish_bm_password | b64enc | quote }}
-  {{- end }}
  {{- if $values.users.redfish_cp_username }}
  redfish_cp_username: {{ required "redfish_cp_username needs to be defined in users values" $values.users.redfish_cp_username | b64enc | quote }}
  redfish_cp_password: {{ required "redfish_cp_password needs to be defined in users values" $values.users.redfish_cp_password | b64enc | quote }}
diff --git a/prometheus-exporters/redfish-exporter/values.yaml b/prometheus-exporters/redfish-exporter/values.yaml
index b1b3a1880f9..83e121aef26 100644
--- a/prometheus-exporters/redfish-exporter/values.yaml
+++ b/prometheus-exporters/redfish-exporter/values.yaml
@@ -55,8 +55,6 @@ redfish_exporter:
   # redfish users
   redfish_bb_username: DEFINED-IN-REGION-SECRETS
   redfish_bb_password: DEFINED-IN-REGION-SECRETS
-  redfish_bm_username: DEFINED-IN-REGION-SECRETS
-  redfish_bm_password: DEFINED-IN-REGION-SECRETS
   redfish_cp_username: DEFINED-IN-REGION-SECRETS
   redfish_cp_password: DEFINED-IN-REGION-SECRETS
   redfish_fw_username: DEFINED-IN-REGION-SECRETS

From b19b82019926ed69f7fb646b3ac673f594fc05ff Mon Sep 17 00:00:00 2001
From: Stefan Majewsky
Date: Fri, 10 Jan 2025 10:58:44 +0100
Subject: [PATCH 105/224] limes: split OpenstackLimesMismatchProjectQuota
 alert by service type

This will allow silencing this alert for specific services only, in
case of known issues.

Also, wrap this alert and the overspent-quota alert in max() to avoid
alert flapping because of pod restarts.
---
 openstack/limes/alerts/openstack/api.alerts |  6 +++---
 openstack/limes/values.yaml                 | 10 ++++++++--
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/openstack/limes/alerts/openstack/api.alerts b/openstack/limes/alerts/openstack/api.alerts
index 6275a89ff54..55462e33273 100644
--- a/openstack/limes/alerts/openstack/api.alerts
+++ b/openstack/limes/alerts/openstack/api.alerts
@@ -247,7 +247,7 @@ groups:
         limes-collect pods.

    - alert: OpenstackLimesMismatchProjectQuota
-      expr: limes_mismatch_project_quota_count > 0
+      expr: max by (service_name) (limes_mismatch_project_quota_count > 0)
      for: 60m # every 30m, limes-collect scrapes quota/usage on each project service and at the same time tries to rectify this error; give it 1-2 chances before alerting
      labels:
        severity: info
@@ -256,13 +256,13 @@ groups:
        service: limes
      annotations:
        summary: Mismatched Project Quota
-        description: Limes detected that the quota of some resource(s) in some project differ from the backend quota for that resource and project.
+        description: Limes detected that the quota of some {{ $labels.service_name }} resource(s) in some project differ from the backend quota for that resource and project.
          This may happen when Limes is unable to write a changed quota value into the backend, for example because of a service downtime.
          More details can be found in .

    - alert: OpenstackLimesOverspentProjectQuota
-      expr: limes_overspent_project_quota_count > 0
+      expr: max(limes_overspent_project_quota_count > 0)
      for: 60m # every 30m, limes-collect scrapes quota/usage on each project service; give it 1-2 chances to observe a consistent usage value before alerting
      labels:
        severity: info
diff --git a/openstack/limes/values.yaml b/openstack/limes/values.yaml
index 97b3479bfdd..ab22072d252 100644
--- a/openstack/limes/values.yaml
+++ b/openstack/limes/values.yaml
@@ -157,9 +157,15 @@ pgmetrics:

  limes_mismatch_project_quota:
    query: >
-      SELECT COUNT(*) as count FROM project_resources
-      WHERE backend_quota != quota
+      SELECT ps.type AS service_name, COUNT(*) as count
+      FROM project_resources pr
+      JOIN project_services ps ON ps.id = pr.service_id
+      WHERE pr.backend_quota != pr.quota
+      GROUP BY ps.type
    metrics:
+      - service_name:
+          usage: "LABEL"
+          description: "Service type"
      - count:
          usage: "GAUGE"
          description: "Total number of project resources that have mismatched quota"
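The max() wrapping in the patch above addresses a subtle property of per-pod gauges: around a pod restart, the old and the new limes-collect pod can briefly export the same series side by side (or hand over with a short gap), and an unaggregated `> 0` comparison then produces alert instances that resolve and re-fire on label churn alone. Aggregating with `max by (service_name) (...)` collapses all pod-level series into one stable series per service. A generic sketch of the pattern, with a hypothetical metric name:

    - alert: QueueBacklogStuck
      # max() folds duplicate per-pod series into a single series per queue,
      # so pod restarts neither resolve nor re-fire the alert
      expr: max by (queue) (demo_queue_backlog_count > 0)
      for: 60m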
From c59537cd7054adff2087f59b468dae1e80539cb9 Mon Sep 17 00:00:00 2001
From: Stefan Majewsky
Date: Fri, 10 Jan 2025 11:19:29 +0100
Subject: [PATCH 106/224] gatekeeper: bump to 3.18.2
---
 system/gatekeeper/Chart.lock                            |  6 +++---
 system/gatekeeper/Chart.yaml                            |  2 +-
 .../gatekeeper/vendor/gatekeeper-upstream/Chart.yaml    |  4 ++--
 system/gatekeeper/vendor/gatekeeper-upstream/README.md  | 10 +++++-----
 .../gatekeeper/vendor/gatekeeper-upstream/values.yaml   | 10 +++++-----
 5 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/system/gatekeeper/Chart.lock b/system/gatekeeper/Chart.lock
index 55f29d24b18..93c09dbb907 100644
--- a/system/gatekeeper/Chart.lock
+++ b/system/gatekeeper/Chart.lock
@@ -1,12 +1,12 @@
 dependencies:
 - name: gatekeeper
   repository: file://vendor/gatekeeper-upstream
-  version: 3.18.1
+  version: 3.18.2
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.1.0
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:cfb3f5d55e060482fa357c191d0c364a4b87ced83b5b80bac7384f325a4e603e
-generated: "2025-01-06T12:55:12.614113542+01:00"
+digest: sha256:a849a05008b9dcad20763828987ca1892ab429d6641723818ef734784ddd5d55
+generated: "2025-01-10T11:19:20.252925838+01:00"
diff --git a/system/gatekeeper/Chart.yaml b/system/gatekeeper/Chart.yaml
index fe69d678efe..f61ed5ea20e 100644
--- a/system/gatekeeper/Chart.yaml
+++ b/system/gatekeeper/Chart.yaml
@@ -7,7 +7,7 @@ version: 1.0.0 # please leave like this; this does not use Chartmuseum
 dependencies:
   - name: gatekeeper
     alias: gatekeeper-upstream
-    version: 3.18.1
+    version: 3.18.2
     # repository: https://open-policy-agent.github.io/gatekeeper/charts
    repository: file://vendor/gatekeeper-upstream
    # ^ We have to vendor to apply custom patches for linkerd support.
diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml
index 33e59a9ea87..0700f23f4ca 100644
--- a/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml
+++ b/system/gatekeeper/vendor/gatekeeper-upstream/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: v3.18.1
+appVersion: v3.18.2
 description: A Helm chart for Gatekeeper
 home: https://github.com/open-policy-agent/gatekeeper
 icon: https://open-policy-agent.github.io/gatekeeper/website/img/logo.svg
@@ -8,4 +8,4 @@ keywords:
 name: gatekeeper
 sources:
   - https://github.com/open-policy-agent/gatekeeper.git
-version: 3.18.1
+version: 3.18.2
diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/README.md b/system/gatekeeper/vendor/gatekeeper-upstream/README.md
index ed144406f12..23c1f778eb1 100644
--- a/system/gatekeeper/vendor/gatekeeper-upstream/README.md
+++ b/system/gatekeeper/vendor/gatekeeper-upstream/README.md
@@ -74,7 +74,7 @@ information._
 | postInstall.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post install hooks | `[]` |
 | postInstall.labelNamespace.extraAnnotations | Extra annotations added to the post install Job | `{}` |
 | postInstall.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` |
-| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.18.1` |
+| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.18.2` |
 | postInstall.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` |
 | postInstall.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` |
 | postInstall.labelNamespace.extraRules | Extra rules for the gatekeeper-update-namespace-label Role | `[]` |
@@ -97,7 +97,7 @@ information._
 | postUpgrade.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post upgrade hooks | `[]` |
 | postUpgrade.labelNamespace.extraAnnotations | Extra annotations added to the post upgrade Job | `{}` |
 | postUpgrade.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` |
-| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.18.1` |
+| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.18.2` |
 | postUpgrade.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` |
 | postUpgrade.labelNamespace.priorityClassName | Priority class name for gatekeeper-update-namespace-label-post-upgrade Job | `` |
@@ -107,10 +107,10 @@ information._
 | postUpgrade.resources | The resource request/limits for the container image in postUpgrade hook jobs | `{}` |
 | postUpgrade.securityContext | Security context applied on the container | `{ "allowPrivilegeEscalation": false, "capabilities": "drop": [all], "readOnlyRootFilesystem": true, "runAsGroup": 999, "runAsNonRoot": true, "runAsUser": 1000 }` |
 | preInstall.crdRepository.image.repository | Image with kubectl to update the CRDs. If not set, the `image.crdRepository` is used instead. | `null` |
-| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.18.1` |
+| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.18.2` |
 | preUninstall.deleteWebhookConfigurations.enabled | Delete webhooks before gatekeeper itself is uninstalled | `false` |
 | preUninstall.deleteWebhookConfigurations.image.repository | Image with kubectl to delete the webhooks | `openpolicyagent/gatekeeper-crds` |
-| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.18.1` |
+| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.18.2` |
 | preUninstall.deleteWebhookConfigurations.image.pullPolicy | Image pullPolicy | `IfNotPresent` |
 | preUninstall.deleteWebhookConfigurations.image.pullSecrets | Image pullSecrets | `[]` |
 | preUninstall.deleteWebhookConfigurations.extraRules | Extra rules for the gatekeeper-delete-webhook-configs Role | `[]` |
@@ -178,7 +178,7 @@ information._
 | logLevel | Minimum log level | `INFO` |
 | image.pullPolicy | The image pull policy | `IfNotPresent` |
 | image.repository | Image repository | `openpolicyagent/gatekeeper` |
-| image.release | The image release tag to use | Current release version: `v3.18.1` |
+| image.release | The image release tag to use | Current release version: `v3.18.2` |
 | image.pullSecrets | Specify an array of imagePullSecrets | `[]` |
 | resources | The resource request/limits for the container image | limits: 1 CPU, 512Mi, requests: 100mCPU, 256Mi |
 | nodeSelector | The node selector to use for pod scheduling | `kubernetes.io/os: linux` |
diff --git a/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml b/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml
index c0d8df4cb74..bc77eb6284b 100644
--- a/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml
+++ b/system/gatekeeper/vendor/gatekeeper-upstream/values.yaml
@@ -47,14 +47,14 @@ enableK8sNativeValidation: true
 image:
   repository: openpolicyagent/gatekeeper
   crdRepository: openpolicyagent/gatekeeper-crds
-  release: v3.18.1
+  release: v3.18.2
   pullPolicy: IfNotPresent
   pullSecrets: []
 preInstall:
   crdRepository:
     image:
       repository: null
-      tag: v3.18.1
+      tag: v3.18.2
 postUpgrade:
   labelNamespace:
     serviceAccount:
@@ -63,7 +63,7 @@ postUpgrade:
     enabled: false
     image:
       repository: openpolicyagent/gatekeeper-crds
-      tag: v3.18.1
+      tag: v3.18.2
       pullPolicy: IfNotPresent
       pullSecrets: []
     extraNamespaces: []
@@ -97,7 +97,7 @@ postInstall:
     extraRules: []
     image:
      repository: openpolicyagent/gatekeeper-crds
-      tag: v3.18.1
+      tag: v3.18.2
      pullPolicy: IfNotPresent
      pullSecrets: []
    extraNamespaces: []
@@ -141,7 +141,7 @@ preUninstall:
    enabled: false
    image:
      repository: openpolicyagent/gatekeeper-crds
-      tag: v3.18.1
+      tag: v3.18.2
      pullPolicy: IfNotPresent
      pullSecrets: []
  priorityClassName: ""

From cee4054e8cf7529d96814f521f67df67c10cfe1e Mon Sep 17 00:00:00 2001
From: Dmitri Fedotov
Date: Fri, 10 Jan 2025 12:46:40 +0200
Subject: [PATCH 107/224] [calico] use ghcr for exporter image
---
 system/calico/Chart.yaml                           | 2 +-
 system/calico/templates/daemonset-calico-node.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/system/calico/Chart.yaml b/system/calico/Chart.yaml
index 39df949ff5a..b90a66aac52 100644
--- a/system/calico/Chart.yaml
+++ b/system/calico/Chart.yaml
@@ -2,4 +2,4 @@ apiVersion: v2
 name: calico
 description: A Helm chart for the Calico.
 type: application
-version: 1.0.12
+version: 1.1.0
diff --git a/system/calico/templates/daemonset-calico-node.yaml b/system/calico/templates/daemonset-calico-node.yaml
index 7bc11e957b4..a67200ad183 100644
--- a/system/calico/templates/daemonset-calico-node.yaml
+++ b/system/calico/templates/daemonset-calico-node.yaml
@@ -329,7 +329,7 @@ spec:
           readOnly: true
{{- if .Values.config.monitoring.enabled }}
      - name: exporter
-        image: "{{ required ".Values.global.registryAlternateRegion is missing" $.Values.global.registryAlternateRegion }}/{{ $.Values.images.exporter.image }}:{{ $.Values.images.exporter.tag }}"
+        image: "{{ required ".Values.global.ghcrIoMirrorAlternateRegion is missing" $.Values.global.ghcrIoMirrorAlternateRegion }}/{{ $.Values.images.exporter.image }}:{{ $.Values.images.exporter.tag }}"
        args:
        - -format.new=true
        - -bird.socket=/var/run/calico/bird.ctl

From 956f679049ba2233874b6e8ea62f3232ac8d0109 Mon Sep 17 00:00:00 2001
From: sapcc-bot
Date: Fri, 10 Jan 2025 10:52:22 +0000
Subject: [PATCH 108/224] system/calico-cni: run helm dep up
---
 system/calico-cni/Chart.lock | 6 +++---
 system/calico-cni/Chart.yaml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/system/calico-cni/Chart.lock b/system/calico-cni/Chart.lock
index c23752c5297..4d73c26a331 100644
--- a/system/calico-cni/Chart.lock
+++ b/system/calico-cni/Chart.lock
@@ -4,12 +4,12 @@ dependencies:
   version: 1.0.0
 - name: calico
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 1.0.12
+  version: 1.1.0
 - name: calico-apiserver
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.0.3
 - name: cni-nanny
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.0.5
-digest: sha256:99f5364304046b9507639ac9852000d63640bc2786536039ae9ad4d6cec66425
-generated: "2025-01-08T15:26:49.534478811Z"
+digest: sha256:aaa7823009c9d8724cc64272508296e68ca4834fdf5cd3e5dbfebbb341340475
+generated: "2025-01-10T10:52:19.095444075Z"
diff --git a/system/calico-cni/Chart.yaml b/system/calico-cni/Chart.yaml
index 7066a75099d..297ebbe826b 100644
--- a/system/calico-cni/Chart.yaml
+++ b/system/calico-cni/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: calico-cni
 description: A Helm chart for the all things CNI.
 type: application
-version: 1.0.14
+version: 1.0.15
 dependencies:
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm

From a9a762d6a8873345c0c641200787f91262bb2aca Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Fri, 10 Jan 2025 12:26:12 +0200
Subject: [PATCH 109/224] [pxc-operator] Remove prometheus.io/port annotation
 from operator pods

Remove prometheus.io/port annotation from operator pods to avoid
duplicate scraping.

We have multiple containers in pxc-operator pods and the metrics port
is already named `metrics`, so this annotation is not needed.
---
 system/percona-xtradb-cluster-operator/Chart.yaml  | 2 +-
 system/percona-xtradb-cluster-operator/values.yaml | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/system/percona-xtradb-cluster-operator/Chart.yaml b/system/percona-xtradb-cluster-operator/Chart.yaml
index f8dab249a84..89038918320 100644
--- a/system/percona-xtradb-cluster-operator/Chart.yaml
+++ b/system/percona-xtradb-cluster-operator/Chart.yaml
@@ -4,7 +4,7 @@ name: percona-xtradb-cluster-operator
 description: A Helm chart to install Percona XtraDB Cluster Operator.
 home: "https://github.com/sapcc/helm-charts/tree/master/system/percona-xtradb-cluster-operator"
 type: application
-version: 0.3.2
+version: 0.3.3
 appVersion: "1.16.1"
 maintainers:
   - name: Birk Bohne
diff --git a/system/percona-xtradb-cluster-operator/values.yaml b/system/percona-xtradb-cluster-operator/values.yaml
index 55683630415..f9690c64f19 100644
--- a/system/percona-xtradb-cluster-operator/values.yaml
+++ b/system/percona-xtradb-cluster-operator/values.yaml
@@ -73,7 +73,6 @@ pxc-operator:
   podAnnotations:
     prometheus.io/scrape: "true"
-    prometheus.io/port: "8080"
     prometheus.io/targets: "openstack"
     linkerd.io/inject: enabled
From 70d24f8fc2b14c3002f98bb2ddbe76275adcb548 Mon Sep 17 00:00:00 2001
From: Stefan Majewsky
Date: Fri, 10 Jan 2025 11:58:22 +0100
Subject: [PATCH 110/224] all srs: bump Alpine to 3.21 in sidecar containers
---
 common/postgresql-ng/Chart.yaml                             | 2 +-
 common/postgresql-ng/templates/deployment.yaml              | 4 ++--
 common/postgresql-ng/templates/job.yaml                     | 2 +-
 openstack/content-repo/templates/deployment-check.yaml      | 2 +-
 openstack/keppel/templates/daemonset-keep-image-pulled.yaml | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/common/postgresql-ng/Chart.yaml b/common/postgresql-ng/Chart.yaml
index 05cc914ef44..5c4a29e1363 100644
--- a/common/postgresql-ng/Chart.yaml
+++ b/common/postgresql-ng/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
 name: postgresql-ng
-version: 1.2.5 # this version number is SemVer as it gets used to auto bump
+version: 1.2.6 # this version number is SemVer as it gets used to auto bump
 description: Chart for PostgreSQL
 keywords:
   - postgresql
diff --git a/common/postgresql-ng/templates/deployment.yaml b/common/postgresql-ng/templates/deployment.yaml
index fa74f7ae578..38b1b8aba22 100644
--- a/common/postgresql-ng/templates/deployment.yaml
+++ b/common/postgresql-ng/templates/deployment.yaml
@@ -44,7 +44,7 @@ spec:
       initContainers:
         - name: generate-secrets
-          image: "{{ include "preferredRegistry" . }}/shared-app-images/alpine-kubectl:3.20-latest"
+          image: "{{ include "preferredRegistry" . }}/shared-app-images/alpine-kubectl:3.21-latest"
          imagePullPolicy: "Always"
          env:
            - name: DEBUG
@@ -134,7 +134,7 @@ spec:
{{- end }}
{{- if .Values.crontab }}
        - name: cron
-          image: "{{ include "preferredRegistry" . }}/shared-app-images/alpine-psql:3.20-latest"
+          image: "{{ include "preferredRegistry" . }}/shared-app-images/alpine-psql:3.21-latest"
          imagePullPolicy: "Always"
          command:
            - /bin/sh
diff --git a/common/postgresql-ng/templates/job.yaml b/common/postgresql-ng/templates/job.yaml
index 7331bbbef85..0615f18c5a2 100644
--- a/common/postgresql-ng/templates/job.yaml
+++ b/common/postgresql-ng/templates/job.yaml
@@ -14,7 +14,7 @@ spec:
    spec:
      containers:
        - name: delete-secrets
-          image: "{{ include "preferredRegistry" . }}/shared-app-images/alpine-kubectl:3.20-latest"
+          image: "{{ include "preferredRegistry" . }}/shared-app-images/alpine-kubectl:3.21-latest"
          imagePullPolicy: "Always"
          env:
            - name: DEBUG
diff --git a/openstack/content-repo/templates/deployment-check.yaml b/openstack/content-repo/templates/deployment-check.yaml
index 5d1080edf13..b06d25dfd7f 100644
--- a/openstack/content-repo/templates/deployment-check.yaml
+++ b/openstack/content-repo/templates/deployment-check.yaml
@@ -37,7 +37,7 @@ spec:
      containers:
{{- range $release, $config := .Values.rhn_entitlement_checks }}
        - name: check-{{ $release }}
-          image: {{$.Values.global.registry}}/shared-app-images/alpine-curl:3.20-latest
+          image: {{$.Values.global.registry}}/shared-app-images/alpine-curl:3.21-latest
          imagePullPolicy: Always
          args:
            - /bin/sh
diff --git a/openstack/keppel/templates/daemonset-keep-image-pulled.yaml b/openstack/keppel/templates/daemonset-keep-image-pulled.yaml
index ebee20c381a..6d05d089e3e 100644
--- a/openstack/keppel/templates/daemonset-keep-image-pulled.yaml
+++ b/openstack/keppel/templates/daemonset-keep-image-pulled.yaml
@@ -44,7 +44,7 @@ spec:
          imagePullPolicy: IfNotPresent
{{ include "tmplKeepImagePulled" . }}
        - name: postgres-kubectl
-          image: "{{ .Values.global.registryAlternateRegion }}/shared-app-images/alpine-kubectl:3.20-latest"
+          image: "{{ .Values.global.registryAlternateRegion }}/shared-app-images/alpine-kubectl:3.21-latest"
          imagePullPolicy: Always
{{ include "tmplKeepImagePulled" . }}
        - name: valkey # NOTE: redis-metrics is a separate pod, so we don't need to care about it

From b9f3d700f9ff42139eff6db949d587d2bb9b6408 Mon Sep 17 00:00:00 2001
From: sapcc-bot
Date: Fri, 10 Jan 2025 11:59:59 +0000
Subject: [PATCH 111/224] system/tenso: run helm dep up
---
 system/tenso/Chart.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/system/tenso/Chart.lock b/system/tenso/Chart.lock
index 541a4a5feda..130a2403ea3 100644
--- a/system/tenso/Chart.lock
+++ b/system/tenso/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
   version: 1.1.0
 - name: postgresql-ng
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 1.2.5
+  version: 1.2.6
 - name: pgbackup
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.1.8
@@ -14,5 +14,5 @@ dependencies:
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:96318095377c7d994996fbbaa30b16a4090ce9e944e8c6ff327ce5da322122fa
-generated: "2024-12-16T15:30:25.308926598Z"
+digest: sha256:2de2bf8c9688aed0840a2707b48ca5078a8235d7b46103b3be168fb590ac0c18
+generated: "2025-01-10T11:59:57.562887964Z"

From c7b3561096e2f90027c56bf02aba5b4210c31368 Mon Sep 17 00:00:00 2001
From: sapcc-bot
Date: Fri, 10 Jan 2025 11:59:57 +0000
Subject: [PATCH 112/224] openstack/keppel: run helm dep up
---
 openstack/keppel/Chart.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/openstack/keppel/Chart.lock b/openstack/keppel/Chart.lock
index d06447e7a88..319c7578881 100644
--- a/openstack/keppel/Chart.lock
+++ b/openstack/keppel/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
   version: 1.1.0
 - name: postgresql-ng
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 1.2.5
+  version: 1.2.6
 - name: pgbackup
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.1.8
@@ -17,5 +17,5 @@ dependencies:
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:ebe2602036e91a5afafc3975f221327a95335c7d9c4560d33f4999ee711404ea
-generated: "2024-12-16T15:30:20.958502182Z"
+digest: sha256:661b55d18450bb4e98f7edcbb7de93fb05b36e484fd0f2396f83d0f0c467622b
+generated: "2025-01-10T11:59:52.297447008Z"

From 2ddae9efa9bc1450870e258c4e60edbb9c5d30ce Mon Sep 17 00:00:00 2001
From: sapcc-bot
Date: Fri, 10 Jan 2025 12:00:06 +0000
Subject: [PATCH 113/224] openstack/castellum: run helm dep up
---
 openstack/castellum/Chart.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/openstack/castellum/Chart.lock b/openstack/castellum/Chart.lock
index 35cb4efeb20..4ff3ed54b97 100644
--- a/openstack/castellum/Chart.lock
+++ b/openstack/castellum/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
   version: 1.1.0
 - name: postgresql-ng
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 1.2.5
+  version: 1.2.6
 - name: pgbackup
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.1.8
@@ -14,5 +14,5 @@ dependencies:
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:96318095377c7d994996fbbaa30b16a4090ce9e944e8c6ff327ce5da322122fa
-generated: "2024-12-16T15:30:35.073142016Z"
+digest: sha256:2de2bf8c9688aed0840a2707b48ca5078a8235d7b46103b3be168fb590ac0c18
+generated: "2025-01-10T12:00:04.580016387Z"

From 8963c7f78b688ff1e78fd496d36f483bf19fe57e Mon Sep 17 00:00:00 2001
From: sapcc-bot
Date: Fri, 10 Jan 2025 12:00:13 +0000
Subject: [PATCH 114/224] openstack/limes: run helm dep up
---
 openstack/limes/Chart.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/openstack/limes/Chart.lock b/openstack/limes/Chart.lock
index eefb20b13f7..35e66b4c4a4 100644
--- a/openstack/limes/Chart.lock
+++ b/openstack/limes/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
   version: 1.1.0
 - name: postgresql-ng
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 1.2.5
+  version: 1.2.6
 - name: pgbackup
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.1.8
@@ -14,5 +14,5 @@ dependencies:
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:96318095377c7d994996fbbaa30b16a4090ce9e944e8c6ff327ce5da322122fa
-generated: "2024-12-16T15:30:31.822417952Z"
+digest: sha256:2de2bf8c9688aed0840a2707b48ca5078a8235d7b46103b3be168fb590ac0c18
+generated: "2025-01-10T12:00:11.425122988Z"

From 00e81588ab36bc7fbd0defbb2aeb7ca7002c4c3d Mon Sep 17 00:00:00 2001
From: Stefan Majewsky
Date: Fri, 10 Jan 2025 14:29:19 +0100
Subject: [PATCH 115/224] limes: fix AbsentContainersLimesMismatchProjectQuotaCount
---
 openstack/limes/values.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/openstack/limes/values.yaml b/openstack/limes/values.yaml
index ab22072d252..e742a24dbf0 100644
--- a/openstack/limes/values.yaml
+++ b/openstack/limes/values.yaml
@@ -156,12 +156,14 @@ pgmetrics:
          description: "Total number of projects that are failing rate data scrape"

  limes_mismatch_project_quota:
+    # The UNION SELECT adds a dummy row to satisfy the absent-metrics-operator.
    query: >
      SELECT ps.type AS service_name, COUNT(*) as count
      FROM project_resources pr
      JOIN project_services ps ON ps.id = pr.service_id
      WHERE pr.backend_quota != pr.quota
      GROUP BY ps.type
+      UNION SELECT 'none' AS service_name, 0 AS count
    metrics:
      - service_name:
          usage: "LABEL"
          description: "Service type"
From a3bf0495047cad6609810b6f92f99583c55b57da Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Thu, 9 Jan 2025 16:04:33 +0200
Subject: [PATCH 116/224] [pxc-db] Add links to the operation alerts playbooks

* Add links to the alert playbooks
* Fix typos in chart comments
* Bump chart version
---
 common/pxc-db/Chart.yaml                       |  2 +-
 common/pxc-db/templates/_helpers.tpl           |  2 +-
 .../templates/alerts/_backup.alerts.tpl        |  8 ++--
 common/pxc-db/templates/alerts/_pxc.alerts.tpl | 46 +++++++++----------
 common/pxc-db/values.yaml                      |  4 +-
 5 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml
index c2fed60ead8..132f203249a 100644
--- a/common/pxc-db/Chart.yaml
+++ b/common/pxc-db/Chart.yaml
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.9
+version: 0.2.10
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
diff --git a/common/pxc-db/templates/_helpers.tpl b/common/pxc-db/templates/_helpers.tpl
index 3870e609019..f0130747cb8 100644
--- a/common/pxc-db/templates/_helpers.tpl
+++ b/common/pxc-db/templates/_helpers.tpl
@@ -67,7 +67,7 @@ name: {{ include "pxc-db.fullname" . }}
{{- end }}

{{/*
-owner-info lables
+Charts owner-info labels
*/}}
{{- define "pxc-db.ownerLabels" -}}
{{- if index .Values "owner-info" }}
diff --git a/common/pxc-db/templates/alerts/_backup.alerts.tpl b/common/pxc-db/templates/alerts/_backup.alerts.tpl
index a493da2de5f..e8a9f26cb2b 100644
--- a/common/pxc-db/templates/alerts/_backup.alerts.tpl
+++ b/common/pxc-db/templates/alerts/_backup.alerts.tpl
@@ -8,7 +8,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_state_alerts#GaleraClusterBackupNotSucceeded'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: "{{ include "pxc-db.fullname" . }} cluster backup is not succeeded."
@@ -22,7 +22,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_state_alerts#GaleraClusterBackupMissing'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: "{{ include "pxc-db.fullname" . }} cluster has no new full backups completed earlier than 36 hours ago."
@@ -37,7 +37,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_state_alerts#GaleraClusterBinlogProcessingTooOld'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: "More than 30 minutes passed since the last cluster {{ include "pxc-db.fullname" . }} binlog processing."
@@ -51,7 +51,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_state_alerts#GaleraClusterBinlogUploadTooOld'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: "More than 30 minutes passed since the last cluster {{ include "pxc-db.fullname" . }} binlog upload."
diff --git a/common/pxc-db/templates/alerts/_pxc.alerts.tpl b/common/pxc-db/templates/alerts/_pxc.alerts.tpl
index 996fb6f542b..83110a99863 100644
--- a/common/pxc-db/templates/alerts/_pxc.alerts.tpl
+++ b/common/pxc-db/templates/alerts/_pxc.alerts.tpl
@@ -4,11 +4,11 @@
      expr: (mysql_global_variables_max_connections{app=~"{{ include "pxc-db.fullname" . }}"} - mysql_global_status_threads_connected{app=~"{{ include "pxc-db.fullname" . }}"} < 200)
      for: 10m
      labels:
-        context: datbase
+        context: database
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_mysql_alerts#GaleraClusterDBTooManyConnections'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: {{ include "pxc-db.fullname" . }} has too many connections open. Please check the service containers.
@@ -22,7 +22,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: 'docs/support/playbook/database/MariaDBSlowQueries'
+        playbook: 'docs/support/playbook/database/db_pxc_mysql_alerts#GaleraClusterDBSlowQueries'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: {{ include "pxc-db.fullname" . }} has reported slow queries. Please check the DB.
@@ -34,9 +34,9 @@
      labels:
        context: database
        service: {{ include "pxc-db.alerts.service" . }}
-        severity: info
+        severity: warning
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_mysql_alerts#GaleraClusterDBWaitingForLock'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: {{ include "pxc-db.fullname" . }} has queries waiting for lock more than 20 sec. Deadlock possible.
@@ -50,39 +50,39 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: 'docs/support/playbook/manila/mariadb_high_running_threads'
+        playbook: 'docs/support/playbook/database/db_pxc_mysql_alerts#GaleraClusterDBHighRunningThreads'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: {{ include "pxc-db.fullname" . }} has more than 20 running threads.
        summary: {{ include "pxc-db.fullname" . }} running threads high.

-    - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterIncomplete
-      expr: (mysql_global_status_wsrep_cluster_size{app=~"{{ include "pxc-db.fullname" . }}"} < {{ .Values.pxc.size }})
+    - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterInnoDBLogWaits
+      expr: (rate(mysql_global_status_innodb_log_waits{app=~"{{ include "pxc-db.fullname" . }}"}[10m]) > 10)
      for: 10m
      labels:
        context: database
        service: {{ include "pxc-db.alerts.service" . }}
-        severity: warning
+        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_mysql_alerts#GaleraClusterInnoDBLogWaits'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
-        description: {{ include "pxc-db.fullname" . }} reports cluster size of less than 3 nodes.
-        summary: {{ include "pxc-db.fullname" . }} cluster incomplete.
+        description: {{ include "pxc-db.fullname" . }} InnoDB log writes stalling.
+        summary: {{ include "pxc-db.fullname" . }} has problem writing to disk.

-    - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterInnoDBLogWaits
-      expr: (rate(mysql_global_status_innodb_log_waits{app=~"{{ include "pxc-db.fullname" . }}"}[10m]) > 10)
+    - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterIncomplete
+      expr: (mysql_global_status_wsrep_cluster_size{app=~"{{ include "pxc-db.fullname" . }}"} < {{ .Values.pxc.size }})
      for: 10m
      labels:
        context: database
        service: {{ include "pxc-db.alerts.service" . }}
-        severity: info
+        severity: warning
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_galera_alerts#GaleraClusterIncomplete'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
-        description: {{ include "pxc-db.fullname" . }} InnoDB log writes stalling.
-        summary: {{ include "pxc-db.fullname" . }} has problem writing to disk.
+        description: {{ include "pxc-db.fullname" . }} reports cluster size of less than 3 nodes.
+        summary: {{ include "pxc-db.fullname" . }} cluster incomplete.

    - alert: {{ include "pxc-db.alerts.service" . | camelcase }}GaleraClusterNodeNotReady
      expr: (mysql_global_status_wsrep_ready{app=~"{{ include "pxc-db.fullname" . }}"} != 1)
@@ -92,7 +92,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_galera_alerts#GaleraClusterNodeNotReady'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: {{ include "pxc-db.fullname" . }} Cluster node not ready.
@@ -106,7 +106,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_galera_alerts#GaleraClusterNodeNotSynced'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: {{ include "pxc-db.fullname" . }} Cluster node out of sync.
@@ -120,7 +120,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_galera_alerts#GaleraClusterNodeSyncDelayed'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: "{{ include "pxc-db.fullname" . }} Galera cluster reports at least 1 node with substantial replication delay in the last 30 minutes"
@@ -134,7 +134,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_galera_alerts#GaleraClusterNodeReplicationPaused'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: "{{ include "pxc-db.fullname" . }} Galera cluster reports at least 1 node with 25% paused replication in the last 30 minutes"
@@ -148,7 +148,7 @@
        service: {{ include "pxc-db.alerts.service" . }}
        severity: info
        tier: {{ required ".Values.alerts.tier missing" .Values.alerts.tier }}
-        playbook: ''
+        playbook: 'docs/support/playbook/database/db_pxc_state_alerts#GaleraClusterResourceNotReady'
        support_group: {{ required ".Values.alerts.support_group missing" .Values.alerts.support_group }}
      annotations:
        description: "{{ include "pxc-db.fullname" . }} cluster resource is not in ready state."
diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml
index b8f8d8e41fa..af148d58977 100644
--- a/common/pxc-db/values.yaml
+++ b/common/pxc-db/values.yaml
@@ -42,7 +42,7 @@ initdb:
 # -- Enable InitDB job that creates databases and users
 # Disabled by default
 initdb_job: null
-# -- Set default charachter set and collation in init.sql
+# -- Set default character set and collation in init.sql
 character_set_server: "utf8mb4"
 collation_server: "utf8mb4_0900_ai_ci"
 # -- Enable the creation of a local-only root user `ccroot` without a password
@@ -219,7 +219,7 @@ pxc:
   long_query_time: 5
   log_error_suppression_list: "MY-010055,MY-013360"
   priority_class: "critical-infrastructure"
-  # -- Advanced affinity configuration for PXC databse pods
+  # -- Advanced affinity configuration for PXC database pods
   # https://docs.percona.com/percona-operator-for-mysql/pxc/constraints.html#simple-approach-use-topologykey-of-the-percona-operator-for-mysql
   affinity:
     advanced:

From c550db0f0c8cea3e3471803caca0989a9c139f9e Mon Sep 17 00:00:00 2001
From: IvoGoman
Date: Fri, 10 Jan 2025 16:24:34 +0100
Subject: [PATCH 117/224] chore(metis): bump mariadb chart to 0.14.3, db
 migration to 10 (#7630)
---
 system/metis/Chart.lock  | 6 +++---
 system/metis/Chart.yaml  | 4 ++--
 system/metis/values.yaml | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/system/metis/Chart.lock b/system/metis/Chart.lock
index 6248baa5c89..cc421b576ea 100644
--- a/system/metis/Chart.lock
+++ b/system/metis/Chart.lock
@@ -1,7 +1,7 @@
 dependencies:
 - name: mariadb
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.14.1
+  version: 0.14.3
 - name: prometheus-monitors
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.0.2
@@ -11,5 +11,5 @@ dependencies:
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.1.3
-digest: sha256:a6bbd5b8017abbae97fcedbd5f7d990a9bf4f056436653465ddafc3df9184f47
-generated: "2024-10-10T16:55:49.917319+02:00"
+digest: sha256:e46f21876126da39c89501cfdf0f9abc89d10c6f6bd53b72670710a83cc11eef
+generated: "2025-01-09T15:47:39.303454+01:00"
diff --git a/system/metis/Chart.yaml b/system/metis/Chart.yaml
index 4821c03e919..e432a6aea44 100644
--- a/system/metis/Chart.yaml
+++ b/system/metis/Chart.yaml
@@ -1,12 +1,12 @@
 apiVersion: v2
 name: metis
-version: 1.2.0
+version: 1.3.0
 description: Read-only DB for replicas of OpenStack DBs and Project Masterdata
 dependencies:
   - condition: mariadb.enabled
     name: mariadb
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-    version: 0.14.1
+    version: 0.14.3
   - name: prometheus-monitors
     repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
     version: 0.0.2
diff --git a/system/metis/values.yaml b/system/metis/values.yaml
index 96e60c39a3c..691c2c71b57 100644
--- a/system/metis/values.yaml
+++ b/system/metis/values.yaml
@@ -141,7 +141,7 @@ metis:
   prometheus:
     enabled: false
   releaseLocks: false
-  migration_version: 9
+  migration_version: 10
   requiredDatabases:
     - "keystone"
     - "nova"

From b820c4de920b825fec086b51a7f9f657d1d0112b Mon Sep 17 00:00:00 2001
From: rajivmucheli
Date: Mon, 13 Jan 2025 09:21:06 +0530
Subject: [PATCH 118/224] [Barbican] bump mysql_metrics chart version to 0.4.2
---
 openstack/barbican/Chart.lock | 6 +++---
 openstack/barbican/Chart.yaml | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/openstack/barbican/Chart.lock b/openstack/barbican/Chart.lock
index 07ff9187e4a..77fc50325bb 100644
--- a/openstack/barbican/Chart.lock
+++ b/openstack/barbican/Chart.lock
@@ -7,7 +7,7 @@ dependencies:
   version: 0.5.3
 - name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.3.6
+  version: 0.4.2
 - name: rabbitmq
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.11.1
@@ -23,5 +23,5 @@ dependencies:
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.1.0
-digest: sha256:76aa870c3584c904b2b3a9bf2c22f5cf0602d53e8de1f348dcd1f4aff659a70d
-generated: "2024-11-25T19:21:28.670888+05:30"
+digest: sha256:739de2e7c7440561bb46400173d6534a8072eb77973cc52f6846754cc5dd44d2
+generated: "2025-01-13T09:20:19.987539+05:30"
diff --git a/openstack/barbican/Chart.yaml b/openstack/barbican/Chart.yaml
index c6716a0d0c5..fe4f7e879ed 100644
--- a/openstack/barbican/Chart.yaml
+++ b/openstack/barbican/Chart.yaml
@@ -3,7 +3,7 @@ appVersion: bobcat
 description: A Helm chart for Openstack Barbican
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/Barbican/OpenStack_Project_Barbican_vertical.png
 name: barbican
-version: 0.5.11
+version: 0.5.12
 dependencies:
 - condition: mariadb.enabled
   name: mariadb
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.14.2
 - condition: mariadb.enabled
   name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.3.6
+  version: 0.4.2
 - name: rabbitmq
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.11.1

From bea8706a7027beb0ef0df7dbac04ebff0bf969c1 Mon Sep 17 00:00:00 2001
From: rajivmucheli
Date: Mon, 13 Jan 2025 09:25:11 +0530
Subject: [PATCH 119/224] [Glance] bump mysql_metrics chart version to 0.4.2
---
 openstack/glance/Chart.lock | 6 +++---
 openstack/glance/Chart.yaml | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/openstack/glance/Chart.lock b/openstack/glance/Chart.lock
index 4acfbd24960..c33df131cb2 100644
--- a/openstack/glance/Chart.lock
+++ b/openstack/glance/Chart.lock
@@ -7,7 +7,7 @@ dependencies:
   version: 0.5.3
 - name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.3.6
+  version: 0.4.2
 - name: utils
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.19.6
@@ -20,5 +20,5 @@ dependencies:
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:eea7c5ca6fae578b7aed7a50d8f264480b6d61b850f2165e540a9ac3259c54b2
-generated: "2024-11-26T12:34:53.50177+05:30"
+digest: sha256:964928d2d454d54802c68bb43557f4320227b4880a9152db591aae5418e1d483
+generated: "2025-01-13T09:23:55.214197+05:30"
diff --git a/openstack/glance/Chart.yaml b/openstack/glance/Chart.yaml
index 86a6f390d71..d4d675246ab 100644
--- a/openstack/glance/Chart.yaml
+++ b/openstack/glance/Chart.yaml
@@ -3,7 +3,7 @@ appVersion: dalmatian
 description: A Helm chart Openstack Glance
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/Glance/OpenStack_Project_Glance_vertical.png
 name: glance
-version: 0.6.2
+version: 0.6.3
 dependencies:
 - condition: mariadb.enabled
   name: mariadb
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.14.2
 - condition: mariadb.enabled
   name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.3.6
+  version: 0.4.2
 - name: utils
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.19.6

From 851a6abc74e56d1d321b838986e51a4703843a7d Mon Sep 17 00:00:00 2001
From: rajivmucheli
Date: Mon, 13 Jan 2025 09:33:55 +0530
Subject: [PATCH 120/224] [Glance] bump memcached chart version to 0.6.2
---
 openstack/glance/Chart.lock | 6 +++---
 openstack/glance/Chart.yaml | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/openstack/glance/Chart.lock b/openstack/glance/Chart.lock
index c33df131cb2..ba60aaf1dc2 100644
--- a/openstack/glance/Chart.lock
+++ b/openstack/glance/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
   version: 0.14.2
 - name: memcached
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.5.3
+  version: 0.6.2
 - name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.4.2
@@ -20,5 +20,5 @@ dependencies:
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
-digest: sha256:964928d2d454d54802c68bb43557f4320227b4880a9152db591aae5418e1d483
-generated: "2025-01-13T09:23:55.214197+05:30"
+digest: sha256:d71a7789f43ed12a3cb3805854162a4c4fb34d08d1d817af93670a5a091b6f33
+generated: "2025-01-13T09:33:16.3966+05:30"
diff --git a/openstack/glance/Chart.yaml b/openstack/glance/Chart.yaml
index d4d675246ab..0a7f322c44e 100644
--- a/openstack/glance/Chart.yaml
+++ b/openstack/glance/Chart.yaml
@@ -3,7 +3,7 @@ appVersion: dalmatian
 description: A Helm chart Openstack Glance
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/Glance/OpenStack_Project_Glance_vertical.png
 name: glance
-version: 0.6.3
+version: 0.6.4
 dependencies:
 - condition: mariadb.enabled
   name: mariadb
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.14.2
 - name: memcached
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.5.3
+  version: 0.6.2
 - condition: mariadb.enabled
   name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm

From cf8323ab374522e7c69b92de5f047a848dd1dabe Mon Sep 17 00:00:00 2001
From: rajivmucheli
Date: Mon, 13 Jan 2025 09:37:46 +0530
Subject: [PATCH 121/224] [Barbican] bump memcached chart version to 0.6.2
---
 openstack/barbican/Chart.lock | 6 +++---
 openstack/barbican/Chart.yaml | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/openstack/barbican/Chart.lock b/openstack/barbican/Chart.lock
index 77fc50325bb..f2d8e283fcb 100644
--- a/openstack/barbican/Chart.lock
+++ b/openstack/barbican/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
   version: 0.14.2
 - name: memcached
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.5.3
+  version: 0.6.2
 - name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.4.2
@@ -23,5 +23,5 @@ dependencies:
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.1.0
-digest: sha256:739de2e7c7440561bb46400173d6534a8072eb77973cc52f6846754cc5dd44d2
-generated: "2025-01-13T09:20:19.987539+05:30"
+digest: sha256:084f368c0b29a1be321a9b88be893460e2de2e2e1fba1cdfa1af4caabcd330f5
+generated: "2025-01-13T09:37:10.185209+05:30"
diff --git a/openstack/barbican/Chart.yaml b/openstack/barbican/Chart.yaml
index fe4f7e879ed..8ad47619622 100644
--- a/openstack/barbican/Chart.yaml
+++ b/openstack/barbican/Chart.yaml
@@ -3,7 +3,7 @@ appVersion: bobcat
 description: A Helm chart for Openstack Barbican
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/Barbican/OpenStack_Project_Barbican_vertical.png
 name: barbican
-version: 0.5.12
+version: 0.5.13
 dependencies:
 - condition: mariadb.enabled
   name: mariadb
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.14.2
 - name: memcached
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 0.5.3
+  version: 0.6.2
 - condition: mariadb.enabled
   name: mysql_metrics
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm

From 8992af8543932dd964985ba35cfb85eaffceb4b1 Mon Sep 17 00:00:00 2001
From: rajivmucheli
Date: Mon, 13 Jan 2025 09:44:24 +0530
Subject: [PATCH 122/224] [Barbican] bump redis chart version to 1.6.2
---
 openstack/barbican/Chart.lock | 6 +++---
 openstack/barbican/Chart.yaml | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/openstack/barbican/Chart.lock b/openstack/barbican/Chart.lock
index f2d8e283fcb..d055f75419e 100644
--- a/openstack/barbican/Chart.lock
+++ b/openstack/barbican/Chart.lock
@@ -16,12 +16,12 @@ dependencies:
   version: 0.19.6
 - name: redis
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 1.5.3
+  version: 1.6.2
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 1.0.0
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.1.0
-digest: sha256:084f368c0b29a1be321a9b88be893460e2de2e2e1fba1cdfa1af4caabcd330f5
-generated: "2025-01-13T09:37:10.185209+05:30"
+digest: sha256:2063ebb548030aefae93ea53c27b58b62b6519804c4bf4df84e1323a661f1e10
+generated: "2025-01-13T09:43:30.556477+05:30"
diff --git a/openstack/barbican/Chart.yaml b/openstack/barbican/Chart.yaml
index 8ad47619622..fbcd250dae3 100644
--- a/openstack/barbican/Chart.yaml
+++ b/openstack/barbican/Chart.yaml
@@ -3,7 +3,7 @@ appVersion: bobcat
 description: A Helm chart for Openstack Barbican
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/Barbican/OpenStack_Project_Barbican_vertical.png
 name: barbican
-version: 0.5.13
+version: 0.5.14
 dependencies:
 - condition: mariadb.enabled
   name: mariadb
@@ -25,7 +25,7 @@ dependencies:
 - name: redis
   alias: sapcc_rate_limit
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
-  version: 1.5.3
+  version: 1.6.2
   condition: sapcc_rate_limit.enabled
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm

From 98c0fa2a08e16723cdfdf58c9ef996461e1e9a62 Mon Sep 17 00:00:00 2001
From: sapcc-bot
Date: Mon, 13 Jan 2025 05:19:12 +0000
Subject: [PATCH 123/224] global/ccloud-hedgedoc: run helm dep up
---
 global/ccloud-hedgedoc/Chart.lock | 6 +++---
 1 file changed, 3
insertions(+), 3 deletions(-) diff --git a/global/ccloud-hedgedoc/Chart.lock b/global/ccloud-hedgedoc/Chart.lock index d0ab6b34703..83b6fb41cef 100644 --- a/global/ccloud-hedgedoc/Chart.lock +++ b/global/ccloud-hedgedoc/Chart.lock @@ -4,7 +4,7 @@ dependencies: version: 0.1.11 - name: postgresql-ng repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.2.5 + version: 1.2.6 - name: pgbackup repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.8 @@ -14,5 +14,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:68c6dfb3add8a77d3289bbd2642cf458875bec207fba9428e489c1ed129ab756 -generated: "2024-12-23T05:12:57.284085638Z" +digest: sha256:2a57e4fec74f0712c29cdbaf46a3647d05c31cecccb0b284ec612a390769f09b +generated: "2025-01-13T05:19:11.08362515Z" From 903fd74258b6d09d8ceca0d68510af8e9e36e975 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Mon, 13 Jan 2025 05:45:52 +0000 Subject: [PATCH 124/224] openstack/elektra: run helm dep up --- openstack/elektra/Chart.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/openstack/elektra/Chart.lock b/openstack/elektra/Chart.lock index 9e60fe2d7cf..2473ca6e354 100644 --- a/openstack/elektra/Chart.lock +++ b/openstack/elektra/Chart.lock @@ -1,7 +1,7 @@ dependencies: - name: postgresql-ng repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.2.5 + version: 1.2.6 - name: pgbackup repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.8 @@ -14,5 +14,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:586f0ef648aac2129d4d6e46956c559f1f2df682b022d7a390c9d22180cec9ed -generated: "2024-12-23T05:31:24.773396672Z" +digest: sha256:7b58adb36f0442da9118644c272f327cf8176e4892913b558868d62ff54dc812 +generated: "2025-01-13T05:45:50.436869816Z" From 974a8703a802d57def43e742ae3b40562cbcde8f Mon Sep 17 00:00:00 2001 From: degricar <156818083+degricar@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:07:32 +0100 Subject: [PATCH 125/224] add lenovo SR675V3 with Nvidia L40S flavor for node001r-gp900.cc.qa-de-1.cloud.sap --- openstack/ironic/templates/seed.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/openstack/ironic/templates/seed.yaml b/openstack/ironic/templates/seed.yaml index dff1276d500..04261d15979 100644 --- a/openstack/ironic/templates/seed.yaml +++ b/openstack/ironic/templates/seed.yaml @@ -581,6 +581,18 @@ spec: "resources:CUSTOM_BG_S2_C48_M2048_V0": "1" {{- tuple . "baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} + # New GPU Nodes Lenovo ThinkSystem SR675 V3 with Nvidia L40S + - name: "bg_s2_c96_m1536_v0" + id: "2042" + vcpus: 192 + ram: 1572864 + disk: 1788 + is_public: true + extra_specs: + "catalog:description": GPU Baremetal 2 Socket Sapphire Rapids 8x Nvidia L40S + "resources:CUSTOM_BG_S2_C96_M1536_V0": "1" + {{- tuple . 
"baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} + domains: - name: Default users: From 708b2d1c26b4fabc71b353575dbd3be7695f4d3d Mon Sep 17 00:00:00 2001 From: degricar <156818083+degricar@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:13:20 +0100 Subject: [PATCH 126/224] Update seed.yaml --- openstack/ironic/templates/seed.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openstack/ironic/templates/seed.yaml b/openstack/ironic/templates/seed.yaml index 04261d15979..f38b3ad3496 100644 --- a/openstack/ironic/templates/seed.yaml +++ b/openstack/ironic/templates/seed.yaml @@ -589,7 +589,7 @@ spec: disk: 1788 is_public: true extra_specs: - "catalog:description": GPU Baremetal 2 Socket Sapphire Rapids 8x Nvidia L40S + "catalog:description": GPU Baremetal 2 Socket AMD Epyc 8x Nvidia L40S "resources:CUSTOM_BG_S2_C96_M1536_V0": "1" {{- tuple . "baremetal" | include "ironic.helpers.extra_specs" | indent 6 }} From 41097847748c498f068befa811296f7b5dd88718 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Fri, 8 Nov 2024 15:32:33 +0100 Subject: [PATCH 127/224] tenso: use policy.json instead of policy.yaml This does not entirely remove the YAML parser library from Tenso yet, but it's a prerequisite to getting there and reducing our dependency tree a little bit. This change is immediately deployable because JSON is a subset of YAML to a sufficient extent. --- system/tenso/files/policy.yaml | 10 ++++++++++ system/tenso/templates/_utils.tpl | 2 +- system/tenso/templates/configmap.yaml | 13 ++----------- 3 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 system/tenso/files/policy.yaml diff --git a/system/tenso/files/policy.yaml b/system/tenso/files/policy.yaml new file mode 100644 index 00000000000..e4675d96c88 --- /dev/null +++ b/system/tenso/files/policy.yaml @@ -0,0 +1,10 @@ +# rules expected by Tenso +'event:create': '(rule:match_concourse_event and rule:match_concourse_user) or (rule:match_ansible_event and rule:match_ansible_user)' +'event:create_synthetic': 'rule:cloud_admin' + +# building blocks +'cloud_admin': 'project_name:cloud_admin and project_domain_name:ccadmin and role:admin' +'match_concourse_event': '"helm-deployment-from-concourse.v1":%(target.payload_type)s or "active-directory-deployment-from-concourse.v1":%(target.payload_type)s or "active-directory-deployment-from-concourse.v2":%(target.payload_type)s or "terraform-deployment-from-concourse.v1":%(target.payload_type)s' +'match_concourse_user': 'user_name:tenso-concourse and user_domain_name:Default' +'match_ansible_event': '"infra-workflow-from-awx.v1":%(target.payload_type)s' +'match_ansible_user': 'user_name:tenso-ansible and user_domain_name:Default' diff --git a/system/tenso/templates/_utils.tpl b/system/tenso/templates/_utils.tpl index d100296d75a..4252d89bbfb 100644 --- a/system/tenso/templates/_utils.tpl +++ b/system/tenso/templates/_utils.tpl @@ -52,7 +52,7 @@ - name: TENSO_HELM_DEPLOYMENT_SWIFT_CONTAINER value: tenso-deployment-events - name: TENSO_OSLO_POLICY_PATH - value: '/etc/tenso/policy.yaml' + value: '/etc/tenso/policy.json' - name: TENSO_ROUTES value: > helm-deployment-from-concourse.v1 -> helm-deployment-to-elk.v1, diff --git a/system/tenso/templates/configmap.yaml b/system/tenso/templates/configmap.yaml index 38ee8485139..0294d39e8f5 100644 --- a/system/tenso/templates/configmap.yaml +++ b/system/tenso/templates/configmap.yaml @@ -5,17 +5,8 @@ metadata: name: tenso data: - policy.yaml: | - # rules expected by Tenso - 'event:create': 
'(rule:match_concourse_event and rule:match_concourse_user) or (rule:match_ansible_event and rule:match_ansible_user)' - 'event:create_synthetic': 'rule:cloud_admin' - - # building blocks - 'cloud_admin': 'project_name:cloud_admin and project_domain_name:ccadmin and role:admin' - 'match_concourse_event': '"helm-deployment-from-concourse.v1":%(target.payload_type)s or "active-directory-deployment-from-concourse.v1":%(target.payload_type)s or "active-directory-deployment-from-concourse.v2":%(target.payload_type)s or "terraform-deployment-from-concourse.v1":%(target.payload_type)s' - 'match_concourse_user': 'user_name:tenso-concourse and user_domain_name:Default' - 'match_ansible_event': '"infra-workflow-from-awx.v1":%(target.payload_type)s' - 'match_ansible_user': 'user_name:tenso-ansible and user_domain_name:Default' + policy.json: | + {{- .Files.Get "files/policy.yaml" | fromYaml | toPrettyJson | nindent 4 }} servicenow-mapping.yaml: | {{ toYaml .Values.tenso.servicenow.mapping_config | indent 4 }} From c4592a25c6a8a55413d3dce24406002fdea81ec7 Mon Sep 17 00:00:00 2001 From: Artem Torubarov Date: Mon, 13 Jan 2025 11:25:44 +0100 Subject: [PATCH 128/224] support premium target placements in multi-instance rgw Signed-off-by: Artem Torubarov --- system/cc-ceph/templates/cephobjectstore-extra.yaml | 9 +++++++++ system/cc-ceph/templates/cephobjectstore.yaml | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml index 3772a5ad81a..8b7ce7ca71f 100644 --- a/system/cc-ceph/templates/cephobjectstore-extra.yaml +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -30,6 +30,15 @@ spec: dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec default: {{ $target.default | default false }} {{- end }} +{{- if .Values.rgwTargetPlacements.premiumPlacements }} +{{- range $target := .Values.rgwTargetPlacements.premiumPlacements }} + - name: {{ $target.name }} + metadataPoolName: {{ $target.name }}.rgw.buckets.index + dataPoolName: {{ $target.name }}.rgw.buckets.data + dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec + default: {{ $target.default | default false }} +{{- end }} +{{- end }} {{- else }} metadataPool: {{ toYaml .Values.objectstore.metadataPool | nindent 4 }} dataPool: {{ toYaml .Values.objectstore.dataPool | nindent 4 }} diff --git a/system/cc-ceph/templates/cephobjectstore.yaml b/system/cc-ceph/templates/cephobjectstore.yaml index 8f8b729dfa4..644f9e6358b 100644 --- a/system/cc-ceph/templates/cephobjectstore.yaml +++ b/system/cc-ceph/templates/cephobjectstore.yaml @@ -17,6 +17,15 @@ spec: dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec default: {{ $target.default | default false }} {{- end }} +{{- if .Values.rgwTargetPlacements.premiumPlacements }} +{{- range $target := .Values.rgwTargetPlacements.premiumPlacements }} + - name: {{ $target.name }} + metadataPoolName: {{ $target.name }}.rgw.buckets.index + dataPoolName: {{ $target.name }}.rgw.buckets.data + dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec + default: {{ $target.default | default false }} +{{- end }} +{{- end }} {{- else }} metadataPool: {{ toYaml .Values.objectstore.metadataPool | nindent 4 }} dataPool: {{ toYaml .Values.objectstore.dataPool | nindent 4 }} From b9c15c47f9bb0749fc70e663f34cc5ee8fb2e2ea Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 13 Jan 2025 16:33:44 +0100 Subject: [PATCH 129/224] [octobus-query-exporter] fix jumpserver alert, mv alert to regional thanos-ruler ---
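Note on the new alert expression added below: label_replace() copies each node's `nodename` value verbatim into a synthetic `logs` label, and `unless on (logs)` then subtracts every host for which Octobus still reports ingested documents, so only silent jumpservers survive the join. A minimal sketch of the pattern, assuming the doc-count metric exposes a matching `logs` label:

    label_replace(node_uname_info, "logs", "$0", "nodename", ".*")
      unless on (logs)
    (elasticsearch_octobus_jumpserver_logs_doc_count > 0)

Every series still present on the left-hand side is a jumpserver with no recent log documents in Octobus; combined with `for: 15m`, the alert only fires once that state has persisted.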
.../octobus-query-exporter/Chart.lock | 6 +++--- .../octobus-query-exporter/Chart.yaml | 2 +- .../octobus-query-exporter/values.yaml | 2 ++ .../vendor/octobus-query-exporter/Chart.yaml | 2 +- .../alerts/region/jump.alerts | 16 ++++++++++++++++ 5 files changed, 23 insertions(+), 5 deletions(-) create mode 100644 prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts diff --git a/prometheus-exporters/octobus-query-exporter/Chart.lock b/prometheus-exporters/octobus-query-exporter/Chart.lock index 30500764e06..88f86441b33 100644 --- a/prometheus-exporters/octobus-query-exporter/Chart.lock +++ b/prometheus-exporters/octobus-query-exporter/Chart.lock @@ -1,7 +1,7 @@ dependencies: - name: octobus-query-exporter repository: file://vendor/octobus-query-exporter - version: 1.0.13 + version: 1.0.14 - name: octobus-query-exporter-global repository: file://vendor/octobus-query-exporter-global version: 1.0.12 @@ -11,5 +11,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:91c64bcc894bb31e73411e4ff1eb54bfb3d6593ff0da7815873795eda85b0106 -generated: "2024-06-24T09:31:18.092895+02:00" +digest: sha256:92ec8a429d2bcc6045cd5fe9780e83046ea503e342dc111245d8568b663d4663 +generated: "2025-01-13T16:32:28.384656+01:00" diff --git a/prometheus-exporters/octobus-query-exporter/Chart.yaml b/prometheus-exporters/octobus-query-exporter/Chart.yaml index 3e60ad4fd3a..d2af21eb86a 100644 --- a/prometheus-exporters/octobus-query-exporter/Chart.yaml +++ b/prometheus-exporters/octobus-query-exporter/Chart.yaml @@ -9,7 +9,7 @@ dependencies: - name: octobus-query-exporter alias: octobus_query_exporter repository: file://vendor/octobus-query-exporter - version: 1.0.13 + version: 1.0.14 condition: octobus_query_exporter.enabled - name: octobus-query-exporter-global diff --git a/prometheus-exporters/octobus-query-exporter/values.yaml b/prometheus-exporters/octobus-query-exporter/values.yaml index bb8d56745d6..fd47540a194 100644 --- a/prometheus-exporters/octobus-query-exporter/values.yaml +++ b/prometheus-exporters/octobus-query-exporter/values.yaml @@ -16,6 +16,8 @@ octobus_query_exporter: type: prometheus - name: scaleout type: thanos-ruler + - name: region + type: thanos-ruler auditSources: enabled: false listen_port: 9206 diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml index 154eaf07322..b067661925e 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 1.0.13 +version: 1.0.14 name: octobus-query-exporter description: Elasticsearch prometheus query exporter maintainers: diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts new file mode 100644 index 00000000000..7e5a062218e --- /dev/null +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts @@ -0,0 +1,16 @@ +groups: + - name: jumplogs + rules: + - alert: OctobusJumpserverLogShipping + expr: label_replace(node_uname_info, "logs", "$0", "nodename", ".*") unless on (logs) (elasticsearch_octobus_jumpserver_logs_doc_count > 0) + for: 15m + labels: 
+ severity: warning + service: audit + support_group: "observability" + meta: "No Jumpserver logs shipping to Octobus" + playbook: 'docs/support/playbook/opensearch/octobus/jumpserver-audit-logs-in-octobus-missing' + dashboard: audit-log-shipping + annotations: + description: "Jumpserver log shipping to Octobus not working for `{{ $labels.server_name }}`" + summary: "JumpserverLogs2Octobus not working" From a0559a59034480707d48cf8daf52f1042f3af69b Mon Sep 17 00:00:00 2001 From: Martin Vossen Date: Mon, 13 Jan 2025 17:40:37 +0100 Subject: [PATCH 130/224] update plutono maintainers (#7654) --- system/plutono/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/plutono/values.yaml b/system/plutono/values.yaml index 3bd2aa63c26..31cf0005574 100644 --- a/system/plutono/values.yaml +++ b/system/plutono/values.yaml @@ -135,6 +135,6 @@ owner-info: support-group: observability service: plutono maintainers: - - Thomas Graichen + - Akshay Iyyadurai Balasundaram - Richard Tief helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/openstack/plutono From 85db0d1c0c73b6e6392b5d7e5c77516519dfdbfb Mon Sep 17 00:00:00 2001 From: Tommy Sauer Date: Mon, 13 Jan 2025 17:42:16 +0100 Subject: [PATCH 131/224] [metis] adjust support_group (#7644) * adjust support_group * Update system/metis/values.yaml Co-authored-by: Richard Tief <56597015+richardtief@users.noreply.github.com> * update alerts Co-authored-by: IvoGoman --------- Co-authored-by: Richard Tief <56597015+richardtief@users.noreply.github.com> Co-authored-by: IvoGoman --- system/metis/Chart.yaml | 2 +- system/metis/values.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/system/metis/Chart.yaml b/system/metis/Chart.yaml index e432a6aea44..e494f55c08a 100644 --- a/system/metis/Chart.yaml +++ b/system/metis/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: metis -version: 1.3.0 +version: 1.3.1 description: Read-only DB for replicas of OpenStack DBs and Project Masterdata dependencies: - condition: mariadb.enabled diff --git a/system/metis/values.yaml b/system/metis/values.yaml index 691c2c71b57..b02ccfeee23 100644 --- a/system/metis/values.yaml +++ b/system/metis/values.yaml @@ -7,10 +7,10 @@ global: alerts: prometheus: "infra-collector" - supportGroup: "observability" + supportGroup: "containers" owner-info: - support-group: observability + support-group: containers service: metis maintainers: - "Ivo Gosemann" From f5d0ae6459b03a1170b80997adba5f94f5772364 Mon Sep 17 00:00:00 2001 From: sumitarora2786 <144459425+sumitarora2786@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:24:15 -0500 Subject: [PATCH 132/224] Update cephobjectstore.yaml adjust for PT's creation via rookCRD --- system/cc-ceph/templates/cephobjectstore.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/system/cc-ceph/templates/cephobjectstore.yaml b/system/cc-ceph/templates/cephobjectstore.yaml index 644f9e6358b..20331682c35 100644 --- a/system/cc-ceph/templates/cephobjectstore.yaml +++ b/system/cc-ceph/templates/cephobjectstore.yaml @@ -7,18 +7,18 @@ spec: {{- if .Values.objectstore.multiInstance.enabled }} zone: name: {{ .Values.objectstore.name }} -{{- else if and .Values.rgwTargetPlacements.useRookCRD .Values.rgwTargetPlacements.placements }} +{{- else if and .Values.rgwTargetPlacements.useRookCRD .Values.rgwTargetPlacements.premiumPlacements }} sharedPools: poolPlacements: -{{- range $target := .Values.rgwTargetPlacements.placements }} +{{- range $target := 
.Values.rgwTargetPlacements.premiumPlacements }} - name: {{ $target.name }} metadataPoolName: {{ $target.name }}.rgw.buckets.index dataPoolName: {{ $target.name }}.rgw.buckets.data dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec default: {{ $target.default | default false }} {{- end }} -{{- if .Values.rgwTargetPlacements.premiumPlacements }} -{{- range $target := .Values.rgwTargetPlacements.premiumPlacements }} +{{- if .Values.rgwTargetPlacements.Placements }} +{{- range $target := .Values.rgwTargetPlacements.placements }} - name: {{ $target.name }} metadataPoolName: {{ $target.name }}.rgw.buckets.index dataPoolName: {{ $target.name }}.rgw.buckets.data From 7ea7abb2dd685b070b0a007392ac49d060251bbd Mon Sep 17 00:00:00 2001 From: sumitarora2786 <144459425+sumitarora2786@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:27:02 -0500 Subject: [PATCH 133/224] Update cephobjectstore-extra.yaml --- system/cc-ceph/templates/cephobjectstore-extra.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml index 8b7ce7ca71f..a07c2f7b74a 100644 --- a/system/cc-ceph/templates/cephobjectstore-extra.yaml +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -20,18 +20,18 @@ metadata: namespace: {{ .Release.Namespace }} spec: zoneGroup: {{ .Values.objectstore.name }} -{{- if and .Values.rgwTargetPlacements.useRookCRD .Values.rgwTargetPlacements.placements }} +{{- if and .Values.rgwTargetPlacements.useRookCRD .Values.rgwTargetPlacements.premiumPlacements }} sharedPools: poolPlacements: -{{- range $target := .Values.rgwTargetPlacements.placements }} +{{- range $target := .Values.rgwTargetPlacements.premiumPlacements }} - name: {{ $target.name }} metadataPoolName: {{ $target.name }}.rgw.buckets.index dataPoolName: {{ $target.name }}.rgw.buckets.data dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec default: {{ $target.default | default false }} {{- end }} -{{- if .Values.rgwTargetPlacements.premiumPlacements }} -{{- range $target := .Values.rgwTargetPlacements.premiumPlacements }} +{{- if .Values.rgwTargetPlacements.Placements }} +{{- range $target := .Values.rgwTargetPlacements.placements }} - name: {{ $target.name }} metadataPoolName: {{ $target.name }}.rgw.buckets.index dataPoolName: {{ $target.name }}.rgw.buckets.data From 2249f67e0f3c77b9b7ad3245e12cfe1288e63aa3 Mon Sep 17 00:00:00 2001 From: sumitarora2786 <144459425+sumitarora2786@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:29:28 -0500 Subject: [PATCH 134/224] Update cephobjectstore-extra.yaml --- system/cc-ceph/templates/cephobjectstore-extra.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml index a07c2f7b74a..ca62b10a4e9 100644 --- a/system/cc-ceph/templates/cephobjectstore-extra.yaml +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -30,7 +30,7 @@ spec: dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec default: {{ $target.default | default false }} {{- end }} -{{- if .Values.rgwTargetPlacements.Placements }} +{{- if .Values.rgwTargetPlacements.placements }} {{- range $target := .Values.rgwTargetPlacements.placements }} - name: {{ $target.name }} metadataPoolName: {{ $target.name }}.rgw.buckets.index From 89c8c8b41cd3e42490c86de0f4cb78d4ab8e4a58 Mon Sep 17 00:00:00 2001 From: sumitarora2786 <144459425+sumitarora2786@users.noreply.github.com> Date: Mon, 
13 Jan 2025 16:29:56 -0500 Subject: [PATCH 135/224] Update cephobjectstore.yaml --- system/cc-ceph/templates/cephobjectstore.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/cc-ceph/templates/cephobjectstore.yaml b/system/cc-ceph/templates/cephobjectstore.yaml index 20331682c35..c6713d9df9b 100644 --- a/system/cc-ceph/templates/cephobjectstore.yaml +++ b/system/cc-ceph/templates/cephobjectstore.yaml @@ -17,7 +17,7 @@ spec: dataNonECPoolName: {{ $target.name }}.rgw.buckets.non-ec default: {{ $target.default | default false }} {{- end }} -{{- if .Values.rgwTargetPlacements.Placements }} +{{- if .Values.rgwTargetPlacements.placements }} {{- range $target := .Values.rgwTargetPlacements.placements }} - name: {{ $target.name }} metadataPoolName: {{ $target.name }}.rgw.buckets.index From fee035859edc8bbfb9646c43d5cdc914b0534fb8 Mon Sep 17 00:00:00 2001 From: sumitarora2786 <144459425+sumitarora2786@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:36:57 -0500 Subject: [PATCH 136/224] Update Chart.yaml --- system/cc-ceph/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 8cf0b10c477..4b952a804e7 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.2 +version: 1.1.3 appVersion: "1.16.0" dependencies: - name: owner-info From b0c518c8e64c4342ed559b39884ad4033ed77f1d Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Tue, 7 Jan 2025 16:09:53 +0200 Subject: [PATCH 137/224] [memcached] memcached version bumped to 1.6.34-alpine3.21 * bumped memcached version * bumped memcached-exporter version * chart version bumped --- common/memcached/CHANGELOG.md | 6 ++++++ common/memcached/Chart.yaml | 2 +- common/memcached/values.yaml | 4 ++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/common/memcached/CHANGELOG.md b/common/memcached/CHANGELOG.md index 9dc7de14d33..443ae7a05e6 100644 --- a/common/memcached/CHANGELOG.md +++ b/common/memcached/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## v0.6.3 - 2025/01/07 + +* memcached [version](https://github.com/memcached/memcached/wiki/ReleaseNotes1634) bumped to `1.6.34-alpine3.21` +* memcached-exporter [version](https://github.com/prometheus/memcached_exporter/releases/tag/v0.15.0) bumped to `v0.15.0` +* chart version bumped + ## v0.6.2 - 2024/12/16 * memcached [version](https://github.com/memcached/memcached/wiki/ReleaseNotes1633) bumped to `1.6.33-alpine3.21` diff --git a/common/memcached/Chart.yaml b/common/memcached/Chart.yaml index 83a877fce78..6d74b37d315 100644 --- a/common/memcached/Chart.yaml +++ b/common/memcached/Chart.yaml @@ -1,7 +1,7 @@ --- apiVersion: v1 name: memcached -version: 0.6.2 +version: 0.6.3 description: Free & open source, high-performance, distributed memory object caching system. 
home: http://memcached.org/ sources: diff --git a/common/memcached/values.yaml b/common/memcached/values.yaml index 010a18a65cc..f7bbea6055d 100644 --- a/common/memcached/values.yaml +++ b/common/memcached/values.yaml @@ -2,7 +2,7 @@ ## ref: https://hub.docker.com/r/library/memcached/tags/ ## image: library/memcached -imageTag: 1.6.33-alpine3.21 +imageTag: 1.6.34-alpine3.21 # set to true to use .Values.global.dockerHubMirrorAlternateRegion instead of .Values.global.dockerHubMirror use_alternate_registry: false @@ -48,7 +48,7 @@ resources: metrics: enabled: true image: "prom/memcached-exporter" - imageTag: "v0.14.1" + imageTag: "v0.15.0" port: "9150" resources: From e721cfbf5ecd632e33664dc9e2bfb974e8fec37f Mon Sep 17 00:00:00 2001 From: rajivmucheli Date: Tue, 14 Jan 2025 14:21:03 +0530 Subject: [PATCH 138/224] [PyKMIP] rename kmip-barbican container name to kmip-core --- openstack/kmip/Chart.lock | 6 +++--- openstack/kmip/Chart.yaml | 2 +- openstack/kmip/templates/deployment.yaml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openstack/kmip/Chart.lock b/openstack/kmip/Chart.lock index ba8906de87f..9fd70bf8f8f 100644 --- a/openstack/kmip/Chart.lock +++ b/openstack/kmip/Chart.lock @@ -7,12 +7,12 @@ dependencies: version: 1.1.0 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.20.0 + version: 0.21.0 - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.14.2 - name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.3.5 -digest: sha256:bb17389f99b286a9b65137c31a94f02426af229325160acfded17ecdcaca877f -generated: "2024-12-12T20:11:21.311376+05:30" +digest: sha256:11a2527a2f73497204ea9ba1814c4967a0dd5c329ff2d2d80082d002a08b6a69 +generated: "2025-01-14T14:19:54.499578+05:30" diff --git a/openstack/kmip/Chart.yaml b/openstack/kmip/Chart.yaml index 567c1932cc4..659badd747e 100644 --- a/openstack/kmip/Chart.yaml +++ b/openstack/kmip/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.1 +version: 0.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/openstack/kmip/templates/deployment.yaml b/openstack/kmip/templates/deployment.yaml index 99837105f19..245cee8f4c5 100644 --- a/openstack/kmip/templates/deployment.yaml +++ b/openstack/kmip/templates/deployment.yaml @@ -84,7 +84,7 @@ spec: requests: memory: 0 cpu: 0 - - name: kmip-barbican + - name: kmip-core securityContext: {{- toYaml .Values.securityContext | nindent 12 }} image: {{required ".Values.global.registry is missing" .Values.global.registry }}/loci-barbican:{{required "Values.kmip.image is missing" .Values.kmip.image }} From 2dc9a1cdfce9e82f87069bd5d9a676ee221da832 Mon Sep 17 00:00:00 2001 From: rajiv Date: Tue, 14 Jan 2025 16:01:09 +0530 Subject: [PATCH 139/224] [PyKMIP] update kmip-core service port to 5006 --- openstack/kmip/templates/service.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openstack/kmip/templates/service.yaml b/openstack/kmip/templates/service.yaml index 7f1ce892aac..f11230ca034 100644 --- a/openstack/kmip/templates/service.yaml +++ b/openstack/kmip/templates/service.yaml @@ -28,6 +28,6 @@ spec: targetPort: 5696 protocol: TCP - name: restapi - port: 5005 - targetPort: 5005 + port: 5006 + targetPort: 5006 protocol: TCP From 5fc1c468f7a9db67620fa08f1598757e5ecbd2f0 Mon Sep 17 00:00:00 2001 From: Dmitry Galkin Date: Tue, 14 Jan 2025 11:38:09 +0100 Subject: [PATCH 140/224] [designate][policy] fix typo in pool_zone_move rule --- openstack/designate/templates/etc/_designate-policy.yaml.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openstack/designate/templates/etc/_designate-policy.yaml.tpl b/openstack/designate/templates/etc/_designate-policy.yaml.tpl index fac98587e3c..0ce30f35011 100644 --- a/openstack/designate/templates/etc/_designate-policy.yaml.tpl +++ b/openstack/designate/templates/etc/_designate-policy.yaml.tpl @@ -56,7 +56,7 @@ share_zone: rule:context_is_master unshare_zone: rule:context_is_master create_zone: rule:context_is_dns_ops move_zone: rule:context_is_dns_ops -pool_move_zone: rule:context_is_dns_op +pool_move_zone: rule:context_is_dns_ops create_sub_zone: rule:context_is_zonemaster create_super_zone: rule:context_is_cloud_admin get_zones: rule:context_is_viewer From 30904a4e87a974c948f1b1693b49decb667592f9 Mon Sep 17 00:00:00 2001 From: Nuckal777 Date: Tue, 14 Jan 2025 11:49:35 +0100 Subject: [PATCH 141/224] Rename metal-token-dealer to metal-token-rotate --- system/Makefile | 4 +-- .../metal-operator-managedresources/rbac.yaml | 18 ++++++------- ...kubeconfig.yaml => rotate-kubeconfig.yaml} | 8 +++--- system/metal-operator-remote/Chart.yaml | 2 +- .../managedresources/rbac.yaml | 18 ++++++------- ...kubeconfig.yaml => rotate-kubeconfig.yaml} | 8 +++--- .../Chart.lock | 0 .../Chart.yaml | 6 ++--- .../ci/test-values.yaml | 0 .../files/rbac.yaml | 8 +++--- .../templates/configmap.yaml | 2 +- .../templates/deployment.yaml | 18 ++++++------- .../templates/managedresource.yaml | 6 ++--- .../templates/rbac.yaml | 26 +++++++++---------- .../templates/secret.yaml | 4 +-- .../values.yaml | 6 ++--- 16 files changed, 67 insertions(+), 67 deletions(-) rename system/kustomize/metal-operator-remote/{dealer-kubeconfig.yaml => rotate-kubeconfig.yaml} (80%) rename system/metal-operator-remote/templates/{dealer-kubeconfig.yaml => rotate-kubeconfig.yaml} (80%) rename system/{metal-token-dealer => metal-token-rotate}/Chart.lock (100%) rename system/{metal-token-dealer => metal-token-rotate}/Chart.yaml (64%) rename system/{metal-token-dealer => 
metal-token-rotate}/ci/test-values.yaml (100%) rename system/{metal-token-dealer => metal-token-rotate}/files/rbac.yaml (77%) rename system/{metal-token-dealer => metal-token-rotate}/templates/configmap.yaml (93%) rename system/{metal-token-dealer => metal-token-rotate}/templates/deployment.yaml (81%) rename system/{metal-token-dealer => metal-token-rotate}/templates/managedresource.yaml (77%) rename system/{metal-token-dealer => metal-token-rotate}/templates/rbac.yaml (80%) rename system/{metal-token-dealer => metal-token-rotate}/templates/secret.yaml (72%) rename system/{metal-token-dealer => metal-token-rotate}/values.yaml (87%) diff --git a/system/Makefile b/system/Makefile index 02943ba64e1..bd113d451d4 100644 --- a/system/Makefile +++ b/system/Makefile @@ -119,7 +119,7 @@ build-metal-operator-remote: @cat kustomize/metal-operator-remote/kustomization.yaml > kustomization.yaml @kubectl kustomize | helmify -crd-dir metal-operator-remote @cp kustomize/metal-operator-remote/remote-kubeconfig.yaml metal-operator-remote/templates - @cp kustomize/metal-operator-remote/dealer-kubeconfig.yaml metal-operator-remote/templates + @cp kustomize/metal-operator-remote/rotate-kubeconfig.yaml metal-operator-remote/templates @cp kustomize/metal-operator-remote/managedresource.yaml metal-operator-remote/templates @cp kustomize/metal-operator-remote/macdb.yaml metal-operator-remote/templates @mkdir metal-operator-remote/managedresources @@ -129,7 +129,7 @@ build-metal-operator-remote: @yq -i '.fullnameOverride="metal-operator"' metal-operator-remote/values.yaml @yq -i '.remote.ca=""' metal-operator-remote/values.yaml @echo 'macdb: {}' >> metal-operator-remote/values.yaml - @yq -i '.version="0.2.0"' metal-operator-remote/Chart.yaml + @yq -i '.version="0.2.1"' metal-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' metal-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' metal-operator-remote/managedresources/kustomize.yaml diff --git a/system/kustomize/metal-operator-managedresources/rbac.yaml b/system/kustomize/metal-operator-managedresources/rbac.yaml index 04944db124c..037a69aa814 100644 --- a/system/kustomize/metal-operator-managedresources/rbac.yaml +++ b/system/kustomize/metal-operator-managedresources/rbac.yaml @@ -16,13 +16,13 @@ subjects: apiVersion: v1 kind: ServiceAccount metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: metal-token-dealer + name: metal-token-rotate rules: - apiGroups: - "authentication.k8s.io" @@ -34,20 +34,20 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system rules: - apiGroups: @@ -62,13 +62,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: 
metal-token-rotate namespace: kube-system diff --git a/system/kustomize/metal-operator-remote/dealer-kubeconfig.yaml b/system/kustomize/metal-operator-remote/rotate-kubeconfig.yaml similarity index 80% rename from system/kustomize/metal-operator-remote/dealer-kubeconfig.yaml rename to system/kustomize/metal-operator-remote/rotate-kubeconfig.yaml index cc5f3c836c4..2bb714ece15 100644 --- a/system/kustomize/metal-operator-remote/dealer-kubeconfig.yaml +++ b/system/kustomize/metal-operator-remote/rotate-kubeconfig.yaml @@ -1,12 +1,12 @@ apiVersion: v1 kind: Secret metadata: - name: metal-token-dealer-kubeconfig + name: metal-token-rotate-kubeconfig labels: resources.gardener.cloud/purpose: token-requestor resources.gardener.cloud/class: shoot annotations: - serviceaccount.resources.gardener.cloud/name: metal-token-dealer + serviceaccount.resources.gardener.cloud/name: metal-token-rotate serviceaccount.resources.gardener.cloud/namespace: kube-system stringData: kubeconfig: | @@ -19,13 +19,13 @@ stringData: contexts: - context: cluster: remote-cluster - user: metal-token-dealer + user: metal-token-rotate namespace: kube-system name: remote-cluster current-context: remote-cluster kind: Config preferences: {} users: - - name: metal-token-dealer + - name: metal-token-rotate user: token: "" diff --git a/system/metal-operator-remote/Chart.yaml b/system/metal-operator-remote/Chart.yaml index 44693d37c0d..b99449856d1 100644 --- a/system/metal-operator-remote/Chart.yaml +++ b/system/metal-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.0 +version: 0.2.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
diff --git a/system/metal-operator-remote/managedresources/rbac.yaml b/system/metal-operator-remote/managedresources/rbac.yaml index 04944db124c..037a69aa814 100644 --- a/system/metal-operator-remote/managedresources/rbac.yaml +++ b/system/metal-operator-remote/managedresources/rbac.yaml @@ -16,13 +16,13 @@ subjects: apiVersion: v1 kind: ServiceAccount metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: metal-token-dealer + name: metal-token-rotate rules: - apiGroups: - "authentication.k8s.io" @@ -34,20 +34,20 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system rules: - apiGroups: @@ -62,13 +62,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: metal-token-rotate namespace: kube-system diff --git a/system/metal-operator-remote/templates/dealer-kubeconfig.yaml b/system/metal-operator-remote/templates/rotate-kubeconfig.yaml similarity index 80% rename from system/metal-operator-remote/templates/dealer-kubeconfig.yaml rename to system/metal-operator-remote/templates/rotate-kubeconfig.yaml index cc5f3c836c4..2bb714ece15 100644 --- a/system/metal-operator-remote/templates/dealer-kubeconfig.yaml +++ b/system/metal-operator-remote/templates/rotate-kubeconfig.yaml @@ -1,12 +1,12 @@ apiVersion: v1 kind: Secret metadata: - name: metal-token-dealer-kubeconfig + name: metal-token-rotate-kubeconfig labels: resources.gardener.cloud/purpose: token-requestor resources.gardener.cloud/class: shoot annotations: - serviceaccount.resources.gardener.cloud/name: metal-token-dealer + serviceaccount.resources.gardener.cloud/name: metal-token-rotate serviceaccount.resources.gardener.cloud/namespace: kube-system stringData: kubeconfig: | @@ -19,13 +19,13 @@ stringData: contexts: - context: cluster: remote-cluster - user: metal-token-dealer + user: metal-token-rotate namespace: kube-system name: remote-cluster current-context: remote-cluster kind: Config preferences: {} users: - - name: metal-token-dealer + - name: metal-token-rotate user: token: "" diff --git a/system/metal-token-dealer/Chart.lock b/system/metal-token-rotate/Chart.lock similarity index 100% rename from system/metal-token-dealer/Chart.lock rename to system/metal-token-rotate/Chart.lock diff --git a/system/metal-token-dealer/Chart.yaml b/system/metal-token-rotate/Chart.yaml similarity index 64% rename from system/metal-token-dealer/Chart.yaml rename to system/metal-token-rotate/Chart.yaml index 652f615c321..a91e4082635 100644 --- a/system/metal-token-dealer/Chart.yaml +++ b/system/metal-token-rotate/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -name: metal-token-dealer -description: A Helm chart for metal-token-dealer +name: metal-token-rotate +description: A Helm chart for metal-token-rotate type: application 
-version: 0.1.1 +version: 0.1.0 appVersion: "0.1.0" dependencies: - name: owner-info diff --git a/system/metal-token-dealer/ci/test-values.yaml b/system/metal-token-rotate/ci/test-values.yaml similarity index 100% rename from system/metal-token-dealer/ci/test-values.yaml rename to system/metal-token-rotate/ci/test-values.yaml diff --git a/system/metal-token-dealer/files/rbac.yaml b/system/metal-token-rotate/files/rbac.yaml similarity index 77% rename from system/metal-token-dealer/files/rbac.yaml rename to system/metal-token-rotate/files/rbac.yaml index 1b68268d3f0..040c60fb5c0 100644 --- a/system/metal-token-dealer/files/rbac.yaml +++ b/system/metal-token-rotate/files/rbac.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: metal-token-dealer + name: metal-token-rotate rules: - apiGroups: - "" @@ -17,12 +17,12 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: metal-token-rotate namespace: garden diff --git a/system/metal-token-dealer/templates/configmap.yaml b/system/metal-token-rotate/templates/configmap.yaml similarity index 93% rename from system/metal-token-dealer/templates/configmap.yaml rename to system/metal-token-rotate/templates/configmap.yaml index 0e18203cff3..1f9a1a4ba8f 100644 --- a/system/metal-token-dealer/templates/configmap.yaml +++ b/system/metal-token-rotate/templates/configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: metal-token-dealer + name: metal-token-rotate data: config.json: | { diff --git a/system/metal-token-dealer/templates/deployment.yaml b/system/metal-token-rotate/templates/deployment.yaml similarity index 81% rename from system/metal-token-dealer/templates/deployment.yaml rename to system/metal-token-rotate/templates/deployment.yaml index 1dea92c8f92..15da394f76c 100644 --- a/system/metal-token-dealer/templates/deployment.yaml +++ b/system/metal-token-rotate/templates/deployment.yaml @@ -1,18 +1,18 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: metal-token-dealer + name: metal-token-rotate labels: - app: metal-token-dealer + app: metal-token-rotate spec: replicas: 1 selector: matchLabels: - app: metal-token-dealer + app: metal-token-rotate template: metadata: labels: - app: metal-token-dealer + app: metal-token-rotate networking.gardener.cloud/to-dns: allowed networking.gardener.cloud/to-runtime-apiserver: allowed networking.resources.gardener.cloud/to-virtual-garden-kube-apiserver-tcp-443: allowed @@ -21,7 +21,7 @@ spec: {{- end }} spec: containers: - - name: metal-token-dealer + - name: metal-token-rotate image: {{ .Values.global.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} env: @@ -33,7 +33,7 @@ spec: {{- end }} volumeMounts: - name: config - mountPath: /etc/metal-token-dealer + mountPath: /etc/metal-token-rotate readOnly: true - name: gardener-client-ca mountPath: /var/run/garden/ca @@ -41,14 +41,14 @@ spec: - name: garden-token mountPath: /var/run/garden/token readOnly: true - serviceAccountName: metal-token-dealer + serviceAccountName: metal-token-rotate volumes: - name: config configMap: - name: metal-token-dealer + name: metal-token-rotate - name: gardener-client-ca secret: secretName: {{ .Values.gardener.clientCASecretName 
}} - name: garden-token secret: - secretName: metal-token-dealer-garden-token + secretName: metal-token-rotate-garden-token diff --git a/system/metal-token-dealer/templates/managedresource.yaml b/system/metal-token-rotate/templates/managedresource.yaml similarity index 77% rename from system/metal-token-dealer/templates/managedresource.yaml rename to system/metal-token-rotate/templates/managedresource.yaml index 33b965828ea..e935d11f115 100644 --- a/system/metal-token-dealer/templates/managedresource.yaml +++ b/system/metal-token-rotate/templates/managedresource.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Secret metadata: - name: metal-token-dealer-rbac + name: metal-token-rotate-rbac namespace: garden type: Opaque data: @@ -12,9 +12,9 @@ data: apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: garden spec: secretRefs: - - name: metal-token-dealer-rbac + - name: metal-token-rotate-rbac {{ end -}} diff --git a/system/metal-token-dealer/templates/rbac.yaml b/system/metal-token-rotate/templates/rbac.yaml similarity index 80% rename from system/metal-token-dealer/templates/rbac.yaml rename to system/metal-token-rotate/templates/rbac.yaml index 829c8adc4e0..0412fdc3c2b 100644 --- a/system/metal-token-dealer/templates/rbac.yaml +++ b/system/metal-token-rotate/templates/rbac.yaml @@ -2,14 +2,14 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: garden {{- if .Values.targetSecret }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: metal-token-dealer + name: metal-token-rotate rules: - apiGroups: - "" @@ -25,21 +25,21 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: metal-token-rotate namespace: garden {{- else }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: metal-token-dealer + name: metal-token-rotate rules: - apiGroups: - "authentication.k8s.io" @@ -51,20 +51,20 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: metal-token-rotate namespace: garden --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: {{ .Values.config.serviceAccountNamespace | quote }} rules: - apiGroups: @@ -79,14 +79,14 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: metal-token-dealer + name: metal-token-rotate namespace: {{ .Values.config.serviceAccountNamespace | quote }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: metal-token-dealer + name: metal-token-rotate subjects: - kind: ServiceAccount - name: metal-token-dealer + name: metal-token-rotate namespace: garden {{- end }} diff --git a/system/metal-token-dealer/templates/secret.yaml b/system/metal-token-rotate/templates/secret.yaml similarity index 72% rename from system/metal-token-dealer/templates/secret.yaml rename to system/metal-token-rotate/templates/secret.yaml index 
1c9cb7490ae..c32a6c4f95e 100644 --- a/system/metal-token-dealer/templates/secret.yaml +++ b/system/metal-token-rotate/templates/secret.yaml @@ -1,12 +1,12 @@ apiVersion: v1 kind: Secret metadata: - name: metal-token-dealer-garden-token + name: metal-token-rotate-garden-token labels: resources.gardener.cloud/purpose: token-requestor resources.gardener.cloud/class: shoot annotations: - serviceaccount.resources.gardener.cloud/name: metal-token-dealer + serviceaccount.resources.gardener.cloud/name: metal-token-rotate serviceaccount.resources.gardener.cloud/namespace: garden # data: # token: will be populated by the gardener-resource-manager diff --git a/system/metal-token-dealer/values.yaml b/system/metal-token-rotate/values.yaml similarity index 87% rename from system/metal-token-dealer/values.yaml rename to system/metal-token-rotate/values.yaml index 24b6be6cc0a..8fea24041f6 100644 --- a/system/metal-token-dealer/values.yaml +++ b/system/metal-token-rotate/values.yaml @@ -1,6 +1,6 @@ image: - repository: metal-token-dealer - tag: test + repository: metal-token-rotate + tag: main pullPolicy: IfNotPresent config: serviceAccountName: metal-operator @@ -13,7 +13,7 @@ managedResource: enabled: false targetSecret: "" # namespace/secret-name owner-info: - helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/system/metal-token-dealer + helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/system/metal-token-rotate maintainers: - Erik Schubert - Jan Knipper From ee1d3f8b33975d5a3d8a87c1e98f2c24a62816ed Mon Sep 17 00:00:00 2001 From: Vassil Dimitrov Date: Tue, 14 Jan 2025 12:34:41 +0100 Subject: [PATCH 142/224] [unbound][designate] moved the unbound alerts away from designate Unbound is a service of its own that has nothing to do with designate. The unbound alerts also needed adjusting now that the unbound services, and specifically their Prometheus configuration, have been reconfigured. --- .../designate/alerts/kubernetes/kube.alerts | 18 ----- .../designate/alerts/openstack/bind.alerts | 18 +++++ .../designate/alerts/openstack/unbound.alerts | 66 ------------------- system/unbound/alerts/unbound.alerts | 50 ++++++++++++++ 4 files changed, 68 insertions(+), 84 deletions(-) delete mode 100644 openstack/designate/alerts/kubernetes/kube.alerts create mode 100644 openstack/designate/alerts/openstack/bind.alerts delete mode 100644 openstack/designate/alerts/openstack/unbound.alerts create mode 100644 system/unbound/alerts/unbound.alerts diff --git a/openstack/designate/alerts/kubernetes/kube.alerts b/openstack/designate/alerts/kubernetes/kube.alerts deleted file mode 100644 index 21b6538b2f4..00000000000 --- a/openstack/designate/alerts/kubernetes/kube.alerts +++ /dev/null @@ -1,18 +0,0 @@ -groups: -- name: designate-unbound.alerts - rules: - - alert: OpenstackDesignateDnsUnboundEndpointNotAvailable - expr: max(kube_endpoint_address{namespace=~"dns-recursor"}) BY (region,endpoint) < 1 - for: 15m - labels: - context: unbound - dashboard: designate-unbound - meta: '{{ $labels.endpoint }}' - service: designate - severity: warning - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' - annotations: - description: 'DNS Unbound endpoint {{ $labels.endpoint }} not available in {{ $labels.region }} region.' - summary: 'DNS Unbound endpoint {{ $labels.endpoint }} is not available. DNS resolution might be handled by another region.'
diff --git a/openstack/designate/alerts/openstack/bind.alerts b/openstack/designate/alerts/openstack/bind.alerts new file mode 100644 index 00000000000..739c90e7ead --- /dev/null +++ b/openstack/designate/alerts/openstack/bind.alerts @@ -0,0 +1,18 @@ +groups: +- name: designate-bind.alerts + rules: + - alert: OpenstackDesignateDnsBindDown + expr: max(bind_up) BY (region, kubernetes_name) < 1 + for: 10m + labels: + context: bind + dashboard: designate-bind + meta: '{{ $labels.kubernetes_name }}' + service: designate + severity: critical + support_group: network-api + tier: os + playbook: 'docs/devops/alert/designate/#OpenstackDesignateDnsBindDown' + annotations: + description: DNS Bind server {{ $labels.kubernetes_name }} down in region {{ $labels.region }}. + summary: DNS Bind server {{ $labels.kubernetes_name }} down. diff --git a/openstack/designate/alerts/openstack/unbound.alerts b/openstack/designate/alerts/openstack/unbound.alerts deleted file mode 100644 index 51c3863aa7e..00000000000 --- a/openstack/designate/alerts/openstack/unbound.alerts +++ /dev/null @@ -1,66 +0,0 @@ -groups: -- name: designate-unbound.alerts - rules: - - alert: OpenstackDesignateDnsBindDown - expr: max(bind_up) BY (region, kubernetes_name) < 1 - for: 10m - labels: - context: bind - dashboard: designate-bind - meta: '{{ $labels.kubernetes_name }}' - service: designate - severity: critical - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate/#OpenstackDesignateDnsBindDown' - annotations: - description: DNS Bind server {{ $labels.kubernetes_name }} down in region {{ $labels.region }}. - summary: DNS Bind server {{ $labels.kubernetes_name }} down. - - - alert: OpenstackDesignateDnsUnboundManySERVFAIL - expr: sum(delta(unbound_answer_rcodes_total{rcode="SERVFAIL"}[1h])) BY (region, kubernetes_name) > 500000 - for: 60m - labels: - context: unbound - dashboard: designate-unbound - meta: '{{ $labels.kubernetes_name }}' - service: designate - severity: info - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate' - annotations: - description: 'Recursor {{ $labels.kubernetes_name }} returns lots of SERVFAIL responses in {{ $labels.region }} region.' - summary: '{{ $labels.kubernetes_name }} returned a lot of SERVFAIL responses in the last hour. Check the logs.' - - - alert: OpenstackDesignateDnsUnbound1Down - expr: absent(unbound_up{kubernetes_name=~"unbound1"}) == 1 or unbound_up{kubernetes_name=~"unbound1"} != 1 - for: 30m - labels: - context: unbound - dashboard: designate-unbound - meta: unbound1 - service: designate - severity: warning - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' - annotations: - description: 'DNS Unbound1 recursor is down.' - summary: DNS Unbound1 recursor is down. DNS resolution might be handled by another region. - - - alert: OpenstackDesignateDnsUnbound2Down - expr: absent(unbound_up{kubernetes_name=~"unbound2"}) == 1 or unbound_up{kubernetes_name=~"unbound2"} != 1 - for: 30m - labels: - context: unbound - dashboard: designate-unbound - meta: unbound2 - service: designate - severity: warning - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' - annotations: - description: 'DNS Unbound2 recursor is down.' - summary: DNS Unbound2 recursor is down. DNS resolution might be handled by another region. 
diff --git a/system/unbound/alerts/unbound.alerts b/system/unbound/alerts/unbound.alerts
new file mode 100644
index 00000000000..1e70de5efea
--- /dev/null
+++ b/system/unbound/alerts/unbound.alerts
@@ -0,0 +1,50 @@
+groups:
+- name: unbound.alerts
+  rules:
+  - alert: DnsUnboundManySERVFAIL
+    expr: sum(delta(unbound_answer_rcodes_total{rcode="SERVFAIL"}[1h])) BY (region, app) > 500000
+    for: 60m
+    labels:
+      context: unbound
+      dashboard: dns-unbound-and-f5-performance
+      meta: '{{ $labels.app }}'
+      service: unbound
+      severity: info
+      support_group: network-api
+      tier: os
+      playbook: 'docs/devops/alert/designate'
+    annotations:
+      description: 'Recursor {{ $labels.app }} returns lots of SERVFAIL responses in {{ $labels.region }} region.'
+      summary: '{{ $labels.app }} returned a lot of SERVFAIL responses in the last hour. Check the logs.'
+
+  - alert: DnsUnbound1Down
+    expr: absent(unbound_up{app="unbound1"}) == 1 or unbound_up{app="unbound1"} != 1
+    for: 30m
+    labels:
+      context: unbound
+      dashboard: dns-unbound-and-f5-performance
+      meta: unbound1
+      service: unbound
+      severity: warning
+      support_group: network-api
+      tier: os
+      playbook: 'docs/devops/alert/designate/#test_unbound_endpoint'
+    annotations:
+      description: 'DNS Unbound1 recursor is down.'
+      summary: DNS Unbound1 recursor is down. DNS resolution might be handled by another region.
+
+  - alert: DnsUnbound2Down
+    expr: absent(unbound_up{app="unbound2"}) == 1 or unbound_up{app="unbound2"} != 1
+    for: 30m
+    labels:
+      context: unbound
+      dashboard: dns-unbound-and-f5-performance
+      meta: unbound2
+      service: unbound
+      severity: warning
+      support_group: network-api
+      tier: os
+      playbook: 'docs/devops/alert/designate/#test_unbound_endpoint'
+    annotations:
+      description: 'DNS Unbound2 recursor is down.'
+      summary: DNS Unbound2 recursor is down. DNS resolution might be handled by another region.

From d43397c10116b8199690a2298340e6e271660c82 Mon Sep 17 00:00:00 2001
From: Vlad Gusev
Date: Tue, 14 Jan 2025 13:46:57 +0200
Subject: [PATCH 143/224] [mariadb] Fixed mariadb and maria-backup service selector

* fixed service selector: added `app.kubernetes.io/instance` label so that the service targets a specific service instance
* chart version bumped
---
 common/mariadb/CHANGELOG.md           | 4 ++++
 common/mariadb/Chart.yaml             | 2 +-
 common/mariadb/templates/_helpers.tpl | 2 +-
 common/mariadb/templates/service.yaml | 4 +++-
 4 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/common/mariadb/CHANGELOG.md b/common/mariadb/CHANGELOG.md
index 570110ac1c3..2a7acb04e99 100644
--- a/common/mariadb/CHANGELOG.md
+++ b/common/mariadb/CHANGELOG.md
@@ -1,5 +1,9 @@
 # Changelog
 
+## v0.15.3 - 2025/01/14
+- fixed service selector: added `app.kubernetes.io/instance` label so that the service targets a specific service instance
+- chart version bumped
+
 ## v0.15.2 - 2024/11/29
 - `app` selector label returned, because deployment selector is immutable
 - chart version bumped
diff --git a/common/mariadb/Chart.yaml b/common/mariadb/Chart.yaml
index b6be870d57c..979062b3ed0 100644
--- a/common/mariadb/Chart.yaml
+++ b/common/mariadb/Chart.yaml
@@ -2,4 +2,4 @@
 apiVersion: v1
 description: A Helm chart for Kubernetes
 name: mariadb
-version: 0.15.2
+version: 0.15.3
diff --git a/common/mariadb/templates/_helpers.tpl b/common/mariadb/templates/_helpers.tpl
index 88b007ec9c2..98e3f86c812 100644
--- a/common/mariadb/templates/_helpers.tpl
+++ b/common/mariadb/templates/_helpers.tpl
@@ -117,4 +117,4 @@ helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }}
 {{- $type := index .
1 }} {{- $function := index . 2 }} {{- $component }}-{{ $type }}-{{ $function }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/common/mariadb/templates/service.yaml b/common/mariadb/templates/service.yaml index 2730c28d9bc..4edca1d4ca6 100644 --- a/common/mariadb/templates/service.yaml +++ b/common/mariadb/templates/service.yaml @@ -1,6 +1,6 @@ +--- kind: Service apiVersion: v1 - metadata: name: {{ include "fullName" . }} namespace: {{.Release.Namespace}} @@ -14,6 +14,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/component: {{ include "label.component" (list "mariadb" "deployment" "database") }} + app.kubernetes.io/instance: {{ $.Release.Name }}-{{ $.Chart.Name }} ports: - name: {{ include "fullName" . }} port: {{.Values.port_public}} @@ -37,6 +38,7 @@ metadata: spec: selector: app.kubernetes.io/component: {{ include "label.component" (list "mariadb" "deployment" "backup") }} + app.kubernetes.io/instance: {{ $.Release.Name }}-{{ $.Chart.Name }} ports: - port: 8081 name: http From 74d2d9484cb8173627fa3b76b9c92ba2e240e3e2 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Tue, 14 Jan 2025 13:28:50 +0100 Subject: [PATCH 144/224] Update prometheus-exporters/octobus-query-exporter/values.yaml Co-authored-by: Tommy Sauer --- prometheus-exporters/octobus-query-exporter/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prometheus-exporters/octobus-query-exporter/values.yaml b/prometheus-exporters/octobus-query-exporter/values.yaml index fd47540a194..30d001b7129 100644 --- a/prometheus-exporters/octobus-query-exporter/values.yaml +++ b/prometheus-exporters/octobus-query-exporter/values.yaml @@ -16,7 +16,7 @@ octobus_query_exporter: type: prometheus - name: scaleout type: thanos-ruler - - name: region + - name: regional type: thanos-ruler auditSources: enabled: false From b124d42fec4a4741478865e2218fa34492c8d972 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Tue, 14 Jan 2025 13:31:08 +0100 Subject: [PATCH 145/224] [octobus-query-exporter] remove moved alert from old location --- .../alerts/infra-frontend/logs.alerts | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts index 7527b57eeef..281145af42a 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts @@ -43,20 +43,6 @@ groups: description: "Octobus Keystone endpoint: `in-https` is down in region `{{ $labels.region }}`" summary: "Octobus Keystone endpoint: `in-https` is down in region `{{ $labels.region }}`" - - alert: OctobusJumpserverLogShipping - expr: label_replace(node_uname_info, "logs", "$0", "nodename", ".*") unless on (logs) (elasticsearch_octobus_jumpserver_logs_doc_count > 0) - for: 15m - labels: - severity: warning - service: audit - support_group: "observability" - meta: "No Jumpserver logs shipping to Octobus" - playbook: 'docs/support/playbook/opensearch/octobus/jumpserver-audit-logs-in-octobus-missing' - dashboard: audit-log-shipping - annotations: - description: "Jumpserver log shipping to Octobus not working for `{{ $labels.server_name }}`" - summary: "JumpserverLogs2Octobus not working" - - alert: OctobusJumpserverLogshipperEndpointDown expr: 
elasticsearch_octobus_jumpserver_logs_doc_count == 0 and on (region) {target=~"in-beats.*"} == 0 for: 15m From 2b56b336109e96fe83ff83f34e227005a4ed8a04 Mon Sep 17 00:00:00 2001 From: Dmitry Galkin Date: Tue, 14 Jan 2025 13:33:25 +0100 Subject: [PATCH 146/224] [designate] do not enforce_new_defaults. - otherwise all old policies stop working. --- openstack/designate/templates/etc/_designate.conf.tpl | 1 + 1 file changed, 1 insertion(+) diff --git a/openstack/designate/templates/etc/_designate.conf.tpl b/openstack/designate/templates/etc/_designate.conf.tpl index 7d0ea433f48..b8cb1d9e495 100644 --- a/openstack/designate/templates/etc/_designate.conf.tpl +++ b/openstack/designate/templates/etc/_designate.conf.tpl @@ -58,6 +58,7 @@ wsgi_default_pool_size = {{ .Values.wsgi_default_pool_size | default .Values.glo [oslo_policy] policy_file = policy.yaml +enforce_new_defaults = false [oslo_messaging_rabbit] heartbeat_in_pthread = false From 8408960a44f8e7fd805e621dc38d11bdf16d6176 Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Tue, 14 Jan 2025 14:18:14 +0200 Subject: [PATCH 147/224] [designate] bump chart dependencies * fixes mariadb service label selector * updates mysql-metrics * updates memcached and memcached-exporter * update pxc-db alerts with links to playbooks --- openstack/designate/Chart.lock | 12 ++++++------ openstack/designate/Chart.yaml | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/openstack/designate/Chart.lock b/openstack/designate/Chart.lock index beef2298a6e..f4744afc900 100644 --- a/openstack/designate/Chart.lock +++ b/openstack/designate/Chart.lock @@ -4,16 +4,16 @@ dependencies: version: 1.1.9 - name: pxc-db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.2.9 + version: 0.2.10 - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.15.2 + version: 0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.1 + version: 0.6.3 - name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.4.1 + version: 0.4.2 - name: rabbitmq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.12.1 @@ -29,5 +29,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:24f3a3722d109558c030acc84e5003cf44d77890fc8714fd067eb307c2df4189 -generated: "2025-01-06T15:22:41.779968+02:00" +digest: sha256:d5f9d9dfac9ec60d59da4f16a69450b6bd02f0a3cea49d1edbaf7f24a17f0b78 +generated: "2025-01-14T14:16:56.015529+02:00" diff --git a/openstack/designate/Chart.yaml b/openstack/designate/Chart.yaml index 9520b3e6ca5..4f40639724a 100644 --- a/openstack/designate/Chart.yaml +++ b/openstack/designate/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 description: A Helm chart for Kubernetes to deploy Openstack Designate (DNSaaS) name: designate -version: 0.4.9 +version: 0.4.10 appVersion: "xena" dependencies: - condition: percona_cluster.enabled @@ -13,18 +13,18 @@ dependencies: name: pxc-db alias: pxc_db repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.2.9 + version: 0.2.10 - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.15.2 + version: 0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.1 + version: 0.6.3 - condition: mysql_metrics.enabled name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.4.1 + version: 0.4.2 - name: rabbitmq condition: 
rabbitmq.enabled repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From d107f5f3c662df04dd37fd8fdb6b921630e0f62f Mon Sep 17 00:00:00 2001 From: Tommy Sauer Date: Tue, 14 Jan 2025 13:42:37 +0100 Subject: [PATCH 148/224] bump thanos and prometheus version (#7661) this is the bump prior to prometheus 3.x to ensure backwards tsdb compatibility --- common/prometheus-server-pre7/CHANGELOG.md | 5 +++++ common/prometheus-server-pre7/Chart.yaml | 4 ++-- common/prometheus-server-pre7/values.yaml | 4 ++-- common/prometheus-server/CHANGELOG.md | 5 +++++ common/prometheus-server/Chart.yaml | 4 ++-- common/prometheus-server/values.yaml | 2 +- 6 files changed, 17 insertions(+), 7 deletions(-) diff --git a/common/prometheus-server-pre7/CHANGELOG.md b/common/prometheus-server-pre7/CHANGELOG.md index 20e1db3bebb..2e57a4d831f 100644 --- a/common/prometheus-server-pre7/CHANGELOG.md +++ b/common/prometheus-server-pre7/CHANGELOG.md @@ -1,3 +1,8 @@ +## 6.7.3 + +* Prometheus bump to v2.55.1 +* Thanos bump to v0.37.2 + ## 6.6.2 * another fix for ThanosStoreSeriesGateLatencyHigh diff --git a/common/prometheus-server-pre7/Chart.yaml b/common/prometheus-server-pre7/Chart.yaml index 40bf295453a..b49ce56d160 100644 --- a/common/prometheus-server-pre7/Chart.yaml +++ b/common/prometheus-server-pre7/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 description: Prometheus via operator. name: prometheus-server-pre7 -version: 6.7.2 -appVersion: v2.54.1 +version: 6.7.3 +appVersion: v2.55.1 icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png maintainers: - name: viennaa diff --git a/common/prometheus-server-pre7/values.yaml b/common/prometheus-server-pre7/values.yaml index 80046fd999c..f121834e225 100644 --- a/common/prometheus-server-pre7/values.yaml +++ b/common/prometheus-server-pre7/values.yaml @@ -229,12 +229,12 @@ thanos: # See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec . spec: baseImage: keppel.eu-de-1.cloud.sap/ccloud-quay-mirror/thanos/thanos - version: v0.36.1 + version: v0.37.2 # Image for Thanos components. components: baseImage: keppel.eu-de-1.cloud.sap/ccloud-quay-mirror/thanos/thanos - version: v0.36.1 + version: v0.37.2 # Specification for Thanos components. compactor: diff --git a/common/prometheus-server/CHANGELOG.md b/common/prometheus-server/CHANGELOG.md index fa5c3d61482..656bec6bf34 100644 --- a/common/prometheus-server/CHANGELOG.md +++ b/common/prometheus-server/CHANGELOG.md @@ -1,3 +1,8 @@ +## 7.7.2 + +* Prometheus bump to v2.55.1 +* Thanos bump to v0.37.2 + ## 7.7.1 * Fix scraping of apiserver_request_duration_seconts histogram diff --git a/common/prometheus-server/Chart.yaml b/common/prometheus-server/Chart.yaml index 6ccafda2b99..315cfb9efd2 100644 --- a/common/prometheus-server/Chart.yaml +++ b/common/prometheus-server/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 description: Prometheus via operator. name: prometheus-server -version: 7.7.1 -appVersion: v2.54.1 +version: 7.7.2 +appVersion: v2.55.1 icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png maintainers: - name: Tommy Sauer (viennaa) diff --git a/common/prometheus-server/values.yaml b/common/prometheus-server/values.yaml index 1efd7b7359c..ebeb3631d92 100644 --- a/common/prometheus-server/values.yaml +++ b/common/prometheus-server/values.yaml @@ -227,7 +227,7 @@ thanos: # See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec . 
spec: baseImage: keppel.eu-de-1.cloud.sap/ccloud-quay-mirror/thanos/thanos - version: v0.36.1 + version: v0.37.2 # Being one of debug, info, warn, error. Defaults to warn. # logLevel: info From bbe3cee07f83a0615a40539e727617946484df04 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Tue, 14 Jan 2025 14:03:11 +0100 Subject: [PATCH 149/224] [octobus-exporter] bump main chart --- prometheus-exporters/octobus-query-exporter/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prometheus-exporters/octobus-query-exporter/Chart.yaml b/prometheus-exporters/octobus-query-exporter/Chart.yaml index d2af21eb86a..8c19cbf1b66 100644 --- a/prometheus-exporters/octobus-query-exporter/Chart.yaml +++ b/prometheus-exporters/octobus-query-exporter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 1.0.16 +version: 1.0.17 name: octobus-query-exporter description: Elasticsearch prometheus query exporter maintainers: From ecbe9ec58641e26c62f6dce5680452aac2beee0e Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Tue, 14 Jan 2025 14:07:19 +0100 Subject: [PATCH 150/224] [octobus-query-exporter] fix jumpserver alert, mv alert to regional thanos ruler (#7653) * [octobus-query-exporter] fix jumpserver alert, mv alert to \regional thanos-ruler Co-authored-by: Tommy Sauer --- .../octobus-query-exporter/Chart.lock | 6 +++--- .../octobus-query-exporter/Chart.yaml | 4 ++-- .../octobus-query-exporter/values.yaml | 2 ++ .../vendor/octobus-query-exporter/Chart.yaml | 2 +- .../alerts/infra-frontend/logs.alerts | 14 -------------- .../alerts/region/jump.alerts | 16 ++++++++++++++++ 6 files changed, 24 insertions(+), 20 deletions(-) create mode 100644 prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts diff --git a/prometheus-exporters/octobus-query-exporter/Chart.lock b/prometheus-exporters/octobus-query-exporter/Chart.lock index 30500764e06..88f86441b33 100644 --- a/prometheus-exporters/octobus-query-exporter/Chart.lock +++ b/prometheus-exporters/octobus-query-exporter/Chart.lock @@ -1,7 +1,7 @@ dependencies: - name: octobus-query-exporter repository: file://vendor/octobus-query-exporter - version: 1.0.13 + version: 1.0.14 - name: octobus-query-exporter-global repository: file://vendor/octobus-query-exporter-global version: 1.0.12 @@ -11,5 +11,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:91c64bcc894bb31e73411e4ff1eb54bfb3d6593ff0da7815873795eda85b0106 -generated: "2024-06-24T09:31:18.092895+02:00" +digest: sha256:92ec8a429d2bcc6045cd5fe9780e83046ea503e342dc111245d8568b663d4663 +generated: "2025-01-13T16:32:28.384656+01:00" diff --git a/prometheus-exporters/octobus-query-exporter/Chart.yaml b/prometheus-exporters/octobus-query-exporter/Chart.yaml index 3e60ad4fd3a..8c19cbf1b66 100644 --- a/prometheus-exporters/octobus-query-exporter/Chart.yaml +++ b/prometheus-exporters/octobus-query-exporter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 1.0.16 +version: 1.0.17 name: octobus-query-exporter description: Elasticsearch prometheus query exporter maintainers: @@ -9,7 +9,7 @@ dependencies: - name: octobus-query-exporter alias: octobus_query_exporter repository: file://vendor/octobus-query-exporter - version: 1.0.13 + version: 1.0.14 condition: octobus_query_exporter.enabled - name: octobus-query-exporter-global diff --git a/prometheus-exporters/octobus-query-exporter/values.yaml b/prometheus-exporters/octobus-query-exporter/values.yaml index bb8d56745d6..30d001b7129 100644 
--- a/prometheus-exporters/octobus-query-exporter/values.yaml +++ b/prometheus-exporters/octobus-query-exporter/values.yaml @@ -16,6 +16,8 @@ octobus_query_exporter: type: prometheus - name: scaleout type: thanos-ruler + - name: regional + type: thanos-ruler auditSources: enabled: false listen_port: 9206 diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml index 154eaf07322..b067661925e 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 1.0.13 +version: 1.0.14 name: octobus-query-exporter description: Elasticsearch prometheus query exporter maintainers: diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts index 7527b57eeef..281145af42a 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/infra-frontend/logs.alerts @@ -43,20 +43,6 @@ groups: description: "Octobus Keystone endpoint: `in-https` is down in region `{{ $labels.region }}`" summary: "Octobus Keystone endpoint: `in-https` is down in region `{{ $labels.region }}`" - - alert: OctobusJumpserverLogShipping - expr: label_replace(node_uname_info, "logs", "$0", "nodename", ".*") unless on (logs) (elasticsearch_octobus_jumpserver_logs_doc_count > 0) - for: 15m - labels: - severity: warning - service: audit - support_group: "observability" - meta: "No Jumpserver logs shipping to Octobus" - playbook: 'docs/support/playbook/opensearch/octobus/jumpserver-audit-logs-in-octobus-missing' - dashboard: audit-log-shipping - annotations: - description: "Jumpserver log shipping to Octobus not working for `{{ $labels.server_name }}`" - summary: "JumpserverLogs2Octobus not working" - - alert: OctobusJumpserverLogshipperEndpointDown expr: elasticsearch_octobus_jumpserver_logs_doc_count == 0 and on (region) {target=~"in-beats.*"} == 0 for: 15m diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts new file mode 100644 index 00000000000..7e5a062218e --- /dev/null +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts @@ -0,0 +1,16 @@ +groups: + - name: jumplogs + rules: + - alert: OctobusJumpserverLogShipping + expr: label_replace(node_uname_info, "logs", "$0", "nodename", ".*") unless on (logs) (elasticsearch_octobus_jumpserver_logs_doc_count > 0) + for: 15m + labels: + severity: warning + service: audit + support_group: "observability" + meta: "No Jumpserver logs shipping to Octobus" + playbook: 'docs/support/playbook/opensearch/octobus/jumpserver-audit-logs-in-octobus-missing' + dashboard: audit-log-shipping + annotations: + description: "Jumpserver log shipping to Octobus not working for `{{ $labels.server_name }}`" + summary: "JumpserverLogs2Octobus not working" From 4663ed261e922ca3e5b889c26b4d8e0c0cdf4362 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Tue, 14 Jan 2025 14:17:25 +0100 Subject: [PATCH 151/224] 
[opensearch-logs] two more indexes for data retention 30d

---
 .../templates/config/_index-ism.json.tpl | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/system/opensearch-logs/templates/config/_index-ism.json.tpl b/system/opensearch-logs/templates/config/_index-ism.json.tpl
index 4e50624f093..9e48e6b7a9a 100644
--- a/system/opensearch-logs/templates/config/_index-ism.json.tpl
+++ b/system/opensearch-logs/templates/config/_index-ism.json.tpl
@@ -97,6 +97,18 @@
         "scaleout-*"
       ],
       "priority": 1
+    },
+    {
+      "index_patterns": [
+        "compute-*"
+      ],
+      "priority": 1
+    },
+    {
+      "index_patterns": [
+        "storage-*"
+      ],
+      "priority": 1
     }
   ]
 }

From 5a8e06904b1001b4b65ae025532c753c45f2920b Mon Sep 17 00:00:00 2001
From: Tommy Sauer
Date: Tue, 14 Jan 2025 14:29:17 +0100
Subject: [PATCH 152/224] bump thanos version (#7662)

---
 common/thanos/Chart.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/common/thanos/Chart.yaml b/common/thanos/Chart.yaml
index 305981acb68..c049b12e994 100644
--- a/common/thanos/Chart.yaml
+++ b/common/thanos/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
 name: thanos
 description: Deploy Thanos via operator
 type: application
-version: 1.1.2
-appVersion: v0.36.1
+version: 1.1.3
+appVersion: v0.37.2
 maintainers:
 - name: Tommy Sauer (viennaa)
 - name: Richard Tief (richardtief)

From cc1d93892b95dd499753366f765a8ae2c78b6c72 Mon Sep 17 00:00:00 2001
From: D071390
Date: Tue, 14 Jan 2025 14:31:10 +0100
Subject: [PATCH 153/224] [opensearch-logs] bump to 2.18.0

---
 system/opensearch-logs/Chart.lock  | 14 +++++++-------
 system/opensearch-logs/Chart.yaml  | 12 ++++++------
 system/opensearch-logs/values.yaml | 19 +++++++++----------
 3 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/system/opensearch-logs/Chart.lock b/system/opensearch-logs/Chart.lock
index f056f9a5a1f..1b8bc94e622 100644
--- a/system/opensearch-logs/Chart.lock
+++ b/system/opensearch-logs/Chart.lock
@@ -1,19 +1,19 @@
 dependencies:
 - name: opensearch
   repository: https://opensearch-project.github.io/helm-charts
-  version: 2.26.0
+  version: 2.30.0
 - name: opensearch
   repository: https://opensearch-project.github.io/helm-charts
-  version: 2.26.0
+  version: 2.30.0
 - name: opensearch
   repository: https://opensearch-project.github.io/helm-charts
-  version: 2.26.0
+  version: 2.30.0
 - name: opensearch
   repository: https://opensearch-project.github.io/helm-charts
-  version: 2.26.0
+  version: 2.30.0
 - name: opensearch-dashboards
   repository: https://opensearch-project.github.io/helm-charts
-  version: 2.24.0
+  version: 2.26.0
 - name: owner-info
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.2.0
@@ -26,5 +26,5 @@ dependencies:
 - name: linkerd-support
   repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm
   version: 0.1.3
-digest: sha256:0ad65d3c61f972860502cfa8c690436478e24a39e9e090d386a9c443034927d9
-generated: "2024-12-10T09:35:49.201933+01:00"
+digest: sha256:28a4aa7dfe49dab548b5748bcd9be65dab62fe37962594e6c44112c139ceee3e
+generated: "2025-01-14T14:30:25.223889+01:00"
diff --git a/system/opensearch-logs/Chart.yaml b/system/opensearch-logs/Chart.yaml
index c939f6dd7d0..3db8145bc31 100644
--- a/system/opensearch-logs/Chart.yaml
+++ b/system/opensearch-logs/Chart.yaml
@@ -1,34 +1,34 @@
 apiVersion: v2
 description: A Helm chart for the Opensearch stack
 name: opensearch-logs
-version: 0.0.31
+version: 0.0.32
 home: https://github.com/sapcc/helm-charts/tree/master/system/opensearch-logs
 dependencies:
 - name: opensearch
   alias: opensearch_master
   condition: opensearch_master.enabled
   repository:
https://opensearch-project.github.io/helm-charts - version: 2.26.0 + version: 2.30.0 - name: opensearch alias: opensearch_client condition: opensearch_client.enabled repository: https://opensearch-project.github.io/helm-charts - version: 2.26.0 + version: 2.30.0 - name: opensearch alias: opensearch_data condition: opensearch_data.enabled repository: https://opensearch-project.github.io/helm-charts - version: 2.26.0 + version: 2.30.0 - name: opensearch alias: opensearch_ml condition: opensearch_ml.enabled repository: https://opensearch-project.github.io/helm-charts - version: 2.26.0 + version: 2.30.0 - name: opensearch-dashboards alias: opensearch_dashboards condition: opensearch_dashboards.enabled repository: https://opensearch-project.github.io/helm-charts - version: 2.24.0 + version: 2.26.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/system/opensearch-logs/values.yaml b/system/opensearch-logs/values.yaml index a829902e49a..b7dc72098a1 100644 --- a/system/opensearch-logs/values.yaml +++ b/system/opensearch-logs/values.yaml @@ -11,7 +11,6 @@ global: index: ism_indexes: "logstash" schema_version: "1" - owner-info: support-group: observability @@ -40,7 +39,7 @@ opensearch_master: nodeGroup: "master" masterService: "opensearch-logs-master" image: - tag: 2.17.1 + tag: 2.18.0 roles: - master replicas: 3 @@ -67,12 +66,12 @@ opensearch_master: plugins: enabled: true installList: - - https://github.com/Aiven-Open/prometheus-exporter-plugin-for-opensearch/releases/download/2.17.1.0/prometheus-exporter-2.17.1.0.zip + - https://github.com/Virtimo/prometheus-exporter-plugin-for-opensearch/releases/download/v2.18.0/prometheus-exporter-2.18.0.0.zip opensearch_client: enabled: false image: - tag: 2.17.1 + tag: 2.18.0 replicas: 3 nameOverride: "opensearch-logs-client" fullnameOverride: "opensearch-logs-client" @@ -113,12 +112,12 @@ opensearch_client: plugins: enabled: true installList: - - https://github.com/Aiven-Open/prometheus-exporter-plugin-for-opensearch/releases/download/2.17.1.0/prometheus-exporter-2.17.1.0.zip + - https://github.com/Virtimo/prometheus-exporter-plugin-for-opensearch/releases/download/v2.18.0/prometheus-exporter-2.18.0.0.zip opensearch_data: enabled: false image: - tag: 2.17.1 + tag: 2.18.0 nameOverride: "opensearch-logs-data" fullnameOverride: "opensearch-logs-data" nodeGroup: "data" @@ -142,7 +141,7 @@ opensearch_data: plugins: enabled: true installList: - - https://github.com/Aiven-Open/prometheus-exporter-plugin-for-opensearch/releases/download/2.17.1.0/prometheus-exporter-2.17.1.0.zip + - https://github.com/Virtimo/prometheus-exporter-plugin-for-opensearch/releases/download/v2.18.0/prometheus-exporter-2.18.0.0.zip securityConfig: enabled: false path: "/usr/share/opensearch/config/opensearch-security" @@ -155,7 +154,7 @@ opensearch_data: opensearch_ml: enabled: false image: - tag: 2.17.1 + tag: 2.18.0 replicas: 2 nameOverride: "opensearch-logs-ml" fullnameOverride: "opensearch-logs-ml" @@ -196,12 +195,12 @@ opensearch_ml: plugins: enabled: true installList: - - https://github.com/Aiven-Open/prometheus-exporter-plugin-for-opensearch/releases/download/2.17.1.0/prometheus-exporter-2.17.1.0.zip + - https://github.com/Virtimo/prometheus-exporter-plugin-for-opensearch/releases/download/v2.18.0/prometheus-exporter-2.18.0.0.zip opensearch_dashboards: enabled: false image: - tag: "2.17.1" + tag: "2.18.0" fullnameOverride: opensearch-logs-dashboards nameOverride: opensearch-logs-dashboards serviceAccount: From 
a0404bf6e1c0285dd902b1ba92c629477c6e85df Mon Sep 17 00:00:00 2001 From: Dmitry Galkin Date: Tue, 14 Jan 2025 14:34:27 +0100 Subject: [PATCH 154/224] [designate] update healthprobe for Dalmatian --- openstack/designate/bin/health-probe.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openstack/designate/bin/health-probe.py b/openstack/designate/bin/health-probe.py index 55a6485180a..88aeb619246 100644 --- a/openstack/designate/bin/health-probe.py +++ b/openstack/designate/bin/health-probe.py @@ -161,8 +161,8 @@ def check_tcp_socket( sys.exit(1) # let's do the db check - # producer service doesn't have a db connection - if service not in ["producer"]: + # producer, mdns services do not have a direct db connection: + if service not in ["producer", "mdns"]: if d_ports and tcp_socket_status(proc, d_ports) == 0: sys.stderr.write(f"Database socket not established for service {proc}") # Do not kill the pod if database is not reachable/down From 0f7dfe720041f4c1af15f002f67ac76db73c7c4f Mon Sep 17 00:00:00 2001 From: Nuckal777 Date: Tue, 14 Jan 2025 16:01:10 +0100 Subject: [PATCH 155/224] Bump metal-token-rotate for upload --- system/metal-token-rotate/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/metal-token-rotate/Chart.yaml b/system/metal-token-rotate/Chart.yaml index a91e4082635..eb403bc7ebb 100644 --- a/system/metal-token-rotate/Chart.yaml +++ b/system/metal-token-rotate/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: metal-token-rotate description: A Helm chart for metal-token-rotate type: application -version: 0.1.0 +version: 0.1.1 appVersion: "0.1.0" dependencies: - name: owner-info From 558244caa876905336545b021472dfdd0bee12f9 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Tue, 14 Jan 2025 16:11:04 +0100 Subject: [PATCH 156/224] [octobus-exporter] rename alert folder --- .../alerts/{region => regional}/jump.alerts | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/{region => regional}/jump.alerts (100%) diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/regional/jump.alerts similarity index 100% rename from prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/region/jump.alerts rename to prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/alerts/regional/jump.alerts From 14ef17b36688f70b34051361ea5475bbd964a8fa Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Tue, 14 Jan 2025 16:12:53 +0100 Subject: [PATCH 157/224] [octobus-exporter] fix path --- prometheus-exporters/octobus-query-exporter/Chart.lock | 6 +++--- prometheus-exporters/octobus-query-exporter/Chart.yaml | 4 ++-- .../vendor/octobus-query-exporter/Chart.yaml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/prometheus-exporters/octobus-query-exporter/Chart.lock b/prometheus-exporters/octobus-query-exporter/Chart.lock index 88f86441b33..9958a4c5348 100644 --- a/prometheus-exporters/octobus-query-exporter/Chart.lock +++ b/prometheus-exporters/octobus-query-exporter/Chart.lock @@ -1,7 +1,7 @@ dependencies: - name: octobus-query-exporter repository: file://vendor/octobus-query-exporter - version: 1.0.14 + version: 1.0.15 - name: octobus-query-exporter-global repository: file://vendor/octobus-query-exporter-global version: 1.0.12 @@ -11,5 +11,5 @@ dependencies: - name: 
linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:92ec8a429d2bcc6045cd5fe9780e83046ea503e342dc111245d8568b663d4663 -generated: "2025-01-13T16:32:28.384656+01:00" +digest: sha256:1749fb8d5f348d2ff1248cd77ce751911fdb3ea35512ad97ed497638097d0480 +generated: "2025-01-14T16:12:28.428072+01:00" diff --git a/prometheus-exporters/octobus-query-exporter/Chart.yaml b/prometheus-exporters/octobus-query-exporter/Chart.yaml index 8c19cbf1b66..f58fc760981 100644 --- a/prometheus-exporters/octobus-query-exporter/Chart.yaml +++ b/prometheus-exporters/octobus-query-exporter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 1.0.17 +version: 1.0.18 name: octobus-query-exporter description: Elasticsearch prometheus query exporter maintainers: @@ -9,7 +9,7 @@ dependencies: - name: octobus-query-exporter alias: octobus_query_exporter repository: file://vendor/octobus-query-exporter - version: 1.0.14 + version: 1.0.15 condition: octobus_query_exporter.enabled - name: octobus-query-exporter-global diff --git a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml index b067661925e..e05f35aecdc 100644 --- a/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml +++ b/prometheus-exporters/octobus-query-exporter/vendor/octobus-query-exporter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 1.0.14 +version: 1.0.15 name: octobus-query-exporter description: Elasticsearch prometheus query exporter maintainers: From b22ee906cca494c416aac8b0879b589bc70d094f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?p=C3=BDrus?= Date: Tue, 14 Jan 2025 16:25:56 +0100 Subject: [PATCH 158/224] ceph: update rook from 1.16.0 to 1.16.1 (#7665) --- system/cc-ceph/Chart.lock | 6 +++--- system/cc-ceph/Chart.yaml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/system/cc-ceph/Chart.lock b/system/cc-ceph/Chart.lock index b3d632ad9b9..34c6194a781 100644 --- a/system/cc-ceph/Chart.lock +++ b/system/cc-ceph/Chart.lock @@ -4,9 +4,9 @@ dependencies: version: 1.0.0 - name: rook-ceph repository: https://charts.rook.io/release - version: v1.16.0 + version: v1.16.1 - name: rook-crds repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.0.2-rook.1.16.0 -digest: sha256:d9a8ec1509dcec3a634aada46cbf9434897bfa7bf85bad9adb8c6af157aa08d4 -generated: "2024-12-18T11:34:58.26072907+01:00" +digest: sha256:b9910a85ab182d38074a037622ae3ae55cde5cd035f9e8db197da004052264eb +generated: "2025-01-14T16:22:59.056441408+01:00" diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 4b952a804e7..ec98074e395 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.3 -appVersion: "1.16.0" +version: 1.1.4 +appVersion: "1.16.1" dependencies: - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm @@ -11,7 +11,7 @@ dependencies: - name: rook-ceph # version update should be done in the rook-crds chart as well repository: https://charts.rook.io/release - version: 1.16.0 + version: 1.16.1 - name: rook-crds repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: '0.0.2-rook.1.16.0' From 14a261d627a8379f28d7f71456304fb10b32cadb Mon Sep 17 00:00:00 2001 From: Vassil Dimitrov Date: Wed, 15 Jan 2025 07:52:26 +0100 Subject: [PATCH 159/224] 
[unbound] create proper PrometheusRule objects No need to be that dynamic and work with *.alert files. Also, recovered the UnboundEndpointNotAvailable alert (previously known as OpenstackDesignateDnsUnboundEndpointNotAvailable). --- system/unbound/alerts/unbound.alerts | 50 ---------- .../unbound/templates/prometheus-alerts.yaml | 93 +++++++++++++++++++ 2 files changed, 93 insertions(+), 50 deletions(-) delete mode 100644 system/unbound/alerts/unbound.alerts create mode 100644 system/unbound/templates/prometheus-alerts.yaml diff --git a/system/unbound/alerts/unbound.alerts b/system/unbound/alerts/unbound.alerts deleted file mode 100644 index 1e70de5efea..00000000000 --- a/system/unbound/alerts/unbound.alerts +++ /dev/null @@ -1,50 +0,0 @@ -groups: -- name: unbound.alerts - rules: - - alert: DnsUnboundManySERVFAIL - expr: sum(delta(unbound_answer_rcodes_total{rcode="SERVFAIL"}[1h])) BY (region, app) > 500000 - for: 60m - labels: - context: unbound - dashboard: dns-unbound-and-f5-performance - meta: '{{ $labels.app }}' - service: unbound - severity: info - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate' - annotations: - description: 'Recursor {{ $labels.app }} returns lots of SERVFAIL responses in {{ $labels.region }} region.' - summary: '{{ $labels.app }} returned a lot of SERVFAIL responses in the last hour. Check the logs.' - - - alert: DnsUnbound1Down - expr: absent(unbound_up{app="unbound1"}) == 1 or unbound_up{app="unbound1"} != 1 - for: 30m - labels: - context: unbound - dashboard: dns-unbound-and-f5-performance - meta: unbound1 - service: unbound - severity: warning - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' - annotations: - description: 'DNS Unbound1 recursor is down.' - summary: DNS Unbound1 recursor is down. DNS resolution might be handled by another region. - - - alert: DnsUnbound2Down - expr: absent(unbound_up{app="unbound2"}) == 1 or unbound_up{app="unbound2"} != 1 - for: 30m - labels: - context: unbound - dashboard: dns-unbound-and-f5-performance - meta: unbound2 - service: unbound - severity: warning - support_group: network-api - tier: os - playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' - annotations: - description: 'DNS Unbound2 recursor is down.' - summary: DNS Unbound2 recursor is down. DNS resolution might be handled by another region. diff --git a/system/unbound/templates/prometheus-alerts.yaml b/system/unbound/templates/prometheus-alerts.yaml new file mode 100644 index 00000000000..c41a23e5c3d --- /dev/null +++ b/system/unbound/templates/prometheus-alerts.yaml @@ -0,0 +1,93 @@ +--- +apiVersion: "monitoring.coreos.com/v1" +kind: "PrometheusRule" +metadata: + name: unbound-openstack-alerts + labels: + app: unbound + tier: os + type: alerting-rules + prometheus: openstack + +spec: + groups: + - name: unbound.alerts + rules: + - alert: DnsUnboundManySERVFAIL + expr: sum(delta(unbound_answer_rcodes_total{rcode="SERVFAIL"}[1h])) BY (region, app) > 500000 + for: 60m + labels: + context: unbound + dashboard: dns-unbound-and-f5-performance + meta: '{{ $labels.app }}' + service: unbound + severity: info + support_group: network-api + tier: os + playbook: 'docs/devops/alert/designate' + annotations: + description: 'Recursor {{ $labels.app }} returns lots of SERVFAIL responses in {{ $labels.region }} region.' + summary: '{{ $labels.app }} returned a lot of SERVFAIL responses in the last hour. Check the logs.' 
+ + - alert: DnsUnbound1Down + expr: absent(unbound_up{app="unbound1"}) == 1 or unbound_up{app="unbound1"} != 1 + for: 30m + labels: + context: unbound + dashboard: dns-unbound-and-f5-performance + meta: unbound1 + service: unbound + severity: warning + support_group: network-api + tier: os + playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' + annotations: + description: 'DNS Unbound1 recursor is down.' + summary: DNS Unbound1 recursor is down. DNS resolution might be handled by another region. + + - alert: DnsUnbound2Down + expr: absent(unbound_up{app="unbound2"}) == 1 or unbound_up{app="unbound2"} != 1 + for: 30m + labels: + context: unbound + dashboard: dns-unbound-and-f5-performance + meta: unbound2 + service: unbound + severity: warning + support_group: network-api + tier: os + playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' + annotations: + description: 'DNS Unbound2 recursor is down.' + summary: DNS Unbound2 recursor is down. DNS resolution might be handled by another region. + +--- +apiVersion: "monitoring.coreos.com/v1" +kind: "PrometheusRule" +metadata: + name: unbound-kubernetes-alerts + labels: + app: unbound + tier: os + type: alerting-rules + prometheus: openstack + +spec: + groups: + - name: unbound.alerts + rules: + - alert: DnsUnboundEndpointNotAvailable + expr: max(kube_endpoint_address{namespace="dns-recursor"}) BY (region,endpoint) < 1 + for: 15m + labels: + context: unbound + dashboard: dns-unbound-and-f5-performance + meta: '{{ $labels.endpoint }}' + service: unbound + severity: warning + support_group: network-api + tier: os + playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' + annotations: + description: 'DNS Unbound endpoint {{ $labels.endpoint }} not available in {{ $labels.region }} region.' + summary: 'DNS Unbound endpoint {{ $labels.endpoint }} is not available. DNS resolution might be handled by another region.' 
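Once rendered by Helm, these manifests become PrometheusRule custom resources that the prometheus-operator picks up on its own, so a deploy can be smoke-tested directly against the cluster. A minimal sketch, assuming kubectl access; the namespace is an assumption based on the `dns-recursor` namespace referenced by the endpoint alert expression:

```sh
# Confirm both rendered rule objects exist and carry the
# `prometheus: openstack` label the operator's rule selector matches on.
kubectl get prometheusrules -n dns-recursor \
  unbound-openstack-alerts unbound-kubernetes-alerts --show-labels
```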
From 6a42ded6c31e583f53ba93451ad030de8f58926a Mon Sep 17 00:00:00 2001 From: Tommy Sauer Date: Wed, 15 Jan 2025 10:11:39 +0100 Subject: [PATCH 160/224] bump prometheus and thanos (#7663) --- global/thanos-global/Chart.lock | 6 +++--- global/thanos-global/Chart.yaml | 4 ++-- openstack/prometheus-openstack/Chart.lock | 8 ++++---- openstack/prometheus-openstack/Chart.yaml | 6 +++--- system/infra-monitoring/Chart.lock | 8 ++++---- system/infra-monitoring/Chart.yaml | 6 +++--- system/prometheus-infra/Chart.lock | 8 ++++---- system/prometheus-infra/Chart.yaml | 6 +++--- system/prometheus-kubernetes/Chart.lock | 8 ++++---- system/prometheus-kubernetes/Chart.yaml | 6 +++--- system/storage-monitoring/Chart.lock | 8 ++++---- system/storage-monitoring/Chart.yaml | 6 +++--- system/thanos-metal/Chart.lock | 6 +++--- system/thanos-metal/Chart.yaml | 4 ++-- system/thanos-scaleout/Chart.lock | 6 +++--- system/thanos-scaleout/Chart.yaml | 4 ++-- system/vmware-monitoring/Chart.lock | 8 ++++---- system/vmware-monitoring/Chart.yaml | 6 +++--- 18 files changed, 57 insertions(+), 57 deletions(-) diff --git a/global/thanos-global/Chart.lock b/global/thanos-global/Chart.lock index 4777ad91375..ce7df320777 100644 --- a/global/thanos-global/Chart.lock +++ b/global/thanos-global/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:f1fa9d2f4e1a8f82f4db06e9b8331486ce92ba61212b8c1e1725e7d488aaa295 -generated: "2024-10-02T09:44:13.452518+02:00" +digest: sha256:8250c02591bc7a64f590c00b255767cdab1e5c86fa4780e9503c01d41febedd3 +generated: "2025-01-14T14:57:58.125209+01:00" diff --git a/global/thanos-global/Chart.yaml b/global/thanos-global/Chart.yaml index 7629bae2787..10ff020b74e 100644 --- a/global/thanos-global/Chart.yaml +++ b/global/thanos-global/Chart.yaml @@ -2,11 +2,11 @@ apiVersion: v2 name: thanos-global description: Deploy Thanos via operator type: application -version: 0.4.6 +version: 0.4.7 dependencies: - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/openstack/prometheus-openstack/Chart.lock b/openstack/prometheus-openstack/Chart.lock index 8061e3a180c..cf66ee1a27d 100644 --- a/openstack/prometheus-openstack/Chart.lock +++ b/openstack/prometheus-openstack/Chart.lock @@ -1,10 +1,10 @@ dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 @@ -14,5 +14,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:670aaf935ca7ab79e17131aa9b279bb256663821f5e6264ff732ad64eded298f -generated: "2024-10-02T09:44:18.770639+02:00" +digest: sha256:af93500f3120f644bb1836acd5cc23bda5edbfb2d395cc3ec39d0459029ab7b2 +generated: "2025-01-14T14:36:43.636533+01:00" diff --git a/openstack/prometheus-openstack/Chart.yaml b/openstack/prometheus-openstack/Chart.yaml index 81df95c4a81..af4d00d9311 100644 --- a/openstack/prometheus-openstack/Chart.yaml +++ b/openstack/prometheus-openstack/Chart.yaml @@ -1,15 +1,15 @@ apiVersion: v2 name: openstack-prometheus 
-version: 3.5.2 +version: 3.5.3 description: Prometheus Openstack Monitoring and Metrics Collection dependencies: - name: prometheus-server alias: openstack-prometheus repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/system/infra-monitoring/Chart.lock b/system/infra-monitoring/Chart.lock index 1bfe19090df..3223de7c8db 100644 --- a/system/infra-monitoring/Chart.lock +++ b/system/infra-monitoring/Chart.lock @@ -1,10 +1,10 @@ dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: blackbox-exporter repository: file://vendor/blackbox-exporter version: 0.1.0 @@ -26,5 +26,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:dd53a70f63ba4e12d3021ddf5eb8db31c4ba74b5e7bdd12e032f33b2297b4b98 -generated: "2024-10-02T09:44:22.712371+02:00" +digest: sha256:4319296481e163adf354490ce7ce2b03d0cc0897575438835e6516f9fa73e0be +generated: "2025-01-14T14:39:02.573974+01:00" diff --git a/system/infra-monitoring/Chart.yaml b/system/infra-monitoring/Chart.yaml index 01e7940ebee..96f439c9e64 100644 --- a/system/infra-monitoring/Chart.yaml +++ b/system/infra-monitoring/Chart.yaml @@ -1,17 +1,17 @@ apiVersion: v2 name: infra-monitoring -version: 2.4.5 +version: 2.4.6 description: Prometheus Infrastructure Monitoring and Metrics Collection dependencies: - name: prometheus-server alias: prometheus_infra_collector repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 condition: prometheus_infra_collector.enabled - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: blackbox-exporter alias: blackbox_exporter diff --git a/system/prometheus-infra/Chart.lock b/system/prometheus-infra/Chart.lock index 36d012e9495..4be27ec9b96 100644 --- a/system/prometheus-infra/Chart.lock +++ b/system/prometheus-infra/Chart.lock @@ -1,10 +1,10 @@ dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: interconnect-sre repository: file://vendor/interconnect-sre version: 0.1.0 @@ -14,5 +14,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:23aa5782814f09f1f7ee81d2d4115c9484f3deb4a36cf720aa7af28dad9c25c1 -generated: "2024-10-02T09:44:28.406066+02:00" +digest: sha256:511a9e797e711959703798b284897ba5d82549e7f4c27caa5850640c644be6a1 +generated: "2025-01-14T14:40:29.224587+01:00" diff --git a/system/prometheus-infra/Chart.yaml b/system/prometheus-infra/Chart.yaml index 7c31e18daef..b562f42f6da 100644 --- a/system/prometheus-infra/Chart.yaml +++ b/system/prometheus-infra/Chart.yaml @@ -1,17 +1,17 @@ apiVersion: v2 name: prometheus-infra description: Prometheus Infrastructure Monitoring - A Helm chart for the operated regional Prometheus Frontend for monitoring infrastructure. 
-version: 3.5.6 +version: 3.5.7 dependencies: - name: prometheus-server alias: prometheus-infra-frontend repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 condition: prometheus-infra-frontend.enabled - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: interconnect-sre alias: interconnect_sre diff --git a/system/prometheus-kubernetes/Chart.lock b/system/prometheus-kubernetes/Chart.lock index 55d3b9480c4..47c83bcc1f3 100644 --- a/system/prometheus-kubernetes/Chart.lock +++ b/system/prometheus-kubernetes/Chart.lock @@ -1,15 +1,15 @@ dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.1 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.2 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:48d9726425491f1da0872c6d690123b84ea10e066ca2950b2ad6deb3818f7fbf -generated: "2024-12-17T14:35:18.103276+01:00" +digest: sha256:4b94bfcee7ae89dde32277dfa44145b730cfd472df33f64b29b9abb571b13b46 +generated: "2025-01-14T14:32:19.197642+01:00" diff --git a/system/prometheus-kubernetes/Chart.yaml b/system/prometheus-kubernetes/Chart.yaml index 07e9c09ada7..ab3b8d259be 100644 --- a/system/prometheus-kubernetes/Chart.yaml +++ b/system/prometheus-kubernetes/Chart.yaml @@ -1,15 +1,15 @@ apiVersion: v2 description: Common Kubernetes Prometheus name: prometheus-kubernetes -version: 0.5.4 +version: 0.5.5 home: https://github.com/sapcc/helm-charts/tree/master/system/prometheus-kubernetes dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.1 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.2 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/system/storage-monitoring/Chart.lock b/system/storage-monitoring/Chart.lock index 5ccd5a9f2a6..43245b540ff 100644 --- a/system/storage-monitoring/Chart.lock +++ b/system/storage-monitoring/Chart.lock @@ -1,15 +1,15 @@ dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:b4380cd914b2ab0d46710b25d0e2ece48cf5becc1d1cd53841b26500980aaba7 -generated: "2024-10-02T09:44:33.159687+02:00" +digest: sha256:4b94bfcee7ae89dde32277dfa44145b730cfd472df33f64b29b9abb571b13b46 +generated: "2025-01-14T14:41:14.209819+01:00" diff --git a/system/storage-monitoring/Chart.yaml b/system/storage-monitoring/Chart.yaml index ff2eedae168..d0925187091 100644 --- a/system/storage-monitoring/Chart.yaml +++ b/system/storage-monitoring/Chart.yaml @@ -1,14 +1,14 @@ apiVersion: v2 name: storage-monitoring description: Prometheus and Thanos setup for netapp-exporter -version: 0.6.4 +version: 0.6.5 dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 
1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/system/thanos-metal/Chart.lock b/system/thanos-metal/Chart.lock index 1f25cabd420..fb2f88ea7a8 100644 --- a/system/thanos-metal/Chart.lock +++ b/system/thanos-metal/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:cd8a0ff6e78ec498cd0a6d1aaf555b78c8984832727683928d3bfb2cbfed82c5 -generated: "2024-10-02T09:44:35.60765+02:00" +digest: sha256:44a96930ff5f30ae66127cccd1867b691656a1f9aac7617b52ab7408650cbabd +generated: "2025-01-14T14:45:18.771991+01:00" diff --git a/system/thanos-metal/Chart.yaml b/system/thanos-metal/Chart.yaml index b24bdc01cf4..3ef1f8c8b80 100644 --- a/system/thanos-metal/Chart.yaml +++ b/system/thanos-metal/Chart.yaml @@ -2,11 +2,11 @@ apiVersion: v2 name: thanos-metal-and-regional description: Deploy Thanos metal and regional via operator type: application -version: 0.5.9 +version: 0.5.10 dependencies: - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: thanos alias: regional_thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm diff --git a/system/thanos-scaleout/Chart.lock b/system/thanos-scaleout/Chart.lock index 0a089ae106e..a57b9af846a 100644 --- a/system/thanos-scaleout/Chart.lock +++ b/system/thanos-scaleout/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:f1fa9d2f4e1a8f82f4db06e9b8331486ce92ba61212b8c1e1725e7d488aaa295 -generated: "2024-10-02T09:44:37.576572+02:00" +digest: sha256:8250c02591bc7a64f590c00b255767cdab1e5c86fa4780e9503c01d41febedd3 +generated: "2025-01-14T14:43:59.801543+01:00" diff --git a/system/thanos-scaleout/Chart.yaml b/system/thanos-scaleout/Chart.yaml index a9b5d24e110..25d7af6d96d 100644 --- a/system/thanos-scaleout/Chart.yaml +++ b/system/thanos-scaleout/Chart.yaml @@ -2,11 +2,11 @@ apiVersion: v2 name: thanos-scaleout description: Deploy Thanos via operator type: application -version: 0.5.9 +version: 0.5.10 dependencies: - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 diff --git a/system/vmware-monitoring/Chart.lock b/system/vmware-monitoring/Chart.lock index d9133ecf589..275151ae67e 100644 --- a/system/vmware-monitoring/Chart.lock +++ b/system/vmware-monitoring/Chart.lock @@ -1,15 +1,15 @@ dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:b4380cd914b2ab0d46710b25d0e2ece48cf5becc1d1cd53841b26500980aaba7 -generated: "2024-10-02T09:44:39.418675+02:00" +digest: sha256:4b94bfcee7ae89dde32277dfa44145b730cfd472df33f64b29b9abb571b13b46 +generated: 
"2025-01-14T14:41:43.018951+01:00" diff --git a/system/vmware-monitoring/Chart.yaml b/system/vmware-monitoring/Chart.yaml index 0e07bab094c..99d58daa601 100644 --- a/system/vmware-monitoring/Chart.yaml +++ b/system/vmware-monitoring/Chart.yaml @@ -1,14 +1,14 @@ apiVersion: v2 name: vmware-monitoring -version: 2.0.5 +version: 2.0.6 description: VMware Monitoring and Metrics Collection dependencies: - name: prometheus-server repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 7.7.0 + version: 7.7.2 - name: thanos repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.3 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 From 34a97943fccf128a61937f38ca28f1342809b91f Mon Sep 17 00:00:00 2001 From: Fabian Wiesel Date: Fri, 29 Nov 2024 12:39:09 +0100 Subject: [PATCH 161/224] [rabbitmq] Option to enable SSL for amqp server The option `enableSsl` configures a certificate with the same name as the rabbitmq with the prefix `tls-`, which will in turn generate a secret of the same name. The only other missing value is the name of the cluster-issuer to be specified with `certificate.issuerRef.name` Erlang should automatically reload the certificate, when the file changes. So, certificate rotation should also be handled. --- common/rabbitmq/CHANGELOG.md | 20 +++++++++++++ common/rabbitmq/Chart.yaml | 2 +- common/rabbitmq/ci/test-values.yaml | 14 ++++++++++ common/rabbitmq/templates/certificate.yaml | 28 +++++++++++++++++++ .../templates/custom-conf-configmap.yaml | 12 ++++++-- common/rabbitmq/templates/deployment.yaml | 24 +++++++++++++--- common/rabbitmq/templates/service.yaml | 8 ++++-- common/rabbitmq/templates/statefulset.yaml | 18 +++++++++++- common/rabbitmq/values.yaml | 14 +++++++++- 9 files changed, 129 insertions(+), 11 deletions(-) create mode 100644 common/rabbitmq/templates/certificate.yaml diff --git a/common/rabbitmq/CHANGELOG.md b/common/rabbitmq/CHANGELOG.md index af9339de89e..9ef6771e2c0 100644 --- a/common/rabbitmq/CHANGELOG.md +++ b/common/rabbitmq/CHANGELOG.md @@ -2,6 +2,26 @@ This file is used to list changes made in each version of the common chart rabbitmq. +## 0.13.0 +[@fwiesel](https://github.com/fwiesel) +- Add options to enable ssl in rabbitmq + +The following options need to be set: +```yaml +enableSsl: true +certificate: + issuerRef: + name: +externalNames: +- +``` + +The default is a `ClusterIssuer`, but it can be changed with the respective value +`certificate.issuerRef.kind` + +`externalNames` is optional, and specifies the SAN in the certificate. +It is imporant there, that all names entered are accepted by the certificate-issuer. 
+ ## 0.12.1 - `app` selector label returned, because deployment selector is immutable - chart version bumped diff --git a/common/rabbitmq/Chart.yaml b/common/rabbitmq/Chart.yaml index 28c9506f7e6..b7fbf5a4a78 100644 --- a/common/rabbitmq/Chart.yaml +++ b/common/rabbitmq/Chart.yaml @@ -1,7 +1,7 @@ --- apiVersion: v1 name: rabbitmq -version: 0.12.1 +version: 0.13.0 appVersion: 3.13.7 description: A Helm chart for RabbitMQ sources: diff --git a/common/rabbitmq/ci/test-values.yaml b/common/rabbitmq/ci/test-values.yaml index 7abee69f056..3dc26ab704d 100644 --- a/common/rabbitmq/ci/test-values.yaml +++ b/common/rabbitmq/ci/test-values.yaml @@ -4,6 +4,8 @@ global: user_suffix: "" master_password: "" dockerHubMirrorAlternateRegion: "other.dockerhub.mirror" + region: "region" + tld: "tld" ports: public: 5672 @@ -33,3 +35,15 @@ replicas: 1 metrics: enabled: false + + +externalIPs: + - "127.0.0.1" + +externalNames: + - "rabbitmq.example.com" + +enableSsl: true +certificate: + issuerRef: + name: "test-issuer" diff --git a/common/rabbitmq/templates/certificate.yaml b/common/rabbitmq/templates/certificate.yaml new file mode 100644 index 00000000000..07a11233134 --- /dev/null +++ b/common/rabbitmq/templates/certificate.yaml @@ -0,0 +1,28 @@ +{{- if .Values.enableSsl }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-{{ template "fullname" . }} + labels: + {{- include "rabbitmq.labels" (list $ "version" "rabbitmq" "deployment" "messagequeue") | indent 4 }} +spec: + secretName: tls-{{ template "fullname" . }} + secretTemplate: + labels: + {{- include "rabbitmq.labels" (list $ "version" "rabbitmq" "deployment" "messagequeue") | indent 6 }} + commonName: "{{ template "fullname" . }}.{{ .Release.Namespace }}.svc.kubernetes.{{ .Values.global.region | required "global.region missing" }}.{{ .Values.global.tld | required "global.tld missing" }}" + dnsNames: + - "{{ template "fullname" . }}.{{ .Release.Namespace }}.svc.kubernetes.{{ .Values.global.region | required "global.region missing" }}.{{ .Values.global.tld | required "global.tld missing" }}" + {{- if .Values.externalNames }} + {{- range .Values.externalNames }} + - "{{ . }}" + {{- end }} + {{- end }} + {{- if .Values.externalIPs }} + ipAddresses: + {{- range .Values.externalIPs }} + - "{{ . }}" + {{- end }} + {{- end }} + {{- .Values.certificate | toYaml | nindent 2 }} +{{- end }} diff --git a/common/rabbitmq/templates/custom-conf-configmap.yaml b/common/rabbitmq/templates/custom-conf-configmap.yaml index 567a1e0cb31..17f2b281c8f 100644 --- a/common/rabbitmq/templates/custom-conf-configmap.yaml +++ b/common/rabbitmq/templates/custom-conf-configmap.yaml @@ -1,5 +1,5 @@ --- -{{ if $.Values.customConfig }} +{{ if or $.Values.customConfig $.Values.enableSsl }} apiVersion: v1 kind: ConfigMap metadata: @@ -7,6 +7,14 @@ metadata: labels: {{- include "rabbitmq.labels" (list $ "version" "rabbitmq" "configmap" "messagequeue") | indent 4 }} data: + {{- if $.Values.customConfig }} 20-custom.conf: | -{{ include (print .Template.BasePath "/etc/_rabbitmq-custom-config.tpl") . | indent 4 }} + {{- include (print .Template.BasePath "/etc/_rabbitmq-custom-config.tpl") . 
| nindent 4 }} + {{- end }} + {{- if $.Values.enableSsl }} + 30-ssl.conf: | + listeners.ssl.default={{ $.Values.ports.amqps }} + ssl_options.certfile=/etc/rabbitmq/ssl/tls.crt + ssl_options.keyfile=/etc/rabbitmq/ssl/tls.key + {{- end }} {{ end }} diff --git a/common/rabbitmq/templates/deployment.yaml b/common/rabbitmq/templates/deployment.yaml index 95c1cebd33a..41de0e51901 100644 --- a/common/rabbitmq/templates/deployment.yaml +++ b/common/rabbitmq/templates/deployment.yaml @@ -106,12 +106,16 @@ spec: {{ toYaml .Values.resources | indent 10 }} ports: - name: public - containerPort: {{ default "5672" .Values.ports.public }} + containerPort: {{ .Values.ports.public }} - name: management - containerPort: {{ default "15672" .Values.ports.management }} + containerPort: {{ .Values.ports.management }} +{{- if .Values.enableSsl }} + - name: amqps + containerPort: {{ .Values.ports.amqps }} +{{- end }} {{- if .Values.metrics.enabled }} - name: metrics - containerPort: {{ default "15692" .Values.metrics.port }} + containerPort: {{ .Values.metrics.port }} {{- end }} volumeMounts: - mountPath: /var/lib/rabbitmq @@ -125,6 +129,13 @@ spec: name: rabbitmq-custom-config subPath: 20-custom.conf {{- end }} + {{- if .Values.enableSsl }} + - mountPath: /etc/rabbitmq/conf.d/30-ssl.conf + name: rabbitmq-custom-config + subPath: 30-ssl.conf + - mountPath: /etc/rabbitmq/ssl + name: ssl + {{- end }} priorityClassName: {{ .Values.priority_class | default "critical-infrastructure" | quote }} volumes: - name: rabbitmq-persistent-storage @@ -145,9 +156,14 @@ spec: sources: - secret: name: {{ template "fullname" . }}-users - {{- if .Values.customConfig }} + {{- if or .Values.customConfig .Values.enableSsl }} - name: rabbitmq-custom-config configMap: name: {{ template "fullname" . }}-custom-conf {{- end }} + {{- if .Values.enableSsl }} + - name: ssl + secret: + secretName: tls-{{ template "fullname" . }} + {{- end }} {{- end }} diff --git a/common/rabbitmq/templates/service.yaml b/common/rabbitmq/templates/service.yaml index c9c380e22ee..dcddbc1fb27 100644 --- a/common/rabbitmq/templates/service.yaml +++ b/common/rabbitmq/templates/service.yaml @@ -21,9 +21,13 @@ spec: {{- end }} ports: - name: public - port: {{ default 5672 .Values.ports.public }} + port: {{ .Values.ports.public }} - name: management - port: {{ default 15672 .Values.ports.management }} + port: {{ .Values.ports.management }} +{{- if .Values.enableSsl }} + - name: amqps + port: {{ .Values.ports.amqps }} +{{- end }} selector: app.kubernetes.io/instance: {{ template "fullname" . 
}} {{- if .Values.externalIPs }} diff --git a/common/rabbitmq/templates/statefulset.yaml b/common/rabbitmq/templates/statefulset.yaml index e9fee9a82a0..0c806910c9e 100644 --- a/common/rabbitmq/templates/statefulset.yaml +++ b/common/rabbitmq/templates/statefulset.yaml @@ -101,6 +101,10 @@ spec: containerPort: {{ default "5672" .Values.ports.public }} - name: management containerPort: {{ default "15672" .Values.ports.management }} +{{- if .Values.enableSsl }} + - name: amqps + containerPort: {{ .Values.ports.amqps }} +{{- end }} {{- if .Values.metrics.enabled }} - name: metrics containerPort: {{ default "15692" .Values.metrics.port }} @@ -117,6 +121,13 @@ spec: name: rabbitmq-custom-config subPath: 20-custom.conf {{- end }} + {{- if .Values.enableSsl }} + - mountPath: /etc/rabbitmq/conf.d/30-ssl.conf + name: rabbitmq-custom-config + subPath: 30-ssl.conf + - mountPath: /etc/rabbitmq/ssl + name: ssl + {{- end }} priorityClassName: {{ .Values.priority_class | default "critical-infrastructure" | quote }} volumes: {{- if not .Values.persistence.enabled }} @@ -131,11 +142,16 @@ spec: sources: - secret: name: {{ template "fullname" . }}-users - {{- if .Values.customConfig }} + {{- if or .Values.customConfig .Values.enableSsl }} - name: rabbitmq-custom-config configMap: name: {{ template "fullname" . }}-custom-conf {{- end }} + {{- if .Values.enableSsl }} + - name: ssl + secret: + secretName: tls-{{ template "fullname" . }} + {{- end }} {{- if .Values.persistence.enabled }} volumeClaimTemplates: - metadata: diff --git a/common/rabbitmq/values.yaml b/common/rabbitmq/values.yaml index 52af8897ec3..0a80e24e50c 100644 --- a/common/rabbitmq/values.yaml +++ b/common/rabbitmq/values.yaml @@ -19,6 +19,7 @@ imageTag: 3.13.7-management priority_class: "critical-infrastructure" ports: + amqps: 5671 public: 5672 management: 15672 @@ -117,7 +118,18 @@ linkerd: enabled: true # RabbitMQ custom configuration to be added under /etc/rabbitmq/conf.d/20-custom.conf -customConfig: +customConfig: {} # to set a custom limit please use the following format: 50MB or 1GB # if not set default value of 50MB will be used # disk_free_limit.absolute: 500MB + +enableSsl: false +certificate: + issuerRef: + name: nil + kind: "ClusterIssuer" + group: "cert-manager.io" + usages: + - digital signature + - key encipherment + - server auth From 4ef0384f0c0bb6a1f9fc2dcc02f20593b16a7fe9 Mon Sep 17 00:00:00 2001 From: Erik Schubert Date: Wed, 15 Jan 2025 13:27:04 +0100 Subject: [PATCH 162/224] Split up ManagedResources in gardener charts (#7655) * Split up ManagedResources in cc-gardener chart * Split up ManagedResources in system remote charts --- global/cc-gardener/Chart.yaml | 2 +- .../templates/managedresource.yaml | 37 ++++++++++--------- system/Makefile | 6 +-- system/boot-operator-remote/Chart.yaml | 2 +- .../templates/managedresource.yaml | 33 ++++++++++------- system/ipam-operator-remote/Chart.yaml | 2 +- .../templates/managedresource.yaml | 33 ++++++++++------- .../boot-operator-remote/managedresource.yaml | 33 ++++++++++------- .../ipam-operator-remote/managedresource.yaml | 33 ++++++++++------- .../managedresource.yaml | 33 ++++++++++------- system/metal-operator-remote/Chart.yaml | 2 +- .../templates/managedresource.yaml | 33 ++++++++++------- 12 files changed, 147 insertions(+), 102 deletions(-) diff --git a/global/cc-gardener/Chart.yaml b/global/cc-gardener/Chart.yaml index 6f18572b236..cf45607ffb6 100644 --- a/global/cc-gardener/Chart.yaml +++ b/global/cc-gardener/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-gardener 
description: Converged Cloud Gardener setup based on gardener-operator type: application -version: 0.6.2 +version: 0.7.0 appVersion: "v1.110.1" home: https://github.com/gardener/gardener dependencies: diff --git a/global/cc-gardener/templates/managedresource.yaml b/global/cc-gardener/templates/managedresource.yaml index 28bbf9e14f6..0b8c89e9bee 100644 --- a/global/cc-gardener/templates/managedresource.yaml +++ b/global/cc-gardener/templates/managedresource.yaml @@ -1,30 +1,33 @@ -{{ if .Values.garden.managedresources -}} +{{- define "mrname" }} +{{- /* use uppercase letters of kind as prefix to avoid some naming clashes */ -}} +{{- $kind := regexFindAll "[A-Z]" .kind -1 | join "" | lower }} +{{- $name := .metadata.name | replace "." "-" | replace ":" "-" -}} +mr-{{ $kind }}-{{ $name }} +{{- end }} + +{{- if .Values.garden.managedresources }} +{{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} +{{- range $_, $doc := (tpl ($.Files.Get $path) $ | splitList "---\n") }} +{{- $obj := $doc | fromYaml }} +{{- if not $obj }} +{{- continue }} +{{- end }} --- apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: cc-gardener - namespace: garden + name: {{ template "mrname" $obj }} spec: secretRefs: - - name: cc-gardener + - name: {{ template "mrname" $obj }} --- apiVersion: v1 kind: Secret metadata: - name: cc-gardener - namespace: garden + name: {{ template "mrname" $obj }} type: Opaque data: - # Cannot use .Files.AsSecrets because it would create a map - # of "file: base64 data" instead of concatenating the data - # and encoding that - objects.yaml: |- - {{- $combined := "" }} - {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} - {{- $fileContent := tpl ($.Files.Get $path) $ }} - {{- $combined = print $combined $fileContent "\n" }} - {{- end }} - {{- $encoded := $combined | b64enc }} - {{ $encoded }} + objects.yaml: {{ $doc | b64enc }} +{{- end }} +{{- end }} {{- end }} diff --git a/system/Makefile b/system/Makefile index bd113d451d4..ad22403ce28 100644 --- a/system/Makefile +++ b/system/Makefile @@ -129,7 +129,7 @@ build-metal-operator-remote: @yq -i '.fullnameOverride="metal-operator"' metal-operator-remote/values.yaml @yq -i '.remote.ca=""' metal-operator-remote/values.yaml @echo 'macdb: {}' >> metal-operator-remote/values.yaml - @yq -i '.version="0.2.1"' metal-operator-remote/Chart.yaml + @yq -i '.version="0.3.0"' metal-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' metal-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' metal-operator-remote/managedresources/kustomize.yaml @@ -151,7 +151,7 @@ build-boot-operator-remote: @kubectl kustomize kustomize/boot-operator-managedresources > boot-operator-remote/managedresources/kustomize.yaml @yq -i '.controllerManager.manager.image.tag="$(BOOT_OPERATOR_VERSION)"' boot-operator-remote/values.yaml @yq -i '.fullnameOverride="boot-operator"' boot-operator-remote/values.yaml - @yq -i '.version="0.2.0"' boot-operator-remote/Chart.yaml + @yq -i '.version="0.3.0"' boot-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' boot-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' boot-operator-remote/managedresources/kustomize.yaml @@ -188,7 +188,7 @@ build-ipam-operator-remote: @kubectl kustomize kustomize/ipam-operator-managedresources > ipam-operator-remote/managedresources/kustomize.yaml @yq -i 
'.controllerManager.manager.image.tag="$(IPAM_VERSION)"' ipam-operator-remote/values.yaml @yq -i '.fullnameOverride="ipam-operator"' ipam-operator-remote/values.yaml - @yq -i '.version="0.1.5"' ipam-operator-remote/Chart.yaml + @yq -i '.version="0.2.0"' ipam-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' ipam-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' ipam-operator-remote/managedresources/kustomize.yaml diff --git a/system/boot-operator-remote/Chart.yaml b/system/boot-operator-remote/Chart.yaml index 2b316aec17e..81a3d537650 100644 --- a/system/boot-operator-remote/Chart.yaml +++ b/system/boot-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.0 +version: 0.3.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/system/boot-operator-remote/templates/managedresource.yaml b/system/boot-operator-remote/templates/managedresource.yaml index 5c066f8d21c..f0ca1c59851 100644 --- a/system/boot-operator-remote/templates/managedresource.yaml +++ b/system/boot-operator-remote/templates/managedresource.yaml @@ -1,24 +1,31 @@ +{{- define "mrname" }} +{{- /* use uppercase letters of kind as prefix to avoid some naming clashes */ -}} +{{- $kind := regexFindAll "[A-Z]" .kind -1 | join "" | lower }} +{{- $name := .metadata.name | replace "." "-" | replace ":" "-" -}} +mr-{{ $kind }}-{{ $name }} +{{- end }} + +{{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} +{{- range $_, $doc := (tpl ($.Files.Get $path) $ | splitList "---\n") }} +{{- $obj := $doc | fromYaml }} +{{- if not $obj }} +{{- continue }} +{{- end }} +--- apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: {{ include "boot-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} spec: secretRefs: - - name: {{ include "boot-operator-remote.fullname" . }}-resources + - name: {{ template "mrname" $obj }} --- apiVersion: v1 kind: Secret metadata: - name: {{ include "boot-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} type: Opaque data: - # Cannot use .Files.AsSecrets because it would create a map - # of "file: base64 data" instead of concatenating the data - # and encoding that - objects.yaml: |- - {{- $combined := "" }} - {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} - {{- $combined = print $combined ($.Files.Get $path) "\n"}} - {{- end }} - {{- $encoded := $combined | b64enc }} - {{ $encoded }} + objects.yaml: {{ $doc | b64enc }} +{{- end }} +{{- end }} diff --git a/system/ipam-operator-remote/Chart.yaml b/system/ipam-operator-remote/Chart.yaml index 9346816a5a2..f169b8077dc 100644 --- a/system/ipam-operator-remote/Chart.yaml +++ b/system/ipam-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.5 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/system/ipam-operator-remote/templates/managedresource.yaml b/system/ipam-operator-remote/templates/managedresource.yaml index be285e8f073..f0ca1c59851 100644 --- a/system/ipam-operator-remote/templates/managedresource.yaml +++ b/system/ipam-operator-remote/templates/managedresource.yaml @@ -1,24 +1,31 @@ +{{- define "mrname" }} +{{- /* use uppercase letters of kind as prefix to avoid some naming clashes */ -}} +{{- $kind := regexFindAll "[A-Z]" .kind -1 | join "" | lower }} +{{- $name := .metadata.name | replace "." "-" | replace ":" "-" -}} +mr-{{ $kind }}-{{ $name }} +{{- end }} + +{{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} +{{- range $_, $doc := (tpl ($.Files.Get $path) $ | splitList "---\n") }} +{{- $obj := $doc | fromYaml }} +{{- if not $obj }} +{{- continue }} +{{- end }} +--- apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: {{ include "ipam-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} spec: secretRefs: - - name: {{ include "ipam-operator-remote.fullname" . }}-resources + - name: {{ template "mrname" $obj }} --- apiVersion: v1 kind: Secret metadata: - name: {{ include "ipam-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} type: Opaque data: - # Cannot use .Files.AsSecrets because it would create a map - # of "file: base64 data" instead of concatenating the data - # and encoding that - objects.yaml: |- - {{- $combined := "" }} - {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} - {{- $combined = print $combined ($.Files.Get $path) "\n"}} - {{- end }} - {{- $encoded := $combined | b64enc }} - {{ $encoded }} + objects.yaml: {{ $doc | b64enc }} +{{- end }} +{{- end }} diff --git a/system/kustomize/boot-operator-remote/managedresource.yaml b/system/kustomize/boot-operator-remote/managedresource.yaml index 5c066f8d21c..f0ca1c59851 100644 --- a/system/kustomize/boot-operator-remote/managedresource.yaml +++ b/system/kustomize/boot-operator-remote/managedresource.yaml @@ -1,24 +1,31 @@ +{{- define "mrname" }} +{{- /* use uppercase letters of kind as prefix to avoid some naming clashes */ -}} +{{- $kind := regexFindAll "[A-Z]" .kind -1 | join "" | lower }} +{{- $name := .metadata.name | replace "." "-" | replace ":" "-" -}} +mr-{{ $kind }}-{{ $name }} +{{- end }} + +{{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} +{{- range $_, $doc := (tpl ($.Files.Get $path) $ | splitList "---\n") }} +{{- $obj := $doc | fromYaml }} +{{- if not $obj }} +{{- continue }} +{{- end }} +--- apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: {{ include "boot-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} spec: secretRefs: - - name: {{ include "boot-operator-remote.fullname" . }}-resources + - name: {{ template "mrname" $obj }} --- apiVersion: v1 kind: Secret metadata: - name: {{ include "boot-operator-remote.fullname" . 
}}-resources + name: {{ template "mrname" $obj }} type: Opaque data: - # Cannot use .Files.AsSecrets because it would create a map - # of "file: base64 data" instead of concatenating the data - # and encoding that - objects.yaml: |- - {{- $combined := "" }} - {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} - {{- $combined = print $combined ($.Files.Get $path) "\n"}} - {{- end }} - {{- $encoded := $combined | b64enc }} - {{ $encoded }} + objects.yaml: {{ $doc | b64enc }} +{{- end }} +{{- end }} diff --git a/system/kustomize/ipam-operator-remote/managedresource.yaml b/system/kustomize/ipam-operator-remote/managedresource.yaml index be285e8f073..f0ca1c59851 100644 --- a/system/kustomize/ipam-operator-remote/managedresource.yaml +++ b/system/kustomize/ipam-operator-remote/managedresource.yaml @@ -1,24 +1,31 @@ +{{- define "mrname" }} +{{- /* use uppercase letters of kind as prefix to avoid some naming clashes */ -}} +{{- $kind := regexFindAll "[A-Z]" .kind -1 | join "" | lower }} +{{- $name := .metadata.name | replace "." "-" | replace ":" "-" -}} +mr-{{ $kind }}-{{ $name }} +{{- end }} + +{{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} +{{- range $_, $doc := (tpl ($.Files.Get $path) $ | splitList "---\n") }} +{{- $obj := $doc | fromYaml }} +{{- if not $obj }} +{{- continue }} +{{- end }} +--- apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: {{ include "ipam-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} spec: secretRefs: - - name: {{ include "ipam-operator-remote.fullname" . }}-resources + - name: {{ template "mrname" $obj }} --- apiVersion: v1 kind: Secret metadata: - name: {{ include "ipam-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} type: Opaque data: - # Cannot use .Files.AsSecrets because it would create a map - # of "file: base64 data" instead of concatenating the data - # and encoding that - objects.yaml: |- - {{- $combined := "" }} - {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} - {{- $combined = print $combined ($.Files.Get $path) "\n"}} - {{- end }} - {{- $encoded := $combined | b64enc }} - {{ $encoded }} + objects.yaml: {{ $doc | b64enc }} +{{- end }} +{{- end }} diff --git a/system/kustomize/metal-operator-remote/managedresource.yaml b/system/kustomize/metal-operator-remote/managedresource.yaml index a6f83ecab55..f0ca1c59851 100644 --- a/system/kustomize/metal-operator-remote/managedresource.yaml +++ b/system/kustomize/metal-operator-remote/managedresource.yaml @@ -1,24 +1,31 @@ +{{- define "mrname" }} +{{- /* use uppercase letters of kind as prefix to avoid some naming clashes */ -}} +{{- $kind := regexFindAll "[A-Z]" .kind -1 | join "" | lower }} +{{- $name := .metadata.name | replace "." "-" | replace ":" "-" -}} +mr-{{ $kind }}-{{ $name }} +{{- end }} + +{{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} +{{- range $_, $doc := (tpl ($.Files.Get $path) $ | splitList "---\n") }} +{{- $obj := $doc | fromYaml }} +{{- if not $obj }} +{{- continue }} +{{- end }} +--- apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: {{ include "metal-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} spec: secretRefs: - - name: {{ include "metal-operator-remote.fullname" . }}-resources + - name: {{ template "mrname" $obj }} --- apiVersion: v1 kind: Secret metadata: - name: {{ include "metal-operator-remote.fullname" . 
}}-resources + name: {{ template "mrname" $obj }} type: Opaque data: - # Cannot use .Files.AsSecrets because it would create a map - # of "file: base64 data" instead of concatenating the data - # and encoding that - objects.yaml: |- - {{- $combined := "" }} - {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} - {{- $combined = print $combined ($.Files.Get $path) "\n"}} - {{- end }} - {{- $encoded := $combined | b64enc }} - {{ $encoded }} + objects.yaml: {{ $doc | b64enc }} +{{- end }} +{{- end }} diff --git a/system/metal-operator-remote/Chart.yaml b/system/metal-operator-remote/Chart.yaml index b99449856d1..26887db1719 100644 --- a/system/metal-operator-remote/Chart.yaml +++ b/system/metal-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.1 +version: 0.3.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/system/metal-operator-remote/templates/managedresource.yaml b/system/metal-operator-remote/templates/managedresource.yaml index a6f83ecab55..f0ca1c59851 100644 --- a/system/metal-operator-remote/templates/managedresource.yaml +++ b/system/metal-operator-remote/templates/managedresource.yaml @@ -1,24 +1,31 @@ +{{- define "mrname" }} +{{- /* use uppercase letters of kind as prefix to avoid some naming clashes */ -}} +{{- $kind := regexFindAll "[A-Z]" .kind -1 | join "" | lower }} +{{- $name := .metadata.name | replace "." "-" | replace ":" "-" -}} +mr-{{ $kind }}-{{ $name }} +{{- end }} + +{{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} +{{- range $_, $doc := (tpl ($.Files.Get $path) $ | splitList "---\n") }} +{{- $obj := $doc | fromYaml }} +{{- if not $obj }} +{{- continue }} +{{- end }} +--- apiVersion: resources.gardener.cloud/v1alpha1 kind: ManagedResource metadata: - name: {{ include "metal-operator-remote.fullname" . }}-resources + name: {{ template "mrname" $obj }} spec: secretRefs: - - name: {{ include "metal-operator-remote.fullname" . }}-resources + - name: {{ template "mrname" $obj }} --- apiVersion: v1 kind: Secret metadata: - name: {{ include "metal-operator-remote.fullname" . 
}}-resources + name: {{ template "mrname" $obj }} type: Opaque data: - # Cannot use .Files.AsSecrets because it would create a map - # of "file: base64 data" instead of concatenating the data - # and encoding that - objects.yaml: |- - {{- $combined := "" }} - {{- range $path, $_ := .Files.Glob "managedresources/*.yaml" }} - {{- $combined = print $combined ($.Files.Get $path) "\n"}} - {{- end }} - {{- $encoded := $combined | b64enc }} - {{ $encoded }} + objects.yaml: {{ $doc | b64enc }} +{{- end }} +{{- end }} From cc8040a7e3f2f5a2f64a4c5c2c57b662d7e59d3a Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Wed, 15 Jan 2025 15:15:15 +0200 Subject: [PATCH 163/224] [calico-cni] add owner service --- system/calico-cni/Chart.yaml | 2 +- system/calico-cni/values.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/system/calico-cni/Chart.yaml b/system/calico-cni/Chart.yaml index 297ebbe826b..5753b5c1448 100644 --- a/system/calico-cni/Chart.yaml +++ b/system/calico-cni/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: calico-cni description: A Helm chart for the all things CNI. type: application -version: 1.0.15 +version: 1.0.16 dependencies: - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm diff --git a/system/calico-cni/values.yaml b/system/calico-cni/values.yaml index 5c5146aec08..ce3e13f5e43 100644 --- a/system/calico-cni/values.yaml +++ b/system/calico-cni/values.yaml @@ -1,5 +1,6 @@ owner-info: support-group: containers + service: calico helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/system/cni bgpNeighborCount: 2 From b58163fc9ab9045007d7a334467d0e09624dd7b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?p=C3=BDrus?= Date: Wed, 15 Jan 2025 14:41:02 +0100 Subject: [PATCH 164/224] ceph: fix rook cert loop delimiter (#7671) --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/certificate.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index ec98074e395..b6be7ac819d 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.4 +version: 1.1.5 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/certificate.yaml b/system/cc-ceph/templates/certificate.yaml index 4c9214f7b95..e3fd6f44916 100644 --- a/system/cc-ceph/templates/certificate.yaml +++ b/system/cc-ceph/templates/certificate.yaml @@ -1,4 +1,5 @@ {{- range $key, $record := .Values.objectstore.gateway.dnsNames }} +--- apiVersion: cert-manager.io/v1 kind: Certificate metadata: From 2c14392fd636b76f952536437e2707e92bc34245 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?p=C3=BDrus?= Date: Wed, 15 Jan 2025 15:16:56 +0100 Subject: [PATCH 165/224] ceph: ability to skip DNS record and Cert creation for dnsNames (#7672) --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/certificate-extra.yaml | 2 ++ system/cc-ceph/templates/certificate.yaml | 2 ++ system/cc-ceph/templates/record-extra.yaml | 2 ++ system/cc-ceph/templates/record.yaml | 2 ++ system/cc-ceph/values.yaml | 5 +++++ 6 files changed, 14 insertions(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index b6be7ac819d..198c0555105 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the 
Storage Clusters type: application -version: 1.1.5 +version: 1.1.6 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/certificate-extra.yaml b/system/cc-ceph/templates/certificate-extra.yaml index 852c5168623..edb11f4d5ab 100644 --- a/system/cc-ceph/templates/certificate-extra.yaml +++ b/system/cc-ceph/templates/certificate-extra.yaml @@ -1,6 +1,7 @@ {{- if .Values.objectstore.multiInstance.enabled }} {{- range $instance := .Values.objectstore.multiInstance.extraInstances }} {{- range $key, $record := $instance.gateway.dnsNames }} +{{- if or (not $.Values.dnsNamesSkipCertificate) (not (has $record $.Values.dnsNamesSkipCertificate)) }} --- apiVersion: cert-manager.io/v1 kind: Certificate @@ -23,3 +24,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/certificate.yaml b/system/cc-ceph/templates/certificate.yaml index e3fd6f44916..fc59f519c11 100644 --- a/system/cc-ceph/templates/certificate.yaml +++ b/system/cc-ceph/templates/certificate.yaml @@ -1,4 +1,5 @@ {{- range $key, $record := .Values.objectstore.gateway.dnsNames }} +{{- if or (not $.Values.dnsNamesSkipCertificate) (not (has $record $.Values.dnsNamesSkipCertificate)) }} --- apiVersion: cert-manager.io/v1 kind: Certificate @@ -19,3 +20,4 @@ spec: - digital signature - key encipherment {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/record-extra.yaml b/system/cc-ceph/templates/record-extra.yaml index ad2c07d5c81..48d7748594a 100644 --- a/system/cc-ceph/templates/record-extra.yaml +++ b/system/cc-ceph/templates/record-extra.yaml @@ -1,6 +1,7 @@ {{- if .Values.objectstore.multiInstance.enabled }} {{- range $instance := .Values.objectstore.multiInstance.extraInstances }} {{- range $key, $record := $instance.gateway.dnsNames }} +{{- if or (not $.Values.dnsNamesSkipRecord) (not (has $record $.Values.dnsNamesSkipRecord)) }} --- apiVersion: disco.stable.sap.cc/v1 kind: Record @@ -24,3 +25,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/record.yaml b/system/cc-ceph/templates/record.yaml index 020c7b1b6a2..2f25fbe5ae8 100644 --- a/system/cc-ceph/templates/record.yaml +++ b/system/cc-ceph/templates/record.yaml @@ -1,4 +1,5 @@ {{- range $key, $record := .Values.objectstore.gateway.dnsNames }} +{{- if or (not $.Values.dnsNamesSkipRecord) (not (has $record $.Values.dnsNamesSkipRecord)) }} --- apiVersion: disco.stable.sap.cc/v1 kind: Record @@ -20,3 +21,4 @@ spec: hosts: - "*.{{ $record }}." 
{{- end }} +{{- end }} diff --git a/system/cc-ceph/values.yaml b/system/cc-ceph/values.yaml index c03a65f36bf..49a3197ee74 100644 --- a/system/cc-ceph/values.yaml +++ b/system/cc-ceph/values.yaml @@ -194,3 +194,8 @@ defaultRgwPools: replicasPerFailureDomain: 2 subFailureDomain: host deviceClass: nvme + +# a list of dnsNames to skip DNS record creation for RGW instances +dnsNamesSkipRecord: [] +# a list of dnsNames to skip certificate creation for RGW instances +dnsNamesSkipCertificate: [] From 0eac7bd0a6542bace2934091f2abae7a1e1a55a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?p=C3=BDrus?= Date: Wed, 15 Jan 2025 15:49:21 +0100 Subject: [PATCH 166/224] ceph: add skip for dnsNames wildcard, and skip external service (#7673) --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/certificate-extra.yaml | 4 ++++ system/cc-ceph/templates/certificate.yaml | 2 ++ system/cc-ceph/templates/record-extra.yaml | 2 ++ system/cc-ceph/templates/record.yaml | 2 ++ system/cc-ceph/templates/service-extra.yaml | 2 ++ system/cc-ceph/templates/service.yaml | 2 ++ system/cc-ceph/values.yaml | 4 ++++ 8 files changed, 19 insertions(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 198c0555105..f8cc2437ca7 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.6 +version: 1.1.7 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/certificate-extra.yaml b/system/cc-ceph/templates/certificate-extra.yaml index edb11f4d5ab..ffc3c91aeff 100644 --- a/system/cc-ceph/templates/certificate-extra.yaml +++ b/system/cc-ceph/templates/certificate-extra.yaml @@ -1,5 +1,6 @@ {{- if .Values.objectstore.multiInstance.enabled }} {{- range $instance := .Values.objectstore.multiInstance.extraInstances }} +{{- if $instance.gateway.sslCertificateRef }} {{- range $key, $record := $instance.gateway.dnsNames }} {{- if or (not $.Values.dnsNamesSkipCertificate) (not (has $record $.Values.dnsNamesSkipCertificate)) }} --- @@ -9,7 +10,9 @@ metadata: name: {{ $record }} spec: dnsNames: +{{- if or (not $.Values.dnsNamesSkipCertificateWildcard) (not (has $record $.Values.dnsNamesSkipCertificateWildcard)) }} - "*.{{ $record }}" +{{- end }} - "{{ $record }}" uris: - rook-ceph-rgw-{{ $instance.name }}.rook-ceph.svc @@ -25,3 +28,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/certificate.yaml b/system/cc-ceph/templates/certificate.yaml index fc59f519c11..afafdcd619a 100644 --- a/system/cc-ceph/templates/certificate.yaml +++ b/system/cc-ceph/templates/certificate.yaml @@ -1,3 +1,4 @@ +{{- if $.Values.objectstore.gateway.sslCertificateRef }} {{- range $key, $record := .Values.objectstore.gateway.dnsNames }} {{- if or (not $.Values.dnsNamesSkipCertificate) (not (has $record $.Values.dnsNamesSkipCertificate)) }} --- @@ -21,3 +22,4 @@ spec: - key encipherment {{- end }} {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/record-extra.yaml b/system/cc-ceph/templates/record-extra.yaml index 48d7748594a..6cdc803b631 100644 --- a/system/cc-ceph/templates/record-extra.yaml +++ b/system/cc-ceph/templates/record-extra.yaml @@ -12,6 +12,7 @@ spec: record: {{ $instance.service.externalIP }} hosts: - "{{ $record }}." 
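+{{- /* create the wildcard Record only when this dnsName is not listed in dnsNamesSkipRecordWildcard */}}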
+{{- if or (not $.Values.dnsNamesSkipRecordWildcard) (not (has $record $.Values.dnsNamesSkipRecordWildcard)) }} --- apiVersion: disco.stable.sap.cc/v1 kind: Record @@ -26,3 +27,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/record.yaml b/system/cc-ceph/templates/record.yaml index 2f25fbe5ae8..ca76bd89296 100644 --- a/system/cc-ceph/templates/record.yaml +++ b/system/cc-ceph/templates/record.yaml @@ -10,6 +10,7 @@ spec: record: {{ $.Values.objectstore.service.externalIP }} hosts: - "{{ $record }}." +{{- if or (not $.Values.dnsNamesSkipRecordWildcard) (not (has $record $.Values.dnsNamesSkipRecordWildcard)) }} --- apiVersion: disco.stable.sap.cc/v1 kind: Record @@ -22,3 +23,4 @@ spec: - "*.{{ $record }}." {{- end }} {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/service-extra.yaml b/system/cc-ceph/templates/service-extra.yaml index 3259f479bfc..f4d195ea009 100644 --- a/system/cc-ceph/templates/service-extra.yaml +++ b/system/cc-ceph/templates/service-extra.yaml @@ -7,9 +7,11 @@ metadata: name: {{ $instance.service.name }} namespace: {{ $.Release.Namespace }} spec: +{{- if $instance.service.externalIP }} externalIPs: - {{ $instance.service.externalIP }} type: NodePort +{{- end }} sessionAffinity: None externalTrafficPolicy: Local ports: diff --git a/system/cc-ceph/templates/service.yaml b/system/cc-ceph/templates/service.yaml index 83f629fe058..1af096a5f4d 100644 --- a/system/cc-ceph/templates/service.yaml +++ b/system/cc-ceph/templates/service.yaml @@ -4,9 +4,11 @@ metadata: name: {{ .Values.objectstore.service.name }} namespace: {{ .Release.Namespace }} spec: +{{- if .Values.objectstore.service.externalIP }} externalIPs: - {{ .Values.objectstore.service.externalIP }} type: NodePort +{{- end }} sessionAffinity: None externalTrafficPolicy: Local ports: diff --git a/system/cc-ceph/values.yaml b/system/cc-ceph/values.yaml index 49a3197ee74..69eee4be465 100644 --- a/system/cc-ceph/values.yaml +++ b/system/cc-ceph/values.yaml @@ -197,5 +197,9 @@ defaultRgwPools: # a list of dnsNames to skip DNS record creation for RGW instances dnsNamesSkipRecord: [] +# a list of dnsNames to skip DNS record creation for RGW instances with wildcard +dnsNamesSkipRecordWildcard: [] # a list of dnsNames to skip certificate creation for RGW instances dnsNamesSkipCertificate: [] +# a list of dnsNames to skip certificate creation for RGW instances with wildcard +dnsNamesSkipCertificateWildcard: [] From a9af37db91d014e9009bd0841efa12715cb78f2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?p=C3=BDrus?= Date: Wed, 15 Jan 2025 16:38:15 +0100 Subject: [PATCH 167/224] ceph: allow admin endpoint to listen internal 80 port only (#7674) --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/cephobjectstore-extra.yaml | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index f8cc2437ca7..9d791d59861 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.7 +version: 1.1.8 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml index ca62b10a4e9..ec3d3aec582 100644 --- a/system/cc-ceph/templates/cephobjectstore-extra.yaml +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -57,8 +57,10 @@ spec: {{- 
if gt (len $instance.gateway.dnsNames) 0 }} advertiseEndpoint: dnsName: {{ $instance.gateway.dnsNames | first }} +{{- if $instance.gateway.securePort }} port: 443 useTls: true +{{- end }} dnsNames: {{ toYaml $instance.gateway.dnsNames | nindent 8 }} {{- end }} gateway: @@ -66,8 +68,8 @@ spec: {{- if or $instance.gateway.port $.Values.objectstore.gateway.port }} port: {{ $instance.gateway.port | default $.Values.objectstore.gateway.port }} {{- end }} - {{- if or $instance.gateway.securePort $.Values.objectstore.gateway.securePort }} - securePort: {{ $instance.gateway.securePort | default $.Values.objectstore.gateway.securePort }} + {{- if $instance.gateway.securePort }} + securePort: {{ $instance.gateway.securePort }} {{- end }} placement: nodeAffinity: @@ -89,7 +91,9 @@ spec: - rook-ceph-rgw topologyKey: kubernetes.io/hostname priorityClassName: system-cluster-critical +{{- if $instance.gateway.securePort }} sslCertificateRef: {{ $instance.gateway.sslCertificateRef | default $.Values.objectstore.gateway.sslCertificateRef }} +{{- end }} resources: {{ toYaml ( $instance.gateway.resources | default $.Values.objectstore.gateway.resources) | nindent 6 }} preservePoolsOnDelete: true {{- if and $.Values.objectstore.keystone.enabled }} From f524aa5bfff44d29ca05cb6a95600842567450ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?p=C3=BDrus?= Date: Wed, 15 Jan 2025 17:16:42 +0100 Subject: [PATCH 168/224] ceph: skip DNS record creation, when there is no external IP (#7675) --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/record-extra.yaml | 2 ++ system/cc-ceph/templates/record.yaml | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 9d791d59861..4f059acb5a6 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.8 +version: 1.1.9 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/record-extra.yaml b/system/cc-ceph/templates/record-extra.yaml index 6cdc803b631..bb0bef11665 100644 --- a/system/cc-ceph/templates/record-extra.yaml +++ b/system/cc-ceph/templates/record-extra.yaml @@ -2,6 +2,7 @@ {{- range $instance := .Values.objectstore.multiInstance.extraInstances }} {{- range $key, $record := $instance.gateway.dnsNames }} {{- if or (not $.Values.dnsNamesSkipRecord) (not (has $record $.Values.dnsNamesSkipRecord)) }} +{{- if $.Values.objectstore.service.externalIP }} --- apiVersion: disco.stable.sap.cc/v1 kind: Record @@ -28,3 +29,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/system/cc-ceph/templates/record.yaml b/system/cc-ceph/templates/record.yaml index ca76bd89296..6bf89e0924f 100644 --- a/system/cc-ceph/templates/record.yaml +++ b/system/cc-ceph/templates/record.yaml @@ -1,5 +1,6 @@ {{- range $key, $record := .Values.objectstore.gateway.dnsNames }} {{- if or (not $.Values.dnsNamesSkipRecord) (not (has $record $.Values.dnsNamesSkipRecord)) }} +{{- if $.Values.objectstore.service.externalIP }} --- apiVersion: disco.stable.sap.cc/v1 kind: Record @@ -24,3 +25,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} From 1004c728395ac806bb9548166834651ca1ec53c5 Mon Sep 17 00:00:00 2001 From: kayrus Date: Wed, 15 Jan 2025 17:22:40 +0100 Subject: [PATCH 169/224] ceph: fix a typo --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/record-extra.yaml | 2 +- 2 files changed, 2 
insertions(+), 2 deletions(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 4f059acb5a6..90114ac5e23 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.9 +version: 1.1.10 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/record-extra.yaml b/system/cc-ceph/templates/record-extra.yaml index bb0bef11665..5917de90bfd 100644 --- a/system/cc-ceph/templates/record-extra.yaml +++ b/system/cc-ceph/templates/record-extra.yaml @@ -2,7 +2,7 @@ {{- range $instance := .Values.objectstore.multiInstance.extraInstances }} {{- range $key, $record := $instance.gateway.dnsNames }} {{- if or (not $.Values.dnsNamesSkipRecord) (not (has $record $.Values.dnsNamesSkipRecord)) }} -{{- if $.Values.objectstore.service.externalIP }} +{{- if $instance.service.externalIP }} --- apiVersion: disco.stable.sap.cc/v1 kind: Record From 0a74b99239098fab1ae9946140be9dac3134eab8 Mon Sep 17 00:00:00 2001 From: kayrus Date: Wed, 15 Jan 2025 17:33:24 +0100 Subject: [PATCH 170/224] ceph: export insecure port only internally --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/cephobjectstore-extra.yaml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 90114ac5e23..8816d3beab5 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.10 +version: 1.1.11 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml index ec3d3aec582..0c0e2d53a24 100644 --- a/system/cc-ceph/templates/cephobjectstore-extra.yaml +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -71,6 +71,9 @@ spec: {{- if $instance.gateway.securePort }} securePort: {{ $instance.gateway.securePort }} {{- end }} +{{- if not $instance.gateway.securePort }} + hostNetwork: false # skip hostNetwork for non-secure port +{{- end }} placement: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: From 909068d61fc4b2eb77cd23e80135b5e5f4f52f9e Mon Sep 17 00:00:00 2001 From: kayrus Date: Wed, 15 Jan 2025 17:39:09 +0100 Subject: [PATCH 171/224] ceph: fix some typos --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/cephobjectstore-extra.yaml | 5 ++--- system/cc-ceph/templates/service-extra.yaml | 2 +- system/cc-ceph/templates/service.yaml | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index 8816d3beab5..e8a1be266cf 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.11 +version: 1.1.12 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml index 0c0e2d53a24..ac39d9abcd3 100644 --- a/system/cc-ceph/templates/cephobjectstore-extra.yaml +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -70,10 +70,9 @@ spec: {{- end }} {{- if $instance.gateway.securePort }} securePort: {{ 
$instance.gateway.securePort }} - {{- end }} -{{- if not $instance.gateway.securePort }} + {{- else }} hostNetwork: false # skip hostNetwork for non-secure port -{{- end }} + {{- end }} placement: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: diff --git a/system/cc-ceph/templates/service-extra.yaml b/system/cc-ceph/templates/service-extra.yaml index f4d195ea009..c8c7b88c498 100644 --- a/system/cc-ceph/templates/service-extra.yaml +++ b/system/cc-ceph/templates/service-extra.yaml @@ -11,9 +11,9 @@ spec: externalIPs: - {{ $instance.service.externalIP }} type: NodePort + externalTrafficPolicy: Local {{- end }} sessionAffinity: None - externalTrafficPolicy: Local ports: - port: {{ $instance.service.port }} targetPort: {{ $instance.service.port }} diff --git a/system/cc-ceph/templates/service.yaml b/system/cc-ceph/templates/service.yaml index 1af096a5f4d..70319c0ae7e 100644 --- a/system/cc-ceph/templates/service.yaml +++ b/system/cc-ceph/templates/service.yaml @@ -8,9 +8,9 @@ spec: externalIPs: - {{ .Values.objectstore.service.externalIP }} type: NodePort + externalTrafficPolicy: Local {{- end }} sessionAffinity: None - externalTrafficPolicy: Local ports: - port: {{ .Values.objectstore.service.port }} targetPort: {{ .Values.objectstore.service.port }} From dd379940076723b81e9b273f686c486634aa720d Mon Sep 17 00:00:00 2001 From: kayrus Date: Wed, 15 Jan 2025 17:43:52 +0100 Subject: [PATCH 172/224] ceph: fix required values --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/cephobjectstore-extra.yaml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index e8a1be266cf..c3dcc3d524b 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.12 +version: 1.1.13 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/cephobjectstore-extra.yaml b/system/cc-ceph/templates/cephobjectstore-extra.yaml index ac39d9abcd3..e09be8c265a 100644 --- a/system/cc-ceph/templates/cephobjectstore-extra.yaml +++ b/system/cc-ceph/templates/cephobjectstore-extra.yaml @@ -60,6 +60,9 @@ spec: {{- if $instance.gateway.securePort }} port: 443 useTls: true +{{- else }} + port: 80 + useTls: false {{- end }} dnsNames: {{ toYaml $instance.gateway.dnsNames | nindent 8 }} {{- end }} From 51759b7138b8144b2c6e41a07ceafca5657b76cd Mon Sep 17 00:00:00 2001 From: kayrus Date: Wed, 15 Jan 2025 18:05:14 +0100 Subject: [PATCH 173/224] ceph: forward insecure rgw service to 8080 port --- system/cc-ceph/Chart.yaml | 2 +- system/cc-ceph/templates/service-extra.yaml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/system/cc-ceph/Chart.yaml b/system/cc-ceph/Chart.yaml index c3dcc3d524b..d24bbda534e 100644 --- a/system/cc-ceph/Chart.yaml +++ b/system/cc-ceph/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cc-ceph description: A Helm chart for the Rook / Ceph Objects inside the Storage Clusters type: application -version: 1.1.13 +version: 1.1.14 appVersion: "1.16.1" dependencies: - name: owner-info diff --git a/system/cc-ceph/templates/service-extra.yaml b/system/cc-ceph/templates/service-extra.yaml index c8c7b88c498..b0d68c3ebe4 100644 --- a/system/cc-ceph/templates/service-extra.yaml +++ b/system/cc-ceph/templates/service-extra.yaml @@ -16,7 +16,11 @@ spec: sessionAffinity: None ports: - port: {{ $instance.service.port }} 
+{{- if $instance.gateway.securePort }} targetPort: {{ $instance.service.port }} +{{- else }} + targetPort: 8080 # rgw doesn't listen on port 80 for some reason +{{- end }} protocol: TCP name: rgw-ssl selector: From d86e288b10ecdd1709fd3de17787b212c7b9efa1 Mon Sep 17 00:00:00 2001 From: Alexandru Mihai Date: Thu, 16 Jan 2025 08:35:51 +0200 Subject: [PATCH 174/224] [kube-parrot] parrot image source change --- system/kube-parrot/templates/daemonset.yaml | 2 +- system/kube-parrot/values.yaml | 2 ++ system/kube-proxy/templates/daemonset.yaml | 2 +- system/kube-proxy/values.yaml | 2 ++ 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/system/kube-parrot/templates/daemonset.yaml b/system/kube-parrot/templates/daemonset.yaml index c89bbffa90f..b6db6c4612d 100644 --- a/system/kube-parrot/templates/daemonset.yaml +++ b/system/kube-parrot/templates/daemonset.yaml @@ -32,7 +32,7 @@ spec: {{- end }} containers: - name: parrot - image: "{{ required ".Values.global.registryAlternateRegion is missing" $.Values.global.registryAlternateRegion }}/{{ $.Values.images.parrot.image }}:{{ $.Values.images.parrot.tag }}" + image: "{{ required ".Values.global.ghcrIoMirrorAlternateRegion is missing" $.Values.global.ghcrIoMirrorAlternateRegion }}/{{ $.Values.images.parrot.image }}:{{ $.Values.images.parrot.tag }}" imagePullPolicy: IfNotPresent command: - /parrot diff --git a/system/kube-parrot/values.yaml b/system/kube-parrot/values.yaml index da9977fb514..dacf88ca97e 100644 --- a/system/kube-parrot/values.yaml +++ b/system/kube-parrot/values.yaml @@ -1,5 +1,7 @@ global: registryAlternateRegion: test + dockerHubMirrorAlternateRegion: test + ghcrIoMirrorAlternateRegion: test selector: true toleration: false diff --git a/system/kube-proxy/templates/daemonset.yaml b/system/kube-proxy/templates/daemonset.yaml index bb00260be66..c6e2f2db25a 100644 --- a/system/kube-proxy/templates/daemonset.yaml +++ b/system/kube-proxy/templates/daemonset.yaml @@ -83,7 +83,7 @@ spec: {{- end }} {{- if .Values.sidecars.parrot }} - name: parrot - image: "{{ required ".Values.global.registryAlternateRegion is missing" .Values.global.registryAlternateRegion }}/{{ .Values.images.parrot.image }}:{{ .Values.images.parrot.tag }}" + image: "{{ required ".Values.global.ghcrIoMirrorAlternateRegion is missing" $.Values.global.ghcrIoMirrorAlternateRegion }}/{{ .Values.images.parrot.image }}:{{ .Values.images.parrot.tag }}" imagePullPolicy: IfNotPresent command: - /parrot diff --git a/system/kube-proxy/values.yaml b/system/kube-proxy/values.yaml index 50e444bf8de..05862ccff61 100644 --- a/system/kube-proxy/values.yaml +++ b/system/kube-proxy/values.yaml @@ -1,5 +1,7 @@ global: registryAlternateRegion: test + dockerHubMirrorAlternateRegion: test + ghcrIoMirrorAlternateRegion: test selector: true toleration: false From 8a2038fe6e5d373146a45fc83710271628befcf6 Mon Sep 17 00:00:00 2001 From: Alexandru Mihai Date: Thu, 16 Jan 2025 09:06:02 +0200 Subject: [PATCH 175/224] [kube-parrot] parrot image source change, bump charts --- system/kube-parrot/Chart.yaml | 2 +- system/kube-proxy/Chart.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system/kube-parrot/Chart.yaml b/system/kube-parrot/Chart.yaml index cfcb19d1175..deededa844a 100644 --- a/system/kube-parrot/Chart.yaml +++ b/system/kube-parrot/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v2 description: Kube Parrot name: kube-parrot -version: 4.0.6 +version: 4.0.7 diff --git a/system/kube-proxy/Chart.yaml b/system/kube-proxy/Chart.yaml index 66717cabd28..5fe260efc53 100644 ---
a/system/kube-proxy/Chart.yaml +++ b/system/kube-proxy/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Kube Proxy name: kube-proxy -version: 0.6.40 +version: 0.6.41 dependencies: - name: helm3-helper From 753e0c90e6127b89c40cbb75e88ba7f541c89087 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Thu, 16 Jan 2025 10:31:52 +0200 Subject: [PATCH 176/224] [kube-system-metal] bump deps --- system/kube-system-metal/Chart.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system/kube-system-metal/Chart.yaml b/system/kube-system-metal/Chart.yaml index 08aa043cb2c..b0912d81f05 100644 --- a/system/kube-system-metal/Chart.yaml +++ b/system/kube-system-metal/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Kube-System relevant Service collection for metal clusters. name: kube-system-metal -version: 6.10.38 +version: 6.10.39 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-metal dependencies: - name: cc-rbac @@ -23,7 +23,7 @@ dependencies: version: 1.1.0 - name: kube-proxy repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.40 + version: 0.6.41 - name: kubernikus-rbac repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 2.2.0 @@ -60,7 +60,7 @@ dependencies: - condition: kube-parrot.enabled name: kube-parrot repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 4.0.6 + version: 4.0.7 - name: kube-proxy-ng repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.10 From a2f0df4f0d13d7cb64cb18307dc9b8a69bc8b73a Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Thu, 16 Jan 2025 10:34:03 +0200 Subject: [PATCH 177/224] helm dep up --- system/kube-system-metal/Chart.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/system/kube-system-metal/Chart.lock b/system/kube-system-metal/Chart.lock index e31b81ee070..3463faa887f 100644 --- a/system/kube-system-metal/Chart.lock +++ b/system/kube-system-metal/Chart.lock @@ -16,7 +16,7 @@ dependencies: version: 1.1.0 - name: kube-proxy repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.40 + version: 0.6.41 - name: kubernikus-rbac repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 2.2.0 @@ -49,7 +49,7 @@ dependencies: version: 1.0.12 - name: kube-parrot repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 4.0.6 + version: 4.0.7 - name: kube-proxy-ng repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.10 @@ -101,5 +101,5 @@ dependencies: - name: reloader repository: oci://ghcr.io/stakater/charts version: 1.2.0 -digest: sha256:855a768368cf258302b5fa4bfffd27b65b1420a7216c2a6ed59eedc1355e7cec -generated: "2025-01-07T17:33:56.778210003Z" +digest: sha256:1c55eb4511fe3434285790e2a259286a994cbb9150f9b44a94b511931f90097b +generated: "2025-01-16T10:33:00.818757+02:00" From a3c04519acb07cf732778789ea0586a8852ec207 Mon Sep 17 00:00:00 2001 From: Chuan Miao Date: Thu, 16 Jan 2025 10:21:59 +0100 Subject: [PATCH 178/224] seeding a new user for backup service (#7670) * seeding a new user for backup service * Update openstack/manila/templates/seeds.yaml Co-authored-by: Maurice Escher --- openstack/manila/templates/seeds.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/openstack/manila/templates/seeds.yaml b/openstack/manila/templates/seeds.yaml index 08f45fbddc7..cfee654c197 100644 --- a/openstack/manila/templates/seeds.yaml +++ b/openstack/manila/templates/seeds.yaml @@ -28,6 +28,7 @@ spec: - monsoon3/domain-neo-seed - monsoon3/domain-s4-seed - 
monsoon3/domain-wbs-seed + - swift/swift-seed services: - name: {{ .Release.Name }} @@ -99,6 +100,12 @@ spec: role: service - project: service role: cloud_network_admin + - name: manilabackup + description: Manila Backup Service + password: '{{.Values.global.manila_backup_password | include "resolve_secret"}}' + role_assignments: + - project: service + role: service - name: ccadmin projects: @@ -106,6 +113,10 @@ spec: role_assignments: - user: admin@Default role: cloud_sharedfilesystem_admin + - user: manilabackup@Default + role: cloud_objectstore_admin + - user: manilabackup@Default + role: cloud_sharedfilesystem_admin groups: - name: CCADMIN_CLOUD_ADMINS role_assignments: From 4d180d94e160721b072193bf52034d42aa67948f Mon Sep 17 00:00:00 2001 From: Alexandru Mihai Date: Thu, 16 Jan 2025 11:25:48 +0200 Subject: [PATCH 179/224] [kube-parrot] fix the other daemonset --- system/kube-parrot/Chart.yaml | 2 +- system/kube-parrot/templates/daemonset.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system/kube-parrot/Chart.yaml b/system/kube-parrot/Chart.yaml index deededa844a..1a8e0506fbb 100644 --- a/system/kube-parrot/Chart.yaml +++ b/system/kube-parrot/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v2 description: Kube Parrot name: kube-parrot -version: 4.0.7 +version: 4.0.8 diff --git a/system/kube-parrot/templates/daemonset.yaml b/system/kube-parrot/templates/daemonset.yaml index b6db6c4612d..8bff89e34d4 100644 --- a/system/kube-parrot/templates/daemonset.yaml +++ b/system/kube-parrot/templates/daemonset.yaml @@ -114,7 +114,7 @@ spec: {{- end }} containers: - name: parrot - image: "{{ required ".Values.global.registryAlternateRegion is missing" .Values.global.registryAlternateRegion }}/{{ .Values.images.parrot.image }}:{{ .Values.images.parrot.tag }}" + image: "{{ required ".Values.global.ghcrIoMirrorAlternateRegion is missing" $.Values.global.ghcrIoMirrorAlternateRegion }}/{{ .Values.images.parrot.image }}:{{ .Values.images.parrot.tag }}" imagePullPolicy: IfNotPresent command: - /parrot From 2aa0a0853dcf021d86ee7197abcbd747e29aea00 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Thu, 16 Jan 2025 11:27:45 +0200 Subject: [PATCH 180/224] [kube-system-metal] use semver in some deps --- system/kube-system-metal/Chart.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/system/kube-system-metal/Chart.yaml b/system/kube-system-metal/Chart.yaml index b0912d81f05..791758f2fc8 100644 --- a/system/kube-system-metal/Chart.yaml +++ b/system/kube-system-metal/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Kube-System relevant Service collection for metal clusters. 
name: kube-system-metal -version: 6.10.39 +version: 6.10.40 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-metal dependencies: - name: cc-rbac @@ -16,14 +16,14 @@ dependencies: version: '>= 0.0.0' - name: kube-dns repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.3.20 + version: "^0.x" - condition: kube-fip-controller.enabled name: kube-fip-controller repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 - name: kube-proxy repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.41 + version: "^0.x" - name: kubernikus-rbac repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 2.2.0 @@ -35,7 +35,7 @@ dependencies: version: 6.5.0 - name: sysctl repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.0.9 + version: "^0.x" - name: cert-manager repository: https://charts.jetstack.io version: 1.13.3 @@ -52,21 +52,21 @@ dependencies: version: 4.5.2 - name: wormhole repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 3.1.8 + version: "^3.x" condition: wormhole.enabled - name: nodecidr-controller repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.12 + version: "^1.x" - condition: kube-parrot.enabled name: kube-parrot repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 4.0.7 + version: "^4.x" - name: kube-proxy-ng repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.10 - name: toolbox-prepull repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.7 + version: "^1.x" - name: metrics-server repository: https://kubernetes-sigs.github.io/metrics-server version: 3.8.3 @@ -90,7 +90,7 @@ dependencies: - name: kube-detective condition: kube-detective.enabled repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.0.2 + version: '>= 0.0.0' - name: kube-cni condition: kube-cni.enabled repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm @@ -108,7 +108,7 @@ dependencies: - condition: ipmasq.enabled name: ipmasq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.1.3 + version: "^0.x" - name: velero repository: https://vmware-tanzu.github.io/helm-charts version: 5.0.2 From ebf7b6e7302d9f05c57568639ba1d92cac0c0f85 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Thu, 16 Jan 2025 09:29:56 +0000 Subject: [PATCH 181/224] system/kube-system-metal: run helm dep up --- system/kube-system-metal/Chart.lock | 6 +++--- system/kube-system-metal/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/system/kube-system-metal/Chart.lock b/system/kube-system-metal/Chart.lock index 3463faa887f..96438f63282 100644 --- a/system/kube-system-metal/Chart.lock +++ b/system/kube-system-metal/Chart.lock @@ -49,7 +49,7 @@ dependencies: version: 1.0.12 - name: kube-parrot repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 4.0.7 + version: 4.0.8 - name: kube-proxy-ng repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.10 @@ -101,5 +101,5 @@ dependencies: - name: reloader repository: oci://ghcr.io/stakater/charts version: 1.2.0 -digest: sha256:1c55eb4511fe3434285790e2a259286a994cbb9150f9b44a94b511931f90097b -generated: "2025-01-16T10:33:00.818757+02:00" +digest: sha256:213e30a5d3d14ccc24f7440344d8cbda0087d6d7a6594c5c761125fc5aee9ac6 +generated: "2025-01-16T09:29:43.751180733Z" diff --git a/system/kube-system-metal/Chart.yaml b/system/kube-system-metal/Chart.yaml index 791758f2fc8..b0995d4d763 100644 --- a/system/kube-system-metal/Chart.yaml +++ b/system/kube-system-metal/Chart.yaml @@ 
-1,7 +1,7 @@ apiVersion: v2 description: Kube-System relevant Service collection for metal clusters. name: kube-system-metal -version: 6.10.40 +version: 6.10.41 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-metal dependencies: - name: cc-rbac From 143b56681e387929ce2df7ca9650f9f7a42e5d86 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Thu, 16 Jan 2025 10:57:14 +0100 Subject: [PATCH 182/224] [opensearch-logs] adding otel2 user to otel role --- system/opensearch-logs/templates/config/_roles_mapping.yml.tpl | 1 + 1 file changed, 1 insertion(+) diff --git a/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl b/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl index 4e25c9be659..5548530ca60 100644 --- a/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl +++ b/system/opensearch-logs/templates/config/_roles_mapping.yml.tpl @@ -66,6 +66,7 @@ otel: reserved: false users: - "otel" + - "otel2" otellogs: reserved: false From 4db354fac8a4d301e4152f66ec18b5af11b366cc Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 11:25:01 +0100 Subject: [PATCH 183/224] limes: remove .Values.limes.constraints This feature was removed a year ago. --- openstack/limes/templates/_utils.tpl | 2 -- openstack/limes/templates/configmap.yaml | 2 -- openstack/limes/values.yaml | 15 --------------- 3 files changed, 19 deletions(-) diff --git a/openstack/limes/templates/_utils.tpl b/openstack/limes/templates/_utils.tpl index 170dca0bb44..a84b77bedd7 100644 --- a/openstack/limes/templates/_utils.tpl +++ b/openstack/limes/templates/_utils.tpl @@ -25,8 +25,6 @@ {{- end }} - name: LIMES_AUTHORITATIVE value: "true" -- name: LIMES_CONSTRAINTS_PATH - value: "/etc/limes/constraints-ccloud.yaml" - name: LIMES_DEBUG value: '0' - name: LIMES_DB_USERNAME diff --git a/openstack/limes/templates/configmap.yaml b/openstack/limes/templates/configmap.yaml index 5dd7c180533..458ed0cb7db 100644 --- a/openstack/limes/templates/configmap.yaml +++ b/openstack/limes/templates/configmap.yaml @@ -11,8 +11,6 @@ data: - {{ . }} {{- end }} {{ toYaml .Values.limes.clusters.ccloud | indent 4 }} - constraints-ccloud.yaml: | -{{ toYaml .Values.limes.constraints.ccloud | indent 4 }} policy.json: | {{- .Files.Get "files/policy.yaml" | fromYaml | toPrettyJson | nindent 4 }} diff --git a/openstack/limes/values.yaml b/openstack/limes/values.yaml index e742a24dbf0..8a8b33114ba 100644 --- a/openstack/limes/values.yaml +++ b/openstack/limes/values.yaml @@ -70,21 +70,6 @@ limes: clusters: ccloud: {} - # Map with entries being the contents of a Limes quota constraint file. - # - # e.g. - # - # constraints: - # ccloud: - # domains: ... - # projects: ... - # - # To use this constraint in a cluster, set: - # - # .Values.limes.clusters.ccloud.constraints = "/etc/limes/constraints-ccloud.yaml" - constraints: - ccloud: {} - # Whether to apply resource requests/limits to containers. 
resources: enabled: false From 263082b7e931866722c872687b8907c3a84edafc Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Wed, 15 Jan 2025 10:57:15 +0200 Subject: [PATCH 184/224] [ironic] update chart dependencies * mariadb 0.15.2 to 0.15.3: fixes service selector * memcached 0.6.1 to 0.6.3: updates memcached and memcached-exporter * mysql-metrics: 0.4.1 to 0.4.2: updates sql-exporter to 2025-01-07 --- openstack/ironic/Chart.lock | 10 +++++----- openstack/ironic/Chart.yaml | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/openstack/ironic/Chart.lock b/openstack/ironic/Chart.lock index bafe409f113..3d269cecc03 100644 --- a/openstack/ironic/Chart.lock +++ b/openstack/ironic/Chart.lock @@ -1,13 +1,13 @@ dependencies: - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.15.2 + version: 0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.1 + version: 0.6.3 - name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.4.1 + version: 0.4.2 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 @@ -23,5 +23,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.1.0 -digest: sha256:ffd6daea23296f101996ad5971d1dc7246209c1645f8a1c3cbd2ee91ccc86e89 -generated: "2025-01-07T13:09:51.01360416+01:00" +digest: sha256:923d38665af49bed9ae82c71af46bdd28692c58f6537c7d9f7e7b5e7ab587a1e +generated: "2025-01-15T10:55:29.950058+02:00" diff --git a/openstack/ironic/Chart.yaml b/openstack/ironic/Chart.yaml index e179317784c..2212f46d9fa 100644 --- a/openstack/ironic/Chart.yaml +++ b/openstack/ironic/Chart.yaml @@ -2,19 +2,19 @@ apiVersion: v2 description: A Helm chart for Kubernetes icon: https://www.openstack.org/themes/openstack/images/project-mascots/Ironic/OpenStack_Project_Ironic_vertical.png name: ironic -version: 0.1.7 +version: 0.1.8 dependencies: - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.15.2 + version: ~0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.6.1 + version: ~0.6.3 - condition: mysql_metrics.enabled name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: ~0.4.1 + version: ~0.4.2 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: ~1.0.0 From 927bcc8e85a1f0e89beb2b8fc60984ccc3170a4f Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 13:24:47 +0100 Subject: [PATCH 185/224] limes: move all cloud_resource_* assignments from billing-seed to limes-seed I started touching this because billing is getting new technical users (coming from CAM instead of our own seeds; we only seed the role assignments). Then I was dissatisfied with the structure of everything. Having the role assignments hardcoded in the billing seed makes some sense, but it then also requires maintaining the role-assignment alert on the Limes side. This commit moves all of this into the Limes chart, and changes it from hardcoded individual assignments to a configurable list. The templating in the seed is a bit of a mess, but the alert definition and the seed do different forms of lookup, so one of them had to be a mess.
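For illustration, a populated list in a region's values file could look
like this (the user name here is hypothetical; real grants live in the
secrets repo):

    limes:
      external_role_assignments:
        cloud_resource_admin: []
        cloud_resource_viewer:
          - user_name: some-readonly-user
            user_domain_name: ccadmin
            project_name: cloud_admin
            project_domain_name: ccadmin

The alert thresholds derive from this list: with one external viewer
grant configured, the cloud_resource_viewer alert fires above 2 role
assignments (1 for the CCADMIN_CLOUD_ADMINS group plus 1 configured
grant), and the cloud_resource_admin alert fires above 1.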
--- openstack/billing/templates/seeds.yaml | 11 +--- openstack/billing/values.yaml | 4 -- .../limes/templates/prometheus-alerts.yaml | 11 ++-- openstack/limes/templates/seed-grants.yaml | 62 +++++++++++++++++++ openstack/limes/values.yaml | 6 ++ 5 files changed, 74 insertions(+), 20 deletions(-) create mode 100644 openstack/limes/templates/seed-grants.yaml diff --git a/openstack/billing/templates/seeds.yaml b/openstack/billing/templates/seeds.yaml index cd6a94682e2..28182347afc 100644 --- a/openstack/billing/templates/seeds.yaml +++ b/openstack/billing/templates/seeds.yaml @@ -17,7 +17,6 @@ spec: {{- end }} - monsoon3/domain-cc3test-seed - swift/swift-seed - - limes/limes-seed roles: - name: masterdata_admin @@ -97,21 +96,13 @@ spec: role_assignments: - user: billing@Default role: objectstore_admin - - user: billing@Default - role: cloud_resource_viewer + # NOTE: The cloud_resource_viewer role is given by the limes seed. swift: enabled: true - name: cloud_admin role_assignments: - user: masterdata_scanner@Default role: cloud_identity_viewer - {{- if ($.Values.global.region | contains "qa") }} - - user: {{ $.Values.ccadmin.readwriteUserForPlutus }}@ccadmin - role: cloud_resource_admin - {{- else }} - - user: {{ $.Values.ccadmin.readonlyUserForIBP }}@ccadmin - role: cloud_resource_viewer - {{- end }} {{- end }} groups: {{- if eq . "monsoon3" }} diff --git a/openstack/billing/values.yaml b/openstack/billing/values.yaml index 3ca310bf2d8..9a14ec99570 100644 --- a/openstack/billing/values.yaml +++ b/openstack/billing/values.yaml @@ -9,7 +9,3 @@ owner-info: - Stefan Majewsky - Sandro Jäckel helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/openstack/billing - -ccadmin: - readonlyUserForIBP: null # see values.yaml in secrets - readwriteUserForPlutus: null # see values.yaml in secrets diff --git a/openstack/limes/templates/prometheus-alerts.yaml b/openstack/limes/templates/prometheus-alerts.yaml index e35313ab8e2..286edec10e8 100644 --- a/openstack/limes/templates/prometheus-alerts.yaml +++ b/openstack/limes/templates/prometheus-alerts.yaml @@ -57,9 +57,9 @@ spec: # allowed role assignments for the `cloud_resource_admin` role: # - group CCADMIN_CLOUD_ADMINS@ccadmin in project cloud_admin@ccadmin - # - user $PLUTUS_READWRITE@ccadmin in project cloud_admin@ccadmin (for management of commitments by Plutus, see billing-seed for details; QA only) + # - all in .Values.limes.external_role_assignments.cloud_resource_admin - alert: OpenstackLimesUnexpectedCloudAdminRoleAssignments - expr: max(openstack_assignments_per_role{role_name="cloud_resource_admin"}) > {{ if hasPrefix "qa" .Values.global.region }}2{{ else }}1{{ end }} + expr: max(openstack_assignments_per_role{role_name="cloud_resource_admin"}) > {{ add 1 (len .Values.limes.external_role_assignments.cloud_resource_admin) }} for: 10m labels: support_group: containers @@ -73,11 +73,10 @@ spec: description: 'The Keystone role "cloud_resource_admin" is assigned to more users/groups than expected.' 
# allowed role assignments for the `cloud_resource_viewer` role: - # - user billing@Default in project billing@ccadmin (for data transfer from Limes to CBR) - # - user $IBP_READONLY@ccadmin in project cloud_admin@ccadmin (for data transfer from Limes to IBP, see billing-seed for details; prod only) - # - group CCADMIN_CLOUD_ADMINS@ccadmin in project cloud_admin@ccadmin + # - group CCADMIN_CLOUD_ADMINS@ccadmin in project cloud_admin@ccadmin + # - all in .Values.limes.external_role_assignments.cloud_resource_viewer - alert: OpenstackLimesUnexpectedCloudViewerRoleAssignments - expr: max(openstack_assignments_per_role{role_name="cloud_resource_viewer"}) > {{ if hasPrefix "qa" .Values.global.region }}2{{ else }}3{{ end }} + expr: max(openstack_assignments_per_role{role_name="cloud_resource_viewer"}) > {{ add 1 (len .Values.limes.external_role_assignments.cloud_resource_viewer) }} for: 10m labels: support_group: containers diff --git a/openstack/limes/templates/seed-grants.yaml b/openstack/limes/templates/seed-grants.yaml new file mode 100644 index 00000000000..a2585daab66 --- /dev/null +++ b/openstack/limes/templates/seed-grants.yaml @@ -0,0 +1,62 @@ +apiVersion: "openstack.stable.sap.cc/v1" +kind: OpenstackSeed +metadata: + # This seed is for granting cloud_resource_admin and cloud_resource_viewer + # to the chosen few permitted to wield such powers. + name: limes-grants-seed + +{{/* If a domain is mentioned in the external_role_assignments, we need to add a dependency on its domain seed. */}} +{{- $relevant_domain_names := list }} +{{- range $role, $grants := $.Values.limes.external_role_assignments }} + {{- range $grant := $grants }} + {{- $relevant_domain_names = append $relevant_domain_names $grant.user_domain_name }} + {{- $relevant_domain_names = append $relevant_domain_names $grant.project_domain_name }} + {{- end }} +{{- end }} +{{- $relevant_domain_names = sortAlpha (uniq $relevant_domain_names) }} + +spec: + {{- if $relevant_domain_names }} + requires: + {{- $is_global := $.Values.limes.clusters.ccloud.catalog_url | contains "global" -}} + {{- $base_seed_namespace := $is_global | ternary "monsoon3global" "monsoon3" }} + {{- range $relevant_domain_names }} + - {{ $base_seed_namespace }}/domain-{{replace "_" "-" .}}-seed + {{- end }} + + roles: + # NOTE: These role declarations are duplicated from limes-seed. + # I don't want to depend on limes-seed here because its dependency tree is huge and slow. + - name: cloud_resource_admin + - name: cloud_resource_viewer + + {{/* The big pile of templating here is to restructure from "roles -> assignments" into "domains -> projects -> assignments". 
*/}} + domains: + {{- range $domain_name := $relevant_domain_names }} + - name: {{ $domain_name }} + {{- $relevant_project_names := list }} + {{- range $role, $grants := $.Values.limes.external_role_assignments }} + {{- range $grant := $grants }} + {{- if eq $grant.project_domain_name $domain_name }} + {{- $relevant_project_names = append $relevant_project_names $grant.project_name }} + {{- end }} + {{- end }} + {{- end }} + {{- $relevant_project_names = sortAlpha (uniq $relevant_project_names) }} + {{- if $relevant_project_names }} + projects: + {{- range $project_name := $relevant_project_names }} + - name: {{ $project_name }} + role_assignments: + {{- range $role := sortAlpha (keys $.Values.limes.external_role_assignments) }} + {{- range $grant := index $.Values.limes.external_role_assignments $role }} + {{- if and (eq $grant.project_domain_name $domain_name) (eq $grant.project_name $project_name) }} + - user: {{ $grant.user_name }}@{{ $grant.user_domain_name }} + role: {{ $role }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} diff --git a/openstack/limes/values.yaml b/openstack/limes/values.yaml index 8a8b33114ba..2555d74e4d5 100644 --- a/openstack/limes/values.yaml +++ b/openstack/limes/values.yaml @@ -70,6 +70,12 @@ limes: clusters: ccloud: {} + # Additional role assignments for the respective roles to external users of the Limes API. + # Each entry must be an object with the keys "user_name", "user_domain_name", "project_name", "project_domain_name". + external_role_assignments: + cloud_resource_admin: [] + cloud_resource_viewer: [] + # Whether to apply resource requests/limits to containers. resources: enabled: false From 4123891d7ebbde29e8a0e5b2bccb0268fa5745ed Mon Sep 17 00:00:00 2001 From: Vlad Gusev Date: Thu, 16 Jan 2025 15:16:53 +0200 Subject: [PATCH 186/224] [pxc-db] Set max_binlog_size option to 100M (#7679) Lowers the default value of the max_binlog_size option from 1G to 100M (104857600 bytes). This helps avoid timeouts on binlog upload: a sufficiently large binlog might not finish uploading within the upload timeout. --- common/pxc-db/Chart.yaml | 2 +- common/pxc-db/values.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/common/pxc-db/Chart.yaml b/common/pxc-db/Chart.yaml index 132f203249a..1b5ad20ba9b 100644 --- a/common/pxc-db/Chart.yaml +++ b/common/pxc-db/Chart.yaml @@ -16,7 +16,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.10 +version: 0.2.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application.
Versions are not expected to diff --git a/common/pxc-db/values.yaml b/common/pxc-db/values.yaml index af148d58977..4d37f786333 100644 --- a/common/pxc-db/values.yaml +++ b/common/pxc-db/values.yaml @@ -192,6 +192,7 @@ pxc: pxc_strict_mode: MASTER # default is ENFORCING binlog_format: ROW binlog_expire_logs_seconds: 345600 # default 30 days -> 4 days + max_binlog_size: 104857600 # default 1G -> 100M sync_binlog: 1 # default value for PXC net_read_timeout: 30 net_write_timeout: 60 From e518343bb0f6e32838c3c9c550bf80396702873e Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 15:20:48 +0100 Subject: [PATCH 187/224] limes: do not render grants-seed if there are no grants to be made Helm can deal with this just fine, and the seeder might be able to handle it, too, but `secrets-injector validate` chokes on `spec:` being absent (i.e. null). --- openstack/limes/templates/seed-grants.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/openstack/limes/templates/seed-grants.yaml b/openstack/limes/templates/seed-grants.yaml index a2585daab66..10321cd8e6a 100644 --- a/openstack/limes/templates/seed-grants.yaml +++ b/openstack/limes/templates/seed-grants.yaml @@ -1,3 +1,9 @@ +{{- $num_grants := 0 }} +{{- range $role, $grants := $.Values.limes.external_role_assignments }} + {{- $num_grants = add $num_grants (len $grants) }} +{{- end }} +{{- if gt $num_grants 0 }} + apiVersion: "openstack.stable.sap.cc/v1" kind: OpenstackSeed metadata: @@ -60,3 +66,5 @@ spec: {{- end }} {{- end }} {{- end }} + +{{- end }} From f8a3eed2bcbc992dda62a8b684a47ff278f88761 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Thu, 16 Jan 2025 16:38:29 +0200 Subject: [PATCH 188/224] [calico] bump CRDs to 3.29.1, fix rbac --- system/calico/Chart.yaml | 3 +- ...workpolicies.policy.networking.k8s.io.yaml | 1046 +++++++++++++++++ .../bgpfilters.crd.projectcalico.org.yaml | 52 + ...xconfigurations.crd.projectcalico.org.yaml | 46 +- ...networkpolicies.crd.projectcalico.org.yaml | 5 +- ...networkpolicies.crd.projectcalico.org.yaml | 5 +- .../crds/tiers.crd.projectcalico.org.yaml | 52 + .../templates/clusterrole-calico-node.yaml | 19 +- 8 files changed, 1221 insertions(+), 7 deletions(-) create mode 100644 system/calico/crds/adminnetworkpolicies.policy.networking.k8s.io.yaml create mode 100644 system/calico/crds/tiers.crd.projectcalico.org.yaml diff --git a/system/calico/Chart.yaml b/system/calico/Chart.yaml index b90a66aac52..ee54ade5254 100644 --- a/system/calico/Chart.yaml +++ b/system/calico/Chart.yaml @@ -2,4 +2,5 @@ apiVersion: v2 name: calico description: A Helm chart for the Calico. 
type: application -version: 1.1.0 +version: 1.1.1 +appVersion: 3.29.1 diff --git a/system/calico/crds/adminnetworkpolicies.policy.networking.k8s.io.yaml b/system/calico/crds/adminnetworkpolicies.policy.networking.k8s.io.yaml new file mode 100644 index 00000000000..a0218c44b84 --- /dev/null +++ b/system/calico/crds/adminnetworkpolicies.policy.networking.k8s.io.yaml @@ -0,0 +1,1046 @@ +--- +# Source: crds/policy.networking.k8s.io_adminnetworkpolicies.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/network-policy-api/pull/30 + policy.networking.k8s.io/bundle-version: v0.1.1 + policy.networking.k8s.io/channel: experimental + creationTimestamp: null + name: adminnetworkpolicies.policy.networking.k8s.io +spec: + group: policy.networking.k8s.io + names: + kind: AdminNetworkPolicy + listKind: AdminNetworkPolicyList + plural: adminnetworkpolicies + shortNames: + - anp + singular: adminnetworkpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.priority + name: Priority + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + AdminNetworkPolicy is a cluster level resource that is part of the + AdminNetworkPolicy API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior of AdminNetworkPolicy. + properties: + egress: + description: |- + Egress is the list of Egress rules to be applied to the selected pods. + A total of 100 rules will be allowed in each ANP instance. + The relative precedence of egress rules within a single ANP object (all of + which share the priority) will be determined by the order in which the rule + is written. Thus, a rule that appears at the top of the egress rules + would take the highest precedence. + ANPs with no egress rules do not affect egress traffic. + + + Support: Core + items: + description: |- + AdminNetworkPolicyEgressRule describes an action to take on a particular + set of traffic originating from pods selected by a AdminNetworkPolicy's + Subject field. + + properties: + action: + description: |- + Action specifies the effect this rule will have on matching traffic. + Currently the following actions are supported: + Allow: allows the selected traffic (even if it would otherwise have been denied by NetworkPolicy) + Deny: denies the selected traffic + Pass: instructs the selected traffic to skip any remaining ANP rules, and + then pass execution to any NetworkPolicies that select the pod. + If the pod is not selected by any NetworkPolicies then execution + is passed to any BaselineAdminNetworkPolicies that select the pod. 
+ + + Support: Core + enum: + - Allow + - Deny + - Pass + type: string + name: + description: |- + Name is an identifier for this rule, that may be no more than 100 characters + in length. This field should be used by the implementation to help + improve observability, readability and error-reporting for any applied + AdminNetworkPolicies. + + + Support: Core + maxLength: 100 + type: string + ports: + description: |- + Ports allows for matching traffic based on port and protocols. + This field is a list of destination ports for the outgoing egress traffic. + If Ports is not set then the rule does not filter traffic via port. + + + Support: Core + items: + description: |- + AdminNetworkPolicyPort describes how to select network ports on pod(s). + Exactly one field must be set. + maxProperties: 1 + minProperties: 1 + properties: + namedPort: + description: |- + NamedPort selects a port on a pod(s) based on name. + + + Support: Extended + + + + type: string + portNumber: + description: |- + Port selects a port on a pod(s) based on number. + + + Support: Core + properties: + port: + description: |- + Number defines a network port value. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + required: + - port + - protocol + type: object + portRange: + description: |- + PortRange selects a port range on a pod(s) based on provided start and end + values. + + + Support: Core + properties: + end: + description: |- + End defines a network port that is the end of a port range, the End value + must be greater than Start. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + start: + description: |- + Start defines a network port that is the start of a port range, the Start + value must be less than End. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object + type: object + maxItems: 100 + type: array + to: + description: |- + To is the List of destinations whose traffic this rule applies to. + If any AdminNetworkPolicyEgressPeer matches the destination of outgoing + traffic then the specified action is applied. + This field must be defined and contain at least one item. + + + Support: Core + items: + description: |- + AdminNetworkPolicyEgressPeer defines a peer to allow traffic to. + Exactly one of the selector pointers must be set for a given peer. If a + consumer observes none of its fields are set, they must assume an unknown + option has been specified and fail closed. + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: |- + Namespaces defines a way to select all pods within a set of Namespaces. + Note that host-networked pods are not included in this type of peer. + + + Support: Core + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + networks: + description: |- + Networks defines a way to select peers via CIDR blocks. + This is intended for representing entities that live outside the cluster, + which can't be selected by pods, namespaces and nodes peers, but note + that cluster-internal traffic will be checked against the rule as + well. So if you Allow or Deny traffic to `"0.0.0.0/0"`, that will allow + or deny all IPv4 pod-to-pod traffic as well. If you don't want that, + add a rule that Passes all pod traffic before the Networks rule. + + + Each item in Networks should be provided in the CIDR format and should be + IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + + + Networks can have upto 25 CIDRs specified. + + + Support: Extended + + + + items: + description: |- + CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). + This string must be validated by implementations using net.ParseCIDR + TODO: Introduce CEL CIDR validation regex isCIDR() in Kube 1.31 when it is available. + maxLength: 43 + type: string + x-kubernetes-validations: + - message: CIDR must be either an IPv4 or IPv6 address. IPv4 address embedded in IPv6 addresses are not supported + rule: self.contains(':') != self.contains('.') + maxItems: 25 + minItems: 1 + type: array + x-kubernetes-list-type: set + nodes: + description: |- + Nodes defines a way to select a set of nodes in + the cluster. This field follows standard label selector + semantics; if present but empty, it selects all Nodes. + + + Support: Extended + + + + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: |- + Pods defines a way to select a set of pods in + a set of namespaces. Note that host-networked pods + are not included in this type of peer. + + + Support: Core + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + maxItems: 100 + minItems: 1 + type: array + required: + - action + - to + type: object + x-kubernetes-validations: + - message: networks/nodes peer cannot be set with namedPorts since there are no namedPorts for networks/nodes + rule: '!(self.to.exists(peer, has(peer.networks) || has(peer.nodes)) && has(self.ports) && self.ports.exists(port, has(port.namedPort)))' + maxItems: 100 + type: array + ingress: + description: |- + Ingress is the list of Ingress rules to be applied to the selected pods. + A total of 100 rules will be allowed in each ANP instance. + The relative precedence of ingress rules within a single ANP object (all of + which share the priority) will be determined by the order in which the rule + is written. Thus, a rule that appears at the top of the ingress rules + would take the highest precedence. + ANPs with no ingress rules do not affect ingress traffic. + + + Support: Core + items: + description: |- + AdminNetworkPolicyIngressRule describes an action to take on a particular + set of traffic destined for pods selected by an AdminNetworkPolicy's + Subject field. + properties: + action: + description: |- + Action specifies the effect this rule will have on matching traffic. + Currently the following actions are supported: + Allow: allows the selected traffic (even if it would otherwise have been denied by NetworkPolicy) + Deny: denies the selected traffic + Pass: instructs the selected traffic to skip any remaining ANP rules, and + then pass execution to any NetworkPolicies that select the pod. + If the pod is not selected by any NetworkPolicies then execution + is passed to any BaselineAdminNetworkPolicies that select the pod. + + + Support: Core + enum: + - Allow + - Deny + - Pass + type: string + from: + description: |- + From is the list of sources whose traffic this rule applies to. + If any AdminNetworkPolicyIngressPeer matches the source of incoming + traffic then the specified action is applied. + This field must be defined and contain at least one item. + + + Support: Core + items: + description: |- + AdminNetworkPolicyIngressPeer defines an in-cluster peer to allow traffic from. + Exactly one of the selector pointers must be set for a given peer. If a + consumer observes none of its fields are set, they must assume an unknown + option has been specified and fail closed. + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: |- + Namespaces defines a way to select all pods within a set of Namespaces. + Note that host-networked pods are not included in this type of peer. + + + Support: Core + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: |- + Pods defines a way to select a set of pods in + a set of namespaces. Note that host-networked pods + are not included in this type of peer. + + + Support: Core + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + maxItems: 100 + minItems: 1 + type: array + name: + description: |- + Name is an identifier for this rule, that may be no more than 100 characters + in length. This field should be used by the implementation to help + improve observability, readability and error-reporting for any applied + AdminNetworkPolicies. + + + Support: Core + maxLength: 100 + type: string + ports: + description: |- + Ports allows for matching traffic based on port and protocols. + This field is a list of ports which should be matched on + the pods selected for this policy i.e the subject of the policy. + So it matches on the destination port for the ingress traffic. + If Ports is not set then the rule does not filter traffic via port. + + + Support: Core + items: + description: |- + AdminNetworkPolicyPort describes how to select network ports on pod(s). + Exactly one field must be set. + maxProperties: 1 + minProperties: 1 + properties: + namedPort: + description: |- + NamedPort selects a port on a pod(s) based on name. + + + Support: Extended + + + + type: string + portNumber: + description: |- + Port selects a port on a pod(s) based on number. + + + Support: Core + properties: + port: + description: |- + Number defines a network port value. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + required: + - port + - protocol + type: object + portRange: + description: |- + PortRange selects a port range on a pod(s) based on provided start and end + values. + + + Support: Core + properties: + end: + description: |- + End defines a network port that is the end of a port range, the End value + must be greater than Start. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + default: TCP + description: |- + Protocol is the network protocol (TCP, UDP, or SCTP) which traffic must + match. If not specified, this field defaults to TCP. + + + Support: Core + type: string + start: + description: |- + Start defines a network port that is the start of a port range, the Start + value must be less than End. + + + Support: Core + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object + type: object + maxItems: 100 + type: array + required: + - action + - from + type: object + maxItems: 100 + type: array + priority: + description: |- + Priority is a value from 0 to 1000. Rules with lower priority values have + higher precedence, and are checked before rules with higher priority values. + All AdminNetworkPolicy rules have higher precedence than NetworkPolicy or + BaselineAdminNetworkPolicy rules + The behavior is undefined if two ANP objects have same priority. + + + Support: Core + format: int32 + maximum: 1000 + minimum: 0 + type: integer + subject: + description: |- + Subject defines the pods to which this AdminNetworkPolicy applies. + Note that host-networked pods are not included in subject selection. 
+ + + Support: Core + maxProperties: 1 + minProperties: 1 + properties: + namespaces: + description: Namespaces is used to select pods via namespace selectors. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + pods: + description: Pods is used to select pods via namespace AND pod selectors. + properties: + namespaceSelector: + description: |- + NamespaceSelector follows standard label selector semantics; if empty, + it selects all Namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + PodSelector is used to explicitly select pods within a namespace; if empty, + it selects all Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - podSelector + type: object + type: object + required: + - priority + - subject + type: object + status: + description: Status is the status to be reported by the implementation. + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - conditions + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/system/calico/crds/bgpfilters.crd.projectcalico.org.yaml b/system/calico/crds/bgpfilters.crd.projectcalico.org.yaml index d469b138f93..c51d35348c2 100644 --- a/system/calico/crds/bgpfilters.crd.projectcalico.org.yaml +++ b/system/calico/crds/bgpfilters.crd.projectcalico.org.yaml @@ -44,6 +44,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object source: type: string required: @@ -63,6 +76,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object source: type: string required: @@ -82,6 +108,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object source: type: string required: @@ -101,6 +140,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object source: type: string required: diff --git a/system/calico/crds/felixconfigurations.crd.projectcalico.org.yaml b/system/calico/crds/felixconfigurations.crd.projectcalico.org.yaml index d398e6c99f3..fa7ea77707e 100644 --- a/system/calico/crds/felixconfigurations.crd.projectcalico.org.yaml +++ b/system/calico/crds/felixconfigurations.crd.projectcalico.org.yaml @@ -155,6 +155,9 @@ spec: bpfPolicyDebugEnabled: description: BPFPolicyDebugEnabled when true, Felix records detailed information about the BPF policy programs, which can be examined with the calico-bpf command-line tool. type: boolean + bpfRedirectToPeer: + description: 'BPFRedirectToPeer controls which whether it is allowed to forward straight to the peer side of the workload devices. It is allowed for any host L2 devices by default (L2Only), but it breaks TCP dump on the host side of workload device as it bypasses it on ingress. Value of Enabled also allows redirection from L3 host devices like IPIP tunnel or Wireguard directly to the peer side of the workload''s device. This makes redirection faster, however, it breaks tools like tcpdump on the peer side. Use Enabled with caution. 
[Default: L2Only]' + type: string chainInsertMode: description: 'ChainInsertMode controls whether Felix hooks the kernel''s top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. insert is the safe default since it prevents Calico''s rules from being bypassed. If you switch to append mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. [Default: insert]' pattern: ^(?i)(insert|append)?$ @@ -213,7 +216,7 @@ spec: type: string type: array failsafeInboundHostPorts: - description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all inbound host ports, use the value none. The default value allows ssh access and DHCP. [Default: tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + description: 'FailsafeInboundHostPorts is a list of PortProto struct objects including UDP/TCP/SCTP ports and CIDRs that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all inbound host ports, use the value "[]". The default value allows ssh access, DHCP, BGP, etcd and the Kubernetes API. [Default: tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:5473, tcp:6443, tcp:6666, tcp:6667 ]' items: description: ProtoPort is combination of protocol, port, and CIDR. Protocol and port must be specified. properties: @@ -229,7 +232,7 @@ spec: type: object type: array failsafeOutboundHostPorts: - description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all outbound host ports, use the value none. The default value opens etcd''s standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP and DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, udp:53, udp:67]' + description: 'FailsafeOutboundHostPorts is a list of List of PortProto struct objects including UDP/TCP/SCTP ports and CIDRs that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all outbound host ports, use the value "[]". The default value opens etcd''s standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP, DNS, BGP and the Kubernetes API. 
[Default: udp:53, udp:67, tcp:179, tcp:2379, tcp:2380, tcp:5473, tcp:6443, tcp:6666, tcp:6667 ]' items: description: ProtoPort is combination of protocol, port, and CIDR. Protocol and port must be specified. properties: @@ -261,6 +264,15 @@ spec: genericXDPEnabled: description: 'GenericXDPEnabled enables Generic XDP so network cards that don''t support XDP offload or driver modes can use XDP. This is not recommended since it doesn''t provide better performance than iptables. [Default: false]' type: boolean + goGCThreshold: + description: "GoGCThreshold Sets the Go runtime's garbage collection threshold. I.e. the percentage that the heap is allowed to grow before garbage collection is triggered. In general, doubling the value halves the CPU time spent doing GC, but it also doubles peak GC memory overhead. A special value of -1 can be used to disable GC entirely; this should only be used in conjunction with the GoMemoryLimitMB setting. \n This setting is overridden by the GOGC environment variable. \n [Default: 40]" + type: integer + goMaxProcs: + description: "GoMaxProcs sets the maximum number of CPUs that the Go runtime will use concurrently. A value of -1 means \"use the system default\"; typically the number of real CPUs on the system. \n this setting is overridden by the GOMAXPROCS environment variable. \n [Default: -1]" + type: integer + goMemoryLimitMB: + description: "GoMemoryLimitMB sets a (soft) memory limit for the Go runtime in MB. The Go runtime will try to keep its memory usage under the limit by triggering GC as needed. To avoid thrashing, it will exceed the limit if GC starts to take more than 50% of the process's CPU time. A value of -1 disables the memory limit. \n Note that the memory limit, if used, must be considerably less than any hard resource limit set at the container or pod level. This is because felix is not the only process that must run in the container or pod. \n This setting is overridden by the GOMEMLIMIT environment variable. \n [Default: -1]" + type: integer healthEnabled: type: boolean healthHost: @@ -290,6 +302,12 @@ spec: description: InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. The rescan can be disabled by setting the interval to 0. pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string + ipForwarding: + description: 'IPForwarding controls whether Felix sets the host sysctls to enable IP forwarding. IP forwarding is required when using Calico for workload networking. This should only be disabled on hosts where Calico is used for host protection. [Default: Enabled]' + enum: + - Enabled + - Disabled + type: string ipipEnabled: description: 'IPIPEnabled overrides whether Felix should configure an IPIP interface on the host. Optional as Felix determines this based on the existing IP pools. [Default: nil (unset)]' type: boolean @@ -373,6 +391,7 @@ spec: pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string maxIpsetSize: + description: MaxIpsetSize is the maximum number of IP addresses that can be stored in an IP set. Not applicable if using the nftables backend. type: integer metadataAddr: description: 'MetadataAddr is the IP address or domain name of the server that can answer VM queries for cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in Ubuntu, nova-api-metadata). A value of none (case-insensitive) means that Felix should not set up any NAT rule for the metadata path. 
[Default: 127.0.0.1]' @@ -396,6 +415,26 @@ spec: netlinkTimeout: pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string + nftablesFilterAllowAction: + pattern: ^(?i)(Accept|Return)?$ + type: string + nftablesFilterDenyAction: + description: FilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic with a "drop" action. If you want to use a "reject" action instead you can configure it here. + pattern: ^(?i)(Drop|Reject)?$ + type: string + nftablesMangleAllowAction: + pattern: ^(?i)(Accept|Return)?$ + type: string + nftablesMarkMask: + description: 'MarkMask is the mask that Felix selects its nftables Mark bits from. Should be a 32 bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. [Default: 0xffff0000]' + format: int32 + type: integer + nftablesMode: + description: 'NFTablesMode configures nftables support in Felix. [Default: Disabled]' + type: string + nftablesRefreshInterval: + description: 'NftablesRefreshInterval controls the interval at which Felix periodically refreshes the nftables rules. [Default: 90s]' + type: string openstackRegion: description: 'OpenstackRegion is the name of the region that a particular Felix belongs to. In a multi-region Calico/OpenStack deployment, this must be configured somehow for each Felix (here in the datamodel, or in felix.cfg or the environment on each compute node), and must match the [calico] openstack_region value configured in neutron.conf on each node. [Default: Empty]' type: string @@ -540,6 +579,9 @@ spec: wireguardRoutingRulePriority: description: 'WireguardRoutingRulePriority controls the priority value to use for the Wireguard routing rule. [Default: 99]' type: integer + wireguardThreadingEnabled: + description: 'WireguardThreadingEnabled controls whether Wireguard has NAPI threading enabled. [Default: false]' + type: boolean workloadSourceSpoofing: description: WorkloadSourceSpoofing controls whether pods can use the allowedSourcePrefixes annotation to send traffic with a source IP address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. pattern: ^(?i)(Disabled|Any)?$ diff --git a/system/calico/crds/globalnetworkpolicies.crd.projectcalico.org.yaml b/system/calico/crds/globalnetworkpolicies.crd.projectcalico.org.yaml index af5d7c9ddce..24666a33429 100644 --- a/system/calico/crds/globalnetworkpolicies.crd.projectcalico.org.yaml +++ b/system/calico/crds/globalnetworkpolicies.crd.projectcalico.org.yaml @@ -444,7 +444,7 @@ spec: description: NamespaceSelector is an optional field for an expression used to select a pod based on namespaces. type: string order: - description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name". + description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order within the same tier. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name" within the tier. 
type: number performanceHints: description: "PerformanceHints contains a list of hints to Calico's policy engine to help process the policy more efficiently. Hints never change the enforcement behaviour of the policy. \n Currently, the only available hint is \"AssumeNeededOnEveryNode\". When that hint is set on a policy, Felix will act as if the policy matches a local endpoint even if it does not. This is useful for \"preloading\" any large static policies that are known to be used on every node. If the policy is _not_ used on a particular node then the work done to preload the policy (and to maintain it) is wasted." @@ -460,6 +460,9 @@ spec: serviceAccountSelector: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. type: string + tier: + description: The name of the tier that this policy belongs to. If this is omitted, the default tier (name is "default") is assumed. The specified tier must exist in order to create security policies within the tier, the "default" tier is created automatically if it does not exist, this means for deployments requiring only a single Tier, the tier name may be omitted on all policy management requests. + type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. When not explicitly specified (and so the value on creation is empty or nil), Calico defaults Types according to what Ingress and Egress rules are present in the policy. The default is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are also no Ingress rules) \n - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. \n When the policy is read back again, Types will always be one of these values, never empty or nil." items: diff --git a/system/calico/crds/networkpolicies.crd.projectcalico.org.yaml b/system/calico/crds/networkpolicies.crd.projectcalico.org.yaml index 1926242054f..95002193843 100644 --- a/system/calico/crds/networkpolicies.crd.projectcalico.org.yaml +++ b/system/calico/crds/networkpolicies.crd.projectcalico.org.yaml @@ -435,7 +435,7 @@ spec: type: object type: array order: - description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name". + description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied after those with lower order within the same tier. If the order is omitted, it may be considered to be "infinite" - i.e. the policy will be applied last. Policies with identical order will be applied in alphanumerical order based on the Policy "Name" within the tier. type: number performanceHints: description: "PerformanceHints contains a list of hints to Calico's policy engine to help process the policy more efficiently. Hints never change the enforcement behaviour of the policy. \n Currently, the only available hint is \"AssumeNeededOnEveryNode\". When that hint is set on a policy, Felix will act as if the policy matches a local endpoint even if it does not. 
This is useful for \"preloading\" any large static policies that are known to be used on every node. If the policy is _not_ used on a particular node then the work done to preload the policy (and to maintain it) is wasted." @@ -448,6 +448,9 @@ spec: serviceAccountSelector: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. type: string + tier: + description: The name of the tier that this policy belongs to. If this is omitted, the default tier (name is "default") is assumed. The specified tier must exist in order to create security policies within the tier, the "default" tier is created automatically if it does not exist, this means for deployments requiring only a single Tier, the tier name may be omitted on all policy management requests. + type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. When not explicitly specified (and so the value on creation is empty or nil), Calico defaults Types according to what Ingress and Egress are present in the policy. The default is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are also no Ingress rules) \n - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. \n When the policy is read back again, Types will always be one of these values, never empty or nil." items: diff --git a/system/calico/crds/tiers.crd.projectcalico.org.yaml b/system/calico/crds/tiers.crd.projectcalico.org.yaml new file mode 100644 index 00000000000..f6d97a41ce1 --- /dev/null +++ b/system/calico/crds/tiers.crd.projectcalico.org.yaml @@ -0,0 +1,52 @@ +--- +# Source: crds/crd.projectcalico.org_tiers.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: tiers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: Tier + listKind: TierList + plural: tiers + singular: tier + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TierSpec contains the specification for a security policy tier resource. + properties: + defaultAction: + description: 'DefaultAction specifies the action applied to workloads selected by a policy in the tier, but not rule matched the workload''s traffic. [Default: Deny]' + enum: + - Pass + - Deny + type: string + order: + description: Order is an optional field that specifies the order in which the tier is applied. Tiers with higher "order" are applied after those with lower order. If the order is omitted, it may be considered to be "infinite" - i.e. the tier will be applied last. 
Tiers with identical order will be applied in alphanumerical order based on the Tier "Name". + type: number + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/system/calico/templates/clusterrole-calico-node.yaml b/system/calico/templates/clusterrole-calico-node.yaml index 8ca62b11312..1c9210cc0cd 100644 --- a/system/calico/templates/clusterrole-calico-node.yaml +++ b/system/calico/templates/clusterrole-calico-node.yaml @@ -58,6 +58,13 @@ rules: verbs: - watch - list + # Watch for changes to Kubernetes AdminNetworkPolicies. + - apiGroups: ["policy.networking.k8s.io"] + resources: + - adminnetworkpolicies + verbs: + - watch + - list # Used by Calico for policy information. - apiGroups: [""] resources: @@ -93,10 +100,17 @@ rules: - hostendpoints - blockaffinities - caliconodestatuses + - tiers verbs: - get - list - watch + # Calico creates some tiers on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - tiers + verbs: +kind: ClusterRole - create # Calico must create and update some CRDs on startup. - apiGroups: ["crd.projectcalico.org"] resources: @@ -107,7 +121,7 @@ rules: - create - update # Calico must update some CRDs. - - apiGroups: [ "crd.projectcalico.org" ] + - apiGroups: ["crd.projectcalico.org"] resources: - caliconodestatuses verbs: @@ -141,6 +155,7 @@ rules: - create - update - delete + # The CNI plugin and calico/node need to be able to create a default # IPAMConfiguration - apiGroups: ["crd.projectcalico.org"] @@ -161,4 +176,4 @@ rules: resources: - daemonsets verbs: - - get + - get \ No newline at end of file From 2dc2416502bace416eefde56d355ca7edad4aea8 Mon Sep 17 00:00:00 2001 From: Dmitri Fedotov Date: Thu, 16 Jan 2025 16:43:09 +0200 Subject: [PATCH 189/224] [calico] fix typo --- system/calico/templates/clusterrole-calico-node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/calico/templates/clusterrole-calico-node.yaml b/system/calico/templates/clusterrole-calico-node.yaml index 1c9210cc0cd..08426bc08ba 100644 --- a/system/calico/templates/clusterrole-calico-node.yaml +++ b/system/calico/templates/clusterrole-calico-node.yaml @@ -110,7 +110,7 @@ rules: resources: - tiers verbs: -kind: ClusterRole - create + - create # Calico must create and update some CRDs on startup. 
- apiGroups: ["crd.projectcalico.org"] resources: From dd17f9868c6a598d59ef9510f620e0f18b013f55 Mon Sep 17 00:00:00 2001 From: sapcc-bot Date: Thu, 16 Jan 2025 14:44:48 +0000 Subject: [PATCH 190/224] system/calico-cni: run helm dep up --- system/calico-cni/Chart.lock | 6 +++--- system/calico-cni/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/system/calico-cni/Chart.lock b/system/calico-cni/Chart.lock index 4d73c26a331..7804e9c3fd1 100644 --- a/system/calico-cni/Chart.lock +++ b/system/calico-cni/Chart.lock @@ -4,12 +4,12 @@ dependencies: version: 1.0.0 - name: calico repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.1.0 + version: 1.1.1 - name: calico-apiserver repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.0.3 - name: cni-nanny repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.0.5 -digest: sha256:aaa7823009c9d8724cc64272508296e68ca4834fdf5cd3e5dbfebbb341340475 -generated: "2025-01-10T10:52:19.095444075Z" +digest: sha256:d8f5f2f4e20a23ebd8f5a28111a534e2fbde36cd0a47cc5c7ea35f00738a3444 +generated: "2025-01-16T14:44:47.260316063Z" diff --git a/system/calico-cni/Chart.yaml b/system/calico-cni/Chart.yaml index 5753b5c1448..465946c2161 100644 --- a/system/calico-cni/Chart.yaml +++ b/system/calico-cni/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: calico-cni description: A Helm chart for the all things CNI. type: application -version: 1.0.16 +version: 1.0.17 dependencies: - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From 874665dd7d5413c73ed6ebc4b81f020af156029a Mon Sep 17 00:00:00 2001 From: Fabian Wiesel Date: Thu, 16 Jan 2025 10:50:18 +0100 Subject: [PATCH 191/224] [cc-cluster] Set cni bin-dir explicitly Debian (and therefor Gardenlinux) defaults to `/usr/lib/cni` In order to stay with the upstream default, we need to specify that explicitly. For flatcar, the change should be a no-op. For more details see: https://github.com/containerd/containerd/issues/6600 --- system/cc-cluster/Chart.yaml | 2 +- system/cc-cluster/templates/kubeadmconfigtemplate.yaml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/system/cc-cluster/Chart.yaml b/system/cc-cluster/Chart.yaml index 397c3c01a65..3e8bb6452f8 100644 --- a/system/cc-cluster/Chart.yaml +++ b/system/cc-cluster/Chart.yaml @@ -2,4 +2,4 @@ apiVersion: v2 name: cc-cluster description: A Helm chart for the cc clusters. 
type: application -version: 1.0.23 +version: 1.0.24 diff --git a/system/cc-cluster/templates/kubeadmconfigtemplate.yaml b/system/cc-cluster/templates/kubeadmconfigtemplate.yaml index 3df82627e14..24b79b70794 100644 --- a/system/cc-cluster/templates/kubeadmconfigtemplate.yaml +++ b/system/cc-cluster/templates/kubeadmconfigtemplate.yaml @@ -52,6 +52,10 @@ spec: runtime_type = "io.containerd.runc.v2" [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" - path: /etc/ssh/sshd_config filesystem: root mode: 393 From 14e37726bc48799ecca49573c41e901c0b9cdc99 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 16:07:16 +0100 Subject: [PATCH 192/224] billing: support deployment into global clusters --- openstack/billing/templates/seeds.yaml | 20 +++++++++++++++++--- openstack/billing/values.yaml | 3 +++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/openstack/billing/templates/seeds.yaml b/openstack/billing/templates/seeds.yaml index 28182347afc..ae07dabb956 100644 --- a/openstack/billing/templates/seeds.yaml +++ b/openstack/billing/templates/seeds.yaml @@ -1,10 +1,14 @@ {{- $vbase := .Values.global.vaultBaseURL | required "missing value for .Values.global.vaultBaseURL" -}} {{- $region := .Values.global.region | required "missing value for .Values.global.region" -}} +{{- $tld := .Values.global.tld | required "missing value for .Values.global.tld" -}} {{- $domains := list "ccadmin" "bs" "cis" "cp" "fsn" "hcp03" "hec" "monsoon3" "neo" "s4" "wbs"}} {{- if not .Values.global.domain_seeds.skip_hcm_domain -}} {{- $domains = append $domains "hcm" }} {{- end -}} +{{- if .Values.is_global -}} + {{- $domains = list "ccadmin" "global" -}} +{{- end -}} apiVersion: "openstack.stable.sap.cc/v1" kind: OpenstackSeed @@ -30,18 +34,26 @@ spec: description: 'SAP Converged Cloud Billing services' endpoints: - interface: public - region: '{{.Values.global.region}}' - url: 'https://billing.{{.Values.global.region}}.cloud.sap:64000' + {{- if .Values.is_global }} + region: global + url: 'https://{{ $region | contains "qa" | ternary "billing-qa" "billing" }}.global.{{ $tld }}:64000' + {{- else }} + region: '{{ $region }}' + url: 'https://billing.{{ $region }}.{{ $tld }}:64000' + {{- end }} domains: - name: Default users: + {{/* TODO: remove the billing@Default user account once CBR has migrated to their new technical user */}} + {{- if not .Values.is_global }} - name: billing # service user for the billing API itself description: 'Billing Service' password: {{ printf "%s/%s/billing/keystone-user/service/password" $vbase $region | quote }} role_assignments: - project: service role: service + {{- end }} - name: masterdata_scanner # service user for a data quality check job description: 'Masterdata Scanner (Data Quality Validation)' password: {{ printf "%s/%s/billing/keystone-user/masterdata-scanner/password" $vbase $region | quote }} @@ -51,7 +63,7 @@ spec: - user: masterdata_scanner@Default role: masterdata_admin inherited: true - {{- if eq $region "qa-de-1" }} + {{- if and (eq $region "qa-de-1") (not .Values.is_global) }} {{- if not .Values.cc3test }} {{- fail "missing values in .Values.cc3test" }} {{- end }} @@ -91,6 +103,7 @@ spec: inherited: true {{- if eq . 
"ccadmin" }} projects: + {{- if not $.Values.is_global }} - name: billing description: 'Billing Administration for Converged Cloud' role_assignments: @@ -99,6 +112,7 @@ spec: # NOTE: The cloud_resource_viewer role is given by the limes seed. swift: enabled: true + {{- end }} - name: cloud_admin role_assignments: - user: masterdata_scanner@Default diff --git a/openstack/billing/values.yaml b/openstack/billing/values.yaml index 9a14ec99570..8697e705d93 100644 --- a/openstack/billing/values.yaml +++ b/openstack/billing/values.yaml @@ -9,3 +9,6 @@ owner-info: - Stefan Majewsky - Sandro Jäckel helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/openstack/billing + +# will be set to true by the CI for the respective deployments +is_global: false From 4d386753b4badfd4878859b2bd93d3e8a5f38302 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 16:12:34 +0100 Subject: [PATCH 193/224] limes: fix derivation of domain seed names e.g. "Default" -> "domain-default-seed" --- openstack/limes/templates/seed-grants.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openstack/limes/templates/seed-grants.yaml b/openstack/limes/templates/seed-grants.yaml index a2585daab66..6213c29efa7 100644 --- a/openstack/limes/templates/seed-grants.yaml +++ b/openstack/limes/templates/seed-grants.yaml @@ -21,7 +21,7 @@ spec: {{- $is_global := $.Values.limes.clusters.ccloud.catalog_url | contains "global" -}} {{- $base_seed_namespace := $is_global | ternary "monsoon3global" "monsoon3" }} {{- range $relevant_domain_names }} - - {{ $base_seed_namespace }}/domain-{{replace "_" "-" .}}-seed + - {{ $base_seed_namespace }}/domain-{{ . | lower | replace "_" "-" }}-seed {{- end }} roles: From 6cb17cbc5322cf805d9a811559cfe8fb3db24203 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 16:25:45 +0100 Subject: [PATCH 194/224] billing: fix seed references --- openstack/billing/templates/seeds.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/openstack/billing/templates/seeds.yaml b/openstack/billing/templates/seeds.yaml index ae07dabb956..6e258ed2af7 100644 --- a/openstack/billing/templates/seeds.yaml +++ b/openstack/billing/templates/seeds.yaml @@ -16,10 +16,11 @@ metadata: name: billing-seed spec: requires: + {{- $base_seed_namespace := .Values.is_global | ternary "monsoon3global" "monsoon3" }} {{- range $domains }} - - monsoon3/domain-{{ . | lower }}-seed + - {{ $base_seed_namespace }}/domain-{{ . | lower }}-seed {{- end }} - - monsoon3/domain-cc3test-seed + - {{ $base_seed_namespace }}/domain-cc3test-seed - swift/swift-seed roles: From b7954b6f8379191b9d6b41bd3034646a1f9a4e2d Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 16:33:38 +0100 Subject: [PATCH 195/224] openstack/billing: fix seed dependencies in global --- openstack/billing/templates/seeds.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/openstack/billing/templates/seeds.yaml b/openstack/billing/templates/seeds.yaml index 6e258ed2af7..00579445b8d 100644 --- a/openstack/billing/templates/seeds.yaml +++ b/openstack/billing/templates/seeds.yaml @@ -21,7 +21,9 @@ spec: - {{ $base_seed_namespace }}/domain-{{ . 
| lower }}-seed {{- end }} - {{ $base_seed_namespace }}/domain-cc3test-seed + {{- if not .Values.is_global }} - swift/swift-seed + {{- end }} roles: - name: masterdata_admin From 7728199a08d1c9c8ed78f683bc5d6025593ad139 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 16:43:00 +0100 Subject: [PATCH 196/224] openstack/billing: skip seeding role assignments for groups that do not exist in global --- openstack/billing/templates/seeds.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/openstack/billing/templates/seeds.yaml b/openstack/billing/templates/seeds.yaml index 00579445b8d..f983874b6ca 100644 --- a/openstack/billing/templates/seeds.yaml +++ b/openstack/billing/templates/seeds.yaml @@ -139,6 +139,7 @@ spec: - domain: {{ . | lower }} role: masterdata_admin inherited: true + {{- if ne . "global" }} - name: {{ . | upper }}_COMPUTE_SUPPORT role_assignments: - project: compute_support @@ -179,6 +180,7 @@ spec: - domain: {{ . | lower }} role: masterdata_viewer inherited: true + {{- end }} {{- if and (eq . "bs") (ne $.Values.global.region "qa-de-1") }} - name: BS_CCloud_SO_TLO role_assignments: From 1bbb51012b80efc0bba30923fb366840db6ea607 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Thu, 16 Jan 2025 16:50:02 +0100 Subject: [PATCH 197/224] openstack/limes: skip seeding role assignments for groups that do not exist in global --- openstack/limes/templates/support_seed.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/openstack/limes/templates/support_seed.yaml b/openstack/limes/templates/support_seed.yaml index cd4809cd1fe..d3ac409cdde 100644 --- a/openstack/limes/templates/support_seed.yaml +++ b/openstack/limes/templates/support_seed.yaml @@ -37,6 +37,7 @@ spec: - domain: {{ . | lower }} role: resource_viewer inherited: true + {{- if ne . "global" }} - name: {{ . | upper }}_COMPUTE_SUPPORT role_assignments: - project: compute_support @@ -81,4 +82,5 @@ spec: - domain: {{ . | lower }} role: resource_viewer inherited: true + {{- end }} {{- end }} From 7e2dba9fbeec5ef7799059d91ec40bf190194888 Mon Sep 17 00:00:00 2001 From: Jan Knipper Date: Thu, 16 Jan 2025 16:55:00 +0100 Subject: [PATCH 198/224] Restart concourse workers on ssh key change --- global/concourse-main/templates/worker-daemonset.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/global/concourse-main/templates/worker-daemonset.yaml b/global/concourse-main/templates/worker-daemonset.yaml index ceb39ca3d01..948d3e45c1f 100644 --- a/global/concourse-main/templates/worker-daemonset.yaml +++ b/global/concourse-main/templates/worker-daemonset.yaml @@ -1,3 +1,4 @@ +{{ $workerSecretsSum := include (print $.Template.BasePath "/worker-secrets.yaml") . 
| sha256sum }} {{ range .Values.teams }} {{ if eq .type "standalone" }} apiVersion: apps/v1 @@ -16,6 +17,8 @@ spec: app: {{ $.Values.worker.name }}-{{ .name }} template: metadata: + annotations: + checksum/secrets: {{ $workerSecretsSum }} labels: app: {{ $.Values.worker.name }}-{{ .name }} release: "{{ $.Release.Name }}" From 2eac4add6b921c227dc2ba77bf1ddab098a38921 Mon Sep 17 00:00:00 2001 From: Jan Knipper <9881823+jknipper@users.noreply.github.com> Date: Thu, 16 Jan 2025 16:57:13 +0100 Subject: [PATCH 199/224] Temporary fix for concourse issue (#7622) * Temporary fix for concourse issue * Replace cni-plugin in worker daemonset --- global/concourse-main/templates/worker-daemonset.yaml | 4 ++++ global/concourse-main/values.yaml | 11 +++++++++++ 2 files changed, 15 insertions(+) diff --git a/global/concourse-main/templates/worker-daemonset.yaml b/global/concourse-main/templates/worker-daemonset.yaml index 948d3e45c1f..b665b0fbd2f 100644 --- a/global/concourse-main/templates/worker-daemonset.yaml +++ b/global/concourse-main/templates/worker-daemonset.yaml @@ -47,6 +47,10 @@ spec: update-ca-certificates + apt update && apt install -y wget + wget -P /tmp https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-amd64-v1.5.1.tgz + tar xvf /tmp/cni-plugins-linux-amd64-v1.5.1.tgz -C /usr/local/concourse/bin + exec /usr/local/concourse/bin/concourse worker --name=${NODE_NAME} livenessProbe: failureThreshold: 5 diff --git a/global/concourse-main/values.yaml b/global/concourse-main/values.yaml index 384cc6b52b4..7d7e34e8870 100644 --- a/global/concourse-main/values.yaml +++ b/global/concourse-main/values.yaml @@ -88,6 +88,17 @@ concourse: values: - concourse-main-web topologyKey: "kubernetes.io/hostname" + command: + - dumb-init + - sh + args: + - -ce + - |- + # fix cni-plugins, see https://github.com/concourse/concourse/issues/9027 + apt update && apt install -y wget + wget -P /tmp https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-amd64-v1.5.1.tgz + tar xvf /tmp/cni-plugins-linux-amd64-v1.5.1.tgz -C /usr/local/concourse/bin + exec /usr/local/bin/entrypoint.sh web persistence: enabled: false From 6eabfdd9c73eb9ee17aa73041be6d3f5bb9b8911 Mon Sep 17 00:00:00 2001 From: Dmitry Galkin Date: Thu, 16 Jan 2025 18:03:56 +0100 Subject: [PATCH 200/224] [designate] update healthprober - fix deprecation warning for psutil - disable check for mdns due to 8050680948941482d8816c57e81a9fa3110c3818 --- openstack/designate/bin/health-probe.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/openstack/designate/bin/health-probe.py b/openstack/designate/bin/health-probe.py index 88aeb619246..dcfed12a987 100644 --- a/openstack/designate/bin/health-probe.py +++ b/openstack/designate/bin/health-probe.py @@ -122,7 +122,7 @@ def tcp_socket_status(process: Optional[str], ports: Set[int]) -> int: try: with p.oneshot(): if process in " ".join(p.cmdline()): - pcon = p.connections() + pcon = p.net_connections() for con in pcon: try: rport = con.raddr[1] @@ -261,7 +261,8 @@ def test_liveness() -> None: if service in rpc_services: check_tcp_socket(service, rabbits, databases) - check_service_status(transport) + if service not in "mdns": + check_service_status(transport) if service in api_services: check_tcp_connectivity(rabbits, databases) From 6131f201347c88571eb31aea207fcd6f58c190d2 Mon Sep 17 00:00:00 2001 From: notque Date: Thu, 16 Jan 2025 10:44:34 -0700 Subject: [PATCH 201/224] [ironic] add states power and custom id to audit 
middleware mapping (#7686) * [ironic] add states power to audit middleware mapping * [ironic] attempt mapping of custom id for audit middleware mapping --- openstack/ironic/templates/etc/_api_audit_map.yaml.tpl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/openstack/ironic/templates/etc/_api_audit_map.yaml.tpl b/openstack/ironic/templates/etc/_api_audit_map.yaml.tpl index 8e4005bdd4a..f362431b8ad 100644 --- a/openstack/ironic/templates/etc/_api_audit_map.yaml.tpl +++ b/openstack/ironic/templates/etc/_api_audit_map.yaml.tpl @@ -7,12 +7,15 @@ resources: chassis: deploy_templates: nodes: + custom_id: id children: allocation: validate: maintenance: management: states: + children: + power: traits: vifs: vmedia: From 9d037b2e4817a2a9aede4e21f1f0f3f0b2e5014a Mon Sep 17 00:00:00 2001 From: Vassil Dimitrov Date: Fri, 17 Jan 2025 08:54:14 +0100 Subject: [PATCH 202/224] [unbound] Don't let helm render the prometheus templates We use some of them in our alert definitions. They are supposed to be passed to prometheus as-is and not rendered by helm. --- system/unbound/templates/prometheus-alerts.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/system/unbound/templates/prometheus-alerts.yaml b/system/unbound/templates/prometheus-alerts.yaml index c41a23e5c3d..00ef7270f90 100644 --- a/system/unbound/templates/prometheus-alerts.yaml +++ b/system/unbound/templates/prometheus-alerts.yaml @@ -19,15 +19,15 @@ spec: labels: context: unbound dashboard: dns-unbound-and-f5-performance - meta: '{{ $labels.app }}' + meta: {{` '{{ $labels.app }}' `}} service: unbound severity: info support_group: network-api tier: os playbook: 'docs/devops/alert/designate' annotations: - description: 'Recursor {{ $labels.app }} returns lots of SERVFAIL responses in {{ $labels.region }} region.' - summary: '{{ $labels.app }} returned a lot of SERVFAIL responses in the last hour. Check the logs.' + description: {{` 'Recursor {{ $labels.app }} returns lots of SERVFAIL responses in {{ $labels.region }} region.' `}} + summary: {{` '{{ $labels.app }} returned a lot of SERVFAIL responses in the last hour. Check the logs.' `}} - alert: DnsUnbound1Down expr: absent(unbound_up{app="unbound1"}) == 1 or unbound_up{app="unbound1"} != 1 @@ -82,12 +82,12 @@ spec: labels: context: unbound dashboard: dns-unbound-and-f5-performance - meta: '{{ $labels.endpoint }}' + meta: {{` '{{ $labels.endpoint }}' `}} service: unbound severity: warning support_group: network-api tier: os playbook: 'docs/devops/alert/designate/#test_unbound_endpoint' annotations: - description: 'DNS Unbound endpoint {{ $labels.endpoint }} not available in {{ $labels.region }} region.' - summary: 'DNS Unbound endpoint {{ $labels.endpoint }} is not available. DNS resolution might be handled by another region.' + description: {{` 'DNS Unbound endpoint {{ $labels.endpoint }} not available in {{ $labels.region }} region.' `}} + summary: {{` 'DNS Unbound endpoint {{ $labels.endpoint }} is not available. DNS resolution might be handled by another region.' `}} From a52596fbdae27202909580f42f77316f206cb79f Mon Sep 17 00:00:00 2001 From: Vassil Dimitrov Date: Fri, 17 Jan 2025 09:00:21 +0100 Subject: [PATCH 203/224] [unbound] fixed the unbound-kubernetes-alerts promrule Use the proper prometheus, namely kubernetes.
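For reference, the escaping idiom introduced in the previous patch relies on Go template raw strings: wrapping text in {{` ... `}} makes helm emit the backtick-quoted content verbatim, so the inner {{ $labels.* }} expressions survive chart rendering and are only expanded later by Prometheus/Alertmanager. A minimal sketch of the pattern (the alert text and label below are illustrative, not taken from this chart): annotations: summary: {{` 'Instance {{ $labels.instance }} is down' `}} which helm renders to the literal line summary: 'Instance {{ $labels.instance }} is down' for Prometheus to template at alert time.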
--- system/unbound/templates/prometheus-alerts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/unbound/templates/prometheus-alerts.yaml b/system/unbound/templates/prometheus-alerts.yaml index 00ef7270f90..66f86e7b89c 100644 --- a/system/unbound/templates/prometheus-alerts.yaml +++ b/system/unbound/templates/prometheus-alerts.yaml @@ -70,7 +70,7 @@ metadata: app: unbound tier: os type: alerting-rules - prometheus: openstack + prometheus: kubernetes spec: groups: From 8621a5cf4aa11909a08d6c30a29a803ff28d48a7 Mon Sep 17 00:00:00 2001 From: Erik Schubert Date: Fri, 17 Jan 2025 10:14:05 +0100 Subject: [PATCH 204/224] Fix metal-operator-remote regional RBAC (#7688) * Fix metal-operator-remote regional RBAC * Add test-values to metal-operator-remote --- system/Makefile | 5 ++++- system/kustomize/metal-operator-managedresources/rbac.yaml | 3 +-- system/metal-operator-remote/Chart.yaml | 2 +- system/metal-operator-remote/ci/test-values.yaml | 2 ++ system/metal-operator-remote/managedresources/rbac.yaml | 3 +-- 5 files changed, 9 insertions(+), 6 deletions(-) create mode 100644 system/metal-operator-remote/ci/test-values.yaml diff --git a/system/Makefile b/system/Makefile index ad22403ce28..faa9eba16dd 100644 --- a/system/Makefile +++ b/system/Makefile @@ -129,7 +129,10 @@ build-metal-operator-remote: @yq -i '.fullnameOverride="metal-operator"' metal-operator-remote/values.yaml @yq -i '.remote.ca=""' metal-operator-remote/values.yaml @echo 'macdb: {}' >> metal-operator-remote/values.yaml - @yq -i '.version="0.3.0"' metal-operator-remote/Chart.yaml + @mkdir metal-operator-remote/ci + @echo 'global:' >> metal-operator-remote/ci/test-values.yaml + @echo ' region: ab-cd-1' >> metal-operator-remote/ci/test-values.yaml + @yq -i '.version="0.3.1"' metal-operator-remote/Chart.yaml @$(SED) -i 's/serviceAccountName.*$$/serviceAccountName: default/g' metal-operator-remote/templates/deployment.yaml @$(SED) -i 's/kind: Role/kind: ClusterRole/g' metal-operator-remote/managedresources/kustomize.yaml diff --git a/system/kustomize/metal-operator-managedresources/rbac.yaml b/system/kustomize/metal-operator-managedresources/rbac.yaml index 037a69aa814..b0722d6d5e1 100644 --- a/system/kustomize/metal-operator-managedresources/rbac.yaml +++ b/system/kustomize/metal-operator-managedresources/rbac.yaml @@ -10,8 +10,7 @@ roleRef: subjects: - apiGroup: rbac.authorization.k8s.io kind: Group - name: CC_IAS_CONTROLPLANE_PROD_ADMIN - + name: {{ if contains "qa-de-" .Values.global.region -}} CC_IAS_CONTROLPLANE_QA_ADMIN {{- else -}} CC_IAS_CONTROLPLANE_PROD_ADMIN {{- end }} --- apiVersion: v1 kind: ServiceAccount diff --git a/system/metal-operator-remote/Chart.yaml b/system/metal-operator-remote/Chart.yaml index 26887db1719..4b7eee664d7 100644 --- a/system/metal-operator-remote/Chart.yaml +++ b/system/metal-operator-remote/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.0 +version: 0.3.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
diff --git a/system/metal-operator-remote/ci/test-values.yaml b/system/metal-operator-remote/ci/test-values.yaml new file mode 100644 index 00000000000..bad98bc1f9e --- /dev/null +++ b/system/metal-operator-remote/ci/test-values.yaml @@ -0,0 +1,2 @@ +global: + region: ab-cd-1 diff --git a/system/metal-operator-remote/managedresources/rbac.yaml b/system/metal-operator-remote/managedresources/rbac.yaml index 037a69aa814..b0722d6d5e1 100644 --- a/system/metal-operator-remote/managedresources/rbac.yaml +++ b/system/metal-operator-remote/managedresources/rbac.yaml @@ -10,8 +10,7 @@ roleRef: subjects: - apiGroup: rbac.authorization.k8s.io kind: Group - name: CC_IAS_CONTROLPLANE_PROD_ADMIN - + name: {{ if contains "qa-de-" .Values.global.region -}} CC_IAS_CONTROLPLANE_QA_ADMIN {{- else -}} CC_IAS_CONTROLPLANE_PROD_ADMIN {{- end }} --- apiVersion: v1 kind: ServiceAccount From ed31ff4384cfb631e1a73711ad192a6272885c4d Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 17 Jan 2025 10:44:46 +0100 Subject: [PATCH 205/224] [logs] removing netflow input --- system/logs/Chart.lock | 6 +++--- system/logs/Chart.yaml | 4 ++-- .../logs/vendor/logstash-external/Chart.yaml | 2 +- .../templates/_logstash.conf.tpl | 19 ------------------- .../templates/_netflow.json.tpl | 18 ------------------ .../templates/configmap.yaml | 2 -- .../logstash-external/templates/service.yaml | 4 ---- .../templates/statefulset.yaml | 3 --- 8 files changed, 6 insertions(+), 52 deletions(-) delete mode 100644 system/logs/vendor/logstash-external/templates/_netflow.json.tpl diff --git a/system/logs/Chart.lock b/system/logs/Chart.lock index 1ef59d1372b..b213f6fb42c 100644 --- a/system/logs/Chart.lock +++ b/system/logs/Chart.lock @@ -7,7 +7,7 @@ dependencies: version: 1.0.1 - name: logstash-external repository: file://vendor/logstash-external - version: 1.1.13 + version: 1.1.14 - name: k8s-event-logger repository: https://kuckkuck.github.io/k8s-event-logger version: 1.2.1 @@ -26,5 +26,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:b396db519897557ab25bb8936dba627a1ffcbf2912ccc719d1e731a84ad7f139 -generated: "2024-12-16T12:15:05.760166+01:00" +digest: sha256:6f7c376733709202b8f99f89fdccf5893480f6f298f84ee72eca740b6cc97696 +generated: "2025-01-17T10:43:37.162631+01:00" diff --git a/system/logs/Chart.yaml b/system/logs/Chart.yaml index 97a1255b8b5..19a2a6d37fa 100644 --- a/system/logs/Chart.yaml +++ b/system/logs/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: A Helm chart for all log shippers name: logs -version: 0.0.47 +version: 0.0.48 home: https://github.com/sapcc/helm-charts/tree/master/system/logs dependencies: - name: fluent @@ -18,7 +18,7 @@ dependencies: - name: logstash-external alias: logstash_external repository: file://vendor/logstash-external - version: 1.1.13 + version: 1.1.14 condition: logstash_external.enabled - name: k8s-event-logger diff --git a/system/logs/vendor/logstash-external/Chart.yaml b/system/logs/vendor/logstash-external/Chart.yaml index 17291c189e2..7b196fcd729 100644 --- a/system/logs/vendor/logstash-external/Chart.yaml +++ b/system/logs/vendor/logstash-external/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: logstash-external -version: 1.1.13 +version: 1.1.14 description: logstash log collector maintainers: - name: Olaf Heydorn diff --git a/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl b/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl index ae3ac0b1125..7a0b3bda1e6 100644 --- 
a/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl +++ b/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl @@ -9,11 +9,6 @@ input { port => {{.Values.input.syslog_port}} type => syslog } - udp { - id => "input-udp-netflow" - port => {{.Values.input.netflow_port}} - type => netflow - } http { id => "input-http" port => {{.Values.input.alertmanager_port}} @@ -206,20 +201,6 @@ output { ssl_certificate_verification => true } } - elseif [type] == "netflow" { - opensearch { - id => "opensearch-netflow" - index => "netflow-%{+YYYY.MM.dd}" - hosts => ["https://{{.Values.global.opensearch.host}}:{{.Values.global.opensearch.port}}"] - auth_type => { - type => 'basic' - user => "${OPENSEARCH_USER}" - password => "${OPENSEARCH_PASSWORD}" - } - ssl => true - ssl_certificate_verification => true - } - } elseif [type] == "jumpserver" { opensearch { id => "opensearch-jump" diff --git a/system/logs/vendor/logstash-external/templates/_netflow.json.tpl b/system/logs/vendor/logstash-external/templates/_netflow.json.tpl deleted file mode 100644 index a1c1a396299..00000000000 --- a/system/logs/vendor/logstash-external/templates/_netflow.json.tpl +++ /dev/null @@ -1,18 +0,0 @@ -{ - "order": 0, - "template": "netflow-*", - "settings": { - "index": { - "refresh_interval": "60s", - "unassigned": { - "node_left": { - "delayed_timeout": "10m" - } - }, - "number_of_shards": "1", - "number_of_replicas": "1" - } - }, - "mappings": {}, - "aliases": {} -} diff --git a/system/logs/vendor/logstash-external/templates/configmap.yaml b/system/logs/vendor/logstash-external/templates/configmap.yaml index fa6381cc12f..bb5bb3bd457 100644 --- a/system/logs/vendor/logstash-external/templates/configmap.yaml +++ b/system/logs/vendor/logstash-external/templates/configmap.yaml @@ -17,8 +17,6 @@ data: {{ include (print .Template.BasePath "/_patterns.syslog.tpl") . | indent 4 }} alerts.json: | {{ include (print .Template.BasePath "/_alerts.json.tpl") . | indent 4 }} - netflow.json: | -{{ include (print .Template.BasePath "/_netflow.json.tpl") . | indent 4 }} syslog.json: | {{ include (print .Template.BasePath "/_syslog.json.tpl") . 
| indent 4 }} deployments.json: | diff --git a/system/logs/vendor/logstash-external/templates/service.yaml b/system/logs/vendor/logstash-external/templates/service.yaml index 8d747e6104e..b76735413d3 100644 --- a/system/logs/vendor/logstash-external/templates/service.yaml +++ b/system/logs/vendor/logstash-external/templates/service.yaml @@ -12,10 +12,6 @@ spec: externalTrafficPolicy: Local externalIPs: ["{{.Values.external_ip}}"] ports: - - name: netflow - protocol: UDP - port: {{.Values.input.netflow_port}} - targetPort: {{.Values.input.netflow_port}} - name: syslogudp protocol: UDP port: {{.Values.input.syslog_port}} diff --git a/system/logs/vendor/logstash-external/templates/statefulset.yaml b/system/logs/vendor/logstash-external/templates/statefulset.yaml index c2dac4dd604..60b0a31409f 100644 --- a/system/logs/vendor/logstash-external/templates/statefulset.yaml +++ b/system/logs/vendor/logstash-external/templates/statefulset.yaml @@ -44,9 +44,6 @@ spec: image: {{ .Values.global.registry }}/elk-logstash:{{ .Values.image_version }} imagePullPolicy: IfNotPresent ports: - - name: netflow - containerPort: {{ .Values.input.netflow_port }} - protocol: UDP - name: syslogudp containerPort: {{ .Values.input.syslog_port }} protocol: UDP From 62db84238108ced01dead8223ce6973d6ab01282 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 17 Jan 2025 13:42:49 +0100 Subject: [PATCH 206/224] [logs] remove syslog udp/tcp receiver from logstash --- system/logs/Chart.lock | 6 +- system/logs/Chart.yaml | 4 +- .../logs/vendor/logstash-external/Chart.yaml | 2 +- .../templates/_logstash.conf.tpl | 62 +------------------ .../templates/_patterns.syslog.tpl | 24 ------- .../templates/_syslog.json.tpl | 18 ------ .../templates/alerts/_logstash-alerts.tpl | 20 ------ .../templates/configmap.yaml | 4 -- .../logstash-external/templates/secrets.yaml | 2 - .../logstash-external/templates/service.yaml | 12 ---- .../templates/statefulset.yaml | 19 ------ 11 files changed, 7 insertions(+), 166 deletions(-) delete mode 100644 system/logs/vendor/logstash-external/templates/_patterns.syslog.tpl delete mode 100644 system/logs/vendor/logstash-external/templates/_syslog.json.tpl delete mode 100644 system/logs/vendor/logstash-external/templates/alerts/_logstash-alerts.tpl diff --git a/system/logs/Chart.lock b/system/logs/Chart.lock index b213f6fb42c..b3dcc4e294d 100644 --- a/system/logs/Chart.lock +++ b/system/logs/Chart.lock @@ -7,7 +7,7 @@ dependencies: version: 1.0.1 - name: logstash-external repository: file://vendor/logstash-external - version: 1.1.14 + version: 1.1.15 - name: k8s-event-logger repository: https://kuckkuck.github.io/k8s-event-logger version: 1.2.1 @@ -26,5 +26,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:6f7c376733709202b8f99f89fdccf5893480f6f298f84ee72eca740b6cc97696 -generated: "2025-01-17T10:43:37.162631+01:00" +digest: sha256:86a6ba6962b119ad43deb6f34914d964690ab081e2a2c096ae9ec9fb6d27e34a +generated: "2025-01-17T13:41:37.01847+01:00" diff --git a/system/logs/Chart.yaml b/system/logs/Chart.yaml index 19a2a6d37fa..db9e6bd0034 100644 --- a/system/logs/Chart.yaml +++ b/system/logs/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: A Helm chart for all log shippers name: logs -version: 0.0.48 +version: 0.0.49 home: https://github.com/sapcc/helm-charts/tree/master/system/logs dependencies: - name: fluent @@ -18,7 +18,7 @@ dependencies: - name: logstash-external alias: logstash_external repository: 
file://vendor/logstash-external - version: 1.1.14 + version: 1.1.15 condition: logstash_external.enabled - name: k8s-event-logger diff --git a/system/logs/vendor/logstash-external/Chart.yaml b/system/logs/vendor/logstash-external/Chart.yaml index 7b196fcd729..3a0f33afb16 100644 --- a/system/logs/vendor/logstash-external/Chart.yaml +++ b/system/logs/vendor/logstash-external/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: logstash-external -version: 1.1.14 +version: 1.1.15 description: logstash log collector maintainers: - name: Olaf Heydorn diff --git a/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl b/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl index 7a0b3bda1e6..a6570c9fb42 100644 --- a/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl +++ b/system/logs/vendor/logstash-external/templates/_logstash.conf.tpl @@ -1,14 +1,4 @@ input { - udp { - id => "input-udp-syslog" - port => {{.Values.input.syslog_port}} - type => syslog - } - tcp { - id => "input-tcp-syslog" - port => {{.Values.input.syslog_port}} - type => syslog - } http { id => "input-http" port => {{.Values.input.alertmanager_port}} @@ -38,39 +28,6 @@ input { } filter { - if [type] == "syslog" { - mutate { - id => "syslog-rename-hostname" - rename => { "host" => "hostname"} - } - - dns { - id => "syslog-dns-resolve" - reverse => [ "hostname" ] - action => "replace" - hit_cache_size => "100" - hit_cache_ttl => "2678600" - failed_cache_size => "100" - failed_cache_ttl => "3600" - } - grok { - id => "syslog-grok" - match => { - "message" => [ - "<%{NONNEGINT:syslog_pri}>: %{SYSLOGCISCOTIMESTAMP:syslog_timestamp}: %{SYSLOGCISCOSTRING}:", - "<%{NONNEGINT:syslog_pri}>%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{SYSLOGPROG:syslog_process}: %{SYSLOGCISCOSTRING}:", - "<%{NONNEGINT:syslog_pri}>%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} Severity: (?\w+), Category: (?\w+), MessageID: (?\w+)", - "<%{NONNEGINT:syslog_pri}>%{PROG:syslog_process}\[%{POSINT:pid}\]", - "<%{NONNEGINT:syslog_pri}>Severity: (?\w+), Category: (?\w+), MessageID: (?\w+)" - ] - } - break_on_match => "true" - overwrite => ["message"] - patterns_dir => ["/logstash-etc/*.grok"] - tag_on_failure => ["_syslog_grok_failure"] - } - } - if [type] == "jumpserver" { mutate { id => "jump-split" @@ -116,24 +73,7 @@ filter { output { - if [type] == "syslog" { - opensearch { - id => "opensearch-syslog" - index => "syslog-%{+YYYY.MM.dd}" - hosts => ["https://{{.Values.global.opensearch.host}}:{{.Values.global.opensearch.port}}"] - template => "/logstash-etc/syslog.json" - template_name => "syslog" - template_overwrite => true - auth_type => { - type => "basic" - user => "${OPENSEARCH_SYSLOG_USER}" - password => "${OPENSEARCH_SYSLOG_PASSWORD}" - } - ssl => true - ssl_certificate_verification => true - } - } - elseif [type] == "alert" and [alerts][labels][severity] == "critical"{ + if [type] == "alert" and [alerts][labels][severity] == "critical"{ opensearch { id => "opensearch-critical-alerts" index => "alerts-critical-%{+YYYY}" diff --git a/system/logs/vendor/logstash-external/templates/_patterns.syslog.tpl b/system/logs/vendor/logstash-external/templates/_patterns.syslog.tpl deleted file mode 100644 index 81bbdbf30fa..00000000000 --- a/system/logs/vendor/logstash-external/templates/_patterns.syslog.tpl +++ /dev/null @@ -1,24 +0,0 @@ -SYSLOG5424PRINTASCII [!-~]+ - -SYSLOGBASE2 (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) (?:%{SYSLOGFACILITY} 
)?%{SYSLOGHOST:logsource}+(?: %{SYSLOGPROG}:|) -SYSLOGPAMSESSION %{SYSLOGBASE} (?=%{GREEDYDATA:message})%{WORD:pam_module}\(%{DATA:pam_caller}\): session %{WORD:pam_session_state} for user %{USERNAME:username}(?: by %{GREEDYDATA:pam_by})? - -CRON_ACTION [A-Z ]+ -CRONLOG %{SYSLOGBASE} \(%{USER:user}\) %{CRON_ACTION:action} \(%{DATA:message}\) - -SYSLOGLINE %{SYSLOGBASE2} %{GREEDYDATA:message} - -# IETF 5424 syslog(8) format (see http://www.rfc-editor.org/info/rfc5424) -SYSLOG5424PRI <%{NONNEGINT:syslog5424_pri}> -SYSLOG5424SD \[%{DATA}\]+ -SYSLOG5424BASE %{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{IPORHOST:syslog5424_host}|-) +(-|%{SYSLOG5424PRINTASCII:syslog5424_app}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_proc}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_msgid}) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|) - -SYSLOG5424LINE %{SYSLOG5424BASE} +%{GREEDYDATA:syslog5424_msg} - - -# CISCO Syslog Pattern -SYSLOGCISCOTIMESTAMP [0-9]{4} [A-Z][a-z]{2}\s{1,2}\d{1,2} \d{2}:\d{2}:\d{2} [A-Z]{3} -SYSLOGCISCOFACILITY [%A-Z]+ -SYSLOGCISCOSEVERITY \d -SYSLOGCISCOCODE [A-Z_]+ -SYSLOGCISCOSTRING %{SYSLOGCISCOFACILITY:syslogcisco_facility}-%{SYSLOGCISCOSEVERITY:syslogcisco_severity}-%{SYSLOGCISCOCODE:syslogcisco_code} diff --git a/system/logs/vendor/logstash-external/templates/_syslog.json.tpl b/system/logs/vendor/logstash-external/templates/_syslog.json.tpl deleted file mode 100644 index ef80eedaf13..00000000000 --- a/system/logs/vendor/logstash-external/templates/_syslog.json.tpl +++ /dev/null @@ -1,18 +0,0 @@ -{ - "order": 0, - "template": "syslog-*", - "settings": { - "index": { - "refresh_interval": "60s", - "unassigned": { - "node_left": { - "delayed_timeout": "10m" - } - }, - "number_of_shards": "1", - "number_of_replicas": "1" - } - }, - "mappings": {}, - "aliases": {} -} diff --git a/system/logs/vendor/logstash-external/templates/alerts/_logstash-alerts.tpl b/system/logs/vendor/logstash-external/templates/alerts/_logstash-alerts.tpl deleted file mode 100644 index 4c4d49d1c48..00000000000 --- a/system/logs/vendor/logstash-external/templates/alerts/_logstash-alerts.tpl +++ /dev/null @@ -1,20 +0,0 @@ -groups: -- name: logstash.alerts - rules: - - alert: LogsLogstashLogsIncreasing -{{ if eq .Values.global.clusterType "scaleout" }} - expr: increase(logstash_node_plugin_events_in_total{cluster_type!="controlplane",cluster_type!="metal",namespace="logs",plugin_id="input-tcp-syslog"}[1h]) / increase(logstash_node_plugin_events_in_total{namespace="logs",cluster_type!="controlplane",cluster_type!="metal",plugin_id="input-tcp-syslog"}[1h]offset 2h) > 2 -{{ else }} - expr: increase(logstash_node_plugin_events_in_total{namespace="logs",plugin_id="input-tcp-syslog"}[1h]) / increase(logstash_node_plugin_events_in_total{namespace="logs",plugin_id="input-tcp-syslog"}[1h]offset 2h) > 2 -{{ end }} - for: 120m - labels: - context: logshipping - service: logs - severity: info - support_group: observability - tier: os - playbook: 'docs/support/playbook/opensearch/opensearch_logs/logs-increasing' - annotations: - description: 'logstash in {{`{{ $labels.region }}`}} {{`{{ $labels.kubernetes_pod_name }}`}} pod on {{`{{ $labels.nodename }}`}} 100 % more logs' - summary: logstash external receiver events increasing diff --git a/system/logs/vendor/logstash-external/templates/configmap.yaml b/system/logs/vendor/logstash-external/templates/configmap.yaml index bb5bb3bd457..c144a07da28 100644 --- a/system/logs/vendor/logstash-external/templates/configmap.yaml +++ 
b/system/logs/vendor/logstash-external/templates/configmap.yaml @@ -13,12 +13,8 @@ data: {{ include (print .Template.BasePath "/_logstash.conf.tpl") . | indent 4 }} start.sh: | {{ include (print .Template.BasePath "/_start.sh.tpl") . | indent 4 }} - patterns_syslog.grok: | -{{ include (print .Template.BasePath "/_patterns.syslog.tpl") . | indent 4 }} alerts.json: | {{ include (print .Template.BasePath "/_alerts.json.tpl") . | indent 4 }} - syslog.json: | -{{ include (print .Template.BasePath "/_syslog.json.tpl") . | indent 4 }} deployments.json: | {{ include (print .Template.BasePath "/_deployments.json.tpl") . | indent 4 }} jump.json: | diff --git a/system/logs/vendor/logstash-external/templates/secrets.yaml b/system/logs/vendor/logstash-external/templates/secrets.yaml index d39c40972c1..30363f1fd87 100644 --- a/system/logs/vendor/logstash-external/templates/secrets.yaml +++ b/system/logs/vendor/logstash-external/templates/secrets.yaml @@ -10,7 +10,5 @@ data: opensearch_password: {{ .Values.global.opensearch.data.password | b64enc }} opensearch_jump_user: {{ .Values.global.opensearch.jump.user | b64enc }} opensearch_jump_password: {{ .Values.global.opensearch.jump.password | b64enc }} - opensearch_syslog_user: {{ .Values.global.opensearch.syslog.user | b64enc }} - opensearch_syslog_password: {{ .Values.global.opensearch.syslog.password | b64enc }} http_user: {{ .Values.http.user | b64enc }} http_password: {{ .Values.http.password | b64enc }} diff --git a/system/logs/vendor/logstash-external/templates/service.yaml b/system/logs/vendor/logstash-external/templates/service.yaml index b76735413d3..947121af8ea 100644 --- a/system/logs/vendor/logstash-external/templates/service.yaml +++ b/system/logs/vendor/logstash-external/templates/service.yaml @@ -12,18 +12,6 @@ spec: externalTrafficPolicy: Local externalIPs: ["{{.Values.external_ip}}"] ports: - - name: syslogudp - protocol: UDP - port: {{.Values.input.syslog_port}} - targetPort: {{.Values.input.syslog_port}} - - name: syslogtcp - protocol: TCP - port: {{.Values.input.syslog_port}} - targetPort: {{.Values.input.syslog_port}} - - name: bigiplogsudp - protocol: UDP - port: {{.Values.input.bigiplogs_port}} - targetPort: {{.Values.input.bigiplogs_port}} - name: alertmanagertcp protocol: TCP port: {{.Values.input.alertmanager_port}} diff --git a/system/logs/vendor/logstash-external/templates/statefulset.yaml b/system/logs/vendor/logstash-external/templates/statefulset.yaml index 60b0a31409f..3271bf08b2e 100644 --- a/system/logs/vendor/logstash-external/templates/statefulset.yaml +++ b/system/logs/vendor/logstash-external/templates/statefulset.yaml @@ -44,15 +44,6 @@ spec: image: {{ .Values.global.registry }}/elk-logstash:{{ .Values.image_version }} imagePullPolicy: IfNotPresent ports: - - name: syslogudp - containerPort: {{ .Values.input.syslog_port }} - protocol: UDP - - name: syslogtcp - containerPort: {{ .Values.input.syslog_port }} - protocol: TCP - - name: bigiplogsudp - containerPort: {{ .Values.input.bigiplogs_port }} - protocol: UDP - name: alertmanagertcp containerPort: {{ .Values.input.alertmanager_port }} protocol: TCP @@ -88,16 +79,6 @@ spec: secretKeyRef: name: logstash-external-secret key: opensearch_jump_password - - name: OPENSEARCH_SYSLOG_USER - valueFrom: - secretKeyRef: - name: logstash-external-secret - key: opensearch_syslog_user - - name: OPENSEARCH_SYSLOG_PASSWORD - valueFrom: - secretKeyRef: - name: logstash-external-secret - key: opensearch_syslog_password - name: HTTP_USER valueFrom: secretKeyRef: From 
5edc35f96e709b0a4a13bd7a81914ddbe50cc31e Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 17 Jan 2025 13:46:57 +0100 Subject: [PATCH 207/224] [logs] remove alert reference --- .../templates/prometheus-alerts.yaml | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 system/logs/vendor/logstash-external/templates/prometheus-alerts.yaml diff --git a/system/logs/vendor/logstash-external/templates/prometheus-alerts.yaml b/system/logs/vendor/logstash-external/templates/prometheus-alerts.yaml deleted file mode 100644 index df3e8198771..00000000000 --- a/system/logs/vendor/logstash-external/templates/prometheus-alerts.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and .Values.alerts.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule - -metadata: - name: logstash-alerts - labels: - app: logstash-prometheus - tier: infra - type: alerting-rules - prometheus: {{ required ".Values.global.prometheus missing" .Values.global.prometheus }} - -spec: -{{ include (print .Template.BasePath "/alerts/_logstash-alerts.tpl") . | indent 2 }} - -{{- end }} From 7b4501c9ac59a1b8c620f16fa0620f497417fcfe Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Fri, 17 Jan 2025 14:04:08 +0100 Subject: [PATCH 208/224] [logs] adding syslog user --- system/logs/Chart.lock | 6 +++--- system/logs/Chart.yaml | 4 ++-- system/logs/vendor/logstash-external/Chart.yaml | 2 +- .../vendor/logstash-external/templates/secrets.yaml | 2 ++ .../logstash-external/templates/statefulset.yaml | 10 ++++++++++ 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/system/logs/Chart.lock b/system/logs/Chart.lock index b3dcc4e294d..e3bfaf862e7 100644 --- a/system/logs/Chart.lock +++ b/system/logs/Chart.lock @@ -7,7 +7,7 @@ dependencies: version: 1.0.1 - name: logstash-external repository: file://vendor/logstash-external - version: 1.1.15 + version: 1.1.16 - name: k8s-event-logger repository: https://kuckkuck.github.io/k8s-event-logger version: 1.2.1 @@ -26,5 +26,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.3 -digest: sha256:86a6ba6962b119ad43deb6f34914d964690ab081e2a2c096ae9ec9fb6d27e34a -generated: "2025-01-17T13:41:37.01847+01:00" +digest: sha256:c27f9f1d7427dba0efdca6c3d9ba77f4e511ef8339030a97739041e950028707 +generated: "2025-01-17T14:03:34.722343+01:00" diff --git a/system/logs/Chart.yaml b/system/logs/Chart.yaml index db9e6bd0034..9dda1414d36 100644 --- a/system/logs/Chart.yaml +++ b/system/logs/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: A Helm chart for all log shippers name: logs -version: 0.0.49 +version: 0.0.50 home: https://github.com/sapcc/helm-charts/tree/master/system/logs dependencies: - name: fluent @@ -18,7 +18,7 @@ dependencies: - name: logstash-external alias: logstash_external repository: file://vendor/logstash-external - version: 1.1.15 + version: 1.1.16 condition: logstash_external.enabled - name: k8s-event-logger diff --git a/system/logs/vendor/logstash-external/Chart.yaml b/system/logs/vendor/logstash-external/Chart.yaml index 3a0f33afb16..ac9c8267fbf 100644 --- a/system/logs/vendor/logstash-external/Chart.yaml +++ b/system/logs/vendor/logstash-external/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: logstash-external -version: 1.1.15 +version: 1.1.16 description: logstash log collector maintainers: - name: Olaf Heydorn diff --git a/system/logs/vendor/logstash-external/templates/secrets.yaml b/system/logs/vendor/logstash-external/templates/secrets.yaml index 30363f1fd87..d39c40972c1 100644 --- 
a/system/logs/vendor/logstash-external/templates/secrets.yaml +++ b/system/logs/vendor/logstash-external/templates/secrets.yaml @@ -10,5 +10,7 @@ data: opensearch_password: {{ .Values.global.opensearch.data.password | b64enc }} opensearch_jump_user: {{ .Values.global.opensearch.jump.user | b64enc }} opensearch_jump_password: {{ .Values.global.opensearch.jump.password | b64enc }} + opensearch_syslog_user: {{ .Values.global.opensearch.syslog.user | b64enc }} + opensearch_syslog_password: {{ .Values.global.opensearch.syslog.password | b64enc }} http_user: {{ .Values.http.user | b64enc }} http_password: {{ .Values.http.password | b64enc }} diff --git a/system/logs/vendor/logstash-external/templates/statefulset.yaml b/system/logs/vendor/logstash-external/templates/statefulset.yaml index 3271bf08b2e..160d61dde31 100644 --- a/system/logs/vendor/logstash-external/templates/statefulset.yaml +++ b/system/logs/vendor/logstash-external/templates/statefulset.yaml @@ -69,6 +69,16 @@ spec: secretKeyRef: name: logstash-external-secret key: opensearch_password + - name: OPENSEARCH_SYSLOG_USER + valueFrom: + secretKeyRef: + name: logstash-external-secret + key: opensearch_syslog_user + - name: OPENSEARCH_SYSLOG_PASSWORD + valueFrom: + secretKeyRef: + name: logstash-external-secret + key: opensearch_syslog_password - name: OPENSEARCH_JUMP_USER valueFrom: secretKeyRef: From 0d372b6c7ccd67a95bb7730d8e31acb0215f760f Mon Sep 17 00:00:00 2001 From: Fabian Ruff Date: Fri, 17 Jan 2025 14:22:52 +0100 Subject: [PATCH 209/224] Add David Gogl & David Rochow as codeowner for oauth-proxy (#7694) * Add David Gogl as codeowner for oauth-proxy * Add David Rochow as oauth-proxy code owner --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bed6095f1eb..0a71070a808 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -38,7 +38,7 @@ /global/gh-actions @notandy @auhlig /global/git-cert-shim @auhlig /global/kibana-objecter @dimtass -/global/oauth-proxy @andypf @ArtieReus @databus23 +/global/oauth-proxy @andypf @ArtieReus @databus23 @kengou @drochow /global/percona_cluster @galkindmitrii @defo89 @occamshatchet @s10 @businessbean /global/prometheus-alertmanager-cnmp @auhlig /global/prometheus-alertmanager-operated @viennaa @richardtief @Kuckkuck @IvoGoman @timojohlo From 4c933fd29f6d2214790276b556668859abef5659 Mon Sep 17 00:00:00 2001 From: notque Date: Fri, 17 Jan 2025 07:38:54 -0700 Subject: [PATCH 210/224] [hermes] upgrade rabbitmq chart 0.13.0 (#7687) * [hermes] upgrade rabbitmq chart 0.13.0 * [hermes] upgrade hermes chart with this change --- openstack/hermes/Chart.lock | 6 +++--- openstack/hermes/Chart.yaml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openstack/hermes/Chart.lock b/openstack/hermes/Chart.lock index aba0b83afc0..33f39b6fe02 100644 --- a/openstack/hermes/Chart.lock +++ b/openstack/hermes/Chart.lock @@ -4,9 +4,9 @@ dependencies: version: 0.1.3 - name: rabbitmq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.11.1 + version: 0.13.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 -digest: sha256:122938071a5c1365c23b71f1278524bf2ba00eb296ea035bbd4d2ffd1c6d47b1 -generated: "2024-09-03T10:24:00.142426-07:00" +digest: sha256:ff9b0b8da4045de4eb3fef4a86ceb2a9605cfe4945ac27512c3551031a34eebb +generated: "2025-01-16T19:18:48.124643-07:00" diff --git a/openstack/hermes/Chart.yaml b/openstack/hermes/Chart.yaml index 2a36dff51cb..92db64da1c7 100644 
--- a/openstack/hermes/Chart.yaml +++ b/openstack/hermes/Chart.yaml @@ -4,7 +4,7 @@ description: Helm audit management for Openstack maintainers: - name: notque name: hermes -version: 0.1.2 +version: 0.1.3 dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm @@ -13,7 +13,7 @@ dependencies: condition: audit.enabled name: rabbitmq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.11.1 + version: 0.13.0 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.2.0 From f178917164f8030d09a760211286482344d23fce Mon Sep 17 00:00:00 2001 From: Fabian Ruff Date: Fri, 17 Jan 2025 15:58:26 +0100 Subject: [PATCH 211/224] [oauth-proxy]: enable metrics (#7614) This should allow us to alert on failing authentications --- global/oauth-proxy/ci/test-values.yaml | 3 +++ global/oauth-proxy/templates/deployment.yaml | 14 ++++++++++++-- global/oauth-proxy/values.yaml | 5 ++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/global/oauth-proxy/ci/test-values.yaml b/global/oauth-proxy/ci/test-values.yaml index bc8316985a9..07270db7658 100644 --- a/global/oauth-proxy/ci/test-values.yaml +++ b/global/oauth-proxy/ci/test-values.yaml @@ -1,2 +1,5 @@ global: region: "global" + tld: "example.com" +ingress: + host_name: "auth" diff --git a/global/oauth-proxy/templates/deployment.yaml b/global/oauth-proxy/templates/deployment.yaml index 872b09788fa..17f1125576f 100644 --- a/global/oauth-proxy/templates/deployment.yaml +++ b/global/oauth-proxy/templates/deployment.yaml @@ -17,6 +17,10 @@ spec: app: oauth2-proxy alert-tier: auth alert-service: oauth-proxy + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} spec: containers: - args: @@ -35,7 +39,9 @@ spec: - --cookie-domain={{ .Values.oauth_proxy.cookie_domain }} - --cookie-expire={{ .Values.oauth_proxy.cookie_expire }} - --oidc-email-claim=email - + {{- with .Values.oauth_proxy.metrics_address }} + - --metrics-address={{ . 
}} + {{- end }} # Register a new application # https://github.com/settings/applications/new env: @@ -63,7 +69,11 @@ spec: ports: - containerPort: 4180 protocol: TCP - + {{- if .Values.oauth_proxy.metrics_address }} + - containerPort: {{ regexFind `:\d+` .Values.oauth_proxy.metrics_address | trimPrefix ":" }} + protocol: TCP + name: metrics + {{- end }} livenessProbe: httpGet: path: /ping diff --git a/global/oauth-proxy/values.yaml b/global/oauth-proxy/values.yaml index a5835be7aa0..07b1cc61442 100644 --- a/global/oauth-proxy/values.yaml +++ b/global/oauth-proxy/values.yaml @@ -17,8 +17,11 @@ owner-info: - Arturo Reuschenbach - Andreas Pfau - Hoda Noori - +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/targets: "kubernetes" oauth_proxy: + metrics_address: ":9100" client_id: DEFINED_IN_VALUES_FILE client_secret: DEFINED_IN_VALUES_FILE cookie_secret: DEFINED_IN_VALUES_FILE From 23fdc3b1c349b4297f2466592fc205e1e1b7a274 Mon Sep 17 00:00:00 2001 From: Fabian Ruff Date: Fri, 17 Jan 2025 16:57:06 +0100 Subject: [PATCH 212/224] [oauth_proxy]: Support for option --skip-auth-routes (#7698) --- global/oauth-proxy/templates/deployment.yaml | 4 ++++ global/oauth-proxy/values.yaml | 1 + 2 files changed, 5 insertions(+) diff --git a/global/oauth-proxy/templates/deployment.yaml b/global/oauth-proxy/templates/deployment.yaml index 17f1125576f..0106000117b 100644 --- a/global/oauth-proxy/templates/deployment.yaml +++ b/global/oauth-proxy/templates/deployment.yaml @@ -61,6 +61,10 @@ spec: secretKeyRef: name: oauth-secret key: OAUTH_PROXY_COOKIE_SECRET + {{- with .Values.oauth_proxy.skipAuthRoutes }} + - name: OAUTH2_PROXY_SKIP_AUTH_ROUTES + value: {{ . 
| join "," | quote }} + {{- end }} image: "{{ .Values.global.dockerHubMirror }}/{{.Values.image.name}}:{{ .Values.image.tag }}" imagePullPolicy: {{.Values.image.pullPolicy}} name: oauth2-proxy diff --git a/global/oauth-proxy/values.yaml b/global/oauth-proxy/values.yaml index 07b1cc61442..fb7d33be0c4 100644 --- a/global/oauth-proxy/values.yaml +++ b/global/oauth-proxy/values.yaml @@ -21,6 +21,7 @@ podAnnotations: prometheus.io/scrape: "true" prometheus.io/targets: "kubernetes" oauth_proxy: + skipAuthRoutes: [] metrics_address: ":9100" client_id: DEFINED_IN_VALUES_FILE client_secret: DEFINED_IN_VALUES_FILE From 0974a5c2706b674c43bfbc030639acf33fdf54be Mon Sep 17 00:00:00 2001 From: rajivmucheli Date: Mon, 20 Jan 2025 09:09:50 +0530 Subject: [PATCH 213/224] [Barbican] bump mariadb chart to 0.15.3, rabbitmq to 0.13.0 and memcached to 0.6.3 --- openstack/barbican/Chart.lock | 10 +++++----- openstack/barbican/Chart.yaml | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/openstack/barbican/Chart.lock b/openstack/barbican/Chart.lock index d055f75419e..4617b7422a3 100644 --- a/openstack/barbican/Chart.lock +++ b/openstack/barbican/Chart.lock @@ -1,16 +1,16 @@ dependencies: - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.14.2 + version: 0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.2 + version: 0.6.3 - name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.4.2 - name: rabbitmq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.11.1 + version: 0.13.0 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.19.6 @@ -23,5 +23,5 @@ dependencies: - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.1.0 -digest: sha256:2063ebb548030aefae93ea53c27b58b62b6519804c4bf4df84e1323a661f1e10 -generated: "2025-01-13T09:43:30.556477+05:30" +digest: sha256:12e137cb1c86b2ab6e3a172c4d9e5018867b852aac6a61bc99899b78b58916b3 +generated: "2025-01-20T09:08:23.329203+05:30" diff --git a/openstack/barbican/Chart.yaml b/openstack/barbican/Chart.yaml index fbcd250dae3..e7a09b13c04 100644 --- a/openstack/barbican/Chart.yaml +++ b/openstack/barbican/Chart.yaml @@ -3,22 +3,22 @@ appVersion: bobcat description: A Helm chart for Openstack Barbican icon: https://www.openstack.org/themes/openstack/images/project-mascots/Barbican/OpenStack_Project_Barbican_vertical.png name: barbican -version: 0.5.14 +version: 0.5.15 dependencies: - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.14.2 + version: 0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.2 + version: 0.6.3 - condition: mariadb.enabled name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.4.2 - name: rabbitmq repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.11.1 + version: 0.13.0 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.19.6 From d3262ccb2bd15cad16695c4ac24d9b8080b5ee80 Mon Sep 17 00:00:00 2001 From: rajivmucheli Date: Mon, 20 Jan 2025 10:20:58 +0530 Subject: [PATCH 214/224] [Barbican] set no_alert_on_absence for OpenstackBarbicanUserOpaqueCertificateExpiresIn30days Alert --- openstack/barbican/alerts/openstack-barbican.alerts | 1 + 1 file changed, 1 insertion(+) diff --git a/openstack/barbican/alerts/openstack-barbican.alerts 
b/openstack/barbican/alerts/openstack-barbican.alerts index b4832c9dcc5..ae66e588086 100644 --- a/openstack/barbican/alerts/openstack-barbican.alerts +++ b/openstack/barbican/alerts/openstack-barbican.alerts @@ -26,6 +26,7 @@ groups: context: 'certificate' dashboard: barbican support_group: foundation + no_alert_on_absence: "true" meta: 'Certificate {{ $labels.certificate_name }} will expire in {{ $value }} Days' annotations: description: 'Opaque Certificate {{ $labels.certificate_name }} created by {{ $labels.full_name }} in Project_id {{ $labels.project_id }} will expire in {{ $value }} Days' From d56c10cbcac51245fbf1cdf5615222457f9906f0 Mon Sep 17 00:00:00 2001 From: rajivmucheli Date: Mon, 20 Jan 2025 10:54:38 +0530 Subject: [PATCH 215/224] [Barbican] Fix metrics query errors --- openstack/barbican/values.yaml | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/openstack/barbican/values.yaml b/openstack/barbican/values.yaml index 13bc4861f93..6b68a526f6f 100644 --- a/openstack/barbican/values.yaml +++ b/openstack/barbican/values.yaml @@ -177,15 +177,17 @@ mysql_metrics: name: openstack_barbican_certificate_expiration_date query: | SELECT - UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration,'1970-01-01 00:00:00'), '%Y-%c-%e %T')) AS expiration_date, - secrets.name AS certificate_name, - creator_id, - projects.external_id AS project_id, - COUNT(*) AS count_gauge - from secrets + UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration, '1970-01-01 00:00:00'), '%Y-%c-%e %T')) AS expiration_date, + secrets.name AS certificate_name, + projects.external_id AS project_id, + secrets.creator_id, + COUNT(*) AS count_gauge + FROM secrets INNER JOIN projects - on secrets.project_id=projects.id - where secrets.deleted='false' AND secret_type='certificate' AND expiration!='0' + ON secrets.project_id = projects.id + WHERE secrets.deleted = 'false' + AND secret_type = 'certificate' + AND expiration != '0' GROUP BY secrets.creator_id, secrets.name, projects.external_id, secrets.expiration; values: - "expiration_date" @@ -198,15 +200,15 @@ mysql_metrics: name: openstack_barbican_opaque_certificate_expiration_date query: | SELECT - UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration,'1970-01-01 00:00:00'), '%Y-%c-%e %T')) AS expiration_date, - secrets.name AS certificate_name, - creator_id, - projects.external_id AS project_id, - COUNT(*) AS count_gauge - from secrets + CAST(UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration, '1970-01-01 00:00:00'), '%Y-%c-%e %T')) AS CHAR) AS expiration_date, + secrets.name AS certificate_name, + secrets.creator_id, + projects.external_id AS project_id, + COUNT(*) AS count_gauge + FROM secrets INNER JOIN projects - on secrets.project_id=projects.id - where secret_type='opaque' AND expiration!='0' + ON secrets.project_id = projects.id + WHERE secret_type = 'opaque' AND expiration != '0' GROUP BY secrets.creator_id, secrets.name, projects.external_id, secrets.expiration; values: - "expiration_date" From 20514ce08157d691dbf1137f91bbd415e9b1d0ba Mon Sep 17 00:00:00 2001 From: rajivmucheli Date: Mon, 20 Jan 2025 11:03:09 +0530 Subject: [PATCH 216/224] [Barbican] Fix metrics query errors --- openstack/barbican/values.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/openstack/barbican/values.yaml b/openstack/barbican/values.yaml index 6b68a526f6f..9520e76c023 100644 --- a/openstack/barbican/values.yaml +++ b/openstack/barbican/values.yaml @@ -177,7 +177,9 @@ mysql_metrics: name: 
openstack_barbican_certificate_expiration_date query: | SELECT - UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration, '1970-01-01 00:00:00'), '%Y-%c-%e %T')) AS expiration_date, + CONCAT( + UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration, '1970-01-01 00:00:00'), '%Y-%c-%e %T')) + ) AS expiration_date, secrets.name AS certificate_name, projects.external_id AS project_id, secrets.creator_id, @@ -200,7 +202,9 @@ mysql_metrics: name: openstack_barbican_opaque_certificate_expiration_date query: | SELECT - CAST(UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration, '1970-01-01 00:00:00'), '%Y-%c-%e %T')) AS CHAR) AS expiration_date, + CONCAT( + UNIX_TIMESTAMP(STR_TO_DATE(IFNULL(secrets.expiration, '1970-01-01 00:00:00'), '%Y-%c-%e %T')) + ) AS expiration_date, secrets.name AS certificate_name, secrets.creator_id, projects.external_id AS project_id, From 80917e1c23f8e93ec54f3d58cd64f6065507b112 Mon Sep 17 00:00:00 2001 From: Alexandru Mihai Date: Mon, 20 Jan 2025 09:23:08 +0200 Subject: [PATCH 217/224] [nodecidr-controller] deployment independent of kube-system --- system/kube-system-metal/Chart.lock | 7 ++----- system/kube-system-metal/Chart.yaml | 8 ++++---- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/system/kube-system-metal/Chart.lock b/system/kube-system-metal/Chart.lock index 96438f63282..23f26f1583d 100644 --- a/system/kube-system-metal/Chart.lock +++ b/system/kube-system-metal/Chart.lock @@ -44,9 +44,6 @@ dependencies: - name: wormhole repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 3.1.8 -- name: nodecidr-controller - repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.0.12 - name: kube-parrot repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 4.0.8 @@ -101,5 +98,5 @@ dependencies: - name: reloader repository: oci://ghcr.io/stakater/charts version: 1.2.0 -digest: sha256:213e30a5d3d14ccc24f7440344d8cbda0087d6d7a6594c5c761125fc5aee9ac6 -generated: "2025-01-16T09:29:43.751180733Z" +digest: sha256:4c803eb2da2d5744b81d4bf96e0b7c5dcc9c078e0a528dd10efa801a2861b91d +generated: "2025-01-20T09:12:39.730332+02:00" diff --git a/system/kube-system-metal/Chart.yaml b/system/kube-system-metal/Chart.yaml index b0995d4d763..327fa5fcf6b 100644 --- a/system/kube-system-metal/Chart.yaml +++ b/system/kube-system-metal/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: Kube-System relevant Service collection for metal clusters. 
name: kube-system-metal -version: 6.10.41 +version: 6.10.42 home: https://github.com/sapcc/helm-charts/tree/master/system/kube-system-metal dependencies: - name: cc-rbac @@ -54,9 +54,9 @@ dependencies: repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: "^3.x" condition: wormhole.enabled - - name: nodecidr-controller - repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: "^1.x" + # - name: nodecidr-controller + # repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm + # version: "^1.x" - condition: kube-parrot.enabled name: kube-parrot repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From fa74dc51d4afe9b6b0397a5d64adcd6c9f846ed1 Mon Sep 17 00:00:00 2001 From: rajivmucheli Date: Mon, 20 Jan 2025 12:55:48 +0530 Subject: [PATCH 218/224] [Glance] bump mariadb chart to 0.15.3, redis to 1.6.2 and memcached to 0.6.3 --- openstack/glance/Chart.lock | 10 +++++----- openstack/glance/Chart.yaml | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/openstack/glance/Chart.lock b/openstack/glance/Chart.lock index ba60aaf1dc2..d0a7d113714 100644 --- a/openstack/glance/Chart.lock +++ b/openstack/glance/Chart.lock @@ -1,10 +1,10 @@ dependencies: - name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.14.2 + version: 0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.2 + version: 0.6.3 - name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.4.2 - name: utils repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.19.6 - name: redis repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.5.3 + version: 1.6.2 - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 - name: linkerd-support repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 1.0.0 -digest: sha256:d71a7789f43ed12a3cb3805854162a4c4fb34d08d1d817af93670a5a091b6f33 -generated: "2025-01-13T09:33:16.3966+05:30" +digest: sha256:24085cef1641f95fd2b5da8b47efc1195732d03e72da4e8a859066c5082969f7 +generated: "2025-01-20T12:55:01.544922+05:30" diff --git a/openstack/glance/Chart.yaml b/openstack/glance/Chart.yaml index 0a7f322c44e..13d5732d285 100644 --- a/openstack/glance/Chart.yaml +++ b/openstack/glance/Chart.yaml @@ -8,10 +8,10 @@ dependencies: - condition: mariadb.enabled name: mariadb repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.14.2 + version: 0.15.3 - name: memcached repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 0.6.2 + version: 0.6.3 - condition: mariadb.enabled name: mysql_metrics repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm version: 0.4.2 - name: redis alias: sapcc_rate_limit repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm - version: 1.5.3 + version: 1.6.2 condition: sapcc_rate_limit.enabled - name: owner-info repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm From 470bb0d317a84dfe998b2a8b09bdd47fa19f9678 Mon Sep 17 00:00:00 2001 From: Alexandru Mihai Date: Mon, 20 Jan 2025 11:23:20 +0200 Subject: [PATCH 219/224] [nodecidr-controller] set owner info --- system/nodecidr-controller/Chart.yaml | 2 +- system/nodecidr-controller/values.yaml | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/system/nodecidr-controller/Chart.yaml b/system/nodecidr-controller/Chart.yaml index 543965c5c02..60b14e463a0 100644 --- a/system/nodecidr-controller/Chart.yaml +++ b/system/nodecidr-controller/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: nodecidr-controller description: A 
Helm chart for the CCloud nodeCIDR controller. -version: 1.0.12 +version: 1.0.13 appVersion: 1.0.3 home: https://github.com/sapcc/ccloud-nodeCIDR-controller sources: diff --git a/system/nodecidr-controller/values.yaml b/system/nodecidr-controller/values.yaml index 3e0c2b3fef7..e6114c4f73c 100644 --- a/system/nodecidr-controller/values.yaml +++ b/system/nodecidr-controller/values.yaml @@ -1,3 +1,12 @@ +owner-info: + support-group: containers + service: nodeCIDRcontroller + helm-chart-url: https://github.com/sapcc/helm-charts/tree/master/system/nodecidr-controller + maintainers: + - Alexandru Mihai + - Dmitri Fedotov + - Goeran Gudat + - Marian Schwarz image: repository: sapcc/ccloud-nodecidr-controller tag: From da8d8ea81a89c60408bab9cd60643e2988b217a8 Mon Sep 17 00:00:00 2001 From: Alexandru Mihai Date: Mon, 20 Jan 2025 11:51:03 +0200 Subject: [PATCH 220/224] [nodecidr-controller] set owner info dep --- system/nodecidr-controller/Chart.lock | 6 ++++++ system/nodecidr-controller/Chart.yaml | 4 ++++ 2 files changed, 10 insertions(+) create mode 100644 system/nodecidr-controller/Chart.lock diff --git a/system/nodecidr-controller/Chart.lock b/system/nodecidr-controller/Chart.lock new file mode 100644 index 00000000000..9f3955c5bcb --- /dev/null +++ b/system/nodecidr-controller/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: owner-info + repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm + version: 1.0.0 +digest: sha256:ca7c19b16632950e17c13dc4e5dbda8c7006ad9178f511fefe25bc57e69ad033 +generated: "2025-01-20T11:50:20.427315+02:00" diff --git a/system/nodecidr-controller/Chart.yaml b/system/nodecidr-controller/Chart.yaml index 60b14e463a0..a1b6b228053 100644 --- a/system/nodecidr-controller/Chart.yaml +++ b/system/nodecidr-controller/Chart.yaml @@ -6,3 +6,7 @@ appVersion: 1.0.3 home: https://github.com/sapcc/ccloud-nodeCIDR-controller sources: - https://github.com/sapcc/ccloud-nodeCIDR-controller +dependencies: + - name: owner-info + repository: oci://keppel.eu-de-1.cloud.sap/ccloud-helm + version: '>= 0.0.0' \ No newline at end of file From a3267358238104df14ae87444402c0eeddad15f7 Mon Sep 17 00:00:00 2001 From: Alexandru Mihai Date: Mon, 20 Jan 2025 11:55:43 +0200 Subject: [PATCH 221/224] [nodecidr-controller] set owner info dep/bump version --- system/nodecidr-controller/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/nodecidr-controller/Chart.yaml b/system/nodecidr-controller/Chart.yaml index a1b6b228053..e070b10ccd2 100644 --- a/system/nodecidr-controller/Chart.yaml +++ b/system/nodecidr-controller/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: nodecidr-controller description: A Helm chart for the CCloud nodeCIDR controller. 
-version: 1.0.13 +version: 1.0.14 appVersion: 1.0.3 home: https://github.com/sapcc/ccloud-nodeCIDR-controller sources: From 6e657265980e15aa2eb8b2cbe58fd5e4fd2e9f8d Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 20 Jan 2025 11:00:23 +0100 Subject: [PATCH 222/224] [opensearch-logs] make ds/index ism retention configurable --- system/opensearch-logs/Chart.yaml | 2 +- system/opensearch-logs/templates/config/_ds-ism.json.tpl | 2 +- system/opensearch-logs/templates/config/_index-ism.json.tpl | 4 ++-- system/opensearch-logs/values.yaml | 4 ++++ 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/system/opensearch-logs/Chart.yaml b/system/opensearch-logs/Chart.yaml index c939f6dd7d0..aefb907e490 100644 --- a/system/opensearch-logs/Chart.yaml +++ b/system/opensearch-logs/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 description: A Helm chart for the Opensearch stack name: opensearch-logs -version: 0.0.31 +version: 0.0.32 home: https://github.com/sapcc/helm-charts/tree/master/system/opensearch-logs dependencies: - name: opensearch diff --git a/system/opensearch-logs/templates/config/_ds-ism.json.tpl b/system/opensearch-logs/templates/config/_ds-ism.json.tpl index de5dbeff790..e1f2e27c1e9 100644 --- a/system/opensearch-logs/templates/config/_ds-ism.json.tpl +++ b/system/opensearch-logs/templates/config/_ds-ism.json.tpl @@ -37,7 +37,7 @@ { "state_name": "delete", "conditions": { - "min_index_age": "31d" + "min_index_age": "{{ .Values.retention.ds }}" } } ] diff --git a/system/opensearch-logs/templates/config/_index-ism.json.tpl b/system/opensearch-logs/templates/config/_index-ism.json.tpl index 4e50624f093..5d146bcdc37 100644 --- a/system/opensearch-logs/templates/config/_index-ism.json.tpl +++ b/system/opensearch-logs/templates/config/_index-ism.json.tpl @@ -1,6 +1,6 @@ { "policy": { - "description": "Simple 31d log retention", + "description": "Simple {{ .Values.retention.index}} log retention", "default_state": "ingest", "schema_version": 25, "states": [ @@ -11,7 +11,7 @@ { "state_name": "delete", "conditions": { - "min_index_age": "31d" + "min_index_age": "{{ .Values.retention.index}}" } } ] diff --git a/system/opensearch-logs/values.yaml b/system/opensearch-logs/values.yaml index a829902e49a..b43ceb06a79 100644 --- a/system/opensearch-logs/values.yaml +++ b/system/opensearch-logs/values.yaml @@ -33,6 +33,10 @@ alerts: auth: ca_path: certs/admin/ca.crt +retention: + index: "31d" + ds: "31d" + opensearch_master: enabled: false nameOverride: "opensearch-logs-master" From be665565b70aa497afe9948da2edd2fb6356dfd9 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 20 Jan 2025 13:56:56 +0100 Subject: [PATCH 223/224] [opensearch-logs] mv audit and otel user to qa only condition --- .../templates/config/_internal_users.yml.tpl | 24 +++++++++---------- .../templates/config/_roles.yml.tpl | 10 ++++++++ 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/system/opensearch-logs/templates/config/_internal_users.yml.tpl b/system/opensearch-logs/templates/config/_internal_users.yml.tpl index c50141825a9..975fdd1716b 100644 --- a/system/opensearch-logs/templates/config/_internal_users.yml.tpl +++ b/system/opensearch-logs/templates/config/_internal_users.yml.tpl @@ -44,18 +44,6 @@ otel: backend_roles: - "otel" -otel2: - hash: "{{ .Values.users.otel2.nohash }}" - reserved: true - backend_roles: - - "otel" - -audit: - hash: "{{ .Values.users.audit.nohash }}" - reserved: true - backend_roles: - - "audit" - otellogs: hash: "{{ .Values.users.otellogs.nohash }}" reserved: true @@ -81,6 
+69,18 @@ dataqade5: backend_roles: - "qade5" +otel2: + hash: "{{ .Values.users.otel2.nohash }}" + reserved: true + backend_roles: + - "otel" + +audit: + hash: "{{ .Values.users.audit.nohash }}" + reserved: true + backend_roles: + - "audit" + oraboskvm: hash: "{{ .Values.users.oraboskvm.nohash }}" reserved: true diff --git a/system/opensearch-logs/templates/config/_roles.yml.tpl b/system/opensearch-logs/templates/config/_roles.yml.tpl index 0c8d6688199..53623682cdb 100644 --- a/system/opensearch-logs/templates/config/_roles.yml.tpl +++ b/system/opensearch-logs/templates/config/_roles.yml.tpl @@ -12,6 +12,7 @@ security_rest_api_access: data: + index_permissions: reserved: false cluster_permissions: - "cluster_monitor" @@ -31,6 +32,15 @@ data: - "indices:admin/create" - "indices:data/write/bulk*" - "indices:data/write/index" + - index_patterns: + - "logs" + allowed_actions: + - "indices:admin/template/get" + - "indices:admin/template/put" + - "indices:admin/mapping/put" + - "indices:admin/create" + - "indices:data/write/bulk*" + - "indices:data/write/index" - index_patterns: - "systemd-*" allowed_actions: From fe42f7c40d4f9c4d4767cc1073678dd4cb055ed4 Mon Sep 17 00:00:00 2001 From: Olaf Heydorn Date: Mon, 20 Jan 2025 14:04:27 +0100 Subject: [PATCH 224/224] [opensearch-logs] fix format --- system/opensearch-logs/templates/config/_roles.yml.tpl | 1 - 1 file changed, 1 deletion(-) diff --git a/system/opensearch-logs/templates/config/_roles.yml.tpl b/system/opensearch-logs/templates/config/_roles.yml.tpl index 53623682cdb..c5fc17b7f1b 100644 --- a/system/opensearch-logs/templates/config/_roles.yml.tpl +++ b/system/opensearch-logs/templates/config/_roles.yml.tpl @@ -12,7 +12,6 @@ security_rest_api_access: data: - index_permissions: reserved: false cluster_permissions: - "cluster_monitor"
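
For reference, the knobs introduced in the last patches are consumed as ordinary Helm values. A minimal override sketch for the opensearch-logs retention from [PATCH 222/224]; the 14d figure is a hypothetical example, the chart defaults both values to 31d:

retention:
  index: "14d"  # plain indices: the ISM policy deletes them once older than 14 days
  ds: "31d"     # data streams: keep the chart default

For the oauth-proxy option from [PATCH 212/224], the list entries are joined with "," into OAUTH2_PROXY_SKIP_AUTH_ROUTES. oauth2-proxy accepts entries of the form METHOD=path_regex (or a bare regex), so a hypothetical exemption for the chart's /ping health endpoint could look like:

oauth_proxy:
  skipAuthRoutes:
    - "GET=^/ping"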