From 17d376df23ee35b066a41d379883bd01b093fe51 Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Sun, 23 Feb 2025 20:42:16 -0600 Subject: [PATCH] chore(cinder): convert helm chart from submodule to repo This change removes the need to carry the openstack-helm charts for the purposes of providing a cinder deployment. The base helm files have been updated and simplified, reducing the values we carry to only what we need. Related Issue: #809 Signed-off-by: Kevin Carter --- .../cinder/cinder-helm-overrides.yaml | 1441 +---------------- bin/install-cinder.sh | 9 +- .../notes/cinder-chart-3e9c76938ef9ebe8.yaml | 17 + 3 files changed, 110 insertions(+), 1357 deletions(-) create mode 100644 releasenotes/notes/cinder-chart-3e9c76938ef9ebe8.yaml diff --git a/base-helm-configs/cinder/cinder-helm-overrides.yaml b/base-helm-configs/cinder/cinder-helm-overrides.yaml index dd7d994a..b0c428ab 100644 --- a/base-helm-configs/cinder/cinder-helm-overrides.yaml +++ b/base-helm-configs/cinder/cinder-helm-overrides.yaml @@ -1,1484 +1,221 @@ +--- storage: lvm labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - backup: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - scheduler: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: null - # test: - # node_selector_key: openstack-control-plane - # node_selector_value: enabled volume: node_selector_key: openstack-storage-node - node_selector_value: enabled - -release_group: null images: tags: - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - cinder_db_sync: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - cinder_api: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - cinder_scheduler: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - cinder_volume: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - cinder_volume_usage_audit: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - cinder_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" - cinder_backup: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - cinder_backup_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" - test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" - rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -jobs: - volume_usage_audit: - cron: "5 * * * *" - starting_deadline: 600 - history: - success: 3 - failed: 1 + bootstrap: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + cinder_api: 'quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy' + cinder_backup: 'quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy' + cinder_backup_storage_init: 'quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy' + cinder_db_sync: 
'quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy' + cinder_scheduler: 'quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy' + cinder_storage_init: 'quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy' + cinder_volume: 'quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy' + cinder_volume_usage_audit: 'quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy' + db_drop: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + db_init: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + dep_check: 'quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0' + image_repo_sync: 'quay.io/rackspace/rackerlabs-docker:17.07.0' + ks_endpoints: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + ks_service: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + ks_user: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + rabbit_init: 'quay.io/rackspace/rackerlabs-rabbitmq:3.13-management' + test: 'quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0' pod: security_context: - volume_usage_audit: - pod: - runAsUser: 42424 - container: - cinder_volume_usage_audit: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false cinder_api: - pod: - runAsUser: 42424 container: - ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true cinder_api: - readOnlyRootFilesystem: true allowPrivilegeEscalation: true privileged: true cinder_backup: - pod: - runAsUser: 42424 container: - ceph_backup_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_backup_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true cinder_backup: - # capabilities: - # add: - # - SYS_ADMIN privileged: true - readOnlyRootFilesystem: true - runAsUser: 0 - cinder_scheduler: - pod: - runAsUser: 42424 - container: - ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - cinder_scheduler: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false cinder_volume: - pod: - runAsUser: 42424 container: - ceph_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - init_cinder_conf: - runAsUser: 0 - readOnlyRootFilesystem: true cinder_volume: - # capabilities: - # add: - # - SYS_ADMIN - readOnlyRootFilesystem: true privileged: true - storage_init: - pod: - runAsUser: 42424 - container: - ceph_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - cinder_backup_storage_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - clean: - pod: - runAsUser: 42424 - # container: - # cinder_volume_rbd_secret_clean: - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - create_internal_tenant: - pod: - runAsUser: 42424 - container: - create_internal_tenant: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - cinder: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule useHostNetwork: - volume: true backup: true - mounts: - cinder_api: - init_container: null - cinder_api: - volumeMounts: - volumes: - cinder_scheduler: - 
init_container: null - cinder_scheduler: - volumeMounts: - volumes: - cinder_volume: - init_container: null - cinder_volume: - volumeMounts: - volumes: - cinder_volume_usage_audit: - init_container: null - cinder_volume_usage_audit: - volumeMounts: - volumes: - cinder_backup: - init_container: null - cinder_backup: - volumeMounts: - volumes: - cinder_tests: - init_container: null - cinder_tests: - volumeMounts: - volumes: - cinder_db_sync: - cinder_db_sync: - volumeMounts: - volumes: - replicas: - api: 1 - volume: 1 - scheduler: 1 - backup: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - scheduler: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - volume: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - volume_usage_audit: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - bootstrap: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - clean: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - backup_storage_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - storage_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_service: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -bootstrap: - enabled: true - ks_user: admin - bootstrap_conf_backends: true - volume_types: - name: - group: - volume_backend_name: - # access_type: "private" - # If you set up access_type to private, only the creator - # will get an access to the volume type. You can extend - # the access to your volume type by providing a list of - # domain names and projects as shown below - # grant_access: - # : - # - - # - - # <...> - # : - # - - # <...> - # Volume QoS if any. By default, None QoS is created. - # Below values with a number at the end need to be replaced - # with real names. 
- # volume_qos: - # qos_name_1: - # consumer: front-end - # properties: - # key_1: value_1 - # key_2: value_2 - # associates: - # - volume_type_1 - # - volume_type_2 - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30877 + volume: true + oslo_db: + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local + hosts: + default: mariadb-cluster-primary + oslo_cache: + host_fqdn_override: + default: memcached.openstack.svc.cluster.local + hosts: + default: memcached + oslo_messaging: + host_fqdn_override: + default: rabbitmq.openstack.svc.cluster.local + hosts: + default: rabbitmq-nodes -ceph_client: - # enable this when there is a need to create second ceph backed pointing - # to external ceph cluster - enable_external_ceph_backend: false -# # change this in case of first ceph backend name pointing to internal ceph cluster -# # is diffrent -# internal_ceph_backend: rbd1 -# configmap: ceph-etc -# user_secret_name: pvc-ceph-client-key -# external_ceph: -# # Only when enable_external_ceph_backend is true and rbd_user is NOT null -# # secret for external ceph keyring will be created. -# rbd_user: null -# rbd_user_keyring: null -# configmap: null -# conf: -# global: null -# osd: null conf: - paste: - composite:osapi_volume: - use: call:cinder.api:root_app_factory - /: apiversions - /v1: openstack_volume_api_v1 - /v2: openstack_volume_api_v2 - /v3: openstack_volume_api_v3 - composite:openstack_volume_api_v1: - use: call:cinder.api.middleware.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1 - keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1 - keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1 - composite:openstack_volume_api_v2: - use: call:cinder.api.middleware.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2 - keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2 - keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2 - composite:openstack_volume_api_v3: - use: call:cinder.api.middleware.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3 - keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3 - keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3 - filter:request_id: - paste.filter_factory: oslo_middleware.request_id:RequestId.factory - filter:http_proxy_to_wsgi: - paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory - filter:cors: - paste.filter_factory: oslo_middleware.cors:filter_factory - oslo_config_project: cinder - filter:faultwrap: - paste.filter_factory: cinder.api.middleware.fault:FaultWrapper.factory - filter:osprofiler: - paste.filter_factory: osprofiler.web:WsgiMiddleware.factory - filter:noauth: - paste.filter_factory: cinder.api.middleware.auth:NoAuthMiddleware.factory - filter:sizelimit: - paste.filter_factory: oslo_middleware.sizelimit:RequestBodySizeLimiter.factory - app:apiv1: 
- paste.app_factory: cinder.api.v1.router:APIRouter.factory - app:apiv2: - paste.app_factory: cinder.api.v2.router:APIRouter.factory - app:apiv3: - paste.app_factory: cinder.api.v3.router:APIRouter.factory - pipeline:apiversions: - pipeline: cors http_proxy_to_wsgi faultwrap osvolumeversionapp - app:osvolumeversionapp: - paste.app_factory: cinder.api.versions:Versions.factory - filter:keystonecontext: - paste.filter_factory: cinder.api.middleware.auth:CinderKeystoneContext.factory - filter:authtoken: - paste.filter_factory: keystonemiddleware.auth_token:filter_factory - filter:audit: - paste.filter_factory: keystonemiddleware.audit:filter_factory - audit_map_file: /etc/cinder/api_audit_map.conf - policy: {} - api_audit_map: - DEFAULT: - target_endpoint_type: None - custom_actions: - associate: update/associate - disassociate: update/disassociate_all - disassociate_all: update/disassociate_all - associations: read/list/associations - path_keywords: - defaults: None - detail: None - limits: None - os-quota-specs: project - qos-specs: qos-spec - snapshots: snapshot - types: type - volumes: volume - service_endpoints: - volume: service/storage/block - volumev2: service/storage/block - volumev3: service/storage/block - cinder_sudoers: | - # This sudoers file supports rootwrap for both Kolla and LOCI Images. - Defaults !requiretty - Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin" - cinder ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *, /var/lib/openstack/bin/cinder-rootwrap /etc/cinder/rootwrap.conf * - rootwrap: | - # Configuration for cinder-rootwrap - # This file should be owned by (and only-writeable by) the root user - - [DEFAULT] - # List of directories to load filter definitions from (separated by ','). - # These directories MUST all be only writeable by root ! - filters_path=/etc/cinder/rootwrap.d - - # List of directories to search executables in, in case filters do not - # explicitely specify a full path (separated by ',') - # If not specified, defaults to system PATH environment variable. - # These directories MUST all be only writeable by root ! - exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin - - # Enable logging to syslog - # Default value is False - use_syslog=False - - # Which syslog facility to use. - # Valid values include auth, authpriv, syslog, local0, local1... - # Default value is 'syslog' - syslog_log_facility=syslog - - # Which messages to log. - # INFO means log all usage - # ERROR means only log unsuccessful attempts - syslog_log_level=ERROR - rootwrap_filters: - volume: - pods: - - volume - content: | - # cinder-rootwrap command filters for volume nodes - # This file should be owned by (and only-writeable by) the root user - - [Filters] - # cinder/volume/iscsi.py: iscsi_helper '--op' ... 
- ietadm: CommandFilter, ietadm, root - tgtadm: CommandFilter, tgtadm, root - iscsictl: CommandFilter, iscsictl, root - tgt-admin: CommandFilter, tgt-admin, root - cinder-rtstool: CommandFilter, cinder-rtstool, root - scstadmin: CommandFilter, scstadmin, root - - # LVM related show commands - pvs: EnvFilter, env, root, LC_ALL=C, pvs - vgs: EnvFilter, env, root, LC_ALL=C, vgs - lvs: EnvFilter, env, root, LC_ALL=C, lvs - lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay - - # -LVM related show commands with suppress fd warnings - pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs - vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs - lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs - lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay - - - # -LVM related show commands conf var - pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs - vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs - lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs - lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay - - # -LVM conf var with suppress fd_warnings - pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs - vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs - lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs - lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay - - # os-brick library commands - # os_brick.privileged.run_as_root oslo.privsep context - # This line ties the superuser privs with the config files, context name, - # and (implicitly) the actual python code invoked. - privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* - # The following and any cinder/brick/* entries should all be obsoleted - # by privsep, and may be removed once the os-brick version requirement - # is updated appropriately. - scsi_id: CommandFilter, /lib/udev/scsi_id, root - drbdadm: CommandFilter, drbdadm, root - - # cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list - vgcreate: CommandFilter, vgcreate, root - - # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. - # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ... - lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate - lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate - lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate - lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate - - # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... - dd: CommandFilter, dd, root - - # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ... - lvremove: CommandFilter, lvremove, root - - # cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'... - lvrename: CommandFilter, lvrename, root - - # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ... - # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ... 
- lvextend: EnvFilter, env, root, LC_ALL=C, lvextend - lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend - lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend - lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend - - # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K ' - lvchange: CommandFilter, lvchange, root - - # cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name - lvconvert: CommandFilter, lvconvert, root - - # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... - # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... - iscsiadm: CommandFilter, iscsiadm, root - - # cinder/volume/utils.py: utils.temporary_chown(path, 0) - chown: CommandFilter, chown, root - - # cinder/volume/utils.py: copy_volume(..., ionice='...') - ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7] - ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3] - - # cinder/volume/utils.py: setup_blkio_cgroup() - cgcreate: CommandFilter, cgcreate, root - cgset: CommandFilter, cgset, root - cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+ - - # cinder/volume/driver.py - dmsetup: CommandFilter, dmsetup, root - ln: CommandFilter, ln, root - - # cinder/image/image_utils.py - qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img - qemu-img_convert: CommandFilter, qemu-img, root - - udevadm: CommandFilter, udevadm, root - - # cinder/volume/driver.py: utils.read_file_as_root() - cat: CommandFilter, cat, root - - # cinder/volume/nfs.py - stat: CommandFilter, stat, root - mount: CommandFilter, mount, root - df: CommandFilter, df, root - du: CommandFilter, du, root - truncate: CommandFilter, truncate, root - chmod: CommandFilter, chmod, root - rm: CommandFilter, rm, root - - # cinder/volume/drivers/remotefs.py - mkdir: CommandFilter, mkdir, root - - # cinder/volume/drivers/netapp/nfs.py: - netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+ - - # cinder/volume/drivers/glusterfs.py - chgrp: CommandFilter, chgrp, root - umount: CommandFilter, umount, root - fallocate: CommandFilter, fallocate, root - - # cinder/volumes/drivers/hds/hds.py: - hus-cmd: CommandFilter, hus-cmd, root - hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root - - # cinder/volumes/drivers/hds/hnas_backend.py - ssc: CommandFilter, ssc, root - - # cinder/brick/initiator/connector.py: - ls: CommandFilter, ls, root - tee: CommandFilter, tee, root - multipath: CommandFilter, multipath, root - multipathd: CommandFilter, multipathd, root - systool: CommandFilter, systool, root - - # cinder/volume/drivers/block_device.py - blockdev: CommandFilter, blockdev, root - - # cinder/volume/drivers/ibm/gpfs.py - # cinder/volume/drivers/tintri.py - mv: CommandFilter, mv, root - - # cinder/volume/drivers/ibm/gpfs.py - cp: CommandFilter, cp, root - mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root - mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root - mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root - mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root - mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root - mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root - mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root - mkfs: CommandFilter, mkfs, root - mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root - mmlinkfileset: CommandFilter, 
/usr/lpp/mmfs/bin/mmlinkfileset, root - mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root - mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root - mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root - mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root - - # cinder/volume/drivers/ibm/gpfs.py - # cinder/volume/drivers/ibm/ibmnas.py - find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -ignore_readdir_race, -inum, \d+, -print0, -quit - - # cinder/brick/initiator/connector.py: - aoe-revalidate: CommandFilter, aoe-revalidate, root - aoe-discover: CommandFilter, aoe-discover, root - aoe-flush: CommandFilter, aoe-flush, root - - # cinder/brick/initiator/linuxscsi.py: - sg_scan: CommandFilter, sg_scan, root - - #cinder/backup/services/tsm.py - dsmc:CommandFilter,/usr/bin/dsmc,root - - # cinder/volume/drivers/hitachi/hbsd_horcm.py - raidqry: CommandFilter, raidqry, root - raidcom: CommandFilter, raidcom, root - pairsplit: CommandFilter, pairsplit, root - paircreate: CommandFilter, paircreate, root - pairdisplay: CommandFilter, pairdisplay, root - pairevtwait: CommandFilter, pairevtwait, root - horcmstart.sh: CommandFilter, horcmstart.sh, root - horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root - horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr - - # cinder/volume/drivers/hitachi/hbsd_snm2.py - auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman - auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref - auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef - aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1 - auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn - auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap - autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap - aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol - auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd - auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel - auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize - auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser - autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef - autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt - autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini - auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi - audppool: EnvFilter, env, root, 
LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool - aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal - aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon - - # cinder/volume/drivers/hgst.py - vgc-cluster: CommandFilter, vgc-cluster, root - - # cinder/volume/drivers/vzstorage.py - pstorage-mount: CommandFilter, pstorage-mount, root - pstorage: CommandFilter, pstorage, root - ploop: CommandFilter, ploop, root - - # initiator/connector.py: - drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid - ceph: - override: - append: - monitors: [] - admin_keyring: null - pools: - backup: - replication: 3 - crush_rule: replicated_rule - chunk_size: 8 - app_name: cinder-backup - cinder.volumes: - replication: 3 - crush_rule: replicated_rule - chunk_size: 8 - app_name: cinder-volume + backends: + lvmdriver-1: + image_volume_cache_enabled: true + iscsi_iotype: fileio + iscsi_num_targets: 100 + lvm_type: default + target_helper: tgtadm + target_port: 3260 + target_protocol: iscsi + volume_backend_name: LVM_iSCSI + volume_clear: zero + volume_driver: cinder_rxt.rackspace.RXTLVM + volume_group: cinder-volumes-1 cinder: DEFAULT: - storage_availability_zone: az1 - default_availability_zone: az1 allow_availability_zone_fallback: true - scheduler_default_filters: AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter - volume_usage_audit_period: hour - resource_query_filters_file: /etc/cinder/resource_filters.json - log_config_append: /etc/cinder/logging.conf - use_syslog: false - use_stderr: true - enable_v1_api: false - enable_v2_api: false - volume_name_template: "%s" - osapi_volume_workers: 8 - glance_api_version: 2 - os_region_name: RegionOne - host: cinder-volume-worker - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. 
- osapi_volume_listen_port: null - enabled_backends: "lvmdriver-1" - default_volume_type: "lvmdriver-1" - # NOTE(portdirect): "cinder.backup.drivers.ceph" and - # "cinder.backup.drivers.posix" also supported - # NOTE(rchurch): As of Stein, drivers by class name are required - # - cinder.backup.drivers.swift.SwiftBackupDriver - # - cinder.backup.drivers.ceph.CephBackupDriver - # - cinder.backup.drivers.posix.PosixBackupDriver - backup_driver: "cinder.backup.drivers.swift.SwiftBackupDriver" + backup_compression_algorithm: zstd backup_swift_auth: per_user backup_swift_auth_version: 3 - backup_compression_algorithm: zstd - # # Backup: Ceph RBD options - # backup_ceph_conf: "/etc/ceph/ceph.conf" - # backup_ceph_user: cinderbackup - # backup_ceph_pool: cinder.backups - # Backup: Posix options - backup_posix_path: /var/lib/cinder/backup - auth_strategy: keystone - # Internal tenant id - internal_project_name: internal_cinder - internal_user_name: internal_cinder + default_availability_zone: az1 + default_volume_type: lvmdriver-1 + enabled_backends: lvmdriver-1 + osapi_volume_workers: 2 rootwrap_config: /etc/cinder/rootwrap.conf - use_multipath_for_image_xfer: False #Add Cinder Multipath support for image xfer - database: - max_retries: -1 + scheduler_default_filters: 'AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter' + storage_availability_zone: az1 + use_multipath_for_image_xfer: false barbican: barbican_endpoint_type: internal key_manager: backend: barbican keystone_authtoken: - service_token_roles: service - service_token_roles_required: true - auth_version: v3 auth_type: password + auth_version: v3 memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true service_type: volumev3 - nova: - auth_type: password - auth_version: v3 - interface: internal - oslo_policy: - policy_file: /etc/cinder/policy.yaml - oslo_concurrency: - lock_path: /tmp/cinder - oslo_messaging_notifications: - driver: messagingv2 - oslo_middleware: - enable_proxy_headers_parsing: true oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + kombu_reconnect_delay: 0.5 rabbit_ha_queues: false + rabbit_interval_max: 10 rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! 
- # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly - rabbit_interval_max: 10 - # Send more frequent heartbeats and fail unhealthy nodes faster - # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 - # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 - heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down - # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html - # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 - kombu_reconnect_delay: 0.5 - coordination: - backend_url: file:///var/lib/cinder/coordination - service_user: - auth_type: password - send_service_user_token: true + cinder_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 + enable_iscsi: true logging: - loggers: - keys: - - root - - cinder - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default logger_root: - level: INFO handlers: - stdout - logger_cinder: level: INFO - handlers: - - stdout - qualname: cinder - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" rabbitmq: policies: [] - backends: - # Those options will be written to backends.conf as-is. 
- lvmdriver-1: - volume_group: cinder-volumes-1 - volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name: LVM_iSCSI - volume_clear: zero - volume_driver: cinder_rxt.rackspace.RXTLVM - lvm_type: default - image_volume_cache_enabled: True - iscsi_iotype: fileio - iscsi_num_targets: 100 - target_protocol: iscsi - target_helper: tgtadm - target_port: 3260 - rally_tests: - run_tempest: false - clean_up: | - VOLUMES=$(openstack volume list -f value | grep -e "^s_rally_" | awk '{ print $1 }') - if [ -n "$VOLUMES" ]; then - echo $VOLUMES | xargs openstack volume delete - fi - tests: - CinderVolumes.create_and_delete_volume: - - args: - size: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - - args: - size: - max: 5 - min: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - resource_filters: - volume: - - name - - status - - metadata - - bootable - - migration_status - - availability_zone - - group_id - backup: - - name - - status - - volume_id - snapshot: - - name - - status - - volume_id - - metadata - - availability_zone - group: [] - group_snapshot: - - status - - group_id - attachment: - - volume_id - - status - - instance_id - - attach_status - message: - - resource_uuid - - resource_type - - event_id - - request_id - - message_level - pool: - - name - - volume_type - volume_type: [] - enable_iscsi: true -backup: - # external_ceph_rbd: - # enabled: false - # admin_keyring: null - # configmap: null - # conf: - # global: null - # osd: null - posix: - volume: - class_name: general - size: 10Gi dependencies: - dynamic: - common: - local_image_registry: - jobs: - - cinder-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: api: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity backup: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - # - cinder-backup-storage-init - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - backup_storage_init: - jobs: null - bootstrap: - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - pod: - - requireSameNode: false - labels: - application: cinder - component: volume - clean: - jobs: null - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: null - services: - - endpoint: internal - service: oslo_db - ks_endpoints: - jobs: - - cinder-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - service: oslo_messaging - endpoint: internal scheduler: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - storage_init: - jobs: null - tests: - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume volume: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: identity - - 
endpoint: internal - service: volume volume_usage_audit: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - create_internal_tenant: - services: - - endpoint: internal - service: identity - -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: cinder-keystone-admin - cinder: cinder-keystone-user - # test: cinder-keystone-test - oslo_db: - admin: mariadb - cinder: cinder-db-password - rbd: - backup: cinder-backup-rbd-keyring - volume: cinder-volume-rbd-keyring - volume_external: cinder-volume-external-rbd-keyring - oslo_messaging: - admin: rabbitmq-default-user - cinder: cinder-rabbitmq-user - tls: - volume: - api: - public: cinder-tls-public - internal: cinder-tls-api - oci_image_registry: - cinder: cinder-oci-image-registry -# We use a different layout of the endpoints here to account for versioning -# this swaps the service name and type, and should be rolled out to other -# services. endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - cinder: - username: cinder - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - cinder: - role: admin,service - region_name: RegionOne - username: cinder - password: password - project_name: service - user_domain_name: service - project_domain_name: service - # test: - # role: admin - # region_name: RegionOne - # username: cinder-test - # password: password - # project_name: test - # user_domain_name: service - # project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http port: api: default: 5000 - public: 80 internal: 5000 + public: 80 service: 5000 image: - name: glance - hosts: - default: glance-api - public: glance - host_fqdn_override: - default: null - path: - default: null - scheme: - default: http port: api: default: 9292 - public: 80 internal: 9292 + public: 80 service: 9292 - volume: - name: cinder + oslo_db: + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local + hosts: + default: mariadb-cluster-primary + oslo_cache: + host_fqdn_override: + default: memcached.openstack.svc.cluster.local hosts: - default: cinder-api - public: cinder + default: memcached + oslo_messaging: host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: '/v1/%(tenant_id)s' - scheme: - default: 'http' + default: rabbitmq.openstack.svc.cluster.local + hosts: + default: rabbitmq-nodes + volume: port: api: default: 8776 - public: 80 internal: 8776 + public: 80 service: 
8776 volumev2: - name: cinderv2 - hosts: - default: cinder-api - public: cinder - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: '/v2/%(tenant_id)s' - scheme: - default: 'http' port: api: default: 8776 - public: 80 internal: 8776 + public: 80 service: 8776 volumev3: - name: cinderv3 - hosts: - default: cinder-api - public: cinder - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: '/v3/%(tenant_id)s' - scheme: - default: 'http' port: api: default: 8776 - public: 80 internal: 8776 + public: 80 service: 8776 - oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - cinder: - username: cinder - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /cinder - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - secret: - tls: - internal: rabbitmq-tls-direct - cinder: - username: cinder - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes - host_fqdn_override: - default: rabbitmq.openstack.svc.cluster.local - path: /cinder - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. - memcache_secret_key: null - hosts: - default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 - fluentd: - namespace: fluentbit - name: fluentd - hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns - host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress - hosts: - default: ingress - port: - ingress: - default: 80 - -network_policy: - cinder: - ingress: - - {} - egress: - - {} - -# NOTE(helm_hook): helm_hook might break for helm2 binary. -# set helm3_hook: false when using the helm2 binary. 
-helm3_hook: true - -tls: - identity: false - oslo_messaging: false - oslo_db: false manifests: - certificates: false - configmap_bin: true - configmap_etc: true - cron_volume_usage_audit: true - deployment_api: true deployment_backup: false - deployment_scheduler: true deployment_volume: false ingress_api: false - job_backup_storage_init: true job_bootstrap: false - job_clean: true - job_create_internal_tenant: true job_db_init: false - job_image_repo_sync: true job_rabbit_init: false - job_db_sync: true - job_db_drop: false - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true job_storage_init: false - pdb_api: true pod_rally_test: false - pvc_backup: true - network_policy: false secret_db: false secret_ingress_tls: false - secret_keystone: true secret_rabbitmq: false - secret_registry: true - service_api: true service_ingress_api: false diff --git a/bin/install-cinder.sh b/bin/install-cinder.sh index 75da36a3..cf286b73 100755 --- a/bin/install-cinder.sh +++ b/bin/install-cinder.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/cinder" BASE_OVERRIDES="/opt/genestack/base-helm-configs/cinder/cinder-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install cinder ./cinder \ +HELM_CMD="helm upgrade --install cinder openstack-helm/cinder --version 2024.2.409+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -33,8 +31,9 @@ HELM_CMD+=" --set endpoints.oslo_messaging.auth.cinder.password=\"\$(kubectl --n HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args cinder/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/releasenotes/notes/cinder-chart-3e9c76938ef9ebe8.yaml b/releasenotes/notes/cinder-chart-3e9c76938ef9ebe8.yaml new file mode 100644 index 00000000..6f5927f0 --- /dev/null +++ b/releasenotes/notes/cinder-chart-3e9c76938ef9ebe8.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The cinder chart is now installed from the online OpenStack-Helm (OSH) + repository instead of a vendored submodule, which allows the chart to be + updated more frequently and keeps it aligned with upstream. Upgrading to + this chart may require changes to the deployment configuration. A simple + upgrade can be performed by running the following commands: + + .. code-block:: shell + + helm -n openstack uninstall cinder + kubectl -n openstack delete -f /etc/genestack/kustomize/cinder/base/cinder-rabbitmq-queue.yaml + /opt/genestack/bin/install-cinder.sh + + This procedure should have no impact on running VMs, but it should still be + performed during a maintenance window.
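As a follow-up to the upgrade steps in the release note above, a couple of read-only checks can confirm that the reinstalled release is healthy. This is only a sketch: it assumes the openstack namespace used throughout this change and the standard ``application: cinder`` pod label normally applied by the OpenStack-Helm chart.

.. code-block:: shell

   # List releases in the namespace; the CHART column should show the
   # openstack-helm cinder chart version pinned in install-cinder.sh.
   helm -n openstack list

   # Check that the cinder pods were recreated and are running
   # (assumes the chart's standard "application: cinder" label).
   kubectl -n openstack get pods -l application=cinder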