From 33958ccb7aa8156f0c30382fa3a566787e7e311d Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Wed, 26 Feb 2025 16:57:19 -0600 Subject: [PATCH] chore(octavia): convert helm chart from submodule to repo (#833) This change removes the need to carry the openstack-helm charts for the purposes of providing a octavia deployment. The base helm files have been updated and simplified, reducing the values we carry to only what we need. Related Issue: #809 Signed-off-by: Kevin Carter --- .../octavia/octavia-helm-overrides.yaml | 748 ++---------------- .../octavia/base/kustomization.yaml | 58 +- bin/install-octavia.sh | 5 +- .../octavia-flavor-and-flavorprofile-guide.md | 8 + .../notes/octavia-chart-4471a89ed3631d45.yaml | 25 + 5 files changed, 127 insertions(+), 717 deletions(-) create mode 100644 releasenotes/notes/octavia-chart-4471a89ed3631d45.yaml diff --git a/base-helm-configs/octavia/octavia-helm-overrides.yaml b/base-helm-configs/octavia/octavia-helm-overrides.yaml index 45f29d0b..3d6dcdac 100644 --- a/base-helm-configs/octavia/octavia-helm-overrides.yaml +++ b/base-helm-configs/octavia/octavia-helm-overrides.yaml @@ -1,605 +1,170 @@ --- -release_group: null - -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - worker: - node_selector_key: openstack-control-plane - node_selector_value: enabled - housekeeping: - node_selector_key: openstack-control-plane - node_selector_value: enabled - health_manager: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - images: tags: - test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" - bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - octavia_db_sync: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_api: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_worker: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_housekeeping: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_health_manager: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_health_manager_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - openvswitch_vswitchd: docker.io/kolla/centos-source-openvswitch-vswitchd:rocky - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -bootstrap: - enabled: true - ks_user: admin - script: | - openstack role create --or-show load-balancer_admin - openstack role create --or-show load-balancer_observer - openstack role create --or-show load-balancer_global_observer - openstack role create --or-show load-balancer_quota_admin - openstack role create --or-show load-balancer_member - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - 
nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30826 + bootstrap: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + db_drop: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + db_init: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + dep_check: 'quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0' + image_repo_sync: 'quay.io/rackspace/rackerlabs-docker:17.07.0' + ks_endpoints: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + ks_service: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + ks_user: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + octavia_api: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_db_sync: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_health_manager: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_health_manager_init: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + octavia_housekeeping: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_worker: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + openvswitch_vswitchd: 'docker.io/kolla/centos-source-openvswitch-vswitchd:rocky' + rabbit_init: 'quay.io/rackspace/rackerlabs-rabbitmq:3.13-management' + test: 'quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0' dependencies: - dynamic: - common: - local_image_registry: - jobs: - - heat-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: api: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - worker: + db_sync: + jobs: [] + health_manager: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - - endpoint: internal - service: load_balancer housekeeping: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - - endpoint: internal - service: load_balancer - health_manager: + worker: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - - endpoint: internal - service: load_balancer - db_init: - services: - - endpoint: internal - service: oslo_db - db_sync: - jobs: [] - services: - - endpoint: internal - service: oslo_db - ks_endpoints: - jobs: - - octavia-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - endpoint: internal - service: oslo_messaging - image_repo_sync: - services: - - endpoint: internal - 
service: local_image_registry conf: + logging: + logger_root: + handlers: + - stdout + level: INFO octavia: - DEFAULT: - log_config_append: /etc/octavia/logging.conf api_settings: default_provider_driver: amphora enabled_provider_drivers: >- ovn: "The Octavia OVN driver", amphora: "The Octavia Amphora driver" - api_handler: queue_producer - bind_host: 0.0.0.0 + certificates: + endpoint_type: internalURL + cinder: + endpoint_type: internalURL + valid_interfaces: internal + controller_worker: + loadbalancer_topology: ACTIVE_STANDBY + workers: 4 driver_agent: enabled_provider_agents: ovn - database: - max_retries: -1 - health_manager: - bind_port: 5555 - bind_ip: 0.0.0.0 - controller_ip_port_list: 0.0.0.0:5555 - heartbeat_key: insecure + glance: + endpoint_type: internalURL + valid_interfaces: internal keystone_authtoken: service_token_roles: service service_token_roles_required: true auth_type: password auth_version: v3 memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true service_type: load-balancer valid_interfaces: internal - certificates: - endpoint_type: internalURL - ca_private_key_passphrase: foobar - ca_private_key: /etc/octavia/certs/private/cakey.pem - ca_certificate: /etc/octavia/certs/ca_01.pem - cinder: - endpoint_type: internalURL - valid_interfaces: internal - glance: - endpoint_type: internalURL - valid_interfaces: internal neutron: endpoint_type: internalURL valid_interfaces: internal - haproxy_amphora: - server_ca: /etc/octavia/certs/ca_01.pem - client_cert: /etc/octavia/certs/client.pem - base_path: /var/lib/octavia - base_cert_dir: /var/lib/octavia/certs - controller_worker: - amp_image_owner_id: null - amp_secgroup_list: null - amp_flavor_id: null - amp_boot_network_list: null - amp_ssh_key_name: octavia_ssh_key - amp_image_tag: amphora - network_driver: allowed_address_pairs_driver - compute_driver: compute_nova_driver - amphora_driver: amphora_haproxy_rest_driver - workers: 8 - amp_active_retries: 100 - amp_active_wait_sec: 2 - loadbalancer_topology: ACTIVE_STANDBY - oslo_messaging: - topic: octavia_prov - rpc_thread_pool_size: 2 - oslo_messaging_notifications: - driver: messagingv2 + nova: + enable_anti_affinity: 'True' + endpoint_type: internalURL oslo_concurrency: lock_path: /tmp/octavia oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + kombu_reconnect_delay: 0.5 rabbit_ha_queues: false + rabbit_interval_max: 10 rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! 
- # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly - rabbit_interval_max: 10 - # Send more frequent heartbeats and fail unhealthy nodes faster - # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 - # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 - heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down - # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html - # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 - kombu_reconnect_delay: 0.5 - house_keeping: - load_balancer_expiry_age: 3600 - amphora_expiry_age: 3600 + ovn: + ovn_nb_connection: 'tcp:127.0.0.1:6641' + ovn_sb_connection: 'tcp:127.0.0.1:6642' service_auth: - auth_type: password - cafile: "" - auth_version: v3 - memcache_security_strategy: ENCRYPT insecure: true - ovn: - ovn_sb_connection: tcp:127.0.0.1:6642 - ovn_nb_connection: tcp:127.0.0.1:6641 - nova: - enable_anti_affinity: "True" - endpoint_type: internalURL - - logging: - loggers: - keys: - - root - - octavia - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default - logger_root: - level: INFO - handlers: - - stdout - logger_octavia: - level: WARNING - handlers: - - stdout - qualname: octavia - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - formatter_default: - format: "%(message)s" - rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "octavia" - name: "ha_ttl_octavia" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '(notifications)\.' 
- -secrets: - identity: - admin: octavia-keystone-admin - octavia: octavia-keystone-user - test: octavia-keystone-test - oslo_db: - admin: octavia-db-admin - octavia: octavia-db-user - oslo_messaging: - admin: octavia-rabbitmq-admin - octavia: octavia-rabbitmq-user - tls: - load_balancer: - api: - public: octavia-tls-public - oci_image_registry: - octavia: octavia-oci-image-registry + octavia_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - octavia: - username: octavia - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - octavia: - role: admin - region_name: RegionOne - username: octavia - password: password - project_name: service - user_domain_name: service - project_domain_name: service - test: - role: admin - region_name: RegionOne - username: test - password: password - project_name: test - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: /v3 - scheme: - default: http - service: http port: api: admin: 5000 default: 5000 - public: 80 - # NOTE(portdirect): to retain portability across images, and allow - # running under a unprivileged user simply, we default to a port > 1000. internal: 5000 + public: 80 service: 5000 + scheme: + default: http + service: http load_balancer: - name: octavia hosts: - internal: octavia-api default: octavia - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null + internal: octavia-api + port: + api: + default: 9876 + internal: 9876 + public: 80 + service: 9876 scheme: default: http service: http + network: port: api: - default: 9876 + default: 9696 + internal: 9696 public: 80 - internal: 9876 - service: 9876 + service: 9696 + scheme: + default: http + service: http oslo_db: - auth: - admin: - username: root - password: password - octavia: - username: octavia - password: password + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /octavia - scheme: mysql+pymysql - port: - mysql: - default: 3306 oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
- memcache_secret_key: null + host_fqdn_override: + default: memcached.openstack.svc.cluster.local hosts: default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - octavia: - username: octavia - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes host_fqdn_override: default: rabbitmq.openstack.svc.cluster.local - path: /octavia - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - network: - name: neutron hosts: - default: neutron-server - public: neutron - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null - scheme: - default: 'http' - service: 'http' - port: - api: - default: 9696 - public: 80 - internal: 9696 - service: 9696 - fluentd: - namespace: fluentbit - name: fluentd - hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 + default: rabbitmq-nodes pod: - user: - octavia: - uid: 42424 affinity: anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname weight: default: 10 - tolerations: - octavia: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule mounts: octavia_api: init_container: null @@ -619,172 +184,15 @@ pod: volumes: - name: pod-run-octavia emptyDir: {} - octavia_housekeeping: - init_container: null - octavia_housekeeping: - volumeMounts: - volumes: - octavia_health_manager: - init_container: null - octavia_health_manager: - volumeMounts: - volumes: - octavia_bootstrap: - init_container: null - octavia_bootstrap: - volumeMounts: - volumes: octavia_driver_agent: init_container: null octavia_bootstrap: volumeMounts: volumes: - replicas: - api: 1 - worker: 1 - housekeeping: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - daemonsets: - pod_replacement_strategy: RollingUpdate - health_manager: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - worker: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - housekeeping: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - health_manager: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: 
"4096Mi" - ks_service: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -network_policy: - octavia: - ingress: - - {} manifests: - configmap_bin: true - configmap_etc: true - daemonset_health_manager: true - deployment_api: true - deployment_worker: true - deployment_housekeeping: true ingress_api: false - job_bootstrap: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true job_rabbit_init: false - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true - pdb_api: true - pod_rally_test: false - network_policy: false - secret_credential_keys: true - secret_db: true secret_ingress_tls: false - secret_keystone: true - secret_rabbitmq: true - secret_registry: true service_ingress_api: false - service_api: true diff --git a/base-kustomize/octavia/base/kustomization.yaml b/base-kustomize/octavia/base/kustomization.yaml index 8b8269df..5a690606 100644 --- a/base-kustomize/octavia/base/kustomization.yaml +++ b/base-kustomize/octavia/base/kustomization.yaml @@ -19,66 +19,32 @@ resources: # To run the OVN driver, the octavia-api container must have an agent container within the same pod. patches: + - target: + kind: Deployment + name: octavia-api + patch: |- + - op: replace + path: /spec/template/spec/containers/0/securityContext + value: + runAsUser: 0 - target: kind: Deployment name: octavia-api patch: |- - op: add - path: /spec/template/spec/containers + path: /spec/template/spec/containers/- value: - - name: octavia-agent - image: image-octavia-ovn - imagePullPolicy: IfNotPresent - securityContext: - runAsUser: 0 command: - octavia-driver-agent - --config-dir - /etc/octavia/octavia.conf - volumeMounts: - - name: pod-etc-octavia - mountPath: /etc/octavia - - name: octavia-bin - mountPath: /tmp/octavia-api.sh - subPath: octavia-api.sh - readOnly: true - - name: octavia-etc - mountPath: /etc/octavia/octavia.conf - subPath: octavia.conf - readOnly: true - - name: octavia-etc - mountPath: /etc/octavia/logging.conf - subPath: logging.conf - readOnly: true - - mountPath: /var/run/octavia - name: pod-run-octavia - - name: octavia-api image: image-octavia-ovn imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 - command: - - /tmp/octavia-api.sh - - start - lifecycle: - preStop: - exec: - command: - - /tmp/octavia-api.sh - - stop - ports: - - name: o-api - containerPort: 9876 - readinessProbe: - httpGet: - scheme: HTTP - path: / - port: 9876 - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 9876 + name: octavia-agent + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File volumeMounts: - name: pod-etc-octavia mountPath: /etc/octavia diff --git a/bin/install-octavia.sh b/bin/install-octavia.sh index 11c77710..fcc359ae 100755 --- a/bin/install-octavia.sh +++ b/bin/install-octavia.sh @@ -6,7 +6,7 @@ BASE_OVERRIDES="/opt/genestack/base-helm-configs/octavia/octavia-helm-overrides. 
 pushd /opt/genestack/submodules/openstack-helm || exit 1
 
-HELM_CMD="helm upgrade --install octavia ./octavia \
+HELM_CMD="helm upgrade --install octavia openstack-helm/octavia --version 2024.2.30+13651f45-628a320c \
     --namespace=openstack \
     --timeout 120m"
 
@@ -39,6 +39,9 @@ HELM_CMD+=" --set conf.octavia.ovn.ovn_sb_connection=\"tcp:\$(kubectl --namespac
 HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh"
 HELM_CMD+=" --post-renderer-args octavia/overlay $*"
 
+helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm
+helm repo update
+
 echo "Executing Helm command:"
 echo "${HELM_CMD}"
 eval "${HELM_CMD}"
diff --git a/docs/octavia-flavor-and-flavorprofile-guide.md b/docs/octavia-flavor-and-flavorprofile-guide.md
index 019ef284..cef93918 100644
--- a/docs/octavia-flavor-and-flavorprofile-guide.md
+++ b/docs/octavia-flavor-and-flavorprofile-guide.md
@@ -74,6 +74,14 @@ You can extend the flavor profile with additional provider capabilities as neede
 $ openstack loadbalancer flavorprofile set --flavor-data '{"loadbalancer_topology": "ACTIVE_STANDBY", "amp_image_tag": "amphora-image-v2", "sriov_vip": false}' 5f4d2c7c-e294-4a9c-b97a-54a2b97a17a5
 ```
 
+!!! note "Loadbalancer Topologies"
+
+    The `loadbalancer_topology` field in the flavor data specifies the number of Amphora instances per
+    load balancer. The possible values are:
+
+    - `SINGLE`: One Amphora per load balancer.
+    - `ACTIVE_STANDBY`: Two Amphorae per load balancer.
+
 ## Flavors
 
 To create a flavor using the previously defined flavor profile, run the following command:
diff --git a/releasenotes/notes/octavia-chart-4471a89ed3631d45.yaml b/releasenotes/notes/octavia-chart-4471a89ed3631d45.yaml
new file mode 100644
index 00000000..d1f31199
--- /dev/null
+++ b/releasenotes/notes/octavia-chart-4471a89ed3631d45.yaml
@@ -0,0 +1,36 @@
+---
+deprecations:
+  - |
+    The octavia chart now uses the online OSH helm repository. This change
+    allows the octavia chart to be updated more frequently and keeps it
+    aligned with the upstream OpenStack-Helm project. Upgrading to this
+    chart may require changes to the deployment configuration. A simple
+    update can be performed by running the following commands:
+
+    .. code-block:: shell
+
+      helm -n openstack uninstall octavia
+      kubectl -n openstack delete -f /etc/genestack/kustomize/octavia/base/octavia-rabbitmq-queue.yaml
+      /opt/genestack/bin/install-octavia.sh
+
+    Depending on the state of the Octavia deployment, it may be necessary to
+    rerun the Ansible playbook for the Octavia deployment. Note that this
+    playbook drops a marker file ``/tmp/octavia_hm_controller_ip_port_list``
+    which may need to be cleaned up before rerunning the playbook.
+
+    https://docs.rackspacecloud.com/openstack-octavia/#run-the-playbook
+
+    That said, if the deployment was healthy before, the cleanup steps should
+    not be needed. This operation should have no operational impact on running
+    VMs but should be performed during a maintenance window.
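+
+    If the playbook does have to be rerun, the only cleanup needed beforehand
+    is removing the marker file. A minimal sketch follows; the playbook name
+    shown is illustrative, so use the invocation from the linked documentation:
+
+    .. code-block:: shell
+
+      # Remove the marker file left behind by a previous playbook run.
+      rm -f /tmp/octavia_hm_controller_ip_port_list
+      # Illustrative playbook name -- use the exact invocation from the docs.
+      ansible-playbook octavia.yaml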