From 4731d66dede99b5fb5dcafafb6cd44ce3d9e62f3 Mon Sep 17 00:00:00 2001
From: Bohdan Dobrelia
Date: Fri, 10 May 2024 13:21:00 +0200
Subject: [PATCH] Multi-cell adoption

Keep renaming of the 'default' cell consistent for single-cell and multi-cell cases:

* Default becomes cellX (or it can be imported as is, for a multi-cell case only)
* cell1 becomes mapped to the openstack-cell1 osdp node set
* cell2 becomes mapped to the openstack-cell2 osdp node set, etc.
* cellX (X=3 here) becomes mapped to openstack-cell3. Alternatively, the default
  cell retains its name for the openstack-default osdpns mapping

Evaluate podified MariaDB passwords for cells from osp-secret to align the tests
with the documented commands. Remove the no longer needed podified DB password
variable.

Make ansible and shell variables compute-cells aware. Split EDPM nodes into
compute cells by mapping them 1:1 to dataplane node sets. Rework vars and secrets
YAML values for the source and EDPM nodes to avoid confusing the different naming
schemes for cells in OSP/TripleO and RHOSO.

Use the edpm_nodes var to describe computes for each cell, instead of the static
host and ip vars that only worked for single-cell standalone, or multi-node
single-cell cases. Also explain EDPM net config requirements in vars.sample, when
it is used outside of ci-framework (local deployments).

Remove edpm_computes vars that are no longer used after moving the stopping of
control-plane tripleo services into edpm-ansible.

Remove the cached fact for the pulled OSP configuration, as it can no longer be
generated in a multi-cell setup, where the related shell variables become bash
arrays.

Simplify ENV headers management by collecting them in a single place.

Provide a variable to define the source cloud Ironic topology, for any cells with
Ironic services.

Align nova/libvirt and related services ordering in the lists of services defined
in multiple places, with those specified in VA.

Add a missing step in the fast forward upgrade guide to complete the adoption of
the remaining dataplane services.

Align the names in the tests to follow the documented steps, to make the
corresponding code easily discoverable.

Adjust storage/storageRequests values to make them a better fit for multi-cell
test scenarios. Also provide values in docs and add a comment to adjust them as
needed.
Stop ovn services only if active, or not missing (like on the cell controllers) Signed-off-by: Bohdan Dobrelia --- .../assemblies/development_environment.adoc | 137 ++++++- docs_dev/assemblies/tests.adoc | 8 +- ...ng-compute-services-to-the-data-plane.adoc | 369 +++++++++++++----- ...-networker-services-to-the-data-plane.adoc | 1 - .../proc_adopting-the-compute-service.adoc | 116 ++++-- .../proc_configuring-data-plane-nodes.adoc | 3 +- .../proc_deploying-backend-services.adoc | 115 +++--- ...rating-databases-to-mariadb-instances.adoc | 324 +++++++++------ ...t-forward-upgrade-on-compute-services.adoc | 137 +++++-- ...pology-specific-service-configuration.adoc | 144 +++++-- ...cture-management-and-compute-services.adoc | 20 +- .../proc_stopping-openstack-services.adoc | 24 +- tests/roles/backend_services/tasks/main.yaml | 23 +- .../templates/openstack_control_plane.j2 | 39 +- .../roles/common_defaults/defaults/main.yaml | 162 +++++++- .../control_plane_rollback/defaults/main.yaml | 1 + .../dataplane_adoption/defaults/main.yaml | 48 +-- .../roles/dataplane_adoption/tasks/main.yaml | 282 +++++++++---- .../dataplane_adoption/tasks/nova_ffu.yaml | 107 ++--- tests/roles/dataplane_adoption/vars/rhev.yaml | 77 ++-- .../tasks/main.yaml | 112 +++--- tests/roles/mariadb_copy/defaults/main.yaml | 2 - .../mariadb_copy/tasks/env_vars_dst.yaml | 27 -- .../mariadb_copy/tasks/env_vars_src.yaml | 14 - tests/roles/mariadb_copy/tasks/main.yaml | 41 +- .../mariadb_copy/tasks/mariadb_verify.yaml | 8 - .../mariadb_copy/templates/dump_dbs.bash | 22 +- .../mariadb_copy/templates/post_checks.bash | 47 ++- .../mariadb_copy/templates/pre_checks.bash | 13 +- .../mariadb_copy/templates/restore_dbs.bash | 111 ++++-- tests/roles/nova_adoption/defaults/main.yaml | 93 +++-- .../nova_adoption/tasks/nova_ironic.yaml | 9 +- .../nova_adoption/tasks/nova_libvirt.yaml | 23 +- tests/roles/nova_adoption/tasks/wait.yaml | 2 +- tests/roles/ovn_adoption/tasks/main.yaml | 18 +- tests/roles/pcp_cleanup/tasks/main.yaml | 5 + .../defaults/main.yaml | 2 - .../stop_remaining_services/tasks/main.yaml | 4 - tests/secrets.sample.yaml | 44 ++- tests/vars.sample.yaml | 87 ++++- 40 files changed, 1914 insertions(+), 907 deletions(-) delete mode 100644 tests/roles/mariadb_copy/tasks/env_vars_dst.yaml delete mode 100644 tests/roles/mariadb_copy/tasks/env_vars_src.yaml diff --git a/docs_dev/assemblies/development_environment.adoc b/docs_dev/assemblies/development_environment.adoc index d7a49973c..775b17200 100644 --- a/docs_dev/assemblies/development_environment.adoc +++ b/docs_dev/assemblies/development_environment.adoc @@ -187,6 +187,102 @@ https://openstack-k8s-operators.github.io/data-plane-adoption/dev/#_reset_the_en ''' +== Deploying TripleO With Multiple Cells + +A TripleO Standalone setup creates only a single Nova v2 cell, with a combined controller and compute services on it. +In order to deploy multiple compute cells for adoption testing (without Ceph), create a 5 VMs, with the following requirements met: + +* Named `edpm-compute-0` .. `edpm-compute-4`. +* Running RHEL 9.2, with RHOSP 17.1 repositiries configured. +* Can login via SSH without a password as the root user, from the hypervisor host. +* User `zuul` is created, and can sudo without a password, and login via SSH without a password, from the hypervisor host. 
+* User `zuul` can log in to the `edpm-compute-1`, `edpm-compute-2`, `edpm-compute-3`, `edpm-compute-4` nodes via SSH without a password, from the `edpm-compute-0` node,
+by using the generated `/home/zuul/.ssh/id_rsa` private key.
+* Red Hat registry credentials are exported on the hypervisor host.
+
+Adjust the following commands for the repositories configuration tool of your choice:
+
+[,bash]
+----
+export RH_REGISTRY_USER=""
+export RH_REGISTRY_PWD=""
+
+DEFAULT_CELL_NAME="cell3" <1>
+RENAMED_CELLS="cell1 cell2 $DEFAULT_CELL_NAME"
+
+cd ~/install_yamls/devsetup
+cat << EOF > /tmp/osp17_repos
+# Use a tool of your choice:
+# 1. Rhos-release example steps are only available from the internal Red Hat network
+# ... skipping download and install steps ...
+# sudo rhos-release -x
+# sudo rhos-release 17.1
+
+# 2. Subscription-manager example steps require an active registration
+# subscription-manager release --set=9.2
+# subscription-manager repos --disable=*
+# sudo subscription-manager repos \
+#   --enable=rhel-9-for-x86_64-baseos-eus-rpms \
+#   --enable=rhel-9-for-x86_64-appstream-eus-rpms \
+#   --enable=rhel-9-for-x86_64-highavailability-eus-rpms \
+#   --enable=openstack-17.1-for-rhel-9-x86_64-rpms \
+#   --enable=rhceph-6-tools-for-rhel-9-x86_64-rpms \
+#   --enable=fast-datapath-for-rhel-9-x86_64-rpms
+
+# firstboot commands
+sudo dnf install -y git curl wget podman python3-tripleoclient openvswitch3.1 NetworkManager-initscripts-updown
+sudo dnf install -y util-linux cephadm driverctl lvm2 jq nftables iptables-nft openstack-heat-agents \
+  os-net-config python3-libselinux python3-pyyaml rsync tmpwatch sysstat iproute-tc
+sudo dnf install -y puppet-tripleo puppet-headless
+sudo dnf install -y openstack-selinux
+EOF
+
+export CENTOS_9_STREAM_URL=
+export NTP_SERVER=
+
+export MANILA_ENABLED=false
+export EDPM_COMPUTE_CEPH_ENABLED=false
+export EDPM_COMPUTE_CEPH_NOVA=false
+export EDPM_COMPUTE_CELLS=3
+
+export STANDALONE_EXTRA_CMD="bash -c 'echo \"$RH_REGISTRY_PWD\" > ~/authfile; chmod 0600 ~/authfile; sudo /bin/podman login registry.redhat.io -u \"$RH_REGISTRY_USER\" --password-stdin < ~/authfile'"
+export EDPM_FIRSTBOOT_EXTRA=/tmp/osp17_repos
+export EDPM_TOTAL_NODES=1
+export SKIP_TRIPLEO_REPOS=false
+export EDPM_COMPUTE_NETWORK_IP=192.168.122.1
+export HOST_PRIMARY_RESOLV_CONF_ENTRY=192.168.122.1
+export BASE_DISK_FILENAME="rhel-9-base.qcow2"
+
+EDPM_COMPUTE_SUFFIX=0 IP=192.168.122.100 EDPM_COMPUTE_DISK_SIZE=10 EDPM_COMPUTE_RAM=9 EDPM_COMPUTE_VCPUS=2 make edpm_compute
+EDPM_COMPUTE_SUFFIX=1 IP=192.168.122.103 EDPM_COMPUTE_DISK_SIZE=17 EDPM_COMPUTE_RAM=12 EDPM_COMPUTE_VCPUS=4 make edpm_compute
+EDPM_COMPUTE_SUFFIX=2 IP=192.168.122.106 EDPM_COMPUTE_DISK_SIZE=14 EDPM_COMPUTE_RAM=12 EDPM_COMPUTE_VCPUS=4 make edpm_compute
+EDPM_COMPUTE_SUFFIX=3 IP=192.168.122.107 EDPM_COMPUTE_DISK_SIZE=12 EDPM_COMPUTE_RAM=4 EDPM_COMPUTE_VCPUS=2 make edpm_compute
+EDPM_COMPUTE_SUFFIX=4 IP=192.168.122.109 EDPM_COMPUTE_DISK_SIZE=16 EDPM_COMPUTE_RAM=12 EDPM_COMPUTE_VCPUS=4 make edpm_compute
+
+for n in 0 3 6 7 9; do
+  # workaround for a bad packages installation, if done by firstboot - resulting in rpm -V check failures in tripleo-ansible
+  ssh -o StrictHostKeyChecking=false -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa \
+    root@192.168.122.10${n} dnf install -y openstack-selinux ';' \
+    dnf reinstall -y openstack-selinux
+  ssh -o StrictHostKeyChecking=false -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa \
+    root@192.168.122.10${n} useradd --create-home --shell /bin/bash --groups root zuul ';' \
+    mkdir -p /home/zuul/.ssh
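+  # The commands below distribute the generated private key and the root authorized_keys
+  # to the zuul user on each node, then grant zuul passwordless sudo.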
+  scp -o StrictHostKeyChecking=false -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa \
+    ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.10${n}:/home/zuul/.ssh/id_rsa
+  ssh -o StrictHostKeyChecking=false -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa \
+    root@192.168.122.10${n} cp /root/.ssh/authorized_keys /home/zuul/.ssh/authorized_keys
+  ssh -o StrictHostKeyChecking=false -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa \
+    root@192.168.122.10${n} chown zuul: /home/zuul/.ssh/*
+  ssh -o StrictHostKeyChecking=false -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa \
+    root@192.168.122.10${n} echo "zuul ALL=NOPASSWD:ALL" '>' /etc/sudoers.d/zuul
+done
+
+make tripleo_deploy
+
+for n in 0 1 2 3 4; do make standalone_snapshot EDPM_COMPUTE_SUFFIX=$n; done
+----
+<1> The source cloud default cell takes the new name `$DEFAULT_CELL_NAME`. In a multi-cell adoption scenario, it may either retain its original name `default`, or be created as the last cell.
+
 == Network routing
 
 Route VLAN20 to have access to the MariaDB cluster:
@@ -219,8 +315,10 @@ installing the package and copying the configuration file from the virtual machi
 
 [,bash]
 ----
-alias openstack="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100 OS_CLOUD=standalone openstack"
+OS_CLOUD_NAME=standalone
+alias openstack="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100 OS_CLOUD=$OS_CLOUD_NAME openstack"
 ----
+For a multi-cell environment, set `OS_CLOUD_NAME` to `overcloud`.
 
 === Virtual machine steps
 
@@ -345,15 +443,28 @@ make openstack
 
 == Performing the adoption procedure
 
-To simplify the adoption procedure, copy the deployment passwords that
+To simplify the adoption procedure with additional cells, copy and rename the deployment passwords that
 you use in the
 https://openstack-k8s-operators.github.io/data-plane-adoption/user/#deploying-backend-services_migrating-databases[backend services deployment phase of the data plane adoption].
 
+For a single-cell standalone TripleO deployment:
+[,bash]
+----
+scp -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100:/root/tripleo-standalone-passwords.yaml ~/overcloud-passwords.yaml
+----
+
+Further on, this passwords file is going to be referenced as `TRIPLEO_PASSWORDS[default]`, for a `default` cell name, in terms of TripleO.
+
+For a source cloud deployment with multiple stacks, replace the above command with these:
 [,bash]
 ----
-scp -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100:/root/tripleo-standalone-passwords.yaml ~/
+scp -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa zuul@192.168.122.100:overcloud-deploy/overcloud/overcloud-passwords.yaml ~/
+scp -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa zuul@192.168.122.100:overcloud-deploy/cell1/cell1-passwords.yaml ~/
+scp -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa zuul@192.168.122.100:overcloud-deploy/cell2/cell2-passwords.yaml ~/
 ----
+Note that all compute cells of the source cloud always share the same database and messaging passwords.
+In contrast, a generic split-stack topology allows using different passwords files for its stacks.
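+
+As a quick optional check (a sketch, assuming the passwords files were copied into your home directory as shown above), you can confirm that the stacks indeed share the same database root password; the same `grep`/`awk` pattern is used later during the adoption:
+
+[,bash]
+----
+for f in ~/overcloud-passwords.yaml ~/cell1-passwords.yaml ~/cell2-passwords.yaml; do
+  echo -n "$f: "
+  grep ' MysqlRootPassword:' "$f" | awk -F ': ' '{ print $2; }'
+done
+----
+All three lines should print the same value.
+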
The development environment is now set up, you can go to the https://openstack-k8s-operators.github.io/data-plane-adoption/[Adoption documentation] @@ -371,8 +482,10 @@ Delete the data-plane and control-plane resources from the CRC vm [,bash] ---- -oc delete --ignore-not-found=true --wait=false openstackdataplanedeployment/openstack -oc delete --ignore-not-found=true --wait=false openstackdataplanedeployment/openstack-nova-compute-ffu +for CELL in $(echo $RENAMED_CELLS); do + oc delete --ignore-not-found=true --wait=false openstackdataplanedeployment/openstack-$CELL + oc delete --ignore-not-found=true --wait=false openstackdataplanedeployment/openstack-nova-compute-ffu-$CELL +done oc delete --ignore-not-found=true --wait=false openstackcontrolplane/openstack oc patch openstackcontrolplane openstack --type=merge --patch ' metadata: @@ -391,7 +504,7 @@ oc delete --wait=false pod mariadb-copy-data || true oc delete secret osp-secret || true ---- -Revert the standalone vm to the snapshotted state +Revert the standalone vm(s) to the snapshotted state [,bash] ---- @@ -399,13 +512,25 @@ cd ~/install_yamls/devsetup make standalone_revert ---- +For a multi-cell deployment, change the above command to these: +[,bash] +---- +cd ~/install_yamls/devsetup +for n in 0 1 2 3 4; do make standalone_revert EDPM_COMPUTE_SUFFIX=$n; done +---- + Clean up and initialize the storage PVs in CRC vm [,bash] ---- cd .. for i in {1..3}; do make crc_storage_cleanup crc_storage && break || sleep 5; done +for CELL in $(echo $RENAMED_CELLS); do + oc delete pvc mysql-db-openstack-$CELL-galera-0 --ignore-not-found=true + oc delete pvc persistence-rabbitmq-$CELL-server-0 --ignore-not-found=true +done ---- +Use indexes like `*-0`, `*-1` based on the replica counts configured in `oscp/openstack` CR. ''' diff --git a/docs_dev/assemblies/tests.adoc b/docs_dev/assemblies/tests.adoc index 6300d8609..8945195ad 100644 --- a/docs_dev/assemblies/tests.adoc +++ b/docs_dev/assemblies/tests.adoc @@ -25,9 +25,13 @@ work out of the box. The comments in the YAML files will guide you regarding the expected values. You may want to double check that these variables suit your environment: ** `install_yamls_path` - ** `tripleo_passwords` - ** `controller*_ssh` + ** `controller*_ssh` (for each {OpenStackPreviousInstaller} controller in each Heat stack on the source cloud) + ** `tripleo_passwords` (for each {OpenStackPreviousInstaller} Heat stack on the source cloud) + ** `source_galera_members` (for each cell controller on the source cloud) + ** `source_mariadb_ip` (for each cell controller on the source cloud) + ** `edpm_nodes` (for each cell compute node on the destination) ** `edpm_privatekey_path` + ** `source_ovndb_ip`` ** `timesync_ntp_servers` == Running the tests diff --git a/docs_user/modules/proc_adopting-compute-services-to-the-data-plane.adoc b/docs_user/modules/proc_adopting-compute-services-to-the-data-plane.adoc index 100ff15eb..378cce8ce 100644 --- a/docs_user/modules/proc_adopting-compute-services-to-the-data-plane.adoc +++ b/docs_user/modules/proc_adopting-compute-services-to-the-data-plane.adoc @@ -81,21 +81,55 @@ EOF + * If `neutron-sriov-nic-agent` is running on your {compute_service} nodes, ensure that the physical device mappings match the values that are defined in the `OpenStackDataPlaneNodeSet` custom resource (CR). For more information, see xref:pulling-configuration-from-tripleo-deployment_adopt-control-plane[Pulling the configuration from a {OpenStackPreviousInstaller} deployment]. 
-* You have defined the shell variables to run the script that runs the fast-forward upgrade: +* You have defined the shell variables to run the script that runs the upgrade: + ---- -PODIFIED_DB_ROOT_PASSWORD=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d) -CEPH_FSID=$(oc get secret ceph-conf-files -o json | jq -r '.data."ceph.conf"' | base64 -d | grep fsid | sed -e 's/fsid = //' +$ CEPH_FSID=$(oc get secret ceph-conf-files -o json | jq -r '.data."ceph.conf"' | base64 -d | grep fsid | sed -e 's/fsid = //' -alias openstack="oc exec -t openstackclient -- openstack" -declare -A computes -export computes=( - ["standalone.localdomain"]="192.168.122.100" +$ alias openstack="oc exec -t openstackclient -- openstack" + +$ DEFAULT_CELL_NAME="cell3" <1> +$ RENAMED_CELLS="cell1 cell2 $DEFAULT_CELL_NAME" + +$ declare -A COMPUTES_CELL1 +$ export COMPUTES_CELL1=( <2> + ["standalone.localdomain"]="192.168.122.100" <3> + # ... <4> +) +$ declare -A COMPUTES_CELL2 +$ export COMPUTES_CELL2=( # ... ) +$ declare -A COMPUTES_CELL3 +$ export COMPUTES_CELL3=( + # ... <5> +) +# ... + +$ NODESETS="" +$ for CELL in $(echo $RENAMED_CELLS); do + ref="COMPUTES_$(echo ${CELL}|tr '[:lower:]' '[:upper:]')" + eval names=\${!${ref}[@]} + [ -z "$names" ] && continue <6> + NODESETS="'openstack-${CELL}', $NODESETS" +done +$ NODESETS="[${NODESETS%,*}]" ---- + -** Replace `["standalone.localdomain"]="192.168.122.100"` with the name and IP address of the {compute_service} node. +<1> The source cloud default cell takes a new `$DEFAULT_CELL_NAME`. In a multi-cell adoption scenario, it may either retain its original name `default`, or become created as a last `cell`. +<2> For each cell, adjust <["standalone.localdomain"]="192.168.122.100">, and complete `COMPUTES_CELL_` data with the names and IP addresses of the {compute_service} nodes. +<3> If your deployment has a custom DNS Domain, put it in for FQDN of the nodes. The given values will be used in the dataplane node sets' `spec.nodes..hostName`. +<4> Assign all {compute_service} nodes from the source cloud `cell1` cell into `COMPUTES_CELL1`, and so on. +<5> Assign all {compute_service} nodes from the source cloud `default` cell into `openstack-`, +where `` is the `DEFAULT_CELL_NAME` environment variable value (here, it equals 'cell3'). +<6> Cells not containing compute nodes will be omitted as no node sets for it should be created. + +** A standalone TripleO only creates a default cell, so you should define that instead: ++ +---- +$ DEFAULT_CELL_NAME="cell1" +$ RENAMED_CELLS="cell1" +---- + [NOTE] Do not set a value for the `CEPH_FSID` parameter if the local storage back end is configured by the {compute_service} for libvirt. The storage back end must match the source cloud storage back end. You cannot change the storage back end during adoption. @@ -147,7 +181,7 @@ rm -f id* cd - ---- -. If you use a local storage back end for libvirt, create a `nova-compute-extra-config` service to remove pre-fast-forward workarounds and configure Compute services to use a local storage back end: +. Create a configuration map which should become common for all cells. To configure a local storage back end for libvirt: + [source,yaml] ---- @@ -155,35 +189,24 @@ $ oc apply -f - < + 99-nova-compute-cells-workarounds.conf: | <2> [workarounds] disable_compute_service_check_for_ffu=true EOF ---- + -[NOTE] -The secret `nova-cell-compute-config` auto-generates for each -`cell`. 
You must specify values for the `nova-cell-compute-config` and `nova-migration-ssh-key` parameters for each custom `OpenStackDataPlaneService` CR that is related to the {compute_service}. +<1> The `data` resources in the `ConfigMap` provide cell-specific configuration files. +<2> There is a requirement to index the <*.conf> files from '03' to '99', based on its precedence. +Whereis a <99-*.conf> takes top precedence. Indexes below '03' are reserved for internal use. -. If TLS Everywhere is enabled, append the following content to the `OpenStackDataPlaneService` CR: -+ -[source,yaml] ----- - tlsCerts: - contents: - - dnsnames - - ips - networks: - - ctlplane - issuer: osp-rootca-issuer-internal - caCerts: combined-ca-bundle - edpmServiceType: nova ----- +[NOTE] +You should never delete, nor overwrite, the cell1's default `nova-extra-config` configuration map assigned to its default dataplane service 'nova'. +Adopting a live cloud might require other configurations to carry over for Nova EDPM services stored in that configuration map, without overwriting or losing them. -. If you use a Ceph back end for libvirt, create a `nova-compute-extra-config` service to remove pre-fast-forward upgrade workarounds and configure Compute services to use a Ceph back end: +. To configure a Ceph back end for libvirt: + [source,yaml] ---- @@ -191,10 +214,10 @@ $ oc apply -f - < + - secretRef: + name: nova-$CELL-compute-config <2> + - secretRef: + name: nova-migration-ssh-key <3> + - configMapRef: + name: nova-cells-global-config + optional: true + playbook: osp.edpm.nova + caCerts: combined-ca-bundle + edpmServiceType: nova + containerImageFields: + - NovaComputeImage + - EdpmIscsidImage +EOF + done +---- ++ + +* If TLS Everywhere is enabled, append the following content to the `OpenStackDataPlaneService` CR: ++ +[source,yaml] +---- + tlsCerts: + contents: + - dnsnames + - ips + networks: + - ctlplane + issuer: osp-rootca-issuer-internal + caCerts: combined-ca-bundle + edpmServiceType: nova +---- ++ +<1> To enable a local metadata services for a cell, append a `spec.dataSources.secretRef` to reference +an additional auto-generated `nova-cell-metadata-neutron-config` secret. You should have also set +`spec.nova.template.cellTemplates.cell.metadataServiceTemplate.enable` in the `OpenStackControlPlane/openstack` CR. +<2> The secret `nova-cell-compute-config` auto-generates for each `cell`. +<3> You must append the `nova-cell-compute-config` and `nova-migration-ssh-key` references for each custom `OpenStackDataPlaneService` CR that is related to the {compute_service}. + +* For simple configuration overrides, we do not need a custom dataplane service. However, to reconfigure the cell `cell1` in general, +the safest option would be always creating a custom service, and a dedicated configuration map for it. + +[NOTE] +The cell `cell1` is already managed with the default `OpenStackDataPlaneService` called `nova` +and its `nova-extra-config` configuration map. Do not change the default dataplane service 'nova' definition. +The changes will be lost, when the {rhos_long} operator becomes updated with OLM. + +* When a cell spans multiple node sets, you might want to name the custom `OpenStackDataPlaneService` resources like +`nova-cell1-nfv` and `nova-cell1-enterprise`. Then the auto-generated configmaps would be named +`nova-cell1-nfv-extra-config` and `nova-cell1-enterprise-extra-config`. + +[NOTE] +Different configurations for nodes in multiple node sets of the same cell are not covered in this guide. 
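+
+For illustration only, a dedicated configuration map for a hypothetical custom `nova-cell1-nfv` service could look like the following sketch. The `25-nova-cell1-nfv.conf` key name follows the '03' to '99' indexing convention described above, and the `[compute] cpu_shared_set` option is just an example of a node-set specific override, not a required setting:
+
+[source,yaml]
+----
+$ oc apply -f - <<EOF
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nova-cell1-nfv-extra-config
+  namespace: openstack
+data:
+  25-nova-cell1-nfv.conf: |
+    [compute]
+    cpu_shared_set = 0-3
+EOF
+----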
ifeval::["{build}" == "downstream"] . Create a secret for the subscription manager and a secret for the Red Hat registry: @@ -236,18 +327,60 @@ data: EOF ---- endif::[] ++ + +[NOTE] +The `subscription-manager` secret does not need to be referenced in `OpenStackDataPlaneService`'s `spec.dataSources` data. +It is already passed in via a node-specific `OpenStackDataPlaneNodeSet` data in `spec.nodeTemplate.ansible.ansibleVarsFrom`. + + +. Create the dataplane node sets definitions for each cell: -. Deploy the `OpenStackDataPlaneNodeSet` CR: + [source,yaml] ---- -$ oc apply -f - <> computes-$CELL << EOF + ${compute}: + hostName: $compute + ansible: + ansibleHost: $compute + networks: <1> + - defaultRoute: true + fixedIP: ${!ip} + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 +EOF + ind=$(( ind + 1 )) + done + + test -f computes-$CELL || continue + if [ "$CELL" = "cell1" ]; then + GLOBAL="- ssh-known-hosts" + else + GLOBAL=" " + fi + cat > nodeset-${CELL}.yaml < spec: - tlsEnabled: false <1> + tlsEnabled: false <3> networkAttachments: - ctlplane preProvisioned: true @@ -258,36 +391,22 @@ spec: - validate-network - install-os - configure-os - - ssh-known-hosts + $GLOBAL - run-os - reboot-os - install-certs - - libvirt - - nova - ovn - neutron-metadata - - telemetry + - libvirt + - nova-$CELL + - telemetry <4> env: - name: ANSIBLE_CALLBACKS_ENABLED value: "profile_tasks" - name: ANSIBLE_FORCE_COLOR value: "True" - nodes: - standalone: - hostName: standalone <2> - ansible: - ansibleHost: ${computes[standalone.localdomain]} - networks: - - defaultRoute: true - fixedIP: ${computes[standalone.localdomain]} - name: ctlplane - subnetName: subnet1 - - name: internalapi - subnetName: subnet1 - - name: storage - subnetName: subnet1 - - name: tenant - subnetName: subnet1 + - name: ANSIBLE_VERBOSITY + value: 3 nodeTemplate: ansibleSSHPrivateKeySecret: dataplane-adoption-secret ansible: @@ -343,7 +462,7 @@ endif::[] # # These vars are for the network config templates themselves and are # considered EDPM network defaults. - neutron_physical_bridge_name: br-ctlplane + neutron_physical_bridge_name: br-ctlplane <5> neutron_public_interface_name: eth0 # edpm_nodes_validation @@ -351,7 +470,7 @@ endif::[] edpm_nodes_validation_validate_gateway_icmp: false # edpm ovn-controller configuration - edpm_ovn_bridge_mappings: <3> + edpm_ovn_bridge_mappings: <6> edpm_ovn_bridge: br-int edpm_ovn_encap_type: geneve ovn_monitor_all: true @@ -407,69 +526,99 @@ endif::[] # Do not attempt OVS major upgrades here edpm_ovs_packages: - openvswitch3.1 + nodes: EOF + cat computes-$CELL >> nodeset-${CELL}.yaml +done ---- + -<1> If TLS Everywhere is enabled, change `spec:tlsEnabled` to `true`. -<2> If your deployment has a custom DNS Domain, modify the `spec:nodes:[NODE NAME]:hostName` to use fqdn for the node. -<3> Replace `` with the value of the bridge mappings in your configuration, for example, `"datacentre:br-ctlplane"`. +<1> The networks composition must match the source cloud configuration to avoid dataplane connectivity downtime. The ctlplane network must come first. +<2> Use node sets names, like `openstack-cell1`, `openstack-cell2`. Only create node sets for cells containing compute nodes. +<3> If TLS Everywhere is enabled, change `spec.tlsEnabled` to `true`. +<4> If not adopting the telemetry services, omit it from the services list. 
+<5> The bridge name and other OVN and Neutron specific values must match the source cloud configuration to avoid dataplane connectivity downtime. +<6> Replace `` with the value of the bridge mappings in your configuration, for example, `"datacentre:br-ctlplane"`. + +[NOTE] +The global service `ssh-known-hosts` may only be defined for a single node set. -. Ensure that you use the same `ovn-controller` settings in the `OpenStackDataPlaneNodeSet` CR that you used in the {compute_service} nodes before adoption. This configuration is stored in the `external_ids` column in the `Open_vSwitch` table in the Open vSwitch database: +* Ensure that you use the same `ovn-controller` settings in the `OpenStackDataPlaneNodeSet` CR that you used in the {compute_service} nodes before adoption. This configuration is stored in the `external_ids` column in the `Open_vSwitch` table in the Open vSwitch database: + ---- -ovs-vsctl list Open . +$ ovs-vsctl list Open . ... external_ids : {hostname=standalone.localdomain, ovn-bridge=br-int, ovn-bridge-mappings=, ovn-chassis-mac-mappings="datacentre:1e:0a:bb:e6:7c:ad", ovn-encap-ip="172.19.0.100", ovn-encap-tos="0", ovn-encap-type=geneve, ovn-match-northd-version=False, ovn-monitor-all=True, ovn-ofctrl-wait-before-clear="8000", ovn-openflow-probe-interval="60", ovn-remote="tcp:ovsdbserver-sb.openstack.svc:6642", ovn-remote-probe-interval="60000", rundir="/var/run/openvswitch", system-id="2eec68e6-aa21-4c95-a868-31aeafc11736"} ... ---- + -* Replace `` with the value of the bridge mappings in your configuration, for example, `"datacentre:br-ctlplane"`. +Replace `` with the value of the bridge mappings in your configuration, for example, `"datacentre:br-ctlplane"` + +. Deploy the `OpenStackDataPlaneNodeSet` CRs for each Nova compute cell ++ +---- +$ for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + oc apply -f nodeset-${CELL}.yaml +done +---- . If you use a Ceph back end for {block_storage_first_ref}, prepare the adopted data plane workloads: + [source,yaml] ---- -$ oc patch osdpns/openstack --type=merge --patch " -spec: - services: - - bootstrap - - download-cache - - configure-network - - validate-network - - install-os - - configure-os - - ssh-known-hosts - - run-os - - reboot-os - - ceph-client - - install-certs - - ovn - - neutron-metadata - - libvirt - - nova - - telemetry - nodeTemplate: - extraMounts: - - extraVolType: Ceph - volumes: - - name: ceph - secret: - secretName: ceph-conf-files - mounts: - - name: ceph - mountPath: "/etc/ceph" - readOnly: true -" +$ for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + if [ "$CELL" = "cell1" ]; then + GLOBAL="- ssh-known-hosts" + else + GLOBAL=" " + fi + + oc patch osdpns/openstack-$CELL --type=merge --patch " + spec: + services: + - bootstrap + - download-cache + - configure-network + - validate-network + - install-os + - ceph-hci-pre + - configure-os + $GLOBAL + - run-os + - reboot-os + - ceph-client + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova-$CELL + - telemetry + nodeTemplate: + extraMounts: + - extraVolType: Ceph + volumes: + - name: ceph + secret: + secretName: ceph-conf-files + mounts: + - name: ceph + mountPath: "/etc/ceph" + readOnly: true + " +done ---- + [NOTE] -Ensure that you use the same list of services from the original `OpenStackDataPlaneNodeSet` CR, except for the inserted `ceph-client` service. 
+Ensure that you use the same list of services from the original `OpenStackDataPlaneNodeSet` CR, except for the inserted `ceph-client` and `ceph-hci-pre` services. . Optional: Enable `neutron-sriov-nic-agent` in the `OpenStackDataPlaneNodeSet` CR: + [source,yaml] ---- -$ oc patch openstackdataplanenodeset openstack --type='json' --patch='[ +$ for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + oc patch openstackdataplanenodeset openstack-$CELL --type='json' --patch='[ { "op": "add", "path": "/spec/services/-", @@ -486,20 +635,23 @@ $ oc patch openstackdataplanenodeset openstack --type='json' --patch='[ "op": "add", "path": "/spec/nodeTemplate/ansible/ansibleVars/edpm_neutron_sriov_agent_SRIOV_NIC_resource_provider_hypervisors", "value": "dummy-dev:standalone.localdomain" - } -]' + }]' + done ---- . Optional: Enable `neutron-dhcp` in the `OpenStackDataPlaneNodeSet` CR: + [source,yaml] ---- -$ oc patch openstackdataplanenodeset openstack --type='json' --patch='[ +$ for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + oc patch openstackdataplanenodeset openstack-$CELL --type='json' --patch='[ { "op": "add", "path": "/spec/services/-", "value": "neutron-dhcp" }]' +done ---- + [NOTE] @@ -543,8 +695,7 @@ kind: OpenStackDataPlaneDeployment metadata: name: openstack-pre-adoption spec: - nodeSets: - - openstack + nodeSets: $NODESETS servicesOverride: - pre-adoption-validation EOF @@ -606,8 +757,7 @@ kind: OpenStackDataPlaneDeployment metadata: name: tripleo-cleanup spec: - nodeSets: - - openstack + nodeSets: $NODESETS servicesOverride: - tripleo-cleanup EOF @@ -623,8 +773,7 @@ kind: OpenStackDataPlaneDeployment metadata: name: openstack spec: - nodeSets: - - openstack + nodeSets: $NODESETS EOF ---- + @@ -663,6 +812,12 @@ $ oc exec openstackclient -- openstack network agent list +--------------------------------------+------------------------------+------------------------+-------------------+-------+-------+----------------------------+ ---- +[NOTE] +==== +After the data plane adoption completed, {OpenStackPreviousInstaller} cell controllers should be decomissioned. +To become new cell compute nodes, they must be re-provisioned, then scaled-out, or added into additional node sets of corresponding cells. +==== + .Next steps * You must perform a fast-forward upgrade on your Compute services. For more information, see xref:performing-a-fast-forward-upgrade-on-compute-services_{context}[Performing a fast-forward upgrade on Compute services]. diff --git a/docs_user/modules/proc_adopting-networker-services-to-the-data-plane.adoc b/docs_user/modules/proc_adopting-networker-services-to-the-data-plane.adoc index 3a88e924d..5b3276c51 100644 --- a/docs_user/modules/proc_adopting-networker-services-to-the-data-plane.adoc +++ b/docs_user/modules/proc_adopting-networker-services-to-the-data-plane.adoc @@ -52,7 +52,6 @@ spec: - validate-network - install-os - configure-os - - ssh-known-hosts - run-os - install-certs - ovn diff --git a/docs_user/modules/proc_adopting-the-compute-service.adoc b/docs_user/modules/proc_adopting-the-compute-service.adoc index 9941c7995..cd5cdf3a8 100644 --- a/docs_user/modules/proc_adopting-the-compute-service.adoc +++ b/docs_user/modules/proc_adopting-the-compute-service.adoc @@ -13,8 +13,22 @@ To adopt the {compute_service_first_ref}, you patch an existing `OpenStackContro * You have completed the previous adoption steps. * You have defined the following shell variables. 
Replace the following example values with the values that are correct for your environment: ---- -$ alias openstack="oc exec -t openstackclient -- openstack" +alias openstack="oc exec -t openstackclient -- openstack" + +DEFAULT_CELL_NAME="cell3" +RENAMED_CELLS="cell1 cell2 $DEFAULT_CELL_NAME" +---- ++ +The `default` cell takes a new name from `DEFAULT_CELL_NAME`. +In a multi-cell adoption scenario, it may retain its original 'default' name as well. + +A standalone TripleO only creates a default cell, so you should define that instead: ++ +---- +DEFAULT_CELL_NAME="cell1" +RENAMED_CELLS="cell1" ---- ++ .Procedure @@ -22,10 +36,40 @@ $ alias openstack="oc exec -t openstackclient -- openstack" + [NOTE] This procedure assumes that {compute_service} metadata is deployed on the top level and not on each cell level. If the {OpenStackShort} deployment has a per-cell metadata deployment, adjust the following patch as needed. You cannot run the metadata service in `cell0`. +To enable local cells metadata services, set `spec.nova.template.cellTemplates.cell*.metadataServiceTemplate.enable` in `OpenStackControlPlane` CR. + [source,yaml] ---- -$ oc patch openstackcontrolplane openstack -n openstack --type=merge --patch ' +rm -f celltemplates +for CELL in $(echo $RENAMED_CELLS); do + cat >> celltemplates << EOF + ${CELL}: + hasAPIAccess: true + cellDatabaseAccount: nova-$CELL + cellDatabaseInstance: openstack-$CELL + cellMessageBusInstance: rabbitmq-$CELL + metadataServiceTemplate: + enabled: false # enable here to run it in a cell instead + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.$(( 79 + ${CELL##*cell} )) + spec: + type: LoadBalancer + customServiceConfig: | + [workarounds] + disable_compute_service_check_for_ffu=true + conductorServiceTemplate: + customServiceConfig: | + [workarounds] + disable_compute_service_check_for_ffu=true +EOF +done + +cat > oscp-patch.yaml << EOF spec: nova: enabled: true @@ -33,6 +77,7 @@ spec: route: {} template: secret: osp-secret + apiDatabaseAccount: nova-api apiServiceTemplate: override: service: @@ -67,37 +112,25 @@ spec: disable_compute_service_check_for_ffu=true cellTemplates: cell0: + hasAPIAccess: true + cellDatabaseAccount: nova-cell0 + cellDatabaseInstance: openstack + cellMessageBusInstance: rabbitmq conductorServiceTemplate: customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=true - cell1: - metadataServiceTemplate: - enabled: false # enable here to run it in a cell instead - override: - service: - metadata: - annotations: - metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 - spec: - type: LoadBalancer - customServiceConfig: | - [workarounds] - disable_compute_service_check_for_ffu=true - conductorServiceTemplate: - customServiceConfig: | - [workarounds] - disable_compute_service_check_for_ffu=true -' +EOF +cat celltemplates >> oscp-patch.yaml +oc patch openstackcontrolplane openstack -n openstack --type=merge --patch-file=oscp-patch.yaml ---- ++ -. If you are adopting the {compute_service} with the {bare_metal_first_ref}, append the following `novaComputeTemplates` in the `cell1` section of the {compute_service} CR patch: +. 
If you are adopting the {compute_service} with the {bare_metal_first_ref}, append the following `novaComputeTemplates` in the each `cellX` section of the {compute_service} CR patch: + [source,yaml] ---- - cell1: + cell: novaComputeTemplates: standalone: customServiceConfig: | @@ -105,9 +138,11 @@ spec: host = [workarounds] disable_compute_service_check_for_ffu=true + computeDriver: ironic.IronicDriver + ... ---- + -* Replace with the hostname of the node that is running the `ironic` Compute driver in the source cloud. +* Replace `` with the hostname of the node that is running the `ironic` Compute driver in the source cloud. . Wait for the CRs for the Compute control plane services to be ready: + @@ -117,7 +152,7 @@ $ oc wait --for condition=Ready --timeout=300s Nova/nova + [NOTE] The local Conductor services are started for each cell, while the superconductor runs in `cell0`. -Note that `disable_compute_service_check_for_ffu` is mandatory for all imported Compute services until the external data plane is imported, and until Compute services are fast-forward upgraded. For more information, see xref:adopting-compute-services-to-the-data-plane_data-plane[Adopting Compute services to the {rhos_acro} data plane] and xref:performing-a-fast-forward-upgrade-on-compute-services_data-plane[Performing a fast-forward upgrade on Compute services]. +Note that `disable_compute_service_check_for_ffu` is mandatory for all imported Compute services until the external data plane is imported, and until Compute services are fast-forward upgraded. For more information, see xref:adopting-compute-services-to-the-data-plane_data-plane[Adopting Compute services to the {rhos_acro} data plane] and xref:performing-a-fast-forward-upgrade-on-compute-services_data-plane[Upgrading Compute services]. .Verification @@ -131,19 +166,34 @@ $ openstack server list + ** Compare the outputs with the topology-specific configuration in xref:proc_retrieving-topology-specific-service-configuration_migrating-databases[Retrieving topology-specific service configuration]. -* Query the superconductor to check that `cell1` exists, and compare it to pre-adoption values: +* Query the superconductor to check that the expected cells exist, and compare it to pre-adoption values: + ---- -. ~/.source_cloud_exported_variables -echo $PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS -oc rsh nova-cell0-conductor-0 nova-manage cell_v2 list_cells | grep -F '| cell1 |' +$ for CELL in $(echo $CELLS); do + set +u + . ~/.source_cloud_exported_variables_$CELL + set -u + RCELL=$CELL + [ "$CELL" = "default" ] && RCELL=$DEFAULT_CELL_NAME + + echo "comparing $CELL to $RCELL" + echo $PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS | grep -F "| $CELL |" + oc rsh nova-cell0-conductor-0 nova-manage cell_v2 list_cells | grep -F "| $RCELL |" +done ---- + -The following changes are expected: +The following changes are expected, for each cell `X`: + -** The `cell1` `nova` database and username become `nova_cell1`. -** The default cell is renamed to `cell1`. +** The `cellX` `nova` database and username become `nova_cellX`. +** The `default` cell is renamed to `DEFAULT_CELL_NAME` (it may retain the original name, if there are multiple cells). +** RabbitMQ transport URL no longer uses `guest`. ** RabbitMQ transport URL no longer uses `guest`. [NOTE] +==== At this point, the {compute_service} control plane services do not control the existing {compute_service} workloads. 
The control plane manages the data plane only after the data adoption process is completed. For more information, see xref:adopting-compute-services-to-the-data-plane_data-plane[Adopting Compute services to the {rhos_acro} data plane]. +==== + +To import external Compute services to the {rhos_acro} data plane, you must upgrade them first. +See xref:adopting-compute-services-to-the-data-plane_data-plane[Adopting Compute services to the {rhos_acro} data plane], and +See xref:performing-a-fast-forward-upgrade-on-compute-services_data-plane[Upgrading Compute services]. diff --git a/docs_user/modules/proc_configuring-data-plane-nodes.adoc b/docs_user/modules/proc_configuring-data-plane-nodes.adoc index 9ff5e2884..c213a34ce 100644 --- a/docs_user/modules/proc_configuring-data-plane-nodes.adoc +++ b/docs_user/modules/proc_configuring-data-plane-nodes.adoc @@ -18,7 +18,7 @@ kind: NetConfig metadata: name: netconfig spec: - networks: + networks: <1> - name: internalapi dnsDomain: internalapi.example.com subnets: @@ -47,6 +47,7 @@ spec: cidr: 172.19.0.0/24 vlan: 22 ---- +<1> The networks composition must match the source cloud configuration to avoid dataplane connectivity downtime. . Optional: In the `NetConfig` CR, list multiple ranges for the `allocationRanges` field to exclude some of the IP addresses, for example, to accommodate IP addresses that are already consumed by the adopted environment: + diff --git a/docs_user/modules/proc_deploying-backend-services.adoc b/docs_user/modules/proc_deploying-backend-services.adoc index c25e9ef6a..77a0fa260 100644 --- a/docs_user/modules/proc_deploying-backend-services.adoc +++ b/docs_user/modules/proc_deploying-backend-services.adoc @@ -42,12 +42,9 @@ ADMIN_PASSWORD=SomePassword To use the existing {OpenStackShort} deployment password: + ---- -ifeval::["{build}" != "downstream"] -ADMIN_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' AdminPassword:' | awk -F ': ' '{ print $2; }') -endif::[] -ifeval::["{build}" == "downstream"] -ADMIN_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' AdminPassword:' | awk -F ': ' '{ print $2; }') -endif::[] +declare -A TRIPLEO_PASSWORDS +TRIPLEO_PASSWORDS[default]="$HOME/overcloud-passwords.yaml" +ADMIN_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' AdminPassword:' | awk -F ': ' '{ print $2; }') ---- * Set the service password variables to match the original deployment. Database passwords can differ in the control plane environment, but @@ -56,40 +53,22 @@ you must synchronize the service account passwords. 
For example, in developer environments with {OpenStackPreviousInstaller} Standalone, the passwords can be extracted: + ---- -ifeval::["{build}" != "downstream"] -AODH_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' AodhPassword:' | awk -F ': ' '{ print $2; }') -BARBICAN_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' BarbicanPassword:' | awk -F ': ' '{ print $2; }') -CEILOMETER_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' CeilometerPassword:' | awk -F ': ' '{ print $2; }') -CINDER_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' CinderPassword:' | awk -F ': ' '{ print $2; }') -GLANCE_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' GlancePassword:' | awk -F ': ' '{ print $2; }') -HEAT_AUTH_ENCRYPTION_KEY=$(cat ~/tripleo-standalone-passwords.yaml | grep ' HeatAuthEncryptionKey:' | awk -F ': ' '{ print $2; }') -HEAT_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' HeatPassword:' | awk -F ': ' '{ print $2; }') -HEAT_STACK_DOMAIN_ADMIN_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' HeatStackDomainAdminPassword:' | awk -F ': ' '{ print $2; }') -IRONIC_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' IronicPassword:' | awk -F ': ' '{ print $2; }') -MANILA_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' ManilaPassword:' | awk -F ': ' '{ print $2; }') -NEUTRON_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' NeutronPassword:' | awk -F ': ' '{ print $2; }') -NOVA_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' NovaPassword:' | awk -F ': ' '{ print $2; }') -OCTAVIA_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' OctaviaPassword:' | awk -F ': ' '{ print $2; }') -PLACEMENT_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' PlacementPassword:' | awk -F ': ' '{ print $2; }') -SWIFT_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' SwiftPassword:' | awk -F ': ' '{ print $2; }') -endif::[] -ifeval::["{build}" == "downstream"] -AODH_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' AodhPassword:' | awk -F ': ' '{ print $2; }') -BARBICAN_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' BarbicanPassword:' | awk -F ': ' '{ print $2; }') -CEILOMETER_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' CeilometerPassword:' | awk -F ': ' '{ print $2; }') -CINDER_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' CinderPassword:' | awk -F ': ' '{ print $2; }') -GLANCE_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' GlancePassword:' | awk -F ': ' '{ print $2; }') -HEAT_AUTH_ENCRYPTION_KEY=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' HeatAuthEncryptionKey:' | awk -F ': ' '{ print $2; }') -HEAT_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' HeatPassword:' | awk -F ': ' '{ print $2; }') -HEAT_STACK_DOMAIN_ADMIN_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' HeatStackDomainAdminPassword:' | awk -F ': ' '{ print $2; }') -IRONIC_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' IronicPassword:' | awk -F ': ' '{ print $2; }') -MANILA_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' ManilaPassword:' | awk -F ': ' '{ print $2; }') -NEUTRON_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' NeutronPassword:' | awk -F ': ' '{ print $2; }') -NOVA_PASSWORD=$(cat 
~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' NovaPassword:' | awk -F ': ' '{ print $2; }') -OCTAVIA_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' OctaviaPassword:' | awk -F ': ' '{ print $2; }') -PLACEMENT_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' PlacementPassword:' | awk -F ': ' '{ print $2; }') -SWIFT_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' SwiftPassword:' | awk -F ': ' '{ print $2; }') -endif::[] +AODH_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' AodhPassword:' | awk -F ': ' '{ print $2; }') +BARBICAN_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' BarbicanPassword:' | awk -F ': ' '{ print $2; }') +CEILOMETER_METERING_SECRET=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' CeilometerMeteringSecret:' | awk -F ': ' '{ print $2; }') +CEILOMETER_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' CeilometerPassword:' | awk -F ': ' '{ print $2; }') +CINDER_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' CinderPassword:' | awk -F ': ' '{ print $2; }') +GLANCE_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' GlancePassword:' | awk -F ': ' '{ print $2; }') +HEAT_AUTH_ENCRYPTION_KEY=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' HeatAuthEncryptionKey:' | awk -F ': ' '{ print $2; }') +HEAT_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' HeatPassword:' | awk -F ': ' '{ print $2; }') +HEAT_STACK_DOMAIN_ADMIN_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' HeatStackDomainAdminPassword:' | awk -F ': ' '{ print $2; }') +IRONIC_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' IronicPassword:' | awk -F ': ' '{ print $2; }') +MANILA_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' ManilaPassword:' | awk -F ': ' '{ print $2; }') +NEUTRON_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' NeutronPassword:' | awk -F ': ' '{ print $2; }') +NOVA_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' NovaPassword:' | awk -F ': ' '{ print $2; }') +OCTAVIA_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' OctaviaPassword:' | awk -F ': ' '{ print $2; }') +PLACEMENT_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' PlacementPassword:' | awk -F ': ' '{ print $2; }') +SWIFT_PASSWORD=$(cat ${TRIPLEO_PASSWORDS[default]} | grep ' SwiftPassword:' | awk -F ': ' '{ print $2; }') ---- .Procedure @@ -274,12 +253,19 @@ spec: openstack: secret: osp-secret replicas: 3 - storageRequest: 500M + storageRequest: 5G openstack-cell1: secret: osp-secret replicas: 3 - storageRequest: 500M - + storageRequest: 5G + openstack-cell2: + secret: osp-secret + replicas: 1 + storageRequest: 5G + openstack-cell3: + secret: osp-secret + replicas: 1 + storageRequest: 5G memcached: enabled: true templates: @@ -327,6 +313,8 @@ spec: spec: type: LoadBalancer rabbitmq-cell1: + persistence: + storage: 1G override: service: metadata: @@ -335,7 +323,28 @@ spec: metallb.universe.tf/loadBalancerIPs: 172.17.0.86 spec: type: LoadBalancer - + rabbitmq-cell2: + persistence: + storage: 1G + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.87 + spec: + type: LoadBalancer + rabbitmq-cell3: + persistence: + storage: 1G + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.88 + spec: + type: LoadBalancer telemetry: enabled: false @@ -353,13 +362,25 @@ EOF + <1> Select an existing storage class in your 
{OpenShiftShort} cluster.
+This example provides the required infrastructure database and messaging services for three compute cells
+named `cell1`, `cell2`, and `cell3`. Adjust the names, counts, IP addresses, and numbers,
+like `replicas`, `storage`, or `storageRequest`, as needed.
+
 .Verification
 
-. Verify that MariaDB is running:
+* Verify that MariaDB and RabbitMQ are running, for all defined cells:
+
 ----
-$ oc get pod openstack-galera-0 -o jsonpath='{.status.phase}{"\n"}'
-$ oc get pod openstack-cell1-galera-0 -o jsonpath='{.status.phase}{"\n"}'
+$ RENAMED_CELLS="cell1 cell2 cell3"
+$ oc get pod openstack-galera-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running
+$ oc get pod rabbitmq-server-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running
+$ for CELL in $(echo $RENAMED_CELLS); do
+  oc get pod openstack-$CELL-galera-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running
+  oc get pod rabbitmq-$CELL-server-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running
+done
 ----
+Further on, we will refer to the given cell names via the environment
+variable `RENAMED_CELLS`.
+
 . Verify that you can access the `OpenStackClient` pod. For more information, see link:{defaultURL}/maintaining_the_red_hat_openstack_services_on_openshift_deployment/assembly_accessing-the-rhoso-cloud#proc_accessing-the-OpenStackClient-pod_cloud-access-admin[Accessing the OpenStackClient pod] in _Maintaining the {rhos_long_noacro} deployment_.
diff --git a/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc b/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc
index d61f5a053..2f842d6a9 100644
--- a/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc
+++ b/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc
@@ -19,16 +19,6 @@ Migrate your databases from the original {rhos_prev_long} ({OpenStackShort}) dep
 * Define the following shell variables.
Replace the following example values with values that are correct for your environment: + ---- -PODIFIED_MARIADB_IP=$(oc get svc --selector "mariadb/name=openstack" -ojsonpath='{.items[0].spec.clusterIP}') -PODIFIED_CELL1_MARIADB_IP=$(oc get svc --selector "mariadb/name=openstack-cell1" -ojsonpath='{.items[0].spec.clusterIP}') -PODIFIED_DB_ROOT_PASSWORD=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d) - -# The CHARACTER_SET and collation should match the source DB -# if the do not then it will break foreign key relationships -# for any tables that are created in the future as part of db sync -CHARACTER_SET=utf8 -COLLATION=utf8_general_ci - ifeval::["{build}" != "downstream"] STORAGE_CLASS=crc-csi-hostpath-provisioner MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified @@ -37,30 +27,106 @@ ifeval::["{build}" == "downstream"] STORAGE_CLASS=local-storage MARIADB_IMAGE=registry.redhat.io/rhosp-dev-preview/openstack-mariadb-rhel9:18.0 endif::[] -# Replace with your environment's MariaDB Galera cluster VIP and backend IPs: -SOURCE_MARIADB_IP=172.17.0.2 -declare -A SOURCE_GALERA_MEMBERS -SOURCE_GALERA_MEMBERS=( + +CELLS="default cell1 cell2" +DEFAULT_CELL_NAME="cell3" +RENAMED_CELLS="cell1 cell2 $DEFAULT_CELL_NAME" + +# The CHARACTER_SET and collation should match the source DB +# if the do not then it will break foreign key relationships +# for any tables that are created in the future as part of db sync +CHARACTER_SET=utf8 +COLLATION=utf8_general_ci + +declare -A PODIFIED_DB_ROOT_PASSWORD +for CELL in $(echo "super $RENAMED_CELLS"); do + PODIFIED_DB_ROOT_PASSWORD[$CELL]=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d) +done + +declare -A PODIFIED_MARIADB_IP +for CELL in $(echo "super $RENAMED_CELLS"); do + if [ "$CELL" = "super" ]; then + PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector "mariadb/name=openstack" -ojsonpath='{.items[0].spec.clusterIP}') + else + PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector "mariadb/name=openstack-$CELL" -ojsonpath='{.items[0].spec.clusterIP}') + fi +done + +declare -A TRIPLEO_PASSWORDS +for CELL in $(echo $CELLS); do + if [ "$CELL" = "default" ]; then + TRIPLEO_PASSWORDS[default]="$HOME/overcloud-passwords.yaml" + else + # in a split-stack source cloud, it should take a stack-specific passwords file instead + TRIPLEO_PASSWORDS[$CELL]="$HOME/overcloud-passwords.yaml" + fi +done + +declare -A SOURCE_DB_ROOT_PASSWORD +for CELL in $(echo $CELLS); do + SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') +done + +declare -A SOURCE_MARIADB_IP +SOURCE_MARIADB_IP[default]=** +SOURCE_MARIADB_IP[cell1]=** +SOURCE_MARIADB_IP[cell2]=** +# ... + +declare -A SOURCE_GALERA_MEMBERS_DEFAULT +SOURCE_GALERA_MEMBERS_DEFAULT=( ["standalone.localdomain"]=172.17.0.100 + # [...]=... +) +declare -A SOURCE_GALERA_MEMBERS_CELL1 +SOURCE_GALERA_MEMBERS_CELL1=( # ... ) -ifeval::["{build}" != "downstream"] -SOURCE_DB_ROOT_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') -endif::[] -ifeval::["{build}" == "downstream"] -SOURCE_DB_ROOT_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') +declare -A SOURCE_GALERA_MEMBERS_CELL2 +SOURCE_GALERA_MEMBERS_CELL2=( + # ... ) -endif::[] +# ... 
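+
+# Optional sanity check (not part of the original steps): print the collected
+# per-cell connection settings for review before proceeding
+for CELL in $(echo $CELLS); do
+  echo "$CELL: MariaDB VIP ${SOURCE_MARIADB_IP[$CELL]}, passwords file ${TRIPLEO_PASSWORDS[$CELL]}"
+done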
---- + -To get the value to set `SOURCE_MARIADB_IP`, query the puppet-generated configurations in a Controller node: +Here, `CELLS` and `RENAMED_CELLS` represent changes that are going to be made +after importing databases: the `default` cell takes a new name from `DEFAULT_CELL_NAME`. +In a multi-cell adoption scenario, it may retain its original 'default' name as well. + +Note that the `super` is not a cell, but the top-scope Nova +API "upcall" database instance. A super conductor connects to that database. +In this guide, the upcall and cells' databases are going to use the same password +defined in `osp-secret`. Old passwords are only needed to prepare the data exports. + +To get the values for `SOURCE_MARIADB_IP`, query the puppet-generated configurations in a Controller and CellController nodes: ++ +---- +$ sudo grep -rI 'listen mysql' -A10 /var/lib/config-data/puppet-generated/ | grep bind +---- + +To get the values for `SOURCE_GALERA_MEMBERS_*`, query the puppet-generated configurations in a Controller and CellController nodes: + ---- -$ grep -rI 'listen mysql' -A10 /var/lib/config-data/puppet-generated/ | grep bind +$ sudo grep -rI 'listen mysql' -A10 /var/lib/config-data/puppet-generated/ | grep server ---- +The source cloud always uses the same password for cells' databases by design. +For that reason, we chose to use the same passwords file for all cells' stacks. +There is a split-stack topology, however, that allows using different passwords +files for each stack. + * Prepare the MariaDB adoption helper pod: + +. Create a temporary folder to store the adoption helper pod. Choose storage requests that fit the MySQL database size: +A standalone TripleO only creates a 'default' cell, which should be the only `CELLS` value for such a case +(and `DEFAULT_CELL_NAME` should be `cell1`). + +** For each cell defined in `CELLS` +*** Replace `["standalone.localdomain"]="172.17.0.100"`, and complete `SOURCE_GALERA_MEMBERS_*` with the names of MariaDB Galera cluster members and its IP address. +*** Replace `SOURCE_MARIADB_IP[*]= ...`, and complete the records lists for the cell names and VIP addresses of MariaDB Galera clusters. +*** Replace `SOURCE_GALERA_MEMBERS_*[*]= ...`, and complete the records lists for the cell names and IP addresses of MariaDB Galera cluster members. + . Create a temporary volume claim and a pod for the database data copy. Edit the volume claim storage request if necessary, to give it enough space for the overcloud databases: + [source,yaml] @@ -118,106 +184,128 @@ $ oc wait --for condition=Ready pod/mariadb-copy-data --timeout=30s .Procedure -. Check that the source Galera database cluster members are online and synced: +. 
Check that the source Galera database cluster(s) members are online and synced: + ---- -for i in "${!SOURCE_GALERA_MEMBERS[@]}"; do - echo "Checking for the database node $i WSREP status Synced" - oc rsh mariadb-copy-data mysql \ - -h "${SOURCE_GALERA_MEMBERS[$i]}" -uroot -p"$SOURCE_DB_ROOT_PASSWORD" \ - -e "show global status like 'wsrep_local_state_comment'" | \ - grep -qE "\bSynced\b" +for CELL in $(echo $CELLS); do + MEMBERS=SOURCE_GALERA_MEMBERS_$(echo ${CELL}|tr '[:lower:]' '[:upper:]')[@] + for i in "${!MEMBERS}"; do + echo "Checking for the database node $i WSREP status Synced" + oc rsh mariadb-copy-data mysql \ + -h "$i" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" \ + -e "show global status like 'wsrep_local_state_comment'" | \ + grep -qE "\bSynced\b" + done done ---- ++ +Each additional Nova v2 cell runs a dedicated Galera database cluster, so the check is performed for all of them. . Get the count of source databases with the `NOK` (not-OK) status: + ---- -$ oc rsh mariadb-copy-data mysql -h "${SOURCE_MARIADB_IP}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" -e "SHOW databases;" +for CELL in $(echo $CELLS); do + oc rsh mariadb-copy-data mysql -h "${SOURCE_MARIADB_IP[$CELL]}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" -e "SHOW databases;" +done ---- . Check that `mysqlcheck` had no errors: + ---- -. ~/.source_cloud_exported_variables -test -z "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" || [ "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" = " " ] && echo "OK" || echo "CHECK FAILED" +$ for CELL in $(echo $CELLS); do + set +u + . ~/.source_cloud_exported_variables_$CELL + set -u +done +$ test -z "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" || [ "x$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" = "x " ] && echo "OK" || echo "CHECK FAILED" ---- -. Test the connection to the control plane databases: +. Test the connection to the control plane "upcall" and cells' databases: + ---- -$ oc run mariadb-client --image $MARIADB_IMAGE -i --rm --restart=Never -- \ - mysql -rsh "$PODIFIED_MARIADB_IP" -uroot -p"$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;' -$ oc run mariadb-client --image $MARIADB_IMAGE -i --rm --restart=Never -- \ - mysql -rsh "$PODIFIED_CELL1_MARIADB_IP" -uroot -p"$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;' +for CELL in $(echo "super $RENAMED_CELLS"); do + oc run mariadb-client --image $MARIADB_IMAGE -i --rm --restart=Never -- \ + mysql -rsh "${PODIFIED_MARIADB_IP[$CELL]}" -uroot -p"${PODIFIED_DB_ROOT_PASSWORD[$CELL]}" -e 'SHOW databases;' +done ---- + [NOTE] -You must transition {compute_service_first_ref} services that are imported later into a superconductor architecture by deleting the old service records in the cell databases, starting with `cell1`. New records are registered with different hostnames provided by the {compute_service} operator. All Compute services, except the Compute agent, have no internal state, and their service records can be safely deleted. You also need to rename the former `default` cell to `cell1`. +You must transition {compute_service_first_ref} services that are imported later into a superconductor architecture by deleting the old service records in the cell databases, starting with `cell1`. New records are registered with different hostnames provided by the {compute_service} operator. All Compute services, except the Compute agent, have no internal state, and their service records can be safely deleted. You also need to rename the former `default` cell to the name defined in `DEFAULT_CELL_NAME`. . 
Create a dump of the original databases: + ---- -$ oc rsh mariadb-copy-data << EOF - mysql -h"${SOURCE_MARIADB_IP}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" \ - -N -e "show databases" | grep -E -v "schema|mysql|gnocchi|aodh" | \ - while read dbname; do - echo "Dumping \${dbname}"; - mysqldump -h"${SOURCE_MARIADB_IP}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" \ - --single-transaction --complete-insert --skip-lock-tables --lock-tables=0 \ - "\${dbname}" > /backup/"\${dbname}".sql; - done +$ for CELL in $(echo $CELLS); do + oc rsh mariadb-copy-data << EOF + mysql -h"${SOURCE_MARIADB_IP[$CELL]}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" \ + -N -e "show databases" | grep -E -v "schema|mysql|gnocchi|aodh" | \ + while read dbname; do + echo "Dumping $CELL cell \${dbname}"; + mysqldump -h"${SOURCE_MARIADB_IP[$CELL]}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" \ + --single-transaction --complete-insert --skip-lock-tables --lock-tables=0 \ + "\${dbname}" > /backup/"${CELL}.\${dbname}".sql; + done EOF +done ---- ++ +Note filtering the information and performance schema tables. +Gnocchi is no longer used as a metric store as well . Restore the databases from `.sql` files into the control plane MariaDB: + ---- -$ oc rsh mariadb-copy-data << EOF - # db schemas to rename on import - declare -A db_name_map - db_name_map['nova']='nova_cell1' - db_name_map['ovs_neutron']='neutron' - db_name_map['ironic-inspector']='ironic_inspector' - - # db servers to import into - declare -A db_server_map - db_server_map['default']=${PODIFIED_MARIADB_IP} - db_server_map['nova_cell1']=${PODIFIED_CELL1_MARIADB_IP} - - # db server root password map - declare -A db_server_password_map - db_server_password_map['default']=${PODIFIED_DB_ROOT_PASSWORD} - db_server_password_map['nova_cell1']=${PODIFIED_DB_ROOT_PASSWORD} - - cd /backup - for db_file in \$(ls *.sql); do - db_name=\$(echo \${db_file} | awk -F'.' 
'{ print \$1; }') - if [[ -v "db_name_map[\${db_name}]" ]]; then - echo "renaming \${db_name} to \${db_name_map[\${db_name}]}" - db_name=\${db_name_map[\${db_name}]} - fi - db_server=\${db_server_map["default"]} - if [[ -v "db_server_map[\${db_name}]" ]]; then - db_server=\${db_server_map[\${db_name}]} - fi - db_password=\${db_server_password_map['default']} - if [[ -v "db_server_password_map[\${db_name}]" ]]; then - db_password=\${db_server_password_map[\${db_name}]} - fi - echo "creating \${db_name} in \${db_server}" - mysql -h"\${db_server}" -uroot "-p\${db_password}" -e \ - "CREATE DATABASE IF NOT EXISTS \${db_name} DEFAULT \ - CHARACTER SET ${CHARACTER_SET} DEFAULT COLLATE ${COLLATION};" - echo "importing \${db_name} into \${db_server}" - mysql -h "\${db_server}" -uroot "-p\${db_password}" "\${db_name}" < "\${db_file}" - done +$ for CELL in $(echo $CELLS); do + RCELL=$CELL + [ "$CELL" = "default" ] && RCELL=$DEFAULT_CELL_NAME + oc rsh mariadb-copy-data << EOF + + declare -A db_name_map + db_name_map['nova']="nova_$RCELL" + db_name_map['ovs_neutron']='neutron' + db_name_map['ironic-inspector']='ironic_inspector' - mysql -h "\${db_server_map['default']}" -uroot -p"\${db_server_password_map['default']}" -e \ - "update nova_api.cell_mappings set name='cell1' where name='default';" - mysql -h "\${db_server_map['nova_cell1']}" -uroot -p"\${db_server_password_map['nova_cell1']}" -e \ - "delete from nova_cell1.services where host not like '%nova-cell1-%' and services.binary != 'nova-compute';" + declare -A db_server_map + db_server_map['default']=${PODIFIED_MARIADB_IP['super']} + db_server_map["nova_$RCELL"]=${PODIFIED_MARIADB_IP[$RCELL]} + + declare -A db_server_password_map + db_server_password_map['default']=${PODIFIED_DB_ROOT_PASSWORD['super']} + db_server_password_map["nova_$RCELL"]=${PODIFIED_DB_ROOT_PASSWORD[$RCELL]} + + cd /backup + for db_file in \$(ls ${CELL}.*.sql); do + db_name=\$(echo \${db_file} | awk -F'.' '{ print \$2; }') + renamed_db_file="${RCELL}_new.\${db_name}.sql" + mv -f \${db_file} \${renamed_db_file} + if [[ -v "db_name_map[\${db_name}]" ]]; then + echo "renaming $CELL cell \${db_name} to $RCELL \${db_name_map[\${db_name}]}" + db_name=\${db_name_map[\${db_name}]} + fi + db_server=\${db_server_map["default"]} + if [[ -v "db_server_map[\${db_name}]" ]]; then + db_server=\${db_server_map[\${db_name}]} + fi + db_password=\${db_server_password_map['default']} + if [[ -v "db_server_password_map[\${db_name}]" ]]; then + db_password=\${db_server_password_map[\${db_name}]} + fi + echo "creating $RCELL cell \${db_name} in \${db_server}" + mysql -h"\${db_server}" -uroot "-p\${db_password}" -e \ + "CREATE DATABASE IF NOT EXISTS \${db_name} DEFAULT \ + CHARACTER SET ${CHARACTER_SET} DEFAULT COLLATE ${COLLATION};" + echo "importing $RCELL cell \${db_name} into \${db_server} from \${renamed_db_file}" + mysql -h "\${db_server}" -uroot "-p\${db_password}" "\${db_name}" < "\${renamed_db_file}" + done + + if [ "$CELL" = "default" ] ; then + mysql -h "\${db_server_map['default']}" -uroot -p"\${db_server_password_map['default']}" -e \ + "update nova_api.cell_mappings set name='$DEFAULT_CELL_NAME' where name='default';" + fi + mysql -h "\${db_server_map["nova_$RCELL"]}" -uroot -p"\${db_server_password_map["nova_$RCELL"]}" -e \ + "delete from nova_${RCELL}.services where host not like '%nova_${RCELL}-%' and services.binary != 'nova-compute';" EOF +done ---- .Verification @@ -228,39 +316,39 @@ For more information, see xref:proc_retrieving-topology-specific-service-configu . 
Check that the databases are imported correctly: + ---- -. ~/.source_cloud_exported_variables - -# use 'oc exec' and 'mysql -rs' to maintain formatting -dbs=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;') -echo $dbs | grep -Eq '\bkeystone\b' && echo "OK" || echo "CHECK FAILED" - -# ensure neutron db is renamed from ovs_neutron -echo $dbs | grep -Eq '\bneutron\b' -echo $PULL_OPENSTACK_CONFIGURATION_DATABASES | grep -Eq '\bovs_neutron\b' && echo "OK" || echo "CHECK FAILED" - -# ensure nova cell1 db is extracted to a separate db server and renamed from nova to nova_cell1 -c1dbs=$(oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;') -echo $c1dbs | grep -Eq '\bnova_cell1\b' && echo "OK" || echo "CHECK FAILED" - -# ensure default cell renamed to cell1, and the cell UUIDs retained intact -novadb_mapped_cells=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" \ +$ set +u +$ . ~/.source_cloud_exported_variables_default +$ set -u +$ dbs=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p"${PODIFIED_DB_ROOT_PASSWORD['super']}" -e 'SHOW databases;') +$ echo $dbs | grep -Eq '\bkeystone\b' && echo "OK" || echo "CHECK FAILED" +$ echo $dbs | grep -Eq '\bneutron\b' +$ echo "${PULL_OPENSTACK_CONFIGURATION_DATABASES[@]}" | grep -Eq '\bovs_neutron\b' && echo "OK" || echo "CHECK FAILED" +$ novadb_mapped_cells=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p"${PODIFIED_DB_ROOT_PASSWORD['super']}" \ nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;') -uuidf='\S{8,}-\S{4,}-\S{4,}-\S{4,}-\S{12,}' -left_behind=$(comm -23 \ +$ uuidf='\S{8,}-\S{4,}-\S{4,}-\S{4,}-\S{12,}' +$ left_behind=$(comm -23 \ <(echo $PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS | grep -oE " $uuidf \S+") \ <(echo $novadb_mapped_cells | tr -s "| " " " | grep -oE " $uuidf \S+")) -changed=$(comm -13 \ +$ changed=$(comm -13 \ <(echo $PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS | grep -oE " $uuidf \S+") \ <(echo $novadb_mapped_cells | tr -s "| " " " | grep -oE " $uuidf \S+")) -test $(grep -Ec ' \S+$' <<<$left_behind) -eq 1 && echo "OK" || echo "CHECK FAILED" -default=$(grep -E ' default$' <<<$left_behind) -test $(grep -Ec ' \S+$' <<<$changed) -eq 1 && echo "OK" || echo "CHECK FAILED" -grep -qE " $(awk '{print $1}' <<<$default) cell1$" <<<$changed && echo "OK" || echo "CHECK FAILED" - -# ensure the registered Compute service name has not changed -novadb_svc_records=$(oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" \ - nova_cell1 -e "select host from services where services.binary='nova-compute' order by host asc;") -diff -Z <(echo $novadb_svc_records) <(echo $PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES) && echo "OK" || echo "CHECK FAILED" +$ test $(grep -Ec ' \S+$' <<<$left_behind) -eq 1 && echo "OK" || echo "CHECK FAILED" +$ default=$(grep -E ' default$' <<<$left_behind) +$ test $(grep -Ec ' \S+$' <<<$changed) -eq 1 && echo "OK" || echo "CHECK FAILED" +$ grep -qE " $(awk '{print $1}' <<<$default) ${DEFAULT_CELL_NAME}$" <<<$changed && echo "OK" || echo "CHECK FAILED" + +$ for CELL in $(echo $CELLS | grep -v default); do + set +u + . 
~/.source_cloud_exported_variables_$CELL + set -u + RCELL=$CELL + [ "$CELL" = "default" ] && RCELL=$DEFAULT_CELL_NAME + c1dbs=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$RCELL]} -e 'SHOW databases;') + echo $c1dbs | grep -Eq "\bnova_${CELL}\b" && echo "OK" || echo "CHECK FAILED" + novadb_svc_records=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$RCELL]} \ + nova_$CELL -e "select host from services where services.binary='nova-compute' order by host asc;") + diff -Z <(echo $novadb_svc_records) <(echo ${PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]}) && echo "OK" || echo "CHECK FAILED" +done ---- . Delete the `mariadb-data` pod and the `mariadb-copy-data` persistent volume claim that contains the database backup: diff --git a/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc b/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc index 1fc258c77..12de88324 100644 --- a/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc +++ b/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc @@ -8,13 +8,48 @@ You must upgrade the Compute services from {rhos_prev_long} {rhos_prev_ver} to { * Remove pre-fast-forward upgrade workarounds from the Compute control plane services and Compute data plane services. * Run Compute database online migrations to update live data. +. Prerequisites + +* Define the shell variables necessary to apply the fast-forward upgrade commands, for each Nova compute cell. ++ +---- +DEFAULT_CELL_NAME="cell3" +RENAMED_CELLS="cell1 cell2 $DEFAULT_CELL_NAME" + +declare -A PODIFIED_DB_ROOT_PASSWORD +for CELL in $(echo "super $RENAMED_CELLS"); do + PODIFIED_DB_ROOT_PASSWORD[$CELL]=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d) +done + +NODESETS="" +for CELL in $(echo $RENAMED_CELLS); do + oc get Openstackdataplanenodeset openstack-${CELL} || continue + NODESETS="'openstack-${CELL}', $NODESETS" <1> +done +NODESETS="[${NODESETS%,*}]" + +NOVASERVICES="" +for CELL in $(echo $RENAMED_CELLS); do + NOVASERVICES="'nova-${CELL}', $NOVASERVICES" <2> +done +NOVASERVICES="[${NOVASERVICES%,*}]" +---- ++ +<1> Each dataplane node set name must match the name of the node set that you defined in its corresponding `OpenStackDataPlaneNodeSet` CR. +<2> Each dataplane services name must match the name of the service that you included in the `servicesOverride` key of its corresponding `OpenStackDataPlaneNodeSet` CR. + +[NOTE] +Here, the cells databases share the password defined in `osp-secret`. + .Procedure -. Wait for cell1 Compute data plane services version to update: +. 
Wait for {compute_service} data plane services version updated for all cells: + ---- -$ oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot -p$PODIFIED_DB_ROOT_PASSWORD \ - -e "select a.version from nova_cell1.services a join nova_cell1.services b where a.version!=b.version and a.binary='nova-compute';" +$ for CELL in $(echo $RENAMED_CELLS); do + oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p"${PODIFIED_DB_ROOT_PASSWORD[$CELL]}" \ + -e "select a.version from nova_${CELL}.services a join nova_${CELL}.services b where a.version!=b.version and a.binary='nova-compute';" +done ---- + [NOTE] @@ -28,17 +63,10 @@ Review any errors in the nova Compute agent logs on the data plane, and the `nov + [source,yaml] ---- -$ oc patch openstackcontrolplane openstack -n openstack --type=merge --patch ' -spec: - nova: - template: - cellTemplates: - cell0: - conductorServiceTemplate: - customServiceConfig: | - [workarounds] - disable_compute_service_check_for_ffu=false - cell1: +$ rm -f celltemplates +$ for CELL in $(echo $RENAMED_CELLS); do + cat >> celltemplates << EOF + ${CELL}: metadataServiceTemplate: customServiceConfig: | [workarounds] @@ -47,6 +75,13 @@ spec: customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=false +EOF +done + +$ cat > oscp-patch.yaml << EOF +spec: + nova: + template: apiServiceTemplate: customServiceConfig: | [workarounds] @@ -59,7 +94,39 @@ spec: customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=false -' + cellTemplates: + cell0: + conductorServiceTemplate: + customServiceConfig: | + [workarounds] + disable_compute_service_check_for_ffu=false +EOF +$ cat celltemplates >> oscp-patch.yaml +---- ++ + +* If you are adopting the {compute_service} with the {bare_metal_first_ref}, append the following `novaComputeTemplates` in the needed `cell` section(s) of the {compute_service} CR patch: ++ +[source,yaml] +---- + cell: + novaComputeTemplates: + : <1> + customServiceConfig: | + [DEFAULT] + host = + [workarounds] + disable_compute_service_check_for_ffu=true + computeDriver: ironic.IronicDriver + ... +---- ++ +<1> Replace `` with the hostname of the node that is running the `ironic` Compute driver in the source cloud cell. + +. Apply the patch file ++ +---- +$ oc patch openstackcontrolplane openstack -n openstack --type=merge --patch-file=oscp-patch.yaml ---- . Wait until the Compute control plane services CRs are ready: @@ -68,53 +135,49 @@ spec: $ oc wait --for condition=Ready --timeout=300s Nova/nova ---- +. Complete xref:adopting-compute-services-to-the-data-plane_data-plane[Adopting Compute services to the {rhos_acro} data plane] + . 
Remove the pre-fast-forward upgrade workarounds from the Compute data plane services: + [source,yaml] ---- -$ oc apply -f - < + ifeval::["{build}" != "downstream"] CONTROLLER1_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100" MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified endif::[] ifeval::["{build}" == "downstream"] -CONTROLLER1_SSH="ssh -i ** root@**" +CONTROLLER1_SSH="ssh -i ** root@**" <1> MARIADB_IMAGE=registry.redhat.io/rhosp-dev-preview/openstack-mariadb-rhel9:18.0 endif::[] -SOURCE_MARIADB_IP=172.17.0.2 -ifeval::["{build}" != "downstream"] -SOURCE_DB_ROOT_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') -endif::[] -ifeval::["{build}" == "downstream"] -SOURCE_DB_ROOT_PASSWORD=$(cat ~/overcloud-deploy/overcloud/overcloud-passwords.yaml | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') -) -endif::[] MARIADB_CLIENT_ANNOTATIONS='--annotations=k8s.v1.cni.cncf.io/networks=internalapi' + +declare -A TRIPLEO_PASSWORDS +for CELL in $(echo $CELLS); do + if [ "$CELL" = "default" ]; then + TRIPLEO_PASSWORDS[default]="$HOME/overcloud-passwords.yaml" + else + # in a split-stack source cloud, it should take a stack-specific passwords file instead + TRIPLEO_PASSWORDS[$CELL]="$HOME/overcloud-passwords.yaml" +fi +done + +declare -A SOURCE_DB_ROOT_PASSWORD +for CELL in $(echo $CELLS); do + SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') +done + +declare -A SOURCE_MARIADB_IP <2> +SOURCE_MARIADB_IP[default]=** +SOURCE_MARIADB_IP[cell1]=** +SOURCE_MARIADB_IP[cell2]=** +# ... ---- + -To get the value to set `SOURCE_MARIADB_IP`, query the puppet-generated configurations in a Controller node: +<1> Complete `CONTROLLER1_SSH` settings with SSH connection details for any non-cell controller of the source {OpenStackPreviousInstaller} cloud. +<2> For each cell defined in `CELLS`, replace `SOURCE_MARIADB_IP[*]= ...`, and complete the records lists for the cell names and VIP addresses of MariaDB Galera clusters, include cells, of the source {OpenStackPreviousInstaller} cloud. + +To get the values for `SOURCE_MARIADB_IP`, query the puppet-generated configurations in a Controller and CellController node: + ---- -$ grep -rI 'listen mysql' -A10 /var/lib/config-data/puppet-generated/ | grep bind +$ sudo grep -rI 'listen mysql' -A10 /var/lib/config-data/puppet-generated/ | grep bind ---- +[NOTE] +The source cloud always uses the same password for cells' databases by design. +For that reason, we chose to use the same passwords file for all cells' stacks. +There is a split-stack topology, however, that allows using different passwords +files for each stack. + .Procedure . 
Export the shell variables for the following outputs and test the connection to the {OpenStackShort} database: + ---- -export PULL_OPENSTACK_CONFIGURATION_DATABASES=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysql -rsh "$SOURCE_MARIADB_IP" -uroot -p"$SOURCE_DB_ROOT_PASSWORD" -e 'SHOW databases;') -echo "$PULL_OPENSTACK_CONFIGURATION_DATABASES" +$ unset PULL_OPENSTACK_CONFIGURATION_DATABASES +$ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES +$ for CELL in $(echo $CELLS); do + PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh "${SOURCE_MARIADB_IP[$CELL]}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" -e 'SHOW databases;') +done ---- + [NOTE] -The `nova`, `nova_api`, and `nova_cell0` databases are included in the same database host. +The `nova`, `nova_api`, and `nova_cell0` databases are included on the same database host for the main overcloud Heat stack. +Additional cells use the `nova` database of their local Galera clusters. . Run `mysqlcheck` on the {OpenStackShort} database to check for inaccuracies: + ---- -export PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysqlcheck --all-databases -h $SOURCE_MARIADB_IP -u root -p"$SOURCE_DB_ROOT_PASSWORD" | grep -v OK) -echo "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" +$ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK +$ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK +$ run_mysqlcheck() { + PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysqlcheck --all-databases -h ${SOURCE_MARIADB_IP[$CELL]} -u root -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" | grep -v OK) +} +$ for CELL in $(echo $CELLS); do + run_mysqlcheck +done +$ if [ "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" != "" ]; then + # Try mysql_upgrade to fix mysqlcheck failure + for CELL in $(echo $CELLS); do + MYSQL_UPGRADE=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql_upgrade -v -h ${SOURCE_MARIADB_IP[$CELL]} -u root -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}") + # rerun mysqlcheck to check if problem is resolved + run_mysqlcheck + done +fi ---- ++ . Get the {compute_service_first_ref} cell mappings: + ---- export PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysql -rsh "${SOURCE_MARIADB_IP}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" nova_api -e \ + mysql -rsh "${SOURCE_MARIADB_IP['default']}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD['default']}" nova_api -e \ 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;') -echo "$PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS" ---- ++ . 
Get the hostnames of the registered Compute services: + ---- -export PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysql -rsh "$SOURCE_MARIADB_IP" -uroot -p"$SOURCE_DB_ROOT_PASSWORD" nova_api -e \ - "select host from nova.services where services.binary='nova-compute';") -echo "$PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES" +$ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES +$ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES +$ for CELL in $(echo $CELLS); do + PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh "${SOURCE_MARIADB_IP[$CELL]}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" -e \ + "select host from nova.services where services.binary='nova-compute';") +done ---- . Get the list of the mapped {compute_service} cells: + ---- -export PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=$($CONTROLLER1_SSH sudo podman exec -it nova_api nova-manage cell_v2 list_cells) -echo "$PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS" +export PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=$($CONTROLLER1_SSH sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells) ---- + [IMPORTANT] After the {OpenStackShort} control plane services are shut down, if any of the exported values are lost, re-running the command fails because the control plane services are no longer running on the source cloud, and the data cannot be retrieved. To avoid data loss, preserve the exported values in an environment file before shutting down the control plane services. -. If `neutron-sriov-nic-agent` agents are running in your {OpenStackShort} deployment, get the configuration to use for the data plane adoption: -+ ----- -SRIOV_AGENTS=$ oc run mariadb-client mysql -rsh "$SOURCE_MARIADB_IP" \ --uroot -p"$SOURCE_DB_ROOT_PASSWORD" ovs_neutron -e \ -"select host, configurations from agents where agents.binary='neutron-sriov-nic-agent';" ----- - . 
Store the exported variables for future use: + ---- -$ cat >~/.source_cloud_exported_variables < +$ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES +$ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK +$ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES +$ for CELL in $(echo $CELLS); do + cat > ~/.source_cloud_exported_variables_$CELL << EOF +PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh ${SOURCE_MARIADB_IP[$CELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} -e 'SHOW databases;')" +PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[$CELL]="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysqlcheck --all-databases -h ${SOURCE_MARIADB_IP[$CELL]} -u root -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} | grep -v OK)" +PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh ${SOURCE_MARIADB_IP[$CELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} -e \ + "select host from nova.services where services.binary='nova-compute';")" +EOF + done +$ cat >> ~/.source_cloud_exported_variables_default << EOF +PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh ${SOURCE_MARIADB_IP['default']} -uroot -p${SOURCE_DB_ROOT_PASSWORD['default']} -e \ + 'select uuid,name,transport_url,database_connection,disabled from nova_api.cell_mappings;' || echo None)" +PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS="$($CONTROLLER1_SSH sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells)" EOF +$ chmod 0600 ~/.source_cloud_exported_variables* ---- +<1> If `neutron-sriov-nic-agent` agents are running in your {OpenStackShort} deployment, get the configuration to use for the data plane adoption + +[NOTE] +==== +This configuration will be required later, during the data plane adoption post-checks. +==== diff --git a/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc b/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc index 88c07fa5d..e13eb8b9c 100644 --- a/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc +++ b/docs_user/modules/proc_stopping-infrastructure-management-and-compute-services.adoc @@ -13,21 +13,25 @@ The following procedure applies to a single node standalone {OpenStackPreviousIn [subs=+quotes] ---- ifeval::["{build}" != "downstream"] +CONTROLLER1_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@" +# ... +endif::[] +ifeval::["{build}" == "downstream"] +CONTROLLER1_SSH="ssh -i root@" +# ... +endif::[] +# ... <1> +ifeval::["{build}" != "downstream"] EDPM_PRIVATEKEY_PATH="~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa" endif::[] ifeval::["{build}" == "downstream"] -EDPM_PRIVATEKEY_PATH="" +EDPM_PRIVATEKEY_PATH="" <2> endif::[] -declare -A computes -computes=( - ["standalone.localdomain"]="192.168.122.100" - # ... -) ---- + -** Replace `["standalone.localdomain"]="192.168.122.100"` with the name and IP address of the Compute node. +<1> Complete `CONTROLLER_SSH` settings with SSH connection details for all controllers, including cell controllers, of the source {OpenStackPreviousInstaller} cloud. 
ifeval::["{build}" == "downstream"] -** Replace `` with the path to your SSH key. +<2> Replace `` with the path to your SSH key. endif::[] .Procedure diff --git a/docs_user/modules/proc_stopping-openstack-services.adoc b/docs_user/modules/proc_stopping-openstack-services.adoc index 5ad49efc4..465c1b292 100644 --- a/docs_user/modules/proc_stopping-openstack-services.adoc +++ b/docs_user/modules/proc_stopping-openstack-services.adoc @@ -34,11 +34,31 @@ ifeval::["{build}" != "downstream"] CONTROLLER1_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100" endif::[] ifeval::["{build}" == "downstream"] -CONTROLLER1_SSH="ssh -i ** root@**" -endif::[] +CONTROLLER1_SSH="ssh -i ** root@**" <1> CONTROLLER2_SSH="ssh -i ** root@**" CONTROLLER3_SSH="ssh -i ** root@**" +endif::[] ---- +* For a multi-cell deployment, specify the overcloud and cell controllers IPs instead, for example: ++ +[subs=+quotes] +---- +ifeval::["{build}" != "downstream"] +CONTROLLER1_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.103" +CONTROLLER2_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.106" +CONTROLLER3_SSH="ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.109" +endif::[] +ifeval::["{build}" == "downstream"] +CONTROLLER1_SSH="ssh -i ** root@**" <2> +CONTROLLER2_SSH="ssh -i ** root@**" +CONTROLLER3_SSH="ssh -i ** root@**" +# ... +endif::[] +---- +ifeval::["{build}" == "downstream"] +<1> Replace `` with the path to your SSH key. +<2> Replace `` with IP addresses of all controllers, includinig cell controllers. +endif::[] .Procedure diff --git a/tests/roles/backend_services/tasks/main.yaml b/tests/roles/backend_services/tasks/main.yaml index 238a4d94d..d1010f602 100644 --- a/tests/roles/backend_services/tasks/main.yaml +++ b/tests/roles/backend_services/tasks/main.yaml @@ -105,27 +105,28 @@ vars: deploy_ctlplane_ospdo: true -- name: Deploy the podified control plane +- name: deploy the OpenStackControlPlane CR ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} oc apply -f ../config/tmp/test_deployment.yaml when: not ospdo_src| bool -- name: wait for services to start up +- name: verify that MariaDB and RabbitMQ are running, for all defined cells ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - oc wait pod --for condition=Ready {{ item }} - register: service_running_result - until: service_running_result is success - retries: 150 + {{ cells_env }} + oc get pod openstack-galera-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running + oc get pod rabbitmq-server-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running + for CELL in $(echo $RENAMED_CELLS); do + oc get pod openstack-$CELL-galera-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running + oc get pod rabbitmq-$CELL-server-0 -o jsonpath='{.status.phase}{"\n"}' | grep Running + done + register: mariadb_running_result + until: mariadb_running_result is success + retries: 60 delay: 2 - loop: - - openstack-galera-0 - - openstack-cell1-galera-0 - - rabbitmq-server-0 - - rabbitmq-cell1-server-0 - name: Patch openstack upstream dns server to set the correct value for the environment when: upstream_dns is defined diff --git a/tests/roles/backend_services/templates/openstack_control_plane.j2 b/tests/roles/backend_services/templates/openstack_control_plane.j2 index 53054f443..6930a7631 100644 --- a/tests/roles/backend_services/templates/openstack_control_plane.j2 +++ b/tests/roles/backend_services/templates/openstack_control_plane.j2 @@ -81,7 +81,16 
@@ spec: openstack-cell1: secret: osp-secret replicas: 1 - storageRequest: 1Gi + storageRequest: 250M + # TODO(bogdando): iterate based on renamed_cells value in kustomization.yaml + openstack-cell2: + secret: osp-secret + replicas: 1 + storageRequest: 250M + openstack-cell3: + secret: osp-secret + replicas: 1 + storageRequest: 250M memcached: enabled: true @@ -121,6 +130,8 @@ spec: rabbitmq: templates: rabbitmq: + persistence: + storage: 200M override: service: metadata: @@ -130,6 +141,8 @@ spec: spec: type: LoadBalancer rabbitmq-cell1: + persistence: + storage: 200M override: service: metadata: @@ -138,7 +151,29 @@ spec: metallb.universe.tf/loadBalancerIPs: 172.17.0.86 spec: type: LoadBalancer - + # TODO(bogdando): iterate based on renamed_cells value in kustomization.yaml + rabbitmq-cell2: + persistence: + storage: 200M + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.87 + spec: + type: LoadBalancer + rabbitmq-cell3: + persistence: + storage: 200M + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.88 + spec: + type: LoadBalancer telemetry: enabled: false diff --git a/tests/roles/common_defaults/defaults/main.yaml b/tests/roles/common_defaults/defaults/main.yaml index 035d86f46..51d1cec52 100644 --- a/tests/roles/common_defaults/defaults/main.yaml +++ b/tests/roles/common_defaults/defaults/main.yaml @@ -1,3 +1,15 @@ +# Whether to use no_log on tasks which may output potentially +# sensitive data. +use_no_log: false + +# Whether the adopted node will host compute services +compute_adoption: true + +# The names of cells on the target cloud +renamed_cells: "{{ [default_cell_name] + cells | difference('default') }}" + +## Env headers (should be the same as provided in the docs, as the best effort) + # Header to apply in 'shell' module invocations. If overriding, at # least 'set -e' should be kept to ensure that scripts that fail # somewhere in the middle will also fail the whole Ansible run as @@ -5,12 +17,152 @@ shell_header: | set -euxo pipefail -# Whether to use no_log on tasks which may output potentially -# sensitive data. -use_no_log: false +# Snippet to get the desired 'oc' command onto $PATH. 
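+# (The default below assumes a local CRC environment; when running against a
+# remote OpenShift cluster, this can be overridden, e.g. to set KUBECONFIG
+# and PATH as needed.)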
+oc_header: | + eval $(crc oc-env) -# Whether the adopted node will host compute services -compute_adoption: true +# Header for a Compute cells names evaluation for the source and destination cloud +# and renaming of the default cell from tripleo +# NOTE: 'super' is not a cell, but a reference for the top-scope "upcall" API on main controllers +cells_env: | + CELLS="{{ cells | join(' ') }}" + DEFAULT_CELL_NAME={{ default_cell_name }} + RENAMED_CELLS="{{ renamed_cells | join(' ') }}" + +# Header for osdp nodesets names evaluation +nodesets_env: | + {{ edpm_computes_shell_vars_src }} + + NODESETS="" + for CELL in $(echo $RENAMED_CELLS); do + ref="COMPUTES_$(echo ${CELL}|tr '[:lower:]' '[:upper:]')" + eval names=\${!${ref}[@]} + [ -z "$names" ] && continue + NODESETS="'openstack-${CELL}', $NODESETS" + done + NODESETS="[${NODESETS%,*}{% if edpm_nodes_networker is defined %}, 'openstack-networker'{% endif %}]" + +nodesets_env_oc: | + {{ shell_header }} + {{ oc_header }} + {{ cells_env }} + + NODESETS="" + for CELL in $(echo $RENAMED_CELLS); do + oc get Openstackdataplanenodeset openstack-${CELL} || continue + NODESETS="'openstack-${CELL}', $NODESETS" + done + NODESETS="[${NODESETS%,*}]" + +# Header for custom nova osdp services names evaluation +nova_services_env: | + {{ shell_header }} + {{ cells_env }} + + NOVASERVICES="" + for CELL in $(echo $RENAMED_CELLS); do + NOVASERVICES="'nova-${CELL}', $NOVASERVICES" + done + NOVASERVICES="[${NOVASERVICES%,*}]" + +# Headers for DB client CLI image +mariadb_image_env: | + MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified + +# Header for the source databases initial clusters members health check +mariadb_members_env: |- + {{ shell_header }} + {{ cells_env }} + + {% for cell in cells %} + declare -A SOURCE_GALERA_MEMBERS_{{ cell.upper() }} + SOURCE_GALERA_MEMBERS_{{ cell.upper() }}=( + {% for n in source_galera_members[cell] | default([]) %} + ["{{ n.name }}"]={{ n.ip }} + {% endfor %} + ) + {% endfor %} + +# Headers for the destination databases access passwords +mariadb_passwords_env: |- + {{ shell_header }} + {{ cells_env }} + + declare -A PODIFIED_DB_ROOT_PASSWORD + for CELL in $(echo "super $RENAMED_CELLS"); do + PODIFIED_DB_ROOT_PASSWORD[$CELL]=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d) + done + +# Header for the source database access +mariadb_copy_shell_vars_src: |- + {{ shell_header }} + {{ mariadb_image_env }} + {{ cells_env }} + MARIADB_CLIENT_ANNOTATIONS='--annotations=k8s.v1.cni.cncf.io/networks=internalapi' + STORAGE_CLASS={{ storage_class_name }} + + declare -A TRIPLEO_PASSWORDS + for CELL in $(echo $CELLS); do + if [ "$CELL" = "default" ]; then + TRIPLEO_PASSWORDS[default]="$HOME/overcloud-passwords.yaml" + else + # in a split-stack source cloud, it should take a stack-specific passwords file instead + TRIPLEO_PASSWORDS[$CELL]="$HOME/overcloud-passwords.yaml" + fi + done + + declare -A SOURCE_DB_ROOT_PASSWORD + for CELL in $(echo $CELLS); do + SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') + done + + declare -A SOURCE_MARIADB_IP + {% for cell in cells %} + SOURCE_MARIADB_IP[{{ cell }}]={{ source_mariadb_ip[cell] }} + {% endfor %} + +# Header for the destination database access +mariadb_copy_shell_vars_dst: | + {{ shell_header }} + {{ oc_header }} + {{ mariadb_image_env }} + {{ cells_env }} + + # The CHARACTER_SET and collation should match the source DB + # if the do not then it will break 
foreign key relationships + # for any tables that are created in the future as part of db sync + CHARACTER_SET=utf8 + COLLATION=utf8_general_ci + + {{ mariadb_passwords_env }} + + declare -A PODIFIED_MARIADB_IP + for CELL in $(echo "super $RENAMED_CELLS"); do + if [ "$CELL" = "super" ]; then + PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector "mariadb/name=openstack" -ojsonpath='{.items[0].spec.clusterIP}') + else + PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector "mariadb/name=openstack-$CELL" -ojsonpath='{.items[0].spec.clusterIP}') + fi + done + +# Header for the destination cloud EDPM Nova cell computes FDQN and IP pairs, per a cell +edpm_computes_shell_vars_src: |- + {{ shell_header }} + {{ cells_env }} + + {% for cell in renamed_cells %} + declare -A COMPUTES_{{ cell.upper() }} + COMPUTES_{{ cell.upper() }}=( + {%- for v in edpm_nodes[cell] | default({}) %} + ["{{ edpm_nodes[cell][v].hostName }}"]={{ edpm_nodes[cell][v].ansible.ansibleHost }} + {% endfor -%} + ) + {% endfor %} + +pull_openstack_configuration_ssh_shell_vars: | + CONTROLLER1_SSH="{{ controller1_ssh }}" + CONTROLLER2_SSH="{{ controller2_ssh }}" + CONTROLLER3_SSH="{{ controller3_ssh }}" # Whether to adopt Octavia enable_octavia: true diff --git a/tests/roles/control_plane_rollback/defaults/main.yaml b/tests/roles/control_plane_rollback/defaults/main.yaml index 3fc557ca2..ebbe0c96a 100644 --- a/tests/roles/control_plane_rollback/defaults/main.yaml +++ b/tests/roles/control_plane_rollback/defaults/main.yaml @@ -1,2 +1,3 @@ +os_cloud_name: standalone control_plane_rollback_verify_command: | ssh root@{{ standalone_ip }} OS_CLOUD={{ os_cloud_name }} openstack user list diff --git a/tests/roles/dataplane_adoption/defaults/main.yaml b/tests/roles/dataplane_adoption/defaults/main.yaml index 17a887729..4f0778348 100644 --- a/tests/roles/dataplane_adoption/defaults/main.yaml +++ b/tests/roles/dataplane_adoption/defaults/main.yaml @@ -65,28 +65,29 @@ image_tag: "current-podified" ansible_ssh_private_key_secret: dataplane-adoption-secret default_timesync_ntp_servers: - hostname: pool.ntp.org +# FIXME(bogdando): adapt for mult-cell or single-cell edpm_node_hostname: standalone.localdomain +edpm_node_ip: 192.168.122.100 edpm_user: root edpm_nodes: - standalone: - hostName: "{{ edpm_node_hostname }}" - ansible: - ansibleHost: "{{ edpm_node_ip }}" - networks: - - defaultRoute: true - fixedIP: "{{ edpm_node_ip }}" - name: ctlplane - subnetName: subnet1 - - name: internalapi - subnetName: subnet1 - - name: storage - subnetName: subnet1 - - name: tenant - subnetName: subnet1 - - name: storagemgmt - subnetName: subnet1 -edpm_computes: |- - ["{{ edpm_node_hostname }}"]="{{ edpm_node_ip }}" + default: + standalone: + hostName: "{{ edpm_node_hostname }}" + ansible: + ansibleHost: "{{ edpm_node_ip }}" + networks: + - defaultRoute: true + fixedIP: "{{ edpm_node_ip }}" + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + - name: storagemgmt + subnetName: subnet1 # The variables inside edpm_network_config_template are evaluated based # on the OpenstackDataplaneNodeSet inventory. 
For adding a variable to the invetory, # please add it to OpenstackDataplaneNodeSet.nodeTemplate.ansibleVars @@ -158,11 +159,12 @@ os_diff_data_dir: tmp/os-diff prelaunch_test_instance: true telemetry_adoption: true +# nodes data will be templated in as a separate dataplane_cr: | apiVersion: dataplane.openstack.org/v1beta1 kind: OpenStackDataPlaneNodeSet metadata: - name: openstack + name: openstack-$CELL spec: tlsEnabled: {{ enable_tlse }} networkAttachments: @@ -175,7 +177,7 @@ dataplane_cr: | - validate-network - install-os - configure-os - - ssh-known-hosts + $GLOBAL - run-os - reboot-os - install-certs @@ -183,7 +185,7 @@ dataplane_cr: | - neutron-metadata {%+ if compute_adoption|bool +%} - libvirt - - nova + - nova-$CELL {%+ endif +%} {% if telemetry_adoption|bool +%} - telemetry @@ -195,7 +197,6 @@ dataplane_cr: | value: "True" - name: ANSIBLE_VERBOSITY value: "{{ dataplane_verbosity | default ('3') }}" - nodes: {{ edpm_nodes }} nodeTemplate: ansibleSSHPrivateKeySecret: {{ ansible_ssh_private_key_secret }} ansible: @@ -272,6 +273,7 @@ dataplane_cr: | ovn_monitor_all: true edpm_ovn_remote_probe_interval: 60000 edpm_ovn_ofctrl_wait_before_clear: 8000 + nodes: dpa_dir: "../.." dpa_tests_dir: "{{ dpa_dir }}/tests" diff --git a/tests/roles/dataplane_adoption/tasks/main.yaml b/tests/roles/dataplane_adoption/tasks/main.yaml index 67fbf8b36..5369f59fb 100644 --- a/tests/roles/dataplane_adoption/tasks/main.yaml +++ b/tests/roles/dataplane_adoption/tasks/main.yaml @@ -6,6 +6,7 @@ ceph_backend_configuration_fsid_shell_vars: | CEPH_FSID=$(oc get secret ceph-conf-files -o json | jq -r '.data."ceph.conf"' | base64 -d | grep fsid | sed -e 's/fsid = //') +# FIXME: missing docs coverage? - name: Patch openstackversion to use image built from source or latest if none is defined when: not skip_patching_ansibleee_csv | bool no_log: "{{ use_no_log }}" @@ -91,7 +92,7 @@ rm -f id* cd - -- name: create a Nova Compute Extra Config service (no ceph backend in use) +- name: create a configuration map which should become common for all cells (local storage back end) when: - compute_adoption|bool - ('ceph' not in [nova_libvirt_backend]) @@ -103,15 +104,15 @@ apiVersion: v1 kind: ConfigMap metadata: - name: nova-extra-config + name: nova-cells-global-config namespace: {{ rhoso_namespace }} data: - 19-nova-compute-cell1-workarounds.conf: | + 99-nova-compute-cells-workarounds.conf: | [workarounds] disable_compute_service_check_for_ffu=true EOF -- name: create a Nova Compute Extra Config service (ceph backend in use) +- name: create a configuration map which should become common for all cells (Ceph storage back end) when: - compute_adoption|bool - ('ceph' in [nova_libvirt_backend]) @@ -124,10 +125,10 @@ apiVersion: v1 kind: ConfigMap metadata: - name: nova-extra-config + name: nova-cells-global-config namespace: {{ rhoso_namespace }} data: - 19-nova-compute-cell1-workarounds.conf: | + 99-nova-compute-cells-workarounds.conf: | [workarounds] disable_compute_service_check_for_ffu=true 03-ceph-nova.conf: | @@ -142,13 +143,107 @@ rbd_secret_uuid=$CEPH_FSID EOF -- name: Create OpenStackDataPlaneNodeSet +- name: create dataplane services for Nova cells to enable pre-upgrade workarounds + when: + - compute_adoption|bool + no_log: "{{ use_no_log }}" + ansible.builtin.shell: | + {{ shell_header }} + {{ oc_header }} + {{ cells_env }} + + for CELL in $(echo $RENAMED_CELLS); do + oc apply -f - < edpm-crd.yaml + {{ nodesets_env }} + + declare -A names + for CELL in $(echo $RENAMED_CELLS); do + ref="COMPUTES_$(echo ${CELL}|tr 
'[:lower:]' '[:upper:]')" + eval names=\${!${ref}[@]} + [ -z "$names" ] && continue + ind=0 + rm -f computes-$CELL + for compute in $names; do + ip="${ref}['$compute']" + cat >> computes-$CELL << EOF + ${compute}: + hostName: $compute + ansible: + ansibleHost: $compute + networks: + - defaultRoute: true + fixedIP: ${!ip} + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + EOF + ind=$(( ind + 1 )) + done + + test -f computes-$CELL || continue + if [ "$CELL" = "cell1" ]; then + GLOBAL="- ssh-known-hosts" + else + GLOBAL=" " + fi + cat > nodeset-${CELL}.yaml <> nodeset-${CELL}.yaml + done + # NOTE(bogdando): omit computes-$CELL insertion as that is a manual operation only needed by docs. + # Those files are created here only to provide testing coverage of the commands provided in docs. + # Their contents is irrelevant as the real values come from edpm_nodes, by the below task. + +- name: update EDPM nodes data in nodes sets of cells + no_log: "{{ use_no_log }}" + when: + - compute_adoption|bool + ansible.builtin.shell: | + {{ shell_header }} + {% for cell in renamed_cells %} + {% if cell in edpm_nodes %} + cat > computes-real-{{ cell }} << EOF + {% filter indent(width=4) %} + {{ edpm_nodes[cell] | to_yaml(indent=2) }} + {% endfilter %} + EOF + cat computes-real-{{ cell }} >> nodeset-{{ cell }}.yaml + {% endif %} + {% endfor %} - name: Create OpenStackDataPlaneNodeSet_networker when: edpm_nodes_networker is defined @@ -172,7 +267,6 @@ - validate-network - install-os - configure-os - - ssh-known-hosts - run-os - reboot-os - install-certs @@ -236,71 +330,99 @@ edpm_enable_chassis_gw: true EOF +# FIXME: this is different in docs, need to align with tests +# FIXME(bogdando): get ovs_external_ids.json data for multiple node sets - name: check ovs external-ids with os-diff before deployment + failed_when: false tags: pull_openstack_configuration no_log: "{{ use_no_log }}" ansible.builtin.shell: | {{ shell_header }} - {{ os_diff_dir }}/os-diff diff {{ os_diff_data_dir }}/tripleo/ovs_external_ids/standalone/ovs_external_ids.json edpm-crd.yaml --crd --service ovs_external_ids -f ${PWD}/{{ os_diff_dir }}/config.yaml + {{ cells_env }} + for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + {{ os_diff_dir }}/os-diff diff {{ os_diff_data_dir }}/tripleo/ovs_external_ids/standalone/ovs_external_ids.json nodeset-${CELL}.yaml --crd --service ovs_external_ids -f ${PWD}/{{ os_diff_dir }}/config.yaml + done -- name: deploy dataplane +- name: deploy the OpenStackDataPlaneNodeSet CRs for each Nova compute cell ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - cat edpm-crd.yaml | oc apply -f - - {%+ if edpm_nodes_networker is defined +%} - cat edpm-crd-networker.yaml | oc apply -f - - {%+ endif +%} + {{ cells_env }} + + for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + oc apply -f nodeset-${CELL}.yaml + done + + {% if edpm_nodes_networker is defined %} + oc apply -f edpm-crd-networker.yaml + {% endif %} -# TODO: Apply the ceph backend config for Cinder in the original openstack CR, via kustomize -- name: prepare adopted EDPM workloads to use Ceph backend for Cinder, if configured so +# TODO(bogdando): Apply the ceph backend config for Cinder in the original openstack CR, via kustomize perhaps? 
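+# NOTE: the ssh-known-hosts service is global and may only be listed in a
+# single node set (cell1 here); the $GLOBAL placeholder is left empty for
+# all other cells when the node set definitions are rendered and patched.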
+- name: prepare the adopted data plane workloads to use Ceph backend for Cinder, if configured so no_log: "{{ use_no_log }}" when: - compute_adoption|bool - - cinder_volume_backend == "ceph" or cinder_backup_backend == "ceph" + - cinder_volume_backend == "ceph" or cinder_backup_backend == "ceph" or ('ceph' in [nova_libvirt_backend]) ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - oc patch osdpns/openstack --type=merge --patch " - spec: - services: - - bootstrap - - download-cache - - configure-network - - validate-network - - install-os - - ceph-hci-pre - - configure-os - - ssh-known-hosts - - run-os - - reboot-os - - ceph-client - - ovn - - neutron-metadata - - libvirt - - nova - {% if telemetry_adoption|bool +%} - - telemetry - {%+ endif +%} - nodeTemplate: - extraMounts: - - extraVolType: Ceph - volumes: - - name: ceph - secret: - secretName: ceph-conf-files - mounts: - - name: ceph - mountPath: "/etc/ceph" - readOnly: true - " - -- name: set neutron-sriov-nic-agent configuration in the OpenStackDataPlaneNodeSet CR + {{ cells_env }} + + for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + if [ "$CELL" = "cell1" ]; then + GLOBAL="- ssh-known-hosts" + else + GLOBAL=" " + fi + oc patch osdpns/openstack-$CELL --type=merge --patch " + spec: + services: + - bootstrap + - download-cache + - configure-network + - validate-network + - install-os + - ceph-hci-pre + - configure-os + $GLOBAL + - run-os + - reboot-os + - install-certs + - ceph-client + - ovn + - neutron-metadata + - libvirt + - nova-$CELL + {% if telemetry_adoption|bool +%} + - telemetry + {%+ endif +%} + nodeTemplate: + extraMounts: + - extraVolType: Ceph + volumes: + - name: ceph + secret: + secretName: ceph-conf-files + mounts: + - name: ceph + mountPath: "/etc/ceph" + readOnly: true + " + done + +- name: enable neutron-sriov-nic-agent in the OpenStackDataPlaneNodeSet CR no_log: "{{ use_no_log }}" ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - oc patch openstackdataplanenodeset openstack --type='json' --patch='[ + {{ cells_env }} + + for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + oc patch openstackdataplanenodeset openstack-$CELL --type='json' --patch='[ { "op": "add", "path": "/spec/services/-", @@ -318,27 +440,33 @@ "path": "/spec/nodeTemplate/ansible/ansibleVars/edpm_neutron_sriov_agent_SRIOV_NIC_resource_provider_hypervisors", "value": "" }]' + done when: - edpm_neutron_sriov_agent_enabled|bool - compute_adoption|bool -- name: set neutron-dhcp configuration in the OpenStackDataPlaneNodeSet CR +- name: enable neutron-dhcp in the OpenStackDataPlaneNodeSet CR no_log: "{{ use_no_log }}" ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - oc patch openstackdataplanenodeset openstack --type='json' --patch='[ + {{ cells_env }} + + for CELL in $(echo $RENAMED_CELLS); do + test -f nodeset-${CELL}.yaml || continue + oc patch openstackdataplanenodeset openstack-$CELL --type='json' --patch='[ { "op": "add", "path": "/spec/services/-", "value": "neutron-dhcp" }]' + done when: edpm_neutron_dhcp_agent_enabled|bool -- name: Run pre-adoption validation +- name: Run the pre-adoption validation when: run_pre_adoption_validation|bool block: - - name: Create OpenStackDataPlaneService/pre-adoption-validation + - name: create the validation service no_log: "{{ use_no_log }}" ansible.builtin.shell: | {{ shell_header }} @@ -352,27 +480,25 @@ playbook: osp.edpm.pre_adoption_validation EOF - - name: Create OpenStackDataPlaneDeployment to run the 
validation only + - name: create a OpenStackDataPlaneDeployment CR that runs only the validation no_log: "{{ use_no_log }}" ansible.builtin.shell: | {{ shell_header }} - {{ oc_header }} + {{ nodesets_env_oc }} + oc apply -f - <> celltemplates << EOF + ${CELL}: metadataServiceTemplate: customServiceConfig: | [workarounds] @@ -38,6 +28,13 @@ customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=false + EOF + done + + cat > oscp-patch.yaml << EOF + spec: + nova: + template: apiServiceTemplate: customServiceConfig: | [workarounds] @@ -50,47 +47,58 @@ customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=false - ' + cellTemplates: + cell0: + conductorServiceTemplate: + customServiceConfig: | + [workarounds] + disable_compute_service_check_for_ffu=false + EOF + cat celltemplates >> oscp-patch.yaml -- name: Wait for Nova control plane services' CRs to become ready +- name: Apply the patch file + ansible.builtin.shell: | + {{ shell_header }} + {{ oc_header }} + oc patch openstackcontrolplane openstack -n openstack --type=merge --patch-file=oscp-patch.yaml + +- name: wait until the Compute control plane services CRs are ready ansible.builtin.include_role: name: nova_adoption tasks_from: wait.yaml -- name: remove pre-FFU workarounds for Nova compute EDPM services +- name: remove the pre-fast-forward upgrade workarounds from the Compute data plane services ansible.builtin.shell: | {{ shell_header }} - {{ oc_header }} - oc apply -f - < ~/.source_cloud_exported_variables << EOF - PULL_OPENSTACK_CONFIGURATION_DATABASES="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysql -rsh $SOURCE_MARIADB_IP -uroot -p$SOURCE_DB_ROOT_PASSWORD -e 'SHOW databases;')" - PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysqlcheck --all-databases -h $SOURCE_MARIADB_IP -u root -p$SOURCE_DB_ROOT_PASSWORD | grep -v OK)" - PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysql -rsh $SOURCE_MARIADB_IP -uroot -p$SOURCE_DB_ROOT_PASSWORD nova_api -e \ - 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;')" - PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ - mysql -rsh $SOURCE_MARIADB_IP -uroot -p$SOURCE_DB_ROOT_PASSWORD nova_api -e \ + {{ pull_openstack_configuration_ssh_shell_vars }} + unset PULL_OPENSTACK_CONFIGURATION_DATABASES + unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK + unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES + declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES + declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK + declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES + for CELL in $(echo $CELLS); do + cat > ~/.source_cloud_exported_variables_$CELL << EOF + PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh ${SOURCE_MARIADB_IP[$CELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} -e 'SHOW databases;')" + PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[$CELL]="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysqlcheck --all-databases -h 
${SOURCE_MARIADB_IP[$CELL]} -u root -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} | grep -v OK)" + PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh ${SOURCE_MARIADB_IP[$CELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} -e \ "select host from nova.services where services.binary='nova-compute';")" - PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS="$($CONTROLLER1_SSH sudo podman exec -it nova_api nova-manage cell_v2 list_cells)" EOF - chmod 0600 ~/.source_cloud_exported_variables + done + cat >> ~/.source_cloud_exported_variables_default << EOF + PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS="$(oc run mariadb-client ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \ + mysql -rsh ${SOURCE_MARIADB_IP['default']} -uroot -p${SOURCE_DB_ROOT_PASSWORD['default']} -e \ + 'select uuid,name,transport_url,database_connection,disabled from nova_api.cell_mappings;' || echo None)" + PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS="$($CONTROLLER1_SSH sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells)" + EOF + chmod 0600 ~/.source_cloud_exported_variables* diff --git a/tests/roles/mariadb_copy/defaults/main.yaml b/tests/roles/mariadb_copy/defaults/main.yaml index 200c3fa09..5372dd0e0 100644 --- a/tests/roles/mariadb_copy/defaults/main.yaml +++ b/tests/roles/mariadb_copy/defaults/main.yaml @@ -1,5 +1,3 @@ edpm_node_hostname: standalone.localdomain mariadb_copy_tmp_dir: tmp/mariadb storage_reclaim_policy: delete -source_galera_members: |- - ["{{ edpm_node_hostname }}"]={{ source_mariadb_ip|default(external_mariadb_ip) }} diff --git a/tests/roles/mariadb_copy/tasks/env_vars_dst.yaml b/tests/roles/mariadb_copy/tasks/env_vars_dst.yaml deleted file mode 100644 index 75f25452a..000000000 --- a/tests/roles/mariadb_copy/tasks/env_vars_dst.yaml +++ /dev/null @@ -1,27 +0,0 @@ -- name: get podified MariaDB service cluster IP - ansible.builtin.shell: | - {{ shell_header }} - {{ oc_header }} - oc get svc --selector "mariadb/name=openstack" -ojsonpath='{.items[0].spec.clusterIP}' - register: podified_mariadb_ip_result - -- name: get podified cell1 MariaDB IP - ansible.builtin.shell: | - {{ shell_header }} - {{ oc_header }} - oc get svc --selector "mariadb/name=openstack-cell1" -ojsonpath='{.items[0].spec.clusterIP}' - register: podified_cell1_mariadb_ip_result - -- name: set MariaDB copy shell vars - no_log: "{{ use_no_log }}" - ansible.builtin.set_fact: - mariadb_copy_shell_vars_dst: | - PODIFIED_MARIADB_IP={{ podified_mariadb_ip_result.stdout }} - PODIFIED_CELL1_MARIADB_IP={{ podified_cell1_mariadb_ip_result.stdout }} - PODIFIED_DB_ROOT_PASSWORD="{{ podified_db_root_password }}" - - # The CHARACTER_SET and collation should match the source DB - # if the do not then it will break foreign key relationships - # for any tables that are created in the future as part of db sync - CHARACTER_SET=utf8 - COLLATION=utf8_general_ci diff --git a/tests/roles/mariadb_copy/tasks/env_vars_src.yaml b/tests/roles/mariadb_copy/tasks/env_vars_src.yaml deleted file mode 100644 index 266ec49f2..000000000 --- a/tests/roles/mariadb_copy/tasks/env_vars_src.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- name: set src MariaDB copy shell vars - no_log: "{{ use_no_log }}" - ansible.builtin.set_fact: - mariadb_copy_shell_vars_src: | - MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified - STORAGE_CLASS={{ storage_class_name }} - # TODO: 
remove the default(external_...) when CI is transitioned to use 'source_...' - SOURCE_MARIADB_IP={{ source_mariadb_ip|default(external_mariadb_ip) }} - declare -A SOURCE_GALERA_MEMBERS - SOURCE_GALERA_MEMBERS=( - {{ source_galera_members }} - ) - SOURCE_DB_ROOT_PASSWORD="{{ source_db_root_password|default(external_db_root_password) }}" - MARIADB_CLIENT_ANNOTATIONS='--annotations=k8s.v1.cni.cncf.io/networks=internalapi' diff --git a/tests/roles/mariadb_copy/tasks/main.yaml b/tests/roles/mariadb_copy/tasks/main.yaml index 7c762449d..eeaee35e0 100644 --- a/tests/roles/mariadb_copy/tasks/main.yaml +++ b/tests/roles/mariadb_copy/tasks/main.yaml @@ -1,14 +1,5 @@ -- name: get the source database service environment variables - ansible.builtin.include_tasks: - file: env_vars_src.yaml - -- name: get the destination database service environment variables - ansible.builtin.include_tasks: - file: env_vars_dst.yaml - - name: start an adoption mariadb helper pod ansible.builtin.shell: |- - {{ shell_header }} {{ oc_header }} {{ mariadb_copy_shell_vars_src }} @@ -68,30 +59,32 @@ retries: 25 delay: 2 -- name: check that the Galera database cluster members are online and synced +- name: check that the Galera database cluster(s) members are online and synced, for all cells no_log: "{{ use_no_log }}" ansible.builtin.shell: | - {{ shell_header }} {{ oc_header }} + {{ mariadb_members_env }} {{ mariadb_copy_shell_vars_src }} - for i in "${!SOURCE_GALERA_MEMBERS[@]}"; do - echo "Checking for the database node $i WSREP status Synced" - oc rsh mariadb-copy-data mysql \ - -h "${SOURCE_GALERA_MEMBERS[$i]}" -uroot -p"$SOURCE_DB_ROOT_PASSWORD" \ - -e "show global status like 'wsrep_local_state_comment'" | \ - grep -qE "\bSynced\b" + for CELL in $(echo $CELLS); do + MEMBERS=SOURCE_GALERA_MEMBERS_$(echo ${CELL}|tr '[:lower:]' '[:upper:]')[@] + for i in "${!MEMBERS}"; do + echo "Checking for the database node $i WSREP status Synced" + oc rsh mariadb-copy-data mysql \ + -h "$i" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" \ + -e "show global status like 'wsrep_local_state_comment'" | \ + grep -qE "\bSynced\b" + done done - name: Get the count of not-OK source databases no_log: "{{ use_no_log }}" ansible.builtin.shell: | - {% if pulled_openstack_configuration_shell_headers is defined %} - {{ pulled_openstack_configuration_shell_headers }} - {% else %} - . ~/.source_cloud_exported_variables - {% endif %} - - test -z "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" || [ "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" = " " ] && echo "OK" || echo "CHECK FAILED" + for CELL in $(echo $CELLS); do + set +u + . 
~/.source_cloud_exported_variables_$CELL + set -u + done + test -z "$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" || [ "x$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK" = "x " ] && echo "OK" || echo "CHECK FAILED" register: result failed_when: result.rc != 0 or 'CHECK FAILED' in result.stdout diff --git a/tests/roles/mariadb_copy/tasks/mariadb_verify.yaml b/tests/roles/mariadb_copy/tasks/mariadb_verify.yaml index f991a9b86..56a1956bd 100644 --- a/tests/roles/mariadb_copy/tasks/mariadb_verify.yaml +++ b/tests/roles/mariadb_copy/tasks/mariadb_verify.yaml @@ -1,11 +1,3 @@ -- name: get the source database service environment variables - ansible.builtin.include_tasks: - file: env_vars_src.yaml - -- name: get the destination database service environment variables - ansible.builtin.include_tasks: - file: env_vars_dst.yaml - - name: MariaDB checks no_log: "{{ use_no_log }}" ansible.builtin.shell: diff --git a/tests/roles/mariadb_copy/templates/dump_dbs.bash b/tests/roles/mariadb_copy/templates/dump_dbs.bash index 26936c22c..7fa3e34a1 100755 --- a/tests/roles/mariadb_copy/templates/dump_dbs.bash +++ b/tests/roles/mariadb_copy/templates/dump_dbs.bash @@ -1,18 +1,20 @@ #!/bin/bash -{{ shell_header }} {{ oc_header }} {{ mariadb_copy_shell_vars_src }} +# Create a dump of the original databases # Note Filter the information and performance schema tables # Gnocchi is no longer used as a metric store, skip dumping gnocchi database as well # Migrating Aodh alarms from previous release is not supported, hence skip aodh database -oc rsh mariadb-copy-data << EOF - mysql -h"${SOURCE_MARIADB_IP}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" \ - -N -e "show databases" | grep -E -v "schema|mysql|gnocchi|aodh" | \ - while read dbname; do - echo "Dumping \${dbname}"; - mysqldump -h"${SOURCE_MARIADB_IP}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" \ - --single-transaction --complete-insert --skip-lock-tables --lock-tables=0 \ - "\${dbname}" > /backup/"\${dbname}".sql; - done +for CELL in $(echo $CELLS); do + oc rsh mariadb-copy-data << EOF + mysql -h"${SOURCE_MARIADB_IP[$CELL]}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" \ + -N -e "show databases" | grep -E -v "schema|mysql|gnocchi|aodh" | \ + while read dbname; do + echo "Dumping $CELL cell \${dbname}"; + mysqldump -h"${SOURCE_MARIADB_IP[$CELL]}" -uroot -p"${SOURCE_DB_ROOT_PASSWORD[$CELL]}" \ + --single-transaction --complete-insert --skip-lock-tables --lock-tables=0 \ + "\${dbname}" > /backup/"${CELL}.\${dbname}".sql; + done EOF +done diff --git a/tests/roles/mariadb_copy/templates/post_checks.bash b/tests/roles/mariadb_copy/templates/post_checks.bash index f639d6f4a..6ac75f8e1 100755 --- a/tests/roles/mariadb_copy/templates/post_checks.bash +++ b/tests/roles/mariadb_copy/templates/post_checks.bash @@ -1,26 +1,20 @@ -{{ shell_header }} -{{ oc_header }} {{ mariadb_copy_shell_vars_dst }} -{% if pulled_openstack_configuration_shell_headers is defined %} -{{ pulled_openstack_configuration_shell_headers }} -{% else %} -. ~/.source_cloud_exported_variables -{% endif %} - +# Check that the databases were imported correctly # use 'oc exec' and 'mysql -rs' to maintain formatting -dbs=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;') + +set +u +. 
~/.source_cloud_exported_variables_default +set -u + +dbs=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p"${PODIFIED_DB_ROOT_PASSWORD['super']}" -e 'SHOW databases;') echo $dbs | grep -Eq '\bkeystone\b' && echo "OK" || echo "CHECK FAILED" # ensure neutron db is renamed from ovs_neutron echo $dbs | grep -Eq '\bneutron\b' -echo $PULL_OPENSTACK_CONFIGURATION_DATABASES | grep -Eq '\bovs_neutron\b' && echo "OK" || echo "CHECK FAILED" +echo "${PULL_OPENSTACK_CONFIGURATION_DATABASES[@]}" | grep -Eq '\bovs_neutron\b' && echo "OK" || echo "CHECK FAILED" -# ensure nova cell1 db is extracted to a separate db server and renamed from nova to nova_cell1 -c1dbs=$(oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;') -echo $c1dbs | grep -Eq '\bnova_cell1\b' && echo "OK" || echo "CHECK FAILED" - -# ensure default cell renamed to cell1, and the cell UUIDs retained intact -novadb_mapped_cells=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" \ +# ensure default cell is renamed to $DEFAULT_CELL_NAME, and the cell UUIDs retained intact +novadb_mapped_cells=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p"${PODIFIED_DB_ROOT_PASSWORD['super']}" \ nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;') uuidf='\S{8,}-\S{4,}-\S{4,}-\S{4,}-\S{12,}' left_behind=$(comm -23 \ @@ -32,9 +26,20 @@ changed=$(comm -13 \ test $(grep -Ec ' \S+$' <<<$left_behind) -eq 1 && echo "OK" || echo "CHECK FAILED" default=$(grep -E ' default$' <<<$left_behind) test $(grep -Ec ' \S+$' <<<$changed) -eq 1 && echo "OK" || echo "CHECK FAILED" -grep -qE " $(awk '{print $1}' <<<$default) cell1$" <<<$changed && echo "OK" || echo "CHECK FAILED" +grep -qE " $(awk '{print $1}' <<<$default) ${DEFAULT_CELL_NAME}$" <<<$changed && echo "OK" || echo "CHECK FAILED" + +for CELL in $(echo $CELLS | grep -v default); do + set +u + . 
~/.source_cloud_exported_variables_$CELL + set -u + RCELL=$CELL + [ "$CELL" = "default" ] && RCELL=$DEFAULT_CELL_NAME + # ensure nova cells' db are extracted to separate db servers and renamed from nova to nova_cell + c1dbs=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$RCELL]} -e 'SHOW databases;') + echo $c1dbs | grep -Eq "\bnova_${CELL}\b" && echo "OK" || echo "CHECK FAILED" -# ensure the registered Compute service name has not changed -novadb_svc_records=$(oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot "-p$PODIFIED_DB_ROOT_PASSWORD" \ - nova_cell1 -e "select host from services where services.binary='nova-compute' order by host asc;") -diff -Z <(echo $novadb_svc_records) <(echo $PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES) && echo "OK" || echo "CHECK FAILED" + # ensure the registered Compute service name has not changed + novadb_svc_records=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$RCELL]} \ + nova_$CELL -e "select host from services where services.binary='nova-compute' order by host asc;") + diff -Z <(echo $novadb_svc_records) <(echo ${PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]}) && echo "OK" || echo "CHECK FAILED" +done diff --git a/tests/roles/mariadb_copy/templates/pre_checks.bash b/tests/roles/mariadb_copy/templates/pre_checks.bash index 25eff123e..bc3ceaff1 100755 --- a/tests/roles/mariadb_copy/templates/pre_checks.bash +++ b/tests/roles/mariadb_copy/templates/pre_checks.bash @@ -1,11 +1,8 @@ #!/bin/bash -{{ shell_header }} -{{ oc_header }} -{{ mariadb_copy_shell_vars_src }} {{ mariadb_copy_shell_vars_dst }} -# Test connection to podified DBs (show databases) -oc run mariadb-client --image $MARIADB_IMAGE -i --rm --restart=Never -- \ - mysql -rsh "$PODIFIED_MARIADB_IP" -uroot -p"$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;' -oc run mariadb-client --image $MARIADB_IMAGE -i --rm --restart=Never -- \ - mysql -rsh "$PODIFIED_CELL1_MARIADB_IP" -uroot -p"$PODIFIED_DB_ROOT_PASSWORD" -e 'SHOW databases;' +# Test the connection to the control plane "upcall" and cells' databases +for CELL in $(echo "super $RENAMED_CELLS"); do + oc run mariadb-client --image $MARIADB_IMAGE -i --rm --restart=Never -- \ + mysql -rsh "${PODIFIED_MARIADB_IP[$CELL]}" -uroot -p"${PODIFIED_DB_ROOT_PASSWORD[$CELL]}" -e 'SHOW databases;' +done diff --git a/tests/roles/mariadb_copy/templates/restore_dbs.bash b/tests/roles/mariadb_copy/templates/restore_dbs.bash index 8c31b1a55..3f0419663 100755 --- a/tests/roles/mariadb_copy/templates/restore_dbs.bash +++ b/tests/roles/mariadb_copy/templates/restore_dbs.bash @@ -1,51 +1,78 @@ #!/bin/bash -{{ shell_header }} -{{ oc_header }} {{ mariadb_copy_shell_vars_src }} {{ mariadb_copy_shell_vars_dst }} -oc rsh mariadb-copy-data << EOF - # db schemas to rename on import - declare -A db_name_map - db_name_map['nova']='nova_cell1' - db_name_map['ovs_neutron']='neutron' - db_name_map['ironic-inspector']='ironic_inspector' +# Restore the databases from .sql files into the control plane MariaDB - # db servers to import into - declare -A db_server_map - db_server_map['default']=${PODIFIED_MARIADB_IP} - db_server_map['nova_cell1']=${PODIFIED_CELL1_MARIADB_IP} +for CELL in $(echo $CELLS); do + RCELL=$CELL + [ "$CELL" = "default" ] && RCELL=$DEFAULT_CELL_NAME + oc rsh mariadb-copy-data << EOF + # db schemas to rename on import + declare -A db_name_map + db_name_map['nova']="nova_$RCELL" + db_name_map['ovs_neutron']='neutron' + 
db_name_map['ironic-inspector']='ironic_inspector' - # db server root password map - declare -A db_server_password_map - db_server_password_map['default']=${PODIFIED_DB_ROOT_PASSWORD} - db_server_password_map['nova_cell1']=${PODIFIED_DB_ROOT_PASSWORD} + # cells' db schemas to import for cells + declare -A db_cell_map + db_cell_map['nova']="nova_$DEFAULT_CELL_NAME" + db_cell_map["nova_$RCELL"]="nova_$RCELL" + # Omit importing cells' cell0 DBs as we cannot consolidate them + # db_cell_map["nova_cell0"]="nova_$RCELL" - cd /backup - for db_file in \$(ls *.sql); do - db_name=\$(echo \${db_file} | awk -F'.' '{ print \$1; }') - if [[ -v "db_name_map[\${db_name}]" ]]; then - echo "renaming \${db_name} to \${db_name_map[\${db_name}]}" - db_name=\${db_name_map[\${db_name}]} - fi - db_server=\${db_server_map["default"]} - if [[ -v "db_server_map[\${db_name}]" ]]; then - db_server=\${db_server_map[\${db_name}]} - fi - db_password=\${db_server_password_map['default']} - if [[ -v "db_server_password_map[\${db_name}]" ]]; then - db_password=\${db_server_password_map[\${db_name}]} - fi - echo "creating \${db_name} in \${db_server}" - mysql -h"\${db_server}" -uroot -p"\${db_password}" -e \ - "CREATE DATABASE IF NOT EXISTS \${db_name} DEFAULT \ - CHARACTER SET ${CHARACTER_SET} DEFAULT COLLATE ${COLLATION};" - echo "importing \${db_name} into \${db_server}" - mysql -h "\${db_server}" -uroot -p"\${db_password}" "\${db_name}" < "\${db_file}" - done + # db servers to import into + declare -A db_server_map + db_server_map['default']=${PODIFIED_MARIADB_IP['super']} + db_server_map["nova"]=${PODIFIED_MARIADB_IP[$DEFAULT_CELL_NAME]} + db_server_map["nova_$RCELL"]=${PODIFIED_MARIADB_IP[$RCELL]} - mysql -h "\${db_server_map['default']}" -uroot -p"\${db_server_password_map['default']}" -e \ - "update nova_api.cell_mappings set name='cell1' where name='default';" - mysql -h "\${db_server_map['nova_cell1']}" -uroot -p"\${db_server_password_map['nova_cell1']}" -e \ - "delete from nova_cell1.services where host not like '%nova-cell1-%' and services.binary != 'nova-compute';" + # db server root password map + declare -A db_server_password_map + db_server_password_map['default']=${PODIFIED_DB_ROOT_PASSWORD['super']} + db_server_password_map["nova"]=${PODIFIED_DB_ROOT_PASSWORD[$DEFAULT_CELL_NAME]} + db_server_password_map["nova_$RCELL"]=${PODIFIED_DB_ROOT_PASSWORD[$RCELL]} + + cd /backup + for db_file in \$(ls ${CELL}.*.sql); do + db_name=\$(echo \${db_file} | awk -F'.' '{ print \$2; }') + # Only import cells' DBs and omit everything else + [[ "$CELL" != "default" && ! -v "db_cell_map[\${db_name}]" ]] && continue + # Route databases for importing, when extracting cell's / non-cell's DBs from 'default' cell + if [[ "$CELL" == "default" && -v "db_cell_map[\${db_name}]" ]] ; then + target=$DEFAULT_CELL_NAME + elif [[ "$CELL" == "default" && ! 
-v "db_cell_map[\${db_name}]" ]] ; then + target=super # 'upcall' + else + target=$RCELL + fi + renamed_db_file="\${target}_new.\${db_name}.sql" + mv -f \${db_file} \${renamed_db_file} + if [[ -v "db_name_map[\${db_name}]" ]]; then + echo "renaming $CELL cell \${db_name} to \$target \${db_name_map[\${db_name}]}" + db_name=\${db_name_map[\${db_name}]} + fi + db_server=\${db_server_map["default"]} + if [[ -v "db_server_map[\${db_name}]" ]]; then + db_server=\${db_server_map[\${db_name}]} + fi + db_password=\${db_server_password_map['default']} + if [[ -v "db_server_password_map[\${db_name}]" ]]; then + db_password=\${db_server_password_map[\${db_name}]} + fi + echo "creating $CELL cell \${db_name} in \$target \${db_server}" + mysql -h"\${db_server}" -uroot "-p\${db_password}" -e \ + "CREATE DATABASE IF NOT EXISTS \${db_name} DEFAULT \ + CHARACTER SET ${CHARACTER_SET} DEFAULT COLLATE ${COLLATION};" + echo "importing $CELL cell \${db_name} into \$target \${db_server} from \${renamed_db_file}" + mysql -h "\${db_server}" -uroot "-p\${db_password}" "\${db_name}" < "\${renamed_db_file}" + done + + if [ "$CELL" = "default" ] ; then + mysql -h "\${db_server_map['default']}" -uroot -p"\${db_server_password_map['default']}" -e \ + "update nova_api.cell_mappings set name='$DEFAULT_CELL_NAME' where name='default';" + fi + mysql -h "\${db_server_map["nova_$RCELL"]}" -uroot -p"\${db_server_password_map["nova_$RCELL"]}" -e \ + "delete from nova_${RCELL}.services where host not like '%nova_${RCELL}-%' and services.binary != 'nova-compute';" EOF +done diff --git a/tests/roles/nova_adoption/defaults/main.yaml b/tests/roles/nova_adoption/defaults/main.yaml index f6ced779a..1fa93248f 100644 --- a/tests/roles/nova_adoption/defaults/main.yaml +++ b/tests/roles/nova_adoption/defaults/main.yaml @@ -3,6 +3,37 @@ ironic_adoption: false nova_libvirt_backend: local nova_libvirt_patch: | + {{ cells_env }} + rm -f celltemplates + for CELL in $(echo $RENAMED_CELLS); do + cat >> celltemplates << EOF + ${CELL}: + hasAPIAccess: true + cellDatabaseAccount: nova-$CELL + cellDatabaseInstance: openstack-$CELL + cellMessageBusInstance: rabbitmq-$CELL + metadataServiceTemplate: + enabled: false # enable here to run it in a cell instead + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.$(( 79 + ${CELL##*cell} )) + spec: + type: LoadBalancer + customServiceConfig: | + [workarounds] + disable_compute_service_check_for_ffu=true + conductorServiceTemplate: + customServiceConfig: | + [workarounds] + disable_compute_service_check_for_ffu=true + EOF + done + + cat > oscp-patch.yaml << EOF spec: nova: enabled: true @@ -10,6 +41,7 @@ nova_libvirt_patch: | route: {} template: secret: osp-secret + apiDatabaseAccount: nova-api apiServiceTemplate: override: service: @@ -44,30 +76,19 @@ nova_libvirt_patch: | disable_compute_service_check_for_ffu=true cellTemplates: cell0: + hasAPIAccess: true + cellDatabaseAccount: nova-cell0 + cellDatabaseInstance: openstack + cellMessageBusInstance: rabbitmq conductorServiceTemplate: customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=true - cell1: - metadataServiceTemplate: - enabled: false # enable here to run it in a cell instead - override: - service: - metadata: - annotations: - metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 - spec: - 
type: LoadBalancer - customServiceConfig: | - [workarounds] - disable_compute_service_check_for_ffu=true - conductorServiceTemplate: - customServiceConfig: | - [workarounds] - disable_compute_service_check_for_ffu=true + EOF + cat celltemplates >> oscp-patch.yaml +# NOTE(bogdando): no exact commands provided in docs for nova-ironic, +# so we can use ansible/jinja2 features to simplify testing these nova_ironic_patch: | spec: nova: @@ -76,6 +97,7 @@ nova_ironic_patch: | route: {} template: secret: osp-secret + apiDatabaseAccount: nova-api apiServiceTemplate: override: service: @@ -114,16 +136,23 @@ nova_ironic_patch: | customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=true - cell1: + {%+ for cell in renamed_cells +%} + {{ cell }}: + hasAPIAccess: true + cellDatabaseAccount: nova-cell{{ loop.index }} + cellDatabaseInstance: openstack-cell{{ loop.index }} + cellMessageBusInstance: rabbitmq-cell{{ loop.index }} conductorServiceTemplate: customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=true + {%+ if ironic_adoption|bool and cell in source_ironic_nodes +%} novaComputeTemplates: - standalone: + {%+ for n in source_ironic_nodes[cell] +%} + {{ n.template }}: customServiceConfig: | [DEFAULT] - host = standalone.localdomain + host = {{ n.name }} [workarounds] disable_compute_service_check_for_ffu=true replicas: 1 @@ -131,6 +160,9 @@ nova_ironic_patch: | computeDriver: ironic.IronicDriver networkAttachments: - internalapi + {%+ endfor +%} + {%+ endif +%} + {%+ endfor +%} remove_ffu_workaround_patch: | spec: @@ -154,15 +186,26 @@ remove_ffu_workaround_patch: | customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=false - cell1: + {%+ for cell in renamed_cells +%} + {{ cell }}: conductorServiceTemplate: customServiceConfig: | [workarounds] disable_compute_service_check_for_ffu=false + {%+ if ironic_adoption|bool and cell in source_ironic_nodes +%} novaComputeTemplates: - standalone: + {%+ for n in source_ironic_nodes[cell] +%} + {{ n.template }}: customServiceConfig: | [DEFAULT] - host = standalone.localdomain + host = {{ n.name }} [workarounds] disable_compute_service_check_for_ffu=false + replicas: 1 + resources: {} + computeDriver: ironic.IronicDriver + networkAttachments: + - internalapi + {%+ endfor +%} + {%+ endif +%} + {%+ endfor +%} diff --git a/tests/roles/nova_adoption/tasks/nova_ironic.yaml b/tests/roles/nova_adoption/tasks/nova_ironic.yaml index e5d826166..4ac04d3f8 100644 --- a/tests/roles/nova_adoption/tasks/nova_ironic.yaml +++ b/tests/roles/nova_adoption/tasks/nova_ironic.yaml @@ -2,20 +2,19 @@ ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - oc patch openstackcontrolplane openstack -n openstack --type=merge --patch '{{ nova_ironic_patch}}' + oc patch openstackcontrolplane openstack -n openstack --type=merge --patch '{{ nova_ironic_patch }}' - -- name: wait for Nova control plane services' CRs to become ready +- name: wait until the Compute control plane services CRs are ready ansible.builtin.include_tasks: file: wait.yaml -- name: Remove FFU workarounds +- name: remove the pre-fast-forward upgrade workarounds from the Compute data plane services ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} oc patch openstackcontrolplane openstack -n openstack --type=merge --patch '{{ remove_ffu_workaround_patch }}' -- name: wait for Nova control plane services' CRs to become ready +- name: wait until the Compute control plane services CRs are ready ansible.builtin.include_tasks: file: wait.yaml diff --git 
a/tests/roles/nova_adoption/tasks/nova_libvirt.yaml b/tests/roles/nova_adoption/tasks/nova_libvirt.yaml index 04925d666..64fea33e4 100644 --- a/tests/roles/nova_adoption/tasks/nova_libvirt.yaml +++ b/tests/roles/nova_adoption/tasks/nova_libvirt.yaml @@ -2,7 +2,8 @@ ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - oc patch openstackcontrolplane openstack -n openstack --type=merge --patch '{{ nova_libvirt_patch }}' + {{ nova_libvirt_patch }} + oc patch openstackcontrolplane openstack -n openstack --type=merge --patch-file=oscp-patch.yaml - name: wait for Nova control plane services' CRs to become ready ansible.builtin.include_tasks: @@ -13,15 +14,19 @@ file: check_endpoints.yaml # TODO(bogdando): provide automated checks for 'The expected changes to happen' -- name: query the superconductor for cell1 existance and compare it to pre-adoption values +- name: query the superconductor to check that the expected cells exist, and compare it to pre-adoption values ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} - {% if pulled_openstack_configuration_shell_headers is defined %} - {{ pulled_openstack_configuration_shell_headers }} - {% else %} - . ~/.source_cloud_exported_variables - {% endif %} + {{ cells_env }} + for CELL in $(echo $CELLS); do + set +u + . ~/.source_cloud_exported_variables_$CELL + set -u + RCELL=$CELL + [ "$CELL" = "default" ] && RCELL=$DEFAULT_CELL_NAME - echo $PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS - oc rsh nova-cell0-conductor-0 nova-manage cell_v2 list_cells | grep -F '| cell1 |' + echo "comparing $CELL to $RCELL" + echo $PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS | grep -F "| $CELL |" + oc rsh nova-cell0-conductor-0 nova-manage cell_v2 list_cells | grep -F "| $RCELL |" + done diff --git a/tests/roles/nova_adoption/tasks/wait.yaml b/tests/roles/nova_adoption/tasks/wait.yaml index 77a6d8e3c..7d59ed0c5 100644 --- a/tests/roles/nova_adoption/tasks/wait.yaml +++ b/tests/roles/nova_adoption/tasks/wait.yaml @@ -1,6 +1,6 @@ # NOTE(bogdando): Status phase 'Running' doesn't necessarily mean it IS running in fact. 
# Instead, wait for CR Ready status -- name: wait for Nova control plane services' CRs to become ready +- name: wait until the Compute control plane services CRs are ready ansible.builtin.shell: | {{ shell_header }} {{ oc_header }} diff --git a/tests/roles/ovn_adoption/tasks/main.yaml b/tests/roles/ovn_adoption/tasks/main.yaml index 09ab5df9e..4671c752d 100644 --- a/tests/roles/ovn_adoption/tasks/main.yaml +++ b/tests/roles/ovn_adoption/tasks/main.yaml @@ -139,9 +139,9 @@ {{ oc_header }} {{ ovn_copy_shell_vars }} - $CONTROLLER1_SSH sudo systemctl stop tripleo_ovn_cluster_northd.service - $CONTROLLER2_SSH sudo systemctl stop tripleo_ovn_cluster_northd.service - $CONTROLLER3_SSH sudo systemctl stop tripleo_ovn_cluster_northd.service + $CONTROLLER1_SSH if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi + $CONTROLLER2_SSH if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi + $CONTROLLER3_SSH if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi # If ovn_adoption is done using scenario A (different networks between podified # and tripleo deployments) in order to be able to dump OVN database an iptable @@ -263,10 +263,10 @@ {{ oc_header }} {{ ovn_copy_shell_vars }} - $CONTROLLER1_SSH sudo systemctl stop tripleo_ovn_cluster_north_db_server.service - $CONTROLLER2_SSH sudo systemctl stop tripleo_ovn_cluster_north_db_server.service - $CONTROLLER3_SSH sudo systemctl stop tripleo_ovn_cluster_north_db_server.service + $CONTROLLER1_SSH if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi + $CONTROLLER2_SSH if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi + $CONTROLLER3_SSH if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi - $CONTROLLER1_SSH sudo systemctl stop tripleo_ovn_cluster_south_db_server.service - $CONTROLLER2_SSH sudo systemctl stop tripleo_ovn_cluster_south_db_server.service - $CONTROLLER3_SSH sudo systemctl stop tripleo_ovn_cluster_south_db_server.service + $CONTROLLER1_SSH if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi + $CONTROLLER2_SSH if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi + $CONTROLLER3_SSH if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi diff --git a/tests/roles/pcp_cleanup/tasks/main.yaml b/tests/roles/pcp_cleanup/tasks/main.yaml index dfbfb1f88..dd997e312 100644 --- a/tests/roles/pcp_cleanup/tasks/main.yaml +++ b/tests/roles/pcp_cleanup/tasks/main.yaml @@ -48,4 +48,9 @@ {{ oc_header }} cd {{ install_yamls_path }} for i in {1..3}; do make crc_storage_cleanup crc_storage && break || sleep 5; done + {{ cells_env }} + for CELL in $(echo $RENAMED_CELLS); do + oc delete pvc mysql-db-openstack-$CELL-galera-0 --ignore-not-found=true + oc delete pvc persistence-rabbitmq-$CELL-server-0 --ignore-not-found=true + done when: 
reset_crc_storage|bool diff --git a/tests/roles/stop_remaining_services/defaults/main.yaml b/tests/roles/stop_remaining_services/defaults/main.yaml index 4f4ab6464..3d4b7d77b 100644 --- a/tests/roles/stop_remaining_services/defaults/main.yaml +++ b/tests/roles/stop_remaining_services/defaults/main.yaml @@ -1,5 +1,3 @@ edpm_node_hostname: standalone.localdomain install_yamls_path: /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/ edpm_privatekey_path: "{{ install_yamls_path }}/out/edpm/ansibleee-ssh-key-id_rsa" -edpm_computes: |- - ["{{ edpm_node_hostname }}"]="{{ edpm_node_ip }}" diff --git a/tests/roles/stop_remaining_services/tasks/main.yaml b/tests/roles/stop_remaining_services/tasks/main.yaml index a291c1c2d..ddeffa866 100644 --- a/tests/roles/stop_remaining_services/tasks/main.yaml +++ b/tests/roles/stop_remaining_services/tasks/main.yaml @@ -6,10 +6,6 @@ CONTROLLER2_SSH="{{ controller2_ssh }}" CONTROLLER3_SSH="{{ controller3_ssh }}" EDPM_PRIVATEKEY_PATH="{{ edpm_privatekey_path }}" - declare -A computes - computes=( - {{ edpm_computes }} - ) - name: stop pacemaker services no_log: "{{ use_no_log }}" diff --git a/tests/secrets.sample.yaml b/tests/secrets.sample.yaml index e8f4a34c2..3cc2495a4 100644 --- a/tests/secrets.sample.yaml +++ b/tests/secrets.sample.yaml @@ -2,7 +2,12 @@ oc_login_command: | oc login -u kubeadmin -p {{ admin_password }} -tripleo_passwords: ~/tripleo-standalone-passwords.yaml #CUSTOMIZE_THIS +# Enumerated in terms of TripleO architecture, where 'default' cell may change its name after adoption +tripleo_passwords: #CUSTOMIZE_THIS + # This default has nothing to the real special Nova cell0, just an unfortunate iota iterator + default: ~/overcloud-passwords.yaml + #cell1: ~/cell1-passwords.yaml + #cell2: ~/cell2-passwords.yaml # Adopted OpenStack admin password. Matching the install_yamls default # to reduce developer confusion. @@ -11,31 +16,34 @@ admin_password: 12345678 #CUSTOMIZE_THIS # DB root passwords. Source password needs to be set based on the # original environment, podified can be customized, it matches the # install_yamls default to reduce developer confusion. -source_db_root_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.MysqlRootPassword') | first }}" -podified_db_root_password: 12345678 +source_db_root_password: + default: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.MysqlRootPassword') | first }}" + #cell1: "{{ lookup('file', tripleo_passwords['cell1'], errors='ignore', 'MysqlRootPassword:') | from_yaml | community.general.json_query('*.MysqlRootPassword') | first }}" + #cell2: "{{ lookup('file', tripleo_passwords['cell2'], errors='ignore', 'MysqlRootPassword:') | from_yaml | community.general.json_query('*.MysqlRootPassword') | first }}" # Service account passwords (not DB passwords). 
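As an illustrative aside (not part of the patch), the commented-out entries above can be filled in for a hypothetical source cloud with two extra cells; the password file paths are assumptions for a local test setup, and the cell lookups simply mirror the 'default' entry:

tripleo_passwords:
  default: ~/overcloud-passwords.yaml
  cell1: ~/cell1-passwords.yaml
  cell2: ~/cell2-passwords.yaml

source_db_root_password:
  default: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.MysqlRootPassword') | first }}"
  cell1: "{{ lookup('file', tripleo_passwords['cell1']) | from_yaml | community.general.json_query('*.MysqlRootPassword') | first }}"
  cell2: "{{ lookup('file', tripleo_passwords['cell2']) | from_yaml | community.general.json_query('*.MysqlRootPassword') | first }}"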
-aodh_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.AodhPassword') | first }}" -barbican_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.BarbicanPassword') | first }}" -ceilometer_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.CeilometerPassword') | first }}" -cinder_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.CinderPassword') | first }}" -glance_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.GlancePassword') | first }}" -ironic_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.IronicPassword') | first }}" -manila_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.ManilaPassword') | first }}" -neutron_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.NeutronPassword') | first }}" -heat_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.HeatPassword') | first }}" -heat_stack_domain_admin_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.HeatStackDomainAdminPassword') | first }}" -heat_auth_encryption_key: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.HeatAuthEncryptionKey') | first }}" -nova_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.NovaPassword') | first }}" -octavia_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.OctaviaPassword') | first }}" -placement_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.PlacementPassword') | first }}" -swift_password: "{{ lookup('file', tripleo_passwords) | from_yaml | community.general.json_query('*.SwiftPassword') | first }}" +aodh_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.AodhPassword') | first }}" +barbican_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.BarbicanPassword') | first }}" +ceilometer_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.CeilometerPassword') | first }}" +cinder_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.CinderPassword') | first }}" +glance_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.GlancePassword') | first }}" +ironic_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.IronicPassword') | first }}" +manila_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.ManilaPassword') | first }}" +neutron_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.NeutronPassword') | first }}" +heat_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.HeatPassword') | first }}" +heat_stack_domain_admin_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.HeatStackDomainAdminPassword') | first }}" +heat_auth_encryption_key: "{{ lookup('file', tripleo_passwords['default']) | 
from_yaml | community.general.json_query('*.HeatAuthEncryptionKey') | first }}" +nova_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.NovaPassword') | first }}" +octavia_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.OctaviaPassword') | first }}" +placement_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.PlacementPassword') | first }}" +swift_password: "{{ lookup('file', tripleo_passwords['default']) | from_yaml | community.general.json_query('*.SwiftPassword') | first }}" # FreeIPA SSH connection strings for importing the CA certificate and key ipa_ssh: "ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100 podman exec -ti freeipa-server-container" # Controller SSH connection strings for the MariaDB copy procedure. # Use ":" for controler 2 and 3 if you are testing with a single controller. +# Also specify connection strings for all cells controllers controller1_ssh: "ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa root@192.168.122.100" # CUSTOMIZE THIS controller2_ssh: ":" controller3_ssh: ":" diff --git a/tests/vars.sample.yaml b/tests/vars.sample.yaml index 0067d220b..03290cc86 100644 --- a/tests/vars.sample.yaml +++ b/tests/vars.sample.yaml @@ -4,19 +4,79 @@ install_yamls_path: ~/install_yamls #CUSTOMIZE_THIS # This flag signifies if TLS Everywhere is enabled on the source cloud enable_tlse: false +# Source MariaDB Galera cluster members {name:IP} pairs (also in additional cells) for pre-adoption checks. +# Defaults provided for a single-cell case. Complete the lists for an HA multi-cell adoption. +source_galera_members: + default: + - name: standalone.localdomain + ip: 172.17.0.100 #CUSTOMIZE_THIS + +# Source MariaDB Galera cluster VIP(s) for DB exports of all cells +# Defaults provided for a single-cell case. +source_mariadb_ip: + default: 172.17.0.2 #CUSTOMIZE_THIS + +# EDPM nodes info, for each cell compute (omitting dedicated cell controllers) on the destination cloud. # To enable TLS-E, the standalone hostname must be set to standalone.ooo.test -edpm_node_hostname: standalone.localdomain +# Defaults provided for a single-cell case. +# Provide for each cell on the target cloud, considering default_cell_name value. +# The defined 'networks' connections must match netconfig_networks which manages NetConfig CR +edpm_nodes: + cell1: + standalone: + hostName: standalone.localdomain + ansible: + ansibleHost: 192.168.122.100 + networks: + - defaultRoute: true + fixedIP: 192.168.122.100 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet2 + - name: storage + subnetName: subnet3 + - name: tenant + subnetName: subnet4 + - name: storagemgmt + subnetName: subnet5 # TODO: There is no reason to change the domain depending on the type of # deployment, but we are doing this to keep the CI green when TLS-E is merged. # This setting should at some point be switched in the CI to standalone.ooo.test # for all types of jobs and removed entirely afterwards. +# For a local libvirt setup outside of CI-framework, enable EDPM net config, and define netconfig_networks +# That is required to update DNS config in resolv.conf at very least, so that edpm can reach out OCP pods. 
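For a multi-cell target cloud, each additional compute cell gets its own entry under edpm_nodes, keyed by the cell name it has after adoption. The sketch below shows one hypothetical extra cell; the host name and IP addresses are placeholders, and the networks list must line up with netconfig_networks:

edpm_nodes:
  cell2:
    compute-cell2-0:
      hostName: compute-cell2-0.localdomain
      ansible:
        ansibleHost: 192.168.122.106
      networks:
        - defaultRoute: true
          fixedIP: 192.168.122.106
          name: ctlplane
          subnetName: subnet1
        - name: internalapi
          subnetName: subnet2
        - name: storage
          subnetName: subnet3
        - name: tenant
          subnetName: subnet4
        - name: storagemgmt
          subnetName: subnet5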
+dataplane_os_net_config_set_route: false #CUSTOMIZE_THIS +netconfig_networks: #CUSTOMIZE_THIS + - name: ctlplane + dnsDomain: ctlplane.example.com + subnets: + - name: subnet1 + - name: internalapi + dnsDomain: internalapi.example.com + subnets: + - name: subnet2 + - name: storage + dnsDomain: storage.example.com + subnets: + - name: subnet3 + - name: tenant + dnsDomain: tenant.example.com + subnets: + - name: subnet4 + - name: storagemgmt + dnsDomain: storagemgmt.example.com + subnets: + - name: subnet5 + # If 'true', this flag will create a Barbican secret before the adoption runs # and after the adoption it'll be verified with the secret tills exists with # the same payload. For this flag to work with 'true' value, Barbican should be # available before the adoption prelaunch_barbican_secret: false + # Whether to use 'make crc_storage_cleanup; make crc_storage' before the test reset_crc_storage: true @@ -30,8 +90,14 @@ storage_reclaim_policy: delete # or retain oc_header: | eval $(crc oc-env) -# Source MariaDB Galera cluster VIP for DB exports. -source_mariadb_ip: 172.17.0.2 #CUSTOMIZE_THIS +# Source cloud Nova compute v2 cells to adopt (all must be listed, cannot adopt cells partially) +cells: + - default + +# A cell name for the 'default' cell to take after adoption. +# Must be renamed for a single-cell deployment. Can remain 'default' for a multi-cell one. +# Defaults provided for a single-cell case. For a mult-cell, use the latest 'cells' element index + 1 +default_cell_name: cell1 # Source OS diff config ip for Tripleo source_os_diff_config_ip: 192.168.122.100 @@ -39,9 +105,6 @@ source_os_diff_config_ip: 192.168.122.100 # Source OVN DB IP for DB exports. source_ovndb_ip: 192.168.122.100 #CUSTOMIZE_THIS -# EDPM node IP -edpm_node_ip: 192.168.122.100 #CUSTOMIZE_THIS - # NTP servers list timesync_ntp_servers: # - clock.redhat.com # Will not work outside of RH intranet @@ -52,10 +115,19 @@ auth_url: http://keystone-public-openstack.apps-crc.testing # Set this to true if adopting the ironic services (ironic + ironic-inspector + nova w/compute-ironic) ironic_adoption: false +# provide the source cloud Ironic topology, for any cells with Ironic services +source_ironic_nodes: + default: + - name: standalone.localdomain + template: standalone # Run pre-adoption validation before the deploying run_pre_adoption_validation: true +# Adopt source cloud with additional compute cells v2. +# Defaults provided for a single-cell case. Enable for a multi-cell adoption. +multi_cell: false + # Supported storage backends for Cinder supported_volume_backends: #CUSTOMIZE_THIS - ceph @@ -72,6 +144,9 @@ supported_backup_backends: #CUSTOMIZE_THIS # Whether the adopted node will host compute services compute_adoption: true +# For a multi-node, should be 'overcloud' +os_cloud_name: standalone + # Where perform or not telemetry installation during adoption telemetry_adoption: true
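
Pulling the new variables together, a hypothetical multi-cell source cloud could extend the sample defaults above roughly as sketched below; all host names and addresses are placeholders, and 'default' keeps its name here, so default_cell_name does not need to change:

multi_cell: true
os_cloud_name: overcloud

cells:
  - default
  - cell1
  - cell2

# 'default' can keep its name when adopting multiple cells
default_cell_name: default

source_mariadb_ip:
  default: 172.17.0.2
  cell1: 172.17.0.50
  cell2: 172.17.0.51

source_galera_members:
  default:
    - name: overcloud-controller-0.localdomain
      ip: 172.17.0.2
  cell1:
    - name: overcloud-cell1-controller-0.localdomain
      ip: 172.17.0.50
  cell2:
    - name: overcloud-cell2-controller-0.localdomain
      ip: 172.17.0.51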