From a13a7a017b60696c1905deb920d5ff1e0e144327 Mon Sep 17 00:00:00 2001 From: Roberto Alfieri Date: Wed, 14 Aug 2024 17:34:15 +0200 Subject: [PATCH 01/39] [reproducer] Enable configuration of NTP service on controller-0 controller-0 was the only vm deployed by the framework where the NTP service wasn't enabled and that led to some issues while parsing log files. We are going to use the `timesync` role from the `linux-system-roles` collection just like we do in EDPM compute nodes in the `edpm-ansible` collection. By default the configuration will enable all the NTP servers provided by the dhcp server, plus `pool.ntp.org` server that can be overridden declaring the `cifmw_ntp_server` variable. Closes: https://issues.redhat.com/browse/OSPRH-9299 Signed-off-by: Roberto Alfieri --- docs/source/usage/01_usage.md | 1 + galaxy.yml | 1 + requirements.yml | 3 +++ roles/reproducer/tasks/configure_controller.yml | 16 ++++++++++++++++ 4 files changed, 21 insertions(+) diff --git a/docs/source/usage/01_usage.md b/docs/source/usage/01_usage.md index 0caed9361f..f69c55fbcc 100644 --- a/docs/source/usage/01_usage.md +++ b/docs/source/usage/01_usage.md @@ -71,6 +71,7 @@ are shared among multiple roles: - `cifmw_run_compute_compliance_scans`: (Bool) Specifies whether to run compliance scans on the first compute. Defaults to `false`. - `cifmw_run_id`: (String) CI Framework run identifier. This is used in libvirt_manager, to add some uniqueness to some types of virtual machines (anything that's not OCP, CRC nor controller). If not set, the Framework will generate a random string for you, and store it on the target host, in `{{ cifmw_basedir }}/artifacts/run-id` +- `cifmw_ntp_server`: (String) Specifies an ntp server to use. Now it's only used in `controller-0` environment. Defaults to `pool.ntp.org`. 
```{admonition} Words of caution :class: danger diff --git a/galaxy.yml b/galaxy.yml index 59e5b351ce..d0e53c0f95 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -85,6 +85,7 @@ dependencies: 'git+https://github.com/ansible-collections/mellanox.onyx': 'master' 'git+https://github.com/openshift/community.okd': 'main' 'git+https://github.com/ovirt/ovirt-ansible-collection': 'master' + 'fedora.linux_system_roles': '1.87.1' # The URL of the originating SCM repository repository: https://github.com/openstack-k8s-operators/ci-framework diff --git a/requirements.yml b/requirements.yml index 995e244dd0..f9043fc9a0 100644 --- a/requirements.yml +++ b/requirements.yml @@ -13,6 +13,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +roles: + - name: https://github.com/linux-system-roles/timesync.git + version: 1.9.0 collections: - name: https://github.com/ansible-collections/ansible.posix diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index bf4d14d997..e31b7ea8e0 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -9,6 +9,11 @@ ) | path_join }} +- name: Install linux-system-roles.timesync role + community.general.ansible_galaxy_install: + type: role + name: linux-system-roles.timesync + # The dynamic inventory sets the ansible_ssh_user to zuul once we get the proper # ssh configuration accesses set. 
- name: Configure controller-0 @@ -472,3 +477,14 @@ register: _sync_dep_install_result until: _sync_dep_install_result.finished retries: 20 + + - name: Configure the NTP service + become: true + vars: + timesync_dhcp_ntp_servers: true + timesync_ntp_servers: + - hostname: "{{ cifmw_ntp_server | default('pool.ntp.org') }}" + block: + - name: Include timesync role + ansible.builtin.include_role: + name: linux-system-roles.timesync From 917014b50569a02a3aca4f07f1587421a1a60a7f Mon Sep 17 00:00:00 2001 From: rebtoor <538845+rebtoor@users.noreply.github.com> Date: Sun, 22 Sep 2024 22:49:38 +0200 Subject: [PATCH 02/39] Revert "[reproducer] Enable configuration of NTP service on controller-0" This reverts commit a13a7a017b60696c1905deb920d5ff1e0e144327. --- docs/source/usage/01_usage.md | 1 - galaxy.yml | 1 - requirements.yml | 3 --- roles/reproducer/tasks/configure_controller.yml | 16 ---------------- 4 files changed, 21 deletions(-) diff --git a/docs/source/usage/01_usage.md b/docs/source/usage/01_usage.md index f69c55fbcc..0caed9361f 100644 --- a/docs/source/usage/01_usage.md +++ b/docs/source/usage/01_usage.md @@ -71,7 +71,6 @@ are shared among multiple roles: - `cifmw_run_compute_compliance_scans`: (Bool) Specifies whether to run compliance scans on the first compute. Defaults to `false`. - `cifmw_run_id`: (String) CI Framework run identifier. This is used in libvirt_manager, to add some uniqueness to some types of virtual machines (anything that's not OCP, CRC nor controller). If not set, the Framework will generate a random string for you, and store it on the target host, in `{{ cifmw_basedir }}/artifacts/run-id` -- `cifmw_ntp_server`: (String) Specifies an ntp server to use. Now it's only used in `controller-0` environment. Defaults to `pool.ntp.org`. 
```{admonition} Words of caution :class: danger diff --git a/galaxy.yml b/galaxy.yml index d0e53c0f95..59e5b351ce 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -85,7 +85,6 @@ dependencies: 'git+https://github.com/ansible-collections/mellanox.onyx': 'master' 'git+https://github.com/openshift/community.okd': 'main' 'git+https://github.com/ovirt/ovirt-ansible-collection': 'master' - 'fedora.linux_system_roles': '1.87.1' # The URL of the originating SCM repository repository: https://github.com/openstack-k8s-operators/ci-framework diff --git a/requirements.yml b/requirements.yml index f9043fc9a0..995e244dd0 100644 --- a/requirements.yml +++ b/requirements.yml @@ -13,9 +13,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -roles: - - name: https://github.com/linux-system-roles/timesync.git - version: 1.9.0 collections: - name: https://github.com/ansible-collections/ansible.posix diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index e31b7ea8e0..bf4d14d997 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -9,11 +9,6 @@ ) | path_join }} -- name: Install linux-system-roles.timesync role - community.general.ansible_galaxy_install: - type: role - name: linux-system-roles.timesync - # The dynamic inventory sets the ansible_ssh_user to zuul once we get the proper # ssh configuration accesses set. 
- name: Configure controller-0 @@ -477,14 +472,3 @@ register: _sync_dep_install_result until: _sync_dep_install_result.finished retries: 20 - - - name: Configure the NTP service - become: true - vars: - timesync_dhcp_ntp_servers: true - timesync_ntp_servers: - - hostname: "{{ cifmw_ntp_server | default('pool.ntp.org') }}" - block: - - name: Include timesync role - ansible.builtin.include_role: - name: linux-system-roles.timesync From 0254480933a147e30538dbb20cefa094f465408f Mon Sep 17 00:00:00 2001 From: Roberto Alfieri Date: Wed, 28 Aug 2024 17:27:28 +0200 Subject: [PATCH 03/39] Add logging for `setup_tests` and `setup_molecule` targets in Makefile In order to debug weird behaviors during CI jobs, it's useful to have logs for those targets. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index d992246dd6..e8f57436f1 100644 --- a/Makefile +++ b/Makefile @@ -53,11 +53,11 @@ new_role: create_new_role role_molecule ## Create a new Ansible role and related ##@ Setup steps .PHONY: setup_tests setup_tests: ## Setup the environment - bash scripts/setup_env + bash scripts/setup_env 2>&1 | tee $(LOG_DIR)/setup_env.log .PHONY: setup_molecule setup_molecule: setup_tests ## Setup molecule environment - bash scripts/setup_molecule + bash scripts/setup_molecule 2>&1 | tee $(LOG_DIR)/setup_molecule.log ##@ General testing .PHONY: tests From 5cb063d0f3e4bfb947047e696cc0fe0c0b68fd43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 19 Sep 2024 16:55:59 +0200 Subject: [PATCH 04/39] Fallback sushy-emulator to public net if no ctlplane If ctlplane network is not enabled/attached to the controller-0 node fallback the sushy.utility host record to the public net. 
This allows me to configure controller with only public net: ``` cifmw_libvirt_manager_configuration: vms: controller: nets: - ocpbm cifmw_networking_definition: instances: controller-0: networks: {} ``` --- roles/reproducer/tasks/prepare_networking.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/reproducer/tasks/prepare_networking.yml b/roles/reproducer/tasks/prepare_networking.yml index ffe9d29203..237f43a5d7 100644 --- a/roles/reproducer/tasks/prepare_networking.yml +++ b/roles/reproducer/tasks/prepare_networking.yml @@ -216,8 +216,8 @@ - "sushy.utility" - "controller-0.utility" ips: - - "{{ _controller_net.ctlplane.ip_v4 | default('') }}" - - "{{ _controller_net.ctlplane.ip_v6 | default('') }}" + - "{{ _controller_net.ctlplane.ip_v4 | default(_controller_net[_pub_net].ip_v4) | default('') }}" + - "{{ _controller_net.ctlplane.ip_v6 | default(_controller_net[_pub_net].ip_v6) | default('') }}" state: present ansible.builtin.include_role: name: "dnsmasq" From 673c941665145d0eabaa26b5215811c3f000e880 Mon Sep 17 00:00:00 2001 From: Lewis Denny Date: Tue, 3 Sep 2024 10:07:21 +1000 Subject: [PATCH 05/39] [Reproducer] Add support for disabling ocp overlays This patch adds a new top level variable `cifmw_use_ocp_overlay` used to control whether the OCP VMs created by dev_scripts via the reproducer role use image overlays or not. Prior to this patch the use of image overlays was not configurable, while image overlays are useful for local deployments, in CI they are not used. The downside to image overlays is the process to create them takes a very long time. The OCP cluster needs come up and stabilize, then we shut the VMs down and copy the images to use as base images, we then need to start the OCP VMs again and wait for it to stabilize, this is all wasted time because in CI environments we start fresh every time. 
This patch also removes the undocumented internal variable `cifmw_use_reproducer` and it's use replaced with `cifmw_use_ocp_overlay` Jira: https://issues.redhat.com/browse/OSPRH-7771 --- docs/source/usage/01_usage.md | 1 + roles/devscripts/tasks/300_post.yml | 40 ++----------------- roles/libvirt_manager/tasks/create_vms.yml | 11 ++--- roles/libvirt_manager/tasks/deploy_layout.yml | 2 + roles/libvirt_manager/tasks/ocp_layout.yml | 7 +++- roles/reproducer/tasks/ocp_layout.yml | 2 - 6 files changed, 16 insertions(+), 47 deletions(-) diff --git a/docs/source/usage/01_usage.md b/docs/source/usage/01_usage.md index 0caed9361f..70707e7f17 100644 --- a/docs/source/usage/01_usage.md +++ b/docs/source/usage/01_usage.md @@ -66,6 +66,7 @@ are shared among multiple roles: - `cifmw_nolog`: (Bool) Toggle `no_log` value for selected tasks. Defaults to `true` (hiding those logs by default). - `cifmw_parent_scenario`: (String or List(String)) path to existing scenario/parameter file to inherit from. - `cifmw_configure_switches`: (Bool) Specifies whether switches should be configured. Computes in `reproducer.yml` playbook. Defaults to `false`. +- `cifmw_use_ocp_overlay`: (Boolean) Specifies whether OCP nodes deployed via devscripts should use overlay images. Using overlay images speeds up the redeployment when using the reproducer role locally but in CI each job is cleaned up and redeployed. Creating the overlay image takes time so should be disabled when not used. Defaults to `true`. - `cifmw_crc_default_network`: (String) name of the untagged network used to address DNS on the crc node. Default is `default`. - `cifmw_run_operators_compliance_scans`: (Bool) Specifies whether to run operator compliance scans. Defaults to `false`. - `cifmw_run_compute_compliance_scans`: (Bool) Specifies whether to run compliance scans on the first compute. Defaults to `false`. 
diff --git a/roles/devscripts/tasks/300_post.yml b/roles/devscripts/tasks/300_post.yml index 814e9608fd..6e03cd8d23 100644 --- a/roles/devscripts/tasks/300_post.yml +++ b/roles/devscripts/tasks/300_post.yml @@ -26,42 +26,10 @@ - not cifmw_devscripts_ocp_online | bool ansible.builtin.import_tasks: set_cluster_fact.yml -- name: Prepare for disk overlay configuration. - tags: - - devscripts_deploy +- name: Prepare for disk overlay configuration when: - not cifmw_devscripts_ocp_comply | bool + - cifmw_use_ocp_overlay | default(true) | bool + tags: + - devscripts_deploy ansible.builtin.include_tasks: 310_prepare_overlay.yml - -- name: Bringing cluster online. - when: - - not cifmw_use_reproducer | default(false) | bool - block: - - name: Deploy layout on target host - tags: - - libvirt_layout - when: - - not cifmw_devscripts_ocp_comply | bool - ansible.builtin.include_role: - name: libvirt_manager - tasks_from: deploy_layout - - - name: Apply VLAN configuration for vnet interfaces. - tags: - - devscripts_post - when: - - cifmw_libvirt_manager_configuration_gen.networks is defined - become: true - cifmw.general.bridge_vlan: - networks: >- - {{ - cifmw_libvirt_manager_configuration_gen.networks.keys() | list - }} - failed_when: false - - - name: Ensure the OpenShift cluster is accessible. 
- tags: - - devscripts_post - when: - - not cifmw_devscripts_ocp_online | bool - ansible.builtin.include_tasks: 330_wait_ocp.yml diff --git a/roles/libvirt_manager/tasks/create_vms.yml b/roles/libvirt_manager/tasks/create_vms.yml index 4f8b20d13e..0e586abe8d 100644 --- a/roles/libvirt_manager/tasks/create_vms.yml +++ b/roles/libvirt_manager/tasks/create_vms.yml @@ -29,12 +29,12 @@ _workload: "{{ cifmw_libvirt_manager_basedir }}/workload" _img_dir: "{{ cifmw_libvirt_manager_basedir }}/images" _chdir: >- - {{ (is_base_img | default(false) | bool) | ternary(_img_dir, _workload) }} + {{ + (_is_base_img | default(false) | bool) | + ansible.builtin.ternary(_img_dir, _workload) + }} block: - name: "Create VM image for {{ vm }}" - vars: - _vm_img: >- - {{ vm }}.qcow2 ansible.builtin.command: cmd: >- qemu-img create @@ -48,9 +48,6 @@ chdir: "{{ _chdir }}" - name: "Ensure file ownership and rights for {{ vm }}" - vars: - _vm_img: >- - {{ vm }}.qcow2 ansible.builtin.file: path: "{{ (_chdir, _vm_img) | path_join }}" group: "qemu" diff --git a/roles/libvirt_manager/tasks/deploy_layout.yml b/roles/libvirt_manager/tasks/deploy_layout.yml index c10506bd81..fc590981e8 100644 --- a/roles/libvirt_manager/tasks/deploy_layout.yml +++ b/roles/libvirt_manager/tasks/deploy_layout.yml @@ -198,6 +198,8 @@ }} pub_key: "{{ pub_ssh_key.content | b64decode }}" priv_key: "{{ priv_ssh_key.content | b64decode }}" + _vm_img: >- + {{ vm }}.qcow2 ansible.builtin.include_tasks: file: create_vms.yml loop: "{{ cifmw_libvirt_manager_all_vms | dict2items }}" diff --git a/roles/libvirt_manager/tasks/ocp_layout.yml b/roles/libvirt_manager/tasks/ocp_layout.yml index 5ffbbee081..f7333c07ae 100644 --- a/roles/libvirt_manager/tasks/ocp_layout.yml +++ b/roles/libvirt_manager/tasks/ocp_layout.yml @@ -6,7 +6,7 @@ notify: Restart firewalld ansible.builtin.command: cmd: >- - firewall-cmd --permanent --zone libvirt --add-forward + firewall-cmd --permanent --zone libvirt --add-forward - name: Enable masquerading for 
public traffic when: cifmw_libvirt_manager_firewalld_default_zone_masquerade | default(true) | bool @@ -64,6 +64,7 @@ state: directory loop: - "{{ cifmw_libvirt_manager_basedir }}/images" + - "{{ cifmw_libvirt_manager_basedir }}/workload" - "{{ cifmw_libvirt_manager_ocp_pool_dir }}" - name: Create pool in libvirt @@ -79,7 +80,9 @@ {{ _ocp_layout.vms[vm_type] }} - is_base_img: true + _vm_img: >- + {{ vm }}.qcow2 + _is_base_img: "{{ cifmw_use_ocp_overlay | default(true) | bool }}" ansible.builtin.include_role: name: "libvirt_manager" tasks_from: "create_vms.yml" diff --git a/roles/reproducer/tasks/ocp_layout.yml b/roles/reproducer/tasks/ocp_layout.yml index d97798c50b..b93f0753a4 100644 --- a/roles/reproducer/tasks/ocp_layout.yml +++ b/roles/reproducer/tasks/ocp_layout.yml @@ -336,8 +336,6 @@ permanent: true - name: Run devscripts role - vars: - cifmw_use_reproducer: true ansible.builtin.include_role: name: devscripts From d6272538e0b54f09b0415fc4e4d6f02fdbaa455d Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Wed, 21 Aug 2024 13:07:17 +0530 Subject: [PATCH 06/39] [VA HCI]Adds cindervolume and manilashare names for update_containers HCI job, ceph cinder volume and share1 manila share is used. We need to add the cindervolume and manila share name so that proper containers gets updated by update_containers role. 
Signed-off-by: Chandan Kumar (raukadah) --- roles/update_containers/defaults/main.yml | 6 ++++-- scenarios/reproducers/va-hci.yml | 6 ++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/roles/update_containers/defaults/main.yml b/roles/update_containers/defaults/main.yml index 4c6f915e59..95142c4136 100644 --- a/roles/update_containers/defaults/main.yml +++ b/roles/update_containers/defaults/main.yml @@ -39,8 +39,10 @@ cifmw_update_containers_org: "podified-antelope-centos9" cifmw_update_containers_tag: "current-podified" cifmw_update_containers_openstack: false cifmw_update_containers_rollback: false -cifmw_update_containers_cindervolumes: [] -cifmw_update_containers_manilashares: [] +cifmw_update_containers_cindervolumes: + - default +cifmw_update_containers_manilashares: + - default # cifmw_update_containers_ansibleee_image_url: # cifmw_update_containers_edpm_image_url: # cifmw_update_containers_ipa_image_url: diff --git a/scenarios/reproducers/va-hci.yml b/scenarios/reproducers/va-hci.yml index 7351042fa0..422241a29a 100644 --- a/scenarios/reproducers/va-hci.yml +++ b/scenarios/reproducers/va-hci.yml @@ -114,3 +114,9 @@ cifmw_ceph_daemons_layout: dashboard_enabled: false cephfs_enabled: true ceph_nfs_enabled: false + +# Vars related to update_containers cinder volume and manila share +cifmw_update_containers_cindervolumes: + - ceph +cifmw_update_containers_manilashares: + - share1 From a865d642e4b478b309a481a421dc60c2fdce7994 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Mon, 23 Sep 2024 21:24:41 +0530 Subject: [PATCH 07/39] Run trigger job on net_map changes Signed-off-by: Chandan Kumar (raukadah) --- zuul.d/trigger_jobs.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/zuul.d/trigger_jobs.yaml b/zuul.d/trigger_jobs.yaml index f5db1641ad..58c6c8c0a8 100644 --- a/zuul.d/trigger_jobs.yaml +++ b/zuul.d/trigger_jobs.yaml @@ -28,6 +28,7 @@ - 
^roles/ssh_jumper/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/virtualbmc/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^playbooks/06-deploy-architecture.yml + - ^plugins/module_utils/net_map/(networking_definition.py|exceptions.py) vars: # It will create a file trigger_va_hci trigger_job_name: "va_hci" From 8d239cfef4e27788102e3da7d344d82e8e543ae5 Mon Sep 17 00:00:00 2001 From: mkatari Date: Tue, 17 Sep 2024 12:08:40 +0530 Subject: [PATCH 08/39] Fix rbd pools trash purge scheduling This patch ensures to use the right conditions and parameters while trash purge scheduling on the rbd pools. Jira: https://issues.redhat.com/browse/OSPRH-8504 --- playbooks/ceph.yml | 1 + roles/cifmw_cephadm/tasks/pools.yml | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 09ed924786..7ad7b219cb 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -279,6 +279,7 @@ pg_autoscale_mode: true target_size_ratio: 0.3 application: rbd + trash_purge_enabled: true - name: backups pg_autoscale_mode: true target_size_ratio: 0.1 diff --git a/roles/cifmw_cephadm/tasks/pools.yml b/roles/cifmw_cephadm/tasks/pools.yml index 5c34cc7eda..d88ff0525d 100644 --- a/roles/cifmw_cephadm/tasks/pools.yml +++ b/roles/cifmw_cephadm/tasks/pools.yml @@ -47,8 +47,9 @@ - name: Configure the RBD trash purge scheduler when: - - cifmw_enabled_services | default([]) | intersect(['cinder_volume']) - cifmw_cephadm_enable_trash_scheduler | default(false) + - cifmw_cephadm_pools is defined + - cifmw_cephadm_pools | length > 0 block: - name: Get the RBD ceph_cli ansible.builtin.include_tasks: ceph_cli.yml @@ -56,10 +57,11 @@ ceph_command: rbd - name: Set trash interval + when: item.trash_purge_enabled | default(false) ansible.builtin.command: - cmd: | - {{ cifmw_cephadm_ceph_cli }} trash purge schedule add \ - {{ cifmw_cephadm_rbd_trash_interval | 
default(15) }} --pool {{ item }} + cmd: >- + {{ cifmw_cephadm_ceph_cli }} trash purge schedule add + {{ cifmw_cephadm_rbd_trash_interval | default(15) }} --pool {{ item.name }} changed_when: false become: true - loop: "{{ [ cinder_pool.name | default('volumes') ] + cinder_pool.cinder_extra_pools | default([]) }}" + loop: "{{ cifmw_cephadm_pools | default([]) }}" From e89ddd8b750e8dae9654e4a68de60673335cddff Mon Sep 17 00:00:00 2001 From: Jiri Podivin Date: Thu, 19 Sep 2024 10:08:50 +0200 Subject: [PATCH 09/39] Adding toolbelt catalog metadata file Signed-off-by: Jiri Podivin --- docs/toolbelt-catalog.yaml | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 docs/toolbelt-catalog.yaml diff --git a/docs/toolbelt-catalog.yaml b/docs/toolbelt-catalog.yaml new file mode 100644 index 0000000000..b31edad3ca --- /dev/null +++ b/docs/toolbelt-catalog.yaml @@ -0,0 +1,41 @@ +# Catalog entry for Backstage [backstage.io] + +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: ci-framework + title: ci-framework + description: | + CI Framework - used for CI, QE and Devs to run OSP 18+ jobs in a converged way + annotations: + github.com/project-slug: openstack-k8s-operators/ci-framework + feedback/type: JIRA + feedback/host: https://issues.redhat.com + jira/project-key: OSPRH + links: + - title: docs + url: https://ci-framework.readthedocs.io/en/latest/ + icon: docs + - title: code + url: https://github.com/openstack-k8s-operators/ci-framework + icon: github + - title: #osp-podified-ci-support + url: https://app.slack.com/client/E030G10V24F/C03MD4LG22Z + icon: chat + tags: + - testing + - test-execution + - test-framework + - test-management + - test-reporting + - provisioning + - python + - openstack + - openshift + - cloud + - continuous-integration + namespace: quality-community +spec: + type: tool + owner: group:redhat/openstack-k8s-operators-ci + lifecycle: production From 531e00179197366e4d7a7eb68ff9ec0f142a0487 Mon 
Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 13 Aug 2024 09:47:41 -0400 Subject: [PATCH 10/39] Configure Ceph RGW on a dedicated Swift network When Ceph RGW is used, an endpoint for Swift storage is hosted not in a pod on k8s but on an EDPM node. Thus, a service hosted on an EDPM node will need to be accessed from a separate network. This patch adds the Swift storage network (swift) with VLAN 25 and range 172.22.0.0/24 in the reproducers networking-definition.yml. It also adds a multus range for this network so that the Tempest pod can access this network for testing. The Swift storage network is added to the OCP nodes for the same reason. The swift network is also added to the ci playbook networking-env-definition for the rhoso-architecture-validate-hci github-check. The Ceph playbook cifmw_cephadm_rgw_network parameter may then be set to the new swift network so that RGW is then configured on a separate network. If this network parameter is not set, then the storage network is used. 
Signed-off-by: John Fulton --- .../files/networking-env-definition.yml | 87 +++++++++++++++++++ playbooks/ceph.yml | 24 ++++- roles/cifmw_cephadm/tasks/check_vip.yml | 2 +- .../reproducers/networking-definition.yml | 17 ++++ 4 files changed, 125 insertions(+), 5 deletions(-) diff --git a/ci/playbooks/files/networking-env-definition.yml b/ci/playbooks/files/networking-env-definition.yml index 201163757a..30915c622d 100644 --- a/ci/playbooks/files/networking-env-definition.yml +++ b/ci/playbooks/files/networking-env-definition.yml @@ -151,6 +151,15 @@ instances: parent_interface: eth1 skip_nm: false vlan_id: 22 + swift: + interface_name: eth1.25 + ip_v4: 172.22.0.100 + mac_addr: '52:54:00:0b:1c:e7' + mtu: 1500 + network_name: swift + parent_interface: eth1 + skip_nm: false + vlan_id: 25 compute-1: hostname: compute-1 name: compute-1 @@ -189,6 +198,15 @@ instances: parent_interface: eth1 skip_nm: false vlan_id: 22 + swift: + interface_name: eth1.25 + ip_v4: 172.22.0.101 + mac_addr: '52:54:00:0b:1c:e8' + mtu: 1500 + network_name: swift + parent_interface: eth1 + skip_nm: false + vlan_id: 25 compute-2: hostname: compute-2 name: compute-2 @@ -227,6 +245,15 @@ instances: parent_interface: eth1 skip_nm: false vlan_id: 22 + swift: + interface_name: eth1.25 + ip_v4: 172.22.0.102 + mac_addr: '52:54:00:0b:1c:e9' + mtu: 1500 + network_name: swift + parent_interface: eth1 + skip_nm: false + vlan_id: 25 controller-0: hostname: controller-0 name: controller-0 @@ -336,6 +363,15 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 + swift: + interface_name: enp6s0.25 + ip_v4: 172.22.0.10 + mac_addr: '52:54:00:18:a0:b6' + mtu: 1500 + network_name: swift + parent_interface: enp6s0 + skip_nm: false + vlan_id: 25 ocp-master-1: hostname: ocp-master-1 name: ocp-master-1 @@ -374,6 +410,15 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 + swift: + interface_name: enp6s0.25 + ip_v4: 172.22.0.11 + mac_addr: '52:54:00:18:a0:b7' + mtu: 1500 + network_name: swift + 
parent_interface: enp6s0 + skip_nm: false + vlan_id: 25 ocp-master-2: hostname: ocp-master-2 name: ocp-master-2 @@ -412,6 +457,15 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 + swift: + interface_name: enp6s0.25 + ip_v4: 172.22.0.12 + mac_addr: '52:54:00:18:a0:b8' + mtu: 1500 + network_name: swift + parent_interface: enp6s0 + skip_nm: false + vlan_id: 25 networks: ctlplane: dns_v4: @@ -598,3 +652,36 @@ networks: start_host: 100 ipv6_ranges: [] vlan_id: 22 + swift: + dns_v4: [] + dns_v6: [] + mtu: 1496 + network_name: swift + network_v4: 172.22.0.0/24 + search_domain: swift.example.com + tools: + metallb: + ipv4_ranges: + - end: 172.22.0.90 + end_host: 90 + length: 11 + start: 172.22.0.80 + start_host: 80 + ipv6_ranges: [] + multus: + ipv4_ranges: + - end: 172.22.0.70 + end_host: 70 + length: 41 + start: 172.22.0.30 + start_host: 30 + ipv6_ranges: [] + netconfig: + ipv4_ranges: + - end: 172.22.0.250 + end_host: 250 + length: 151 + start: 172.22.0.100 + start_host: 100 + ipv6_ranges: [] + vlan_id: 25 diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 7ad7b219cb..0a2bea363b 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -339,27 +339,43 @@ # public network always exist because is provided by the ceph_spec role - name: Get Storage network range ansible.builtin.set_fact: - cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" + cifmw_cephadm_storage_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" + + - name: Set RGW network range to storage network only if it was not provided + ansible.builtin.set_fact: + cifmw_cephadm_rgw_network: "{{ cifmw_cephadm_storage_network }}" + when: + - cifmw_cephadm_rgw_network is not defined or + cifmw_cephadm_rgw_network | length == 0 - name: Set IP address of first monitor ansible.builtin.set_fact: - cifmw_cephadm_first_mon_ip: "{{ 
hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }}" + cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | first }}" vars: this_host: "{{ _target_hosts | first }}" - name: Assert if any EDPM nodes n/w interface is missing in storage network + ansible.builtin.assert: + that: + - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | length > 0 + fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_storage_network }}" + loop: "{{ _target_hosts }}" + + - name: Assert if any EDPM nodes n/w interface is missing in RGW network ansible.builtin.assert: that: - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0 fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}" loop: "{{ _target_hosts }}" + when: + - cifmw_cephadm_rgw_network != cifmw_cephadm_storage_network - - name: Get already assigned IP addresses + - name: Get already assigned RGW IP addresses ansible.builtin.set_fact: ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}" loop: "{{ _target_hosts }}" - # cifmw_cephadm_vip is the VIP reserved in the Storage network + # cifmw_cephadm_vip is the VIP reserved in the RGW network - name: Set VIP var as empty string ansible.builtin.set_fact: cifmw_cephadm_vip: "" diff --git a/roles/cifmw_cephadm/tasks/check_vip.yml b/roles/cifmw_cephadm/tasks/check_vip.yml index 0714510e7a..d92bcc0765 100644 --- a/roles/cifmw_cephadm/tasks/check_vip.yml +++ b/roles/cifmw_cephadm/tasks/check_vip.yml @@ -22,7 +22,7 @@ ansible.builtin.set_fact: count: "{{ 2 if count is undefined else count | int + 2 }}" - - name: Get an IP address from the Storage network + - name: Get an IP address from the RGW network ansible.builtin.set_fact: cur_ip: "{{ 
cifmw_cephadm_rgw_network | ansible.utils.next_nth_usable(count) }}" diff --git a/scenarios/reproducers/networking-definition.yml b/scenarios/reproducers/networking-definition.yml index 4f0ba0ed26..671f83263f 100644 --- a/scenarios/reproducers/networking-definition.yml +++ b/scenarios/reproducers/networking-definition.yml @@ -91,6 +91,19 @@ cifmw_networking_definition: end: 250 vlan: 23 mtu: 1500 + swift: + network: "172.22.0.0/24" + tools: + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + vlan: 25 + mtu: 1500 group-templates: ocps: @@ -106,6 +119,8 @@ cifmw_networking_definition: trunk-parent: ctlplane storage: trunk-parent: ctlplane + swift: + trunk-parent: ctlplane ocp_workers: network-template: range: @@ -127,6 +142,8 @@ cifmw_networking_definition: trunk-parent: ctlplane storagemgmt: trunk-parent: ctlplane + swift: + trunk-parent: ctlplane cephs: network-template: range: From 86c92f5644ae6d71035377d8ec8c7433223647bf Mon Sep 17 00:00:00 2001 From: lkuchlan Date: Thu, 19 Sep 2024 13:02:14 +0300 Subject: [PATCH 11/39] [test-operator] Add wait_timeout for removing CRD instances in test-operator Follow-up on PR [1]: Still encountering timeout issues during the deletion of the test-operator. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2305 --- roles/test_operator/tasks/run-test-operator-job.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/test_operator/tasks/run-test-operator-job.yml b/roles/test_operator/tasks/run-test-operator-job.yml index 7ef0c308aa..05d60de071 100644 --- a/roles/test_operator/tasks/run-test-operator-job.yml +++ b/roles/test_operator/tasks/run-test-operator-job.yml @@ -228,6 +228,7 @@ name: "{{ test_operator_job_name }}" namespace: "{{ cifmw_test_operator_namespace }}" wait: true + wait_timeout: 600 - name: Delete CRD for {{ run_test_fw }} kubernetes.core.k8s: @@ -240,6 +241,7 @@ name: "{{ test_operator_crd_name }}" namespace: "{{ cifmw_test_operator_namespace }}" wait: true + wait_timeout: 600 - name: Delete test-operator-logs-pod kubernetes.core.k8s: @@ -252,6 +254,7 @@ name: "test-operator-logs-pod-{{ run_test_fw }}" namespace: "{{ cifmw_test_operator_namespace }}" wait: true + wait_timeout: 600 when: - cifmw_test_operator_cleanup | bool and not cifmw_test_operator_dry_run | bool or cifmw_test_operator_delete_logs_pod | bool From 5b499ca3867ac36eafd7de43d6a929ec0f71fd29 Mon Sep 17 00:00:00 2001 From: Pablo Rodriguez Nava Date: Wed, 5 Jun 2024 15:59:51 +0200 Subject: [PATCH 12/39] [networking_mapper] Add the network to ranges validation message --- plugins/module_utils/net_map/exceptions.py | 13 ++++++----- .../net_map/networking_definition.py | 4 ++++ ...t_networking_definitions_group_template.py | 22 +++++++++++++++++++ 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/plugins/module_utils/net_map/exceptions.py b/plugins/module_utils/net_map/exceptions.py index 2a7fcb806f..eaff49ab8c 100644 --- a/plugins/module_utils/net_map/exceptions.py +++ b/plugins/module_utils/net_map/exceptions.py @@ -6,9 +6,10 @@ class NetworkMappingError(Exception, ansible_encoding.RawConvertibleObject): - def __init__(self, message) -> None: + def __init__(self, message, **kwargs) -> None: 
super().__init__(message) self.message = message + self.details = kwargs def to_raw(self) -> typing.Dict[str, typing.Any]: return ansible_encoding.decode_ansible_raw(vars(self)) @@ -22,8 +23,9 @@ def __init__( invalid_value=None, parent_name=None, parent_type=None, + **kwargs, ) -> None: - super().__init__(message) + super().__init__(message, **kwargs) self.field = field self.invalid_value = invalid_value self.parent_name = parent_name @@ -31,8 +33,8 @@ def __init__( class HostNetworkRangeCollisionValidationError(NetworkMappingValidationError): - def __init__(self, message, range_1=None, range_2=None) -> None: - super().__init__(message) + def __init__(self, message, range_1=None, range_2=None, **kwargs) -> None: + super().__init__(message, **kwargs) self.range_1 = range_1 self.range_2 = range_2 @@ -48,8 +50,9 @@ def __init__( invalid_value=None, parent_name=None, parent_type=None, + **kwargs, ) -> None: - super().__init__(message) + super().__init__(message, **kwargs) self.field = field self.invalid_value = invalid_value self.parent_name = parent_name diff --git a/plugins/module_utils/net_map/networking_definition.py b/plugins/module_utils/net_map/networking_definition.py index 067b6e6f79..e82bd81524 100644 --- a/plugins/module_utils/net_map/networking_definition.py +++ b/plugins/module_utils/net_map/networking_definition.py @@ -170,6 +170,7 @@ def _validate_fields_one_of( parent_type: str = None, alone_field: str = None, mandatory: bool = False, + **kwargs, ) -> bool: fields_present = any( field_name in raw_definition.keys() for field_name in fields_list @@ -183,6 +184,7 @@ def _validate_fields_one_of( f"at least one of {mandatory_fields} must be provided", parent_name=parent_name, parent_type=parent_type, + **kwargs, ) if alone_field and alone_field in raw_definition: rest = [ @@ -198,6 +200,7 @@ def _validate_fields_one_of( invalid_value=raw_definition[alone_field], parent_name=parent_name, parent_type=parent_type, + **kwargs, ) return True @@ -2100,6 +2103,7 
@@ def __parse_raw_net_ranges( parent_name=self.__group_name, parent_type=self.__OBJECT_TYPE_NAME, alone_field=self.__FIELD_NETWORK_RANGE, + network_name=network_definition.name, ) if not ranges_present: return None, None diff --git a/tests/unit/module_utils/net_map/test_networking_definitions_group_template.py b/tests/unit/module_utils/net_map/test_networking_definitions_group_template.py index 7de61ecacc..f50d2dfb70 100644 --- a/tests/unit/module_utils/net_map/test_networking_definitions_group_template.py +++ b/tests/unit/module_utils/net_map/test_networking_definitions_group_template.py @@ -266,6 +266,28 @@ def test_group_template_definition_parse_invalid_range_fail(): assert exc_info.value.field == "length" assert exc_info.value.invalid_value == 0 + # Test mix of single and dual-stack fields + invalid_range = {"start": 10, "length": 10} + group_template_definition_raw = { + "networks": { + first_net.name: { + "range": invalid_range, + "range-v4": {"start": 100, "length": 0}, + }, + }, + } + + with pytest.raises(exceptions.NetworkMappingValidationError) as exc_info: + networking_definition.GroupTemplateDefinition( + inventory_group, group_template_definition_raw, networks_definitions + ) + assert exc_info.value.field == "range" + assert exc_info.value.invalid_value == invalid_range + assert "network_name" in exc_info.value.details + assert exc_info.value.details["network_name"] == first_net.name + assert exc_info.value.parent_name == inventory_group + assert exc_info.value.parent_type == "host-template" + def test_group_template_definition_parse_invalid_net_fail(): # Test invalid start value From 36575aead53ae64f1ffea71553c818e580e5cc39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Podiv=C3=ADn?= <66251151+jpodivin@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:23:57 +0200 Subject: [PATCH 13/39] Link title needs quotes --- docs/toolbelt-catalog.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/toolbelt-catalog.yaml 
b/docs/toolbelt-catalog.yaml index b31edad3ca..d2576823f1 100644 --- a/docs/toolbelt-catalog.yaml +++ b/docs/toolbelt-catalog.yaml @@ -19,7 +19,7 @@ metadata: - title: code url: https://github.com/openstack-k8s-operators/ci-framework icon: github - - title: #osp-podified-ci-support + - title: "#osp-podified-ci-support" url: https://app.slack.com/client/E030G10V24F/C03MD4LG22Z icon: chat tags: From 575af58b41397254d2ed7f45e1a153f9f0755547 Mon Sep 17 00:00:00 2001 From: Roberto Alfieri Date: Mon, 23 Sep 2024 14:01:41 +0200 Subject: [PATCH 14/39] [cifmw_ntp] Added `cifmw_ntp` role and used it to setup chrony on controller-0 --- docs/dictionary/en-custom.txt | 1 + roles/cifmw_ntp/README.md | 25 +++++++ roles/cifmw_ntp/defaults/main.yml | 23 +++++++ roles/cifmw_ntp/handlers/main.yml | 21 ++++++ roles/cifmw_ntp/meta/main.yml | 30 +++++++++ roles/cifmw_ntp/molecule/default/cleanup.yml | 24 +++++++ roles/cifmw_ntp/molecule/default/converge.yml | 23 +++++++ roles/cifmw_ntp/molecule/default/molecule.yml | 17 +++++ roles/cifmw_ntp/molecule/default/verify.yml | 67 +++++++++++++++++++ roles/cifmw_ntp/molecule/vars.yml | 17 +++++ roles/cifmw_ntp/tasks/cleanup.yml | 34 ++++++++++ roles/cifmw_ntp/tasks/main.yml | 53 +++++++++++++++ roles/cifmw_ntp/templates/chrony.conf.j2 | 5 ++ .../reproducer/tasks/configure_controller.yml | 4 ++ zuul.d/molecule.yaml | 11 +++ zuul.d/projects.yaml | 1 + 16 files changed, 356 insertions(+) create mode 100644 roles/cifmw_ntp/README.md create mode 100644 roles/cifmw_ntp/defaults/main.yml create mode 100644 roles/cifmw_ntp/handlers/main.yml create mode 100644 roles/cifmw_ntp/meta/main.yml create mode 100644 roles/cifmw_ntp/molecule/default/cleanup.yml create mode 100644 roles/cifmw_ntp/molecule/default/converge.yml create mode 100644 roles/cifmw_ntp/molecule/default/molecule.yml create mode 100644 roles/cifmw_ntp/molecule/default/verify.yml create mode 100644 roles/cifmw_ntp/molecule/vars.yml create mode 100644 roles/cifmw_ntp/tasks/cleanup.yml create 
mode 100644 roles/cifmw_ntp/tasks/main.yml create mode 100644 roles/cifmw_ntp/templates/chrony.conf.j2 diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 78550f06e8..3b633102e5 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -496,6 +496,7 @@ tempestconf testcases testenv timestamper +timesync tldca tls tmp diff --git a/roles/cifmw_ntp/README.md b/roles/cifmw_ntp/README.md new file mode 100644 index 0000000000..f7c71248c9 --- /dev/null +++ b/roles/cifmw_ntp/README.md @@ -0,0 +1,25 @@ +# cifmw_ntp + +This role allows to install and configure an NTP service (chrony) on a host. +It's a *heavily* stripped down version of the [timesync](https://github.com/linux-system-roles/timesync) role. +It shouldn't be used outside the cifmw scope because it only sets the NTP server/pool to use. + +## Privilege escalation + +Privilege escalation is needed to install packages, render the templates in the /etc directory and deal with systemd services. + +## Parameters +* `cifmw_ntp_servers` (list) List of NTP servers or pool of NTP servers. It defaults to `pool.ntp.org` if the (global) variable `cifmw_ntp_server` isn't defined. +* `cifmw_ntp_chrony_conf_file` (string) The path of the chrony configuration file. It defaults to `/etc/chrony.conf`. +* `cifmw_ntp_chrony_extra_conf_file` (string) The path of the custom configuration file for chrony. It defaults to `/etc/chrony-cifmw.conf`. + +## Examples + +``` +- name: Configure chrony on controller-0 + hosts: controller-0 + vars: + cifmw_ntp_server: "custom.ntp.server" + roles: + - role: "cifmw_ntp" +``` diff --git a/roles/cifmw_ntp/defaults/main.yml b/roles/cifmw_ntp/defaults/main.yml new file mode 100644 index 0000000000..3c7b0af488 --- /dev/null +++ b/roles/cifmw_ntp/defaults/main.yml @@ -0,0 +1,23 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# All variables intended for modification should be placed in this file. +# All variables within this role should have a prefix of "cifmw_ntp" + +cifmw_ntp_servers: "{{ [ cifmw_ntp_server | default('pool.ntp.org') ] | list }}" +cifmw_ntp_chrony_conf_file: /etc/chrony.conf +cifmw_ntp_chrony_extra_conf_file: /etc/chrony-cifmw.conf diff --git a/roles/cifmw_ntp/handlers/main.yml b/roles/cifmw_ntp/handlers/main.yml new file mode 100644 index 0000000000..7abca89274 --- /dev/null +++ b/roles/cifmw_ntp/handlers/main.yml @@ -0,0 +1,21 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Restart chrony + become: true + ansible.builtin.systemd_service: + name: chronyd.service + state: restarted diff --git a/roles/cifmw_ntp/meta/main.yml b/roles/cifmw_ntp/meta/main.yml new file mode 100644 index 0000000000..827bc3a70d --- /dev/null +++ b/roles/cifmw_ntp/meta/main.yml @@ -0,0 +1,30 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +galaxy_info: + author: CI Framework + description: CI Framework Role -- cifmw_ntp + company: Red Hat + license: Apache-2.0 + min_ansible_version: "2.14" + namespace: cifmw + galaxy_tags: + - cifmw + +# List your role dependencies here, one per line. Be sure to remove the '[]' above, +# if you add dependencies to this list. +dependencies: [] diff --git a/roles/cifmw_ntp/molecule/default/cleanup.yml b/roles/cifmw_ntp/molecule/default/cleanup.yml new file mode 100644 index 0000000000..9f801a92f1 --- /dev/null +++ b/roles/cifmw_ntp/molecule/default/cleanup.yml @@ -0,0 +1,24 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +- name: Cleanup + hosts: all + tasks: + - name: Cleanup chrony role + ansible.builtin.include_role: + name: cifmw_ntp + tasks_from: cleanup.yml diff --git a/roles/cifmw_ntp/molecule/default/converge.yml b/roles/cifmw_ntp/molecule/default/converge.yml new file mode 100644 index 0000000000..15bbf5a382 --- /dev/null +++ b/roles/cifmw_ntp/molecule/default/converge.yml @@ -0,0 +1,23 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +- name: Converge + hosts: all + vars_files: + - ../vars.yml + roles: + - role: "cifmw_ntp" diff --git a/roles/cifmw_ntp/molecule/default/molecule.yml b/roles/cifmw_ntp/molecule/default/molecule.yml new file mode 100644 index 0000000000..6c31fa930b --- /dev/null +++ b/roles/cifmw_ntp/molecule/default/molecule.yml @@ -0,0 +1,17 @@ +--- +# Mainly used to override the defaults set in .config/molecule/ +# By default, it uses the "config_podman.yml" - in CI, it will use +# "config_local.yml". 
+ +provisioner: + name: ansible + log: true + env: + ANSIBLE_STDOUT_CALLBACK: yaml + +scenario: + test_sequence: + - prepare + - converge + - verify + - cleanup diff --git a/roles/cifmw_ntp/molecule/default/verify.yml b/roles/cifmw_ntp/molecule/default/verify.yml new file mode 100644 index 0000000000..1569fd9f34 --- /dev/null +++ b/roles/cifmw_ntp/molecule/default/verify.yml @@ -0,0 +1,67 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +- name: Verify + hosts: all + vars_files: + - ../vars.yml + - ../../defaults/main.yml + tasks: + - name: Get service facts + ansible.builtin.service_facts: + + - name: Assert that the service exists, is enabled and running + ansible.builtin.assert: + that: + - ansible_facts.services["chronyd.service"] is defined + - ansible_facts.services["chronyd.service"]["state"] == "running" + - ansible_facts.services["chronyd.service"]["status"] == "enabled" + + - name: Check if the chrony.conf file has been configured properly + ansible.builtin.lineinfile: + path: "{{ cifmw_ntp_chrony_conf_file }}" + regexp: "{{ item.regexp | default(omit) }}" + line: "{{ item.line }}" + backrefs: "{{ item.backrefs | default(omit) }}" + state: present + check_mode: true + register: _check_chrony_conf + loop: + - { regexp: "^(server.*)$", line: "#\\g<1>", backrefs: true } + - { line: "include {{ cifmw_ntp_chrony_extra_conf_file }}" } + + - name: Check if the provided ntp server is configured in the custom configuration file + ansible.builtin.lineinfile: + path: "{{ cifmw_ntp_chrony_extra_conf_file }}" + regexp: ^(server\ ).*(\ iburst)$ + line: \g<1>{{ cifmw_ntp_servers }}\g<2> + backrefs: true + state: present + check_mode: true + register: _check_ntp_server + + - name: Assert that chrony.conf has been correctly configured + ansible.builtin.assert: + that: item is not changed + fail_msg: "{{ cifmw_ntp_chrony_conf_file }} isn't correctly configured, please check." 
+ loop: "{{ _check_chrony_conf.results }}" + + - name: Assert that the correct server is configured in the custom configuration file + ansible.builtin.assert: + that: _check_ntp_server is not changed + fail_msg: "{{ cifmw_ntp_servers }} wasn't found in the {{ cifmw_ntp_chrony_extra_conf_file }} file" + success_msg: "{{ cifmw_ntp_servers }} was successfully configured in {{ cifmw_ntp_chrony_extra_conf_file }} file" diff --git a/roles/cifmw_ntp/molecule/vars.yml b/roles/cifmw_ntp/molecule/vars.yml new file mode 100644 index 0000000000..0719bff30a --- /dev/null +++ b/roles/cifmw_ntp/molecule/vars.yml @@ -0,0 +1,17 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +cifmw_ntp_server: 'rhel.pool.ntp.org' diff --git a/roles/cifmw_ntp/tasks/cleanup.yml b/roles/cifmw_ntp/tasks/cleanup.yml new file mode 100644 index 0000000000..01fd1fe660 --- /dev/null +++ b/roles/cifmw_ntp/tasks/cleanup.yml @@ -0,0 +1,34 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Disable and remove chrony and its custom configuration + become: true + block: + - name: Disable chrony service + ansible.builtin.systemd_service: + name: chronyd.service + state: stopped + enabled: false + + - name: Delete chrony custom config file + ansible.builtin.file: + path: "{{ cifmw_ntp_chrony_extra_conf_file }}" + state: absent + + - name: Uninstall chrony + ansible.builtin.dnf: + name: chrony + state: absent diff --git a/roles/cifmw_ntp/tasks/main.yml b/roles/cifmw_ntp/tasks/main.yml new file mode 100644 index 0000000000..7b2d5a0038 --- /dev/null +++ b/roles/cifmw_ntp/tasks/main.yml @@ -0,0 +1,53 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +- name: Install, configure, and enable chrony + become: true + block: + - name: Install chrony + ansible.builtin.dnf: + name: chrony + state: present + + - name: Configure original chrony.conf file + ansible.builtin.lineinfile: + path: "{{ cifmw_ntp_chrony_conf_file }}" + regexp: "{{ item.regexp | default(omit) }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter | default(omit) }}" + backrefs: "{{ item.backrefs | default(omit) }}" + state: present + backup: true + loop: + - { regexp: "^(server.*)$", line: "#\\g<1>", backrefs: true } + - { line: "include {{ cifmw_ntp_chrony_extra_conf_file }}", insertafter: "EOF" } + + - name: Render chrony extra config file + ansible.builtin.template: + src: chrony.conf.j2 + dest: "{{ cifmw_ntp_chrony_extra_conf_file }}" + owner: root + group: root + mode: '0644' + backup: true + notify: Restart chrony + + - name: Enable chrony service + ansible.builtin.systemd_service: + name: chronyd.service + state: started + enabled: true diff --git a/roles/cifmw_ntp/templates/chrony.conf.j2 b/roles/cifmw_ntp/templates/chrony.conf.j2 new file mode 100644 index 0000000000..cd4d6ae53d --- /dev/null +++ b/roles/cifmw_ntp/templates/chrony.conf.j2 @@ -0,0 +1,5 @@ +{{ ansible_managed | comment }} + +{% for ntp_host in cifmw_ntp_servers %} +{{ 'pool' if 'pool' in ntp_host else 'server' }} {{ ntp_host }} iburst +{% endfor %} diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index bf4d14d997..cc86bc3db6 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -472,3 +472,7 @@ register: _sync_dep_install_result until: _sync_dep_install_result.finished retries: 20 + + - name: Configure ntp service + ansible.builtin.include_role: + name: cifmw_ntp diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 9f15f91279..4f59f08732 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -202,6 +202,17 @@ 
parent: cifmw-molecule-base vars: TEST_RUN: cifmw_create_admin +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/cifmw_ntp/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-cifmw_ntp + parent: cifmw-molecule-base + vars: + TEST_RUN: cifmw_ntp - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index de43f274f2..12f4f202f9 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -33,6 +33,7 @@ - cifmw-molecule-cifmw_cephadm - cifmw-molecule-cifmw_create_admin - cifmw-molecule-cifmw_external_dns + - cifmw-molecule-cifmw_ntp - cifmw-molecule-cifmw_test_role - cifmw-molecule-compliance - cifmw-molecule-config_drive From 54bbab95034240938c00e9d6b24d917db886eb75 Mon Sep 17 00:00:00 2001 From: Roberto Alfieri Date: Wed, 25 Sep 2024 18:19:25 +0200 Subject: [PATCH 15/39] Gather ssh private key from the hostvars in ceph playbook --- playbooks/ceph.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 0a2bea363b..e39dad303d 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -58,7 +58,7 @@ - name: Distribute SSH keypair to target nodes tags: admin - hosts: "{{ cifmw_ceph_target | default('computes') }}" + hosts: "{{ cifmw_ceph_target | default('computes') }}" gather_facts: false become: true vars: @@ -265,7 +265,12 @@ gather_facts: false vars: _target_hosts: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" - ansible_ssh_private_key_file: "{{ lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY') }}" + _target: "{{ _target_hosts | first }}" + ansible_ssh_private_key_file: >- + {{ + hostvars[_target]['ansible_ssh_private_key_file'] | + default(lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY')) + }} cifmw_cephadm_spec_ansible_host: /tmp/ceph_spec.yml cifmw_cephadm_bootstrap_conf: 
/tmp/initial_ceph.conf cifmw_ceph_client_vars: /tmp/ceph_client.yml From 1d90303369493375d1e53351ab3c5687c82b617d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 26 Sep 2024 11:50:55 +0200 Subject: [PATCH 16/39] Fix molecule - add swift network The swift network was added to the HCI scenario used in the molecule tests here: https://github.com/openstack-k8s-operators/architecture/pull/404 --- .../molecule/default/files/3-ocp-net-def.yml | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml b/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml index 51a74e174b..dccb620775 100644 --- a/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml +++ b/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml @@ -86,6 +86,15 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 + swift: + interface_name: eth1.25 + ip_v4: 172.22.0.5 + mac_addr: '52:54:00:0b:a1:d9' + mtu: 1500 + network_name: swift + parent_interface: eth1 + skip_nm: false + vlan_id: 25 ocp-master-1: hostname: ocp-master-1 name: ocp-master-1 @@ -124,6 +133,15 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 + swift: + interface_name: eth1.25 + ip_v4: 172.22.0.6 + mac_addr: '52:54:00:0b:a0:d9' + mtu: 1500 + network_name: swift + parent_interface: eth1 + skip_nm: false + vlan_id: 25 ocp-master-2: hostname: ocp-master-2 name: ocp-master-2 @@ -162,6 +180,15 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 + swift: + interface_name: eth1.25 + ip_v4: 172.22.0.7 + mac_addr: '52:54:00:0b:1c:d9' + mtu: 1500 + network_name: swift + parent_interface: eth1 + skip_nm: false + vlan_id: 25 networks: ctlplane: dns_v4: @@ -335,3 +362,35 @@ networks: start_host: 100 ipv6_ranges: [] vlan_id: 22 + swift: + dns_v4: [] + dns_v6: [] + mtu: 1496 + network_name: swift + network_v4: 172.22.0.0/24 + search_domain: swift.example.com 
+ tools: + metallb: + ipv4_ranges: + - end: 172.22.0.90 + end_host: 90 + length: 11 + start: 172.22.0.80 + start_host: 80 + ipv6_ranges: [] + multus: + ipv4_ranges: + - end: 172.22.0.70 + end_host: 70 + length: 41 + start: 172.22.0.30 + start_host: 30 + ipv6_ranges: [] + netconfig: + ipv4_ranges: + - end: 172.22.0.250 + end_host: 250 + length: 151 + start: 172.22.0.100 + start_host: 100 + vlan_id: 25 From 2520f74eff1c85229fb3715215a633a82faf5cdc Mon Sep 17 00:00:00 2001 From: Sergey Bekkerman Date: Mon, 23 Sep 2024 09:44:57 +0200 Subject: [PATCH 17/39] Add DCN compute groups and ctlplane variants for network configuration - Added support for `dcn1-computes` and `dcn2-computes` groups in EDPM-based nodes ctlplane network configuration. - Defined `ctlplane_variants` to include `ctlplane`, `ctlplanedcn1`, and `ctlplanedcn2`. - Introduced new variable `cifmw_reproducer_validate_network_host` to specify the hostname for validating network connectivity from the ctlplane network. Default set to `controller-0.utility`. --- roles/reproducer/README.md | 1 + roles/reproducer/defaults/main.yml | 1 + roles/reproducer/tasks/configure_computes.yml | 2 +- roles/reproducer/tasks/libvirt_layout.yml | 14 ++++++++++++-- scenarios/reproducers/va-dcn.yml | 1 + 5 files changed, 16 insertions(+), 3 deletions(-) diff --git a/roles/reproducer/README.md b/roles/reproducer/README.md index e517e40d7b..0b884bc5cf 100644 --- a/roles/reproducer/README.md +++ b/roles/reproducer/README.md @@ -27,6 +27,7 @@ Those parameters shouldn't be used, unless the user is able to understand potent * `cifmw_reproducer_validate_network`: (Bool) Toggle network validations. Those validations ensure all of listed networks in VMs are defined. Defaults to `true`. * `cifmw_reproducer_validate_ocp_layout`: (Bool) Toggle OCP layout validations. Those validations ensure volumes, amounts and resources are properly defined for OCP cluster members (masters and workers). Defaults to `true`.
+* `cifmw_reproducer_validate_network_host`: (String) Specify the hostname that should be pinged in order to validate network connectivity from the ctlplane network. Default value is `controller-0.utility` ### run_job and run_content_provider booleans and risks. diff --git a/roles/reproducer/defaults/main.yml b/roles/reproducer/defaults/main.yml index f911056ebe..8ce55ee5b6 100644 --- a/roles/reproducer/defaults/main.yml +++ b/roles/reproducer/defaults/main.yml @@ -49,4 +49,5 @@ cifmw_reproducer_controller_basedir: >- # Allow to disable validations - user toggle this at their # own risks! cifmw_reproducer_validate_network: true +cifmw_reproducer_validate_network_host: "controller-0.utility" cifmw_reproducer_validate_ocp_layout: true diff --git a/roles/reproducer/tasks/configure_computes.yml b/roles/reproducer/tasks/configure_computes.yml index 75706d2305..3cf4dabc32 100644 --- a/roles/reproducer/tasks/configure_computes.yml +++ b/roles/reproducer/tasks/configure_computes.yml @@ -5,7 +5,7 @@ - name: Ensure we can ping controller-0 from ctlplane ansible.builtin.command: cmd: | - ping -c2 controller-0.utility + ping -c2 {{ cifmw_reproducer_validate_network_host }} - name: Tweak dnf configuration become: true diff --git a/roles/reproducer/tasks/libvirt_layout.yml b/roles/reproducer/tasks/libvirt_layout.yml index 6bd89a74a3..e474d8123d 100644 --- a/roles/reproducer/tasks/libvirt_layout.yml +++ b/roles/reproducer/tasks/libvirt_layout.yml @@ -98,10 +98,20 @@ - >- (compute.key in (groups['computes'] | default([]))) or (compute.key in (groups['cephs'] | default([]))) or - (compute.key in (groups['networkers'] | default([]))) + (compute.key in (groups['networkers'] | default([]))) or + (compute.key in (groups['dcn1-computes'] | default([]))) or + (compute.key in (groups['dcn2-computes'] | default([]))) vars: _host: "{{ compute.key }}" - _prefix: "{{cifmw_networking_env_definition.networks.ctlplane.network_v4 | ansible.utils.ipaddr('prefix') }}" + _prefix: >- + {% set 
ctlplane_variants = ['ctlplane', 'ctlplanedcn1', 'ctlplanedcn2'] %} + {{ + cifmw_networking_env_definition.networks | dict2items + | selectattr('key', 'in', ctlplane_variants) + | map(attribute='value.network_v4') + | first + | ansible.utils.ipaddr('prefix') + }} ansible.builtin.include_tasks: configure_computes.yml loop: >- {{ cifmw_networking_env_definition.instances | dict2items }} diff --git a/scenarios/reproducers/va-dcn.yml b/scenarios/reproducers/va-dcn.yml index 595442ff6f..87af087a83 100644 --- a/scenarios/reproducers/va-dcn.yml +++ b/scenarios/reproducers/va-dcn.yml @@ -15,6 +15,7 @@ cifmw_devscripts_cinder_volume_pvs: [] cifmw_run_tests: false cifmw_arch_automation_file: dcn.yaml cifmw_libvirt_manager_pub_net: ocpbm +cifmw_reproducer_validate_network_host: "192.168.111.9" cifmw_libvirt_manager_default_gw_nets: - ocpbm - dcn1_tr From d14df7163b1ce5b8ba92941f6d0059497fa0db32 Mon Sep 17 00:00:00 2001 From: rabi Date: Wed, 25 Sep 2024 16:48:06 +0530 Subject: [PATCH 18/39] Only gather required ansible facts This would ensure we can test the roles as expected with `gather_facts: false`.
Signed-off-by: rabi --- ci/playbooks/molecule-test.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ci/playbooks/molecule-test.yml b/ci/playbooks/molecule-test.yml index d95da5bbdb..871988d9f3 100644 --- a/ci/playbooks/molecule-test.yml +++ b/ci/playbooks/molecule-test.yml @@ -1,8 +1,16 @@ --- - name: "Run ci/playbooks/molecule-test.yml" hosts: "{{ cifmw_zuul_target_host | default('controller') }}" - gather_facts: true + gather_facts: false tasks: + - name: Gather required facts + ansible.builtin.setup: + gather_subset: + - "!all" + - "!min" + - "user_dir" + - "env" + - name: Load environment var if instructed to when: - cifmw_reproducer_molecule_env_file is defined From f6e464505a653befe9ee5c458974b6c4737c8129 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Thu, 26 Sep 2024 15:54:49 -0400 Subject: [PATCH 19/39] Use Ceph monitoring (not RGW) network for Dashboard Use the existing `cifmw_cephadm_monitoring_network` parameter to configure the Ceph dashboard instead of the `cifmw_cephadm_rgw_network` parameter. This was not detected earlier because the Ceph playbook set them both to the Ceph public_network, but we recently added a feature to run RGW on a separate network [1]. Also, add retries/delay to the dashboard HTTP check and manage if HTTPS should be used.
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2301 Jira: https://issues.redhat.com/browse/OSPCIX-506 Signed-off-by: John Fulton --- playbooks/ceph.yml | 3 ++ roles/cifmw_cephadm/defaults/main.yml | 2 ++ .../configure_dashboard_backends.yml | 2 +- .../tasks/dashboard/validation.yml | 36 +++++++++++++++++-- .../templates/ceph_monitoring_stack.yml.j2 | 6 ++-- 5 files changed, 43 insertions(+), 6 deletions(-) diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index e39dad303d..333691f73f 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -471,6 +471,9 @@ ansible.builtin.import_role: name: cifmw_cephadm tasks_from: post + vars: + cifmw_cephadm_dashboard_crt: "{{ cifmw_cephadm_certificate }}" + cifmw_cephadm_dashboard_key: "{{ cifmw_cephadm_key }}" - name: Render Ceph client configuration tags: client diff --git a/roles/cifmw_cephadm/defaults/main.yml b/roles/cifmw_cephadm/defaults/main.yml index 66fcfecf9d..4354ade258 100644 --- a/roles/cifmw_cephadm/defaults/main.yml +++ b/roles/cifmw_cephadm/defaults/main.yml @@ -138,3 +138,5 @@ cifmw_cephadm_update_log_commands: # Get last cephadm logs in case of failure - type: "mod_cephadm" cmd: "log last cephadm" +cifmw_cephadm_wait_for_dashboard_retries: 10 +cifmw_cephadm_wait_for_dashboard_delay: 20 diff --git a/roles/cifmw_cephadm/tasks/dashboard/configure_dashboard_backends.yml b/roles/cifmw_cephadm/tasks/dashboard/configure_dashboard_backends.yml index ad20445360..52f7881daa 100644 --- a/roles/cifmw_cephadm/tasks/dashboard/configure_dashboard_backends.yml +++ b/roles/cifmw_cephadm/tasks/dashboard/configure_dashboard_backends.yml @@ -34,5 +34,5 @@ ansible.builtin.command: | {{ cifmw_cephadm_ceph_cli }} config set \ mgr mgr/dashboard/{{ current_mgr }}/server_addr \ - {{ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }} + {{ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_monitoring_network) | first }} become: true diff --git 
a/roles/cifmw_cephadm/tasks/dashboard/validation.yml b/roles/cifmw_cephadm/tasks/dashboard/validation.yml index bb64d6e5e9..b8e6569b89 100644 --- a/roles/cifmw_cephadm/tasks/dashboard/validation.yml +++ b/roles/cifmw_cephadm/tasks/dashboard/validation.yml @@ -1,15 +1,47 @@ +--- +# Copyright 2024 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Update urischeme based on cert/key + ansible.builtin.set_fact: + cifmw_cephadm_urischeme_dashboard: "https" + when: + - cifmw_cephadm_dashboard_crt | default("") | length > 0 + - cifmw_cephadm_dashboard_key | default("") | length > 0 + - name: Validate connection to dashboard service ansible.builtin.get_url: - url: "http://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" + url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_response" + validate_certs: false register: dashboard_response failed_when: dashboard_response.failed == true + until: dashboard_response.failed == false + retries: "{{ cifmw_cephadm_wait_for_dashboard_retries }}" + delay: "{{ cifmw_cephadm_wait_for_dashboard_delay }}" + - name: Check http response code from dashboard service with login ansible.builtin.get_url: - url: "http://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" + url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ 
cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_http_response" + validate_certs: false username: admin password: admin register: dashboard_resp failed_when: dashboard_resp.status_code != 200 + until: dashboard_resp.status_code == 200 + retries: "{{ cifmw_cephadm_wait_for_dashboard_retries }}" + delay: "{{ cifmw_cephadm_wait_for_dashboard_delay }}" diff --git a/roles/cifmw_cephadm/templates/ceph_monitoring_stack.yml.j2 b/roles/cifmw_cephadm/templates/ceph_monitoring_stack.yml.j2 index ee968d5875..7e07d650f6 100644 --- a/roles/cifmw_cephadm/templates/ceph_monitoring_stack.yml.j2 +++ b/roles/cifmw_cephadm/templates/ceph_monitoring_stack.yml.j2 @@ -11,7 +11,7 @@ placement: - {{ _hosts[0] }} count: 1 networks: -- {{ cifmw_cephadm_rgw_network }} +- {{ cifmw_cephadm_monitoring_network }} --- service_type: prometheus service_id: prometheus @@ -21,7 +21,7 @@ placement: - {{ _hosts[0] }} count: 1 networks: -- {{ cifmw_cephadm_rgw_network }} +- {{ cifmw_cephadm_monitoring_network }} --- service_type: alertmanager service_id: alertmanager @@ -31,4 +31,4 @@ placement: - {{ _hosts[0] }} count: 1 networks: -- {{ cifmw_cephadm_rgw_network }} +- {{ cifmw_cephadm_monitoring_network }} From 4cad2d36957b0583ef43f99747643a12d17de105 Mon Sep 17 00:00:00 2001 From: Pablo Rodriguez Nava Date: Fri, 27 Sep 2024 16:44:33 +0200 Subject: [PATCH 20/39] Fix EDPM instances IPs for VLAN nets Till this change only the non-VLAN networks got fixed to the networking mapper picked IPs, making the mapper output unreliable for those type of networks. 
--- .../templates/common/edpm-nodeset-values/values.yaml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 index 92fde75b77..cf676889a4 100644 --- a/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 @@ -47,9 +47,9 @@ data: {% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} - name: {{ net }} subnetName: subnet1 + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} {% if net is match('ctlplane') %} defaultRoute: true - fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} {% endif %} {% endfor %} {% endfor %} From 19fa47611c228ad883968115a66dedae5dd8da78 Mon Sep 17 00:00:00 2001 From: jgilaber Date: Mon, 30 Sep 2024 10:21:11 +0200 Subject: [PATCH 21/39] Fix ci_gen_kustomize_values role name in files Fix the ci_gen_kustomize_values role name in the files section for the downstream hci va job, which was missing the _values suffix. 
--- zuul.d/trigger_jobs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zuul.d/trigger_jobs.yaml b/zuul.d/trigger_jobs.yaml index 58c6c8c0a8..d226b3ade0 100644 --- a/zuul.d/trigger_jobs.yaml +++ b/zuul.d/trigger_jobs.yaml @@ -9,7 +9,7 @@ - ^roles/cert_manager/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/cifmw_ceph*/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/cifmw_external_dns/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/ci_gen_kustomize/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/ci_gen_kustomize_values/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/ci_lvms_storage/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/ci_nmstate/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/config_drive/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* From ba0d2ddc9b154f2c057777dbf74ffb72298c68c0 Mon Sep 17 00:00:00 2001 From: Ricardo Diaz Date: Fri, 27 Sep 2024 08:39:39 +0200 Subject: [PATCH 22/39] Make HCI architecture repo parameterizable --- scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml index 6ab7985193..4aa27ae06d 100644 --- a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml +++ b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml @@ -5,7 +5,7 @@ cifmw_architecture_scenario: "nfv-ovs-dpdk-sriov-hci" # controller-0 as-is and be consumed by the `deploy-va.sh` script. # Please note, all paths are on the controller-0, meaning managed by the # Framework. Please do not edit them! 
-_arch_repo: "/home/zuul/src/github.com/openstack-k8s-operators/architecture" +_arch_repo: "{{ cifmw_architecture_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/architecture') }}" # HERE if you want to override kustomization, you can uncomment this parameter # and push the data structure you want to apply. From b5d5a014b28fa2d187e4a712510b7a3aee2aae4d Mon Sep 17 00:00:00 2001 From: Fabricio Aguiar Date: Mon, 30 Sep 2024 17:02:55 +0100 Subject: [PATCH 23/39] Bump go version Signed-off-by: Fabricio Aguiar --- ci/playbooks/pre-doc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/playbooks/pre-doc.yml b/ci/playbooks/pre-doc.yml index 2d0b9e2d49..4bd9ac6a67 100644 --- a/ci/playbooks/pre-doc.yml +++ b/ci/playbooks/pre-doc.yml @@ -43,7 +43,7 @@ - name: Download and extract golang ansible.builtin.unarchive: - src: "https://golang.org/dl/go{{ go_version | default('1.20.14') }}.linux-amd64.tar.gz" + src: "https://golang.org/dl/go{{ go_version | default('1.21.13') }}.linux-amd64.tar.gz" dest: "/usr/local" remote_src: true extra_opts: From 83443437323700ee8ccfcf40db6c9ab48bea436c Mon Sep 17 00:00:00 2001 From: Harald Date: Thu, 26 Sep 2024 13:54:21 +0200 Subject: [PATCH 24/39] Revert "Pin devscripts repo at working hash" This reverts commit 2344388cdb351bc64ee7066b23ea2140c41c9b13. 
--- roles/devscripts/vars/main.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/roles/devscripts/vars/main.yml b/roles/devscripts/vars/main.yml index 97bcc6b7bd..3cb58f4872 100644 --- a/roles/devscripts/vars/main.yml +++ b/roles/devscripts/vars/main.yml @@ -28,11 +28,7 @@ cifmw_devscripts_packages: - python3-jmespath cifmw_devscripts_repo: "https://github.com/openshift-metal3/dev-scripts.git" -# Note(Chandan): Pinning devscripts at https://github.com/openshift-metal3/dev-scripts/commit/5756e9cf094d8c4256461b0fd4242cd84dea8c93 -# as https://github.com/openshift-metal3/dev-scripts/pull/1694 broke IPv6 job -# More details are here: https://issues.redhat.com/browse/OSPCIX-443. -# We should unpin it as more updates needed from devscript. -cifmw_devscripts_repo_branch: 5756e9cf094d8c4256461b0fd4242cd84dea8c93 +cifmw_devscripts_repo_branch: HEAD cifmw_devscripts_config_defaults: working_dir: "/home/dev-scripts" From 9dc292511274032154867ebd70bc0f3c53cf9ea4 Mon Sep 17 00:00:00 2001 From: eshulman Date: Tue, 1 Oct 2024 11:51:16 +0300 Subject: [PATCH 25/39] Allow passing args to deploy-architecture.sh Added a parameter for passing additional arguments to deploy-architecture.sh script --- docs/source/usage/01_usage.md | 1 + reproducer.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/usage/01_usage.md b/docs/source/usage/01_usage.md index 70707e7f17..d8bc7418fe 100644 --- a/docs/source/usage/01_usage.md +++ b/docs/source/usage/01_usage.md @@ -72,6 +72,7 @@ are shared among multiple roles: - `cifmw_run_compute_compliance_scans`: (Bool) Specifies whether to run compliance scans on the first compute. Defaults to `false`. - `cifmw_run_id`: (String) CI Framework run identifier. This is used in libvirt_manager, to add some uniqueness to some types of virtual machines (anything that's not OCP, CRC nor controller). 
If not set, the Framework will generate a random string for you, and store it on the target host, in `{{ cifmw_basedir }}/artifacts/run-id` +- `cifmw_deploy_architecture_args`: (String) additional args and parameters to pass to the deploy-architecture script. Default is `''`. ```{admonition} Words of caution :class: danger diff --git a/reproducer.yml b/reproducer.yml index 3c7745767c..b3e1b54a2d 100644 --- a/reproducer.yml +++ b/reproducer.yml @@ -89,4 +89,4 @@ poll: 20 delegate_to: controller-0 ansible.builtin.command: - cmd: "/home/zuul/deploy-architecture.sh" + cmd: "/home/zuul/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" From 72c3e3c5013463a6a027b4a0d7fc210b7a67175a Mon Sep 17 00:00:00 2001 From: Pablo Rodriguez Nava Date: Fri, 27 Sep 2024 10:42:37 +0200 Subject: [PATCH 26/39] Ignore non-vars file in fetch_compute_facts Some files may be backups of previous versions, like the networking-environment-definition if the mapper is called twice. To avoid trying to load old backed up content load only the default files (yaml and json). --- hooks/playbooks/fetch_compute_facts.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index f7d5a9532d..96c3c183f8 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -26,6 +26,10 @@ - name: Load parameters ansible.builtin.include_vars: dir: "{{ item }}" + ignore_unknown_extensions: true + extensions: + - yaml + - yml loop: - "{{ cifmw_basedir }}/artifacts/parameters" - "/etc/ci/env" From efba24e6eb6ed18a152c017840079d4787e6f0cb Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 1 Oct 2024 19:26:57 -0400 Subject: [PATCH 27/39] Revert "Configure Ceph RGW on a dedicated Swift network" This reverts commit 531e00179197366e4d7a7eb68ff9ec0f142a0487. 
Jira: https://issues.redhat.com/browse/OSPRH-10496 --- .../files/networking-env-definition.yml | 87 ------------------- playbooks/ceph.yml | 24 +---- roles/cifmw_cephadm/tasks/check_vip.yml | 2 +- .../reproducers/networking-definition.yml | 17 ---- 4 files changed, 5 insertions(+), 125 deletions(-) diff --git a/ci/playbooks/files/networking-env-definition.yml b/ci/playbooks/files/networking-env-definition.yml index 30915c622d..201163757a 100644 --- a/ci/playbooks/files/networking-env-definition.yml +++ b/ci/playbooks/files/networking-env-definition.yml @@ -151,15 +151,6 @@ instances: parent_interface: eth1 skip_nm: false vlan_id: 22 - swift: - interface_name: eth1.25 - ip_v4: 172.22.0.100 - mac_addr: '52:54:00:0b:1c:e7' - mtu: 1500 - network_name: swift - parent_interface: eth1 - skip_nm: false - vlan_id: 25 compute-1: hostname: compute-1 name: compute-1 @@ -198,15 +189,6 @@ instances: parent_interface: eth1 skip_nm: false vlan_id: 22 - swift: - interface_name: eth1.25 - ip_v4: 172.22.0.101 - mac_addr: '52:54:00:0b:1c:e8' - mtu: 1500 - network_name: swift - parent_interface: eth1 - skip_nm: false - vlan_id: 25 compute-2: hostname: compute-2 name: compute-2 @@ -245,15 +227,6 @@ instances: parent_interface: eth1 skip_nm: false vlan_id: 22 - swift: - interface_name: eth1.25 - ip_v4: 172.22.0.102 - mac_addr: '52:54:00:0b:1c:e9' - mtu: 1500 - network_name: swift - parent_interface: eth1 - skip_nm: false - vlan_id: 25 controller-0: hostname: controller-0 name: controller-0 @@ -363,15 +336,6 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 - swift: - interface_name: enp6s0.25 - ip_v4: 172.22.0.10 - mac_addr: '52:54:00:18:a0:b6' - mtu: 1500 - network_name: swift - parent_interface: enp6s0 - skip_nm: false - vlan_id: 25 ocp-master-1: hostname: ocp-master-1 name: ocp-master-1 @@ -410,15 +374,6 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 - swift: - interface_name: enp6s0.25 - ip_v4: 172.22.0.11 - mac_addr: '52:54:00:18:a0:b7' - mtu: 1500 
- network_name: swift - parent_interface: enp6s0 - skip_nm: false - vlan_id: 25 ocp-master-2: hostname: ocp-master-2 name: ocp-master-2 @@ -457,15 +412,6 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 - swift: - interface_name: enp6s0.25 - ip_v4: 172.22.0.12 - mac_addr: '52:54:00:18:a0:b8' - mtu: 1500 - network_name: swift - parent_interface: enp6s0 - skip_nm: false - vlan_id: 25 networks: ctlplane: dns_v4: @@ -652,36 +598,3 @@ networks: start_host: 100 ipv6_ranges: [] vlan_id: 22 - swift: - dns_v4: [] - dns_v6: [] - mtu: 1496 - network_name: swift - network_v4: 172.22.0.0/24 - search_domain: swift.example.com - tools: - metallb: - ipv4_ranges: - - end: 172.22.0.90 - end_host: 90 - length: 11 - start: 172.22.0.80 - start_host: 80 - ipv6_ranges: [] - multus: - ipv4_ranges: - - end: 172.22.0.70 - end_host: 70 - length: 41 - start: 172.22.0.30 - start_host: 30 - ipv6_ranges: [] - netconfig: - ipv4_ranges: - - end: 172.22.0.250 - end_host: 250 - length: 151 - start: 172.22.0.100 - start_host: 100 - ipv6_ranges: [] - vlan_id: 25 diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 333691f73f..321b91de1c 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -344,43 +344,27 @@ # public network always exist because is provided by the ceph_spec role - name: Get Storage network range ansible.builtin.set_fact: - cifmw_cephadm_storage_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" - - - name: Set RGW network range to storage network only if it was not provided - ansible.builtin.set_fact: - cifmw_cephadm_rgw_network: "{{ cifmw_cephadm_storage_network }}" - when: - - cifmw_cephadm_rgw_network is not defined or - cifmw_cephadm_rgw_network | length == 0 + cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" - name: Set IP address of first monitor ansible.builtin.set_fact: - cifmw_cephadm_first_mon_ip: 
"{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | first }}" + cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }}" vars: this_host: "{{ _target_hosts | first }}" - name: Assert if any EDPM nodes n/w interface is missing in storage network - ansible.builtin.assert: - that: - - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | length > 0 - fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_storage_network }}" - loop: "{{ _target_hosts }}" - - - name: Assert if any EDPM nodes n/w interface is missing in RGW network ansible.builtin.assert: that: - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0 fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}" loop: "{{ _target_hosts }}" - when: - - cifmw_cephadm_rgw_network != cifmw_cephadm_storage_network - - name: Get already assigned RGW IP addresses + - name: Get already assigned IP addresses ansible.builtin.set_fact: ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}" loop: "{{ _target_hosts }}" - # cifmw_cephadm_vip is the VIP reserved in the RGW network + # cifmw_cephadm_vip is the VIP reserved in the Storage network - name: Set VIP var as empty string ansible.builtin.set_fact: cifmw_cephadm_vip: "" diff --git a/roles/cifmw_cephadm/tasks/check_vip.yml b/roles/cifmw_cephadm/tasks/check_vip.yml index d92bcc0765..0714510e7a 100644 --- a/roles/cifmw_cephadm/tasks/check_vip.yml +++ b/roles/cifmw_cephadm/tasks/check_vip.yml @@ -22,7 +22,7 @@ ansible.builtin.set_fact: count: "{{ 2 if count is undefined else count | int + 2 }}" - - name: Get an IP address from the RGW network + - name: Get an IP address from the Storage network ansible.builtin.set_fact: cur_ip: "{{ 
cifmw_cephadm_rgw_network | ansible.utils.next_nth_usable(count) }}" diff --git a/scenarios/reproducers/networking-definition.yml b/scenarios/reproducers/networking-definition.yml index 671f83263f..4f0ba0ed26 100644 --- a/scenarios/reproducers/networking-definition.yml +++ b/scenarios/reproducers/networking-definition.yml @@ -91,19 +91,6 @@ cifmw_networking_definition: end: 250 vlan: 23 mtu: 1500 - swift: - network: "172.22.0.0/24" - tools: - netconfig: - ranges: - - start: 100 - end: 250 - multus: - ranges: - - start: 30 - end: 70 - vlan: 25 - mtu: 1500 group-templates: ocps: @@ -119,8 +106,6 @@ cifmw_networking_definition: trunk-parent: ctlplane storage: trunk-parent: ctlplane - swift: - trunk-parent: ctlplane ocp_workers: network-template: range: @@ -142,8 +127,6 @@ cifmw_networking_definition: trunk-parent: ctlplane storagemgmt: trunk-parent: ctlplane - swift: - trunk-parent: ctlplane cephs: network-template: range: From dc04a3a900327e308f14b1d7ab0680cc2d977cae Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 1 Oct 2024 19:29:41 -0400 Subject: [PATCH 28/39] Revert "Fix molecule - add swift network" This reverts commit 1d90303369493375d1e53351ab3c5687c82b617d. 
Jira: https://issues.redhat.com/browse/OSPRH-10496 --- .../molecule/default/files/3-ocp-net-def.yml | 59 ------------------- 1 file changed, 59 deletions(-) diff --git a/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml b/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml index dccb620775..51a74e174b 100644 --- a/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml +++ b/roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml @@ -86,15 +86,6 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 - swift: - interface_name: eth1.25 - ip_v4: 172.22.0.5 - mac_addr: '52:54:00:0b:a1:d9' - mtu: 1500 - network_name: swift - parent_interface: eth1 - skip_nm: false - vlan_id: 25 ocp-master-1: hostname: ocp-master-1 name: ocp-master-1 @@ -133,15 +124,6 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 - swift: - interface_name: eth1.25 - ip_v4: 172.22.0.6 - mac_addr: '52:54:00:0b:a0:d9' - mtu: 1500 - network_name: swift - parent_interface: eth1 - skip_nm: false - vlan_id: 25 ocp-master-2: hostname: ocp-master-2 name: ocp-master-2 @@ -180,15 +162,6 @@ instances: parent_interface: enp6s0 skip_nm: false vlan_id: 22 - swift: - interface_name: eth1.25 - ip_v4: 172.22.0.7 - mac_addr: '52:54:00:0b:1c:d9' - mtu: 1500 - network_name: swift - parent_interface: eth1 - skip_nm: false - vlan_id: 25 networks: ctlplane: dns_v4: @@ -362,35 +335,3 @@ networks: start_host: 100 ipv6_ranges: [] vlan_id: 22 - swift: - dns_v4: [] - dns_v6: [] - mtu: 1496 - network_name: swift - network_v4: 172.22.0.0/24 - search_domain: swift.example.com - tools: - metallb: - ipv4_ranges: - - end: 172.22.0..90 - end_host: 90 - length: 11 - start: 172.22.0.80 - start_host: 80 - ipv6_ranges: [] - multus: - ipv4_ranges: - - end: 172.22.0.70 - end_host: 70 - length: 41 - start: 172.22.0.30 - start_host: 30 - ipv6_ranges: [] - netconfig: - ipv4_ranges: - - end: 72.22.0.250 - end_host: 250 - length: 151 - start: 
172.22.0.100 - start_host: 100 - vlan_id: 25 From 8631d1f42f7bbaed8377bb2e231fdc3197a1e276 Mon Sep 17 00:00:00 2001 From: jgilaber Date: Mon, 30 Sep 2024 16:20:53 +0200 Subject: [PATCH 29/39] Wait until ocp cluster is ready in adoption playbook Wait until the ocp is ready and stable in the playbook that deploys ocp to be used in the new adoption uni jobs. --- deploy-ocp.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/deploy-ocp.yml b/deploy-ocp.yml index d3d65100ff..b1f0062796 100644 --- a/deploy-ocp.yml +++ b/deploy-ocp.yml @@ -67,3 +67,21 @@ ansible.builtin.import_role: name: "libvirt_manager" tasks_from: "deploy_layout.yml" + + # Run from the hypervisor + - name: Ensure OCP cluster is stable + vars: + _auth_path: >- + {{ + ( + cifmw_devscripts_repo_dir, + 'ocp', + cifmw_devscripts_config.cluster_name, + 'auth' + ) | ansible.builtin.path_join + }} + cifmw_openshift_adm_op: "stable" + cifmw_openshift_kubeconfig: >- + {{ (_auth_path, 'kubeconfig') | ansible.builtin.path_join }} + ansible.builtin.include_role: + name: openshift_adm From b4fe9f27bcee91041542332f9587f6a9e64f9e84 Mon Sep 17 00:00:00 2001 From: Jiri Stransky Date: Fri, 27 Sep 2024 13:02:32 +0200 Subject: [PATCH 30/39] Improve adoption coverage for openstack-operator This should help prevent breakages like [1]. https://github.com/openstack-k8s-operators/openstack-operator/pull/1103 Related: OSPCIX-504 --- zuul.d/adoption.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index 8d700ce779..7293d0b283 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -107,6 +107,7 @@ Has ceph, not TLS. Uses a content provider. 
parent: adoption-standalone-to-crc-ceph files: + # ci-framework - ^playbooks/01-bootstrap.yml - ^playbooks/02-infra.yml - ^playbooks/06-deploy-edpm.yml @@ -119,8 +120,14 @@ - ^roles/repo_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^hooks/playbooks/fetch_compute_facts.yml - ^zuul.d/adoption.yaml + # openstack-operator - go.mod - apis/go.mod + - ^config/crd/bases/.* + - ^controllers/core/.* + - ^controllers/dataplane/.* + - ^pkg/dataplane/.* + - ^pkg/openstack/.* required-projects: - openstack-k8s-operators/openstack-operator irrelevant-files: From d1b1e62e06dbf4abf75c03c5a119905d67946ca9 Mon Sep 17 00:00:00 2001 From: Luca Miccini Date: Tue, 30 Jul 2024 14:42:25 +0200 Subject: [PATCH 31/39] add disks and switch to lvms --- scenarios/reproducers/va-pidone.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/scenarios/reproducers/va-pidone.yml b/scenarios/reproducers/va-pidone.yml index 1c4c0a0941..b7f2536182 100644 --- a/scenarios/reproducers/va-pidone.yml +++ b/scenarios/reproducers/va-pidone.yml @@ -61,6 +61,8 @@ cifmw_libvirt_manager_configuration: image_local_dir: "{{ cifmw_basedir }}/images/" disk_file_name: "ocp_master" disksize: "100" + extra_disks_num: 4 + extra_disks_size: "100G" cpus: 10 memory: 32 nets: @@ -74,6 +76,8 @@ cifmw_libvirt_manager_configuration: image_local_dir: "{{ cifmw_basedir }}/images/" disk_file_name: "ocp_worker" disksize: "100" + extra_disks_num: 4 + extra_disks_size: "100G" cpus: 10 memory: 16 nets: @@ -139,3 +143,15 @@ cifmw_test_operator_tolerations: effect: "NoExecute" cifmw_test_operator_node_selector: kubernetes.io/hostname: worker-3 + +cifmw_devscripts_create_logical_volume: true + +# Set Logical Volume Manager Storage by default for local storage +cifmw_use_lvms: true +cifmw_lvms_disk_list: + - /dev/vdb + - /dev/vdc + - /dev/vdd + +# lvms-operator tolerations: +cifmw_lvms_storage_tolerations: "{{ cifmw_test_operator_tolerations }}" From 
deb8e2f6621501e1a6db9b6ae961b70b331537c8 Mon Sep 17 00:00:00 2001 From: Lewis Denny Date: Fri, 27 Sep 2024 11:29:37 +1000 Subject: [PATCH 32/39] [sushy_emulator] Make instance whitelist optional --- .../sushy_emulator/tasks/collect_details.yml | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/roles/sushy_emulator/tasks/collect_details.yml b/roles/sushy_emulator/tasks/collect_details.yml index d8a7378bc7..2791295ec3 100644 --- a/roles/sushy_emulator/tasks/collect_details.yml +++ b/roles/sushy_emulator/tasks/collect_details.yml @@ -69,20 +69,30 @@ when: - cifmw_libvirt_manager_uuids is not defined block: + - name: "Ensure file exists: {{ _uuid_file }}" + register: _uuid_file_status + ansible.builtin.stat: + path: "{{ _uuid_file }}" + - name: "Slurp content of: {{ _uuid_file }}" - ansible.builtin.slurp: - src: "{{ _uuid_file }}" - register: _libvirt_uuids_file + when: _uuid_file_status.stat.exists + block: + - name: "Slurp content of: {{ _uuid_file }}" + ansible.builtin.slurp: + src: "{{ _uuid_file }}" + register: _libvirt_uuids_file - - name: "Set cifmw_libvirt_manager_uuids fact from {{ _uuid_file }}" - vars: - _yaml: "{{ _libvirt_uuids_file.content | b64decode | from_yaml }}" - ansible.builtin.set_fact: - cifmw_libvirt_manager_uuids: "{{ _yaml.libvirt_uuid }}" + - name: "Set cifmw_libvirt_manager_uuids fact from {{ _uuid_file }}" + vars: + _yaml: "{{ _libvirt_uuids_file.content | b64decode | from_yaml }}" + ansible.builtin.set_fact: + cifmw_libvirt_manager_uuids: "{{ _yaml.libvirt_uuid }}" -- name: Set fact related to Libvirt driver +- name: Set _cifmw_sushy_emulator_instance fact when: - cifmw_sushy_emulator_driver == 'libvirt' + - (cifmw_libvirt_manager_uuids is defined and + cifmw_libvirt_manager_uuids | length > 0) block: - name: Generate list of filtered VMs vars: From ed82055d0de96d0fab5dda71722b9e66b58140d9 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 27 Aug 2024 18:29:06 -0400 Subject: [PATCH 33/39] Verify RGW 
VIP is properly configured in keystone We had an issue related to the rgw_vip configuration. This patch removes the cidr usage from the facts and passes it to the rgw ingress template. In addition, ipaddr filter is used to properly get the ipaddress and fail if malformed (or is just not what we expect). --- playbooks/ceph.yml | 2 +- roles/cifmw_cephadm/tasks/configure_object.yml | 4 ++-- roles/cifmw_cephadm/tasks/post.yml | 8 +++++++- roles/cifmw_cephadm/tasks/rgw.yml | 4 ++-- roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 | 2 +- 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 321b91de1c..8bdc4ba86e 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -408,7 +408,7 @@ tasks_from: rgw vars: # cifmw_cephadm_vip is computed or passed as an override via -e @extra.yml - cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" + cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}" - name: Configure Monitoring Stack when: cifmw_ceph_daemons_layout.dashboard_enabled | default(false) | bool diff --git a/roles/cifmw_cephadm/tasks/configure_object.yml b/roles/cifmw_cephadm/tasks/configure_object.yml index 2540fb1717..649e3ea7c1 100644 --- a/roles/cifmw_cephadm/tasks/configure_object.yml +++ b/roles/cifmw_cephadm/tasks/configure_object.yml @@ -92,8 +92,8 @@ script: |- oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --user {{ all_uuids.results.0.stdout }} --project {{ project_service_uuid.stdout }} {{ all_uuids.results.2.stdout }} oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --user {{ all_uuids.results.0.stdout }} --project {{ project_service_uuid.stdout }} {{ all_uuids.results.3.stdout }} - oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} public {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_ext.values() | first if cifmw_external_dns_vip_ext is defined else cifmw_cephadm_vip 
}}:8080/swift/v1/AUTH_%\(tenant_id\)s - oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} internal {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_int.values() | first if cifmw_external_dns_vip_int is defined else cifmw_cephadm_vip }}:8080/swift/v1/AUTH_%\(tenant_id\)s + oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} public {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_ext.values() | first if cifmw_external_dns_vip_ext is defined else cifmw_cephadm_rgw_vip | ansible.utils.ipaddr('address') }}:8080/swift/v1/AUTH_%\(tenant_id\)s + oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack endpoint create --region regionOne {{ all_uuids.results.1.stdout }} internal {{ cifmw_cephadm_urischeme }}://{{ cifmw_external_dns_vip_int.values() | first if cifmw_external_dns_vip_int is defined else cifmw_cephadm_rgw_vip | ansible.utils.ipaddr('address') }}:8080/swift/v1/AUTH_%\(tenant_id\)s oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --project {{ all_uuids.results.4.stdout }} --user {{ all_uuids.results.5.stdout }} {{ all_uuids.results.6.stdout }} delegate_to: localhost when: diff --git a/roles/cifmw_cephadm/tasks/post.yml b/roles/cifmw_cephadm/tasks/post.yml index a23ee2e932..d6ec394ed2 100644 --- a/roles/cifmw_cephadm/tasks/post.yml +++ b/roles/cifmw_cephadm/tasks/post.yml @@ -46,8 +46,14 @@ loop: "{{ cifmw_cephadm_log_commands }}" - name: Configure ceph object store to use external ceph object gateway + when: + - cifmw_cephadm_vip is defined + - cifmw_cephadm_vip | default("") | length > 0 + - cifmw_ceph_daemons_layout.rgw_enabled | default(true) | bool ansible.builtin.include_tasks: configure_object.yml - when: cifmw_ceph_daemons_layout.rgw_enabled | default(true) | bool + vars: + cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}" + - name: Dashboard service validation 
ansible.builtin.include_tasks: dashboard/validation.yml diff --git a/roles/cifmw_cephadm/tasks/rgw.yml b/roles/cifmw_cephadm/tasks/rgw.yml index 4a600c1867..2135d76eed 100644 --- a/roles/cifmw_cephadm/tasks/rgw.yml +++ b/roles/cifmw_cephadm/tasks/rgw.yml @@ -27,12 +27,12 @@ - name: Define cifmw_external_dns_vip_ext ansible.builtin.set_fact: cifmw_external_dns_vip_ext: "{{ cifmw_external_dns_vip_ext | default({}) | - combine({ (cifmw_cephadm_vip): 'rgw-external.ceph.local' }) }}" + combine({ (cifmw_cephadm_rgw_vip | ansible.utils.ipaddr('address')): 'rgw-external.ceph.local' }) }}" - name: Define cifmw_external_dns_vip_int ansible.builtin.set_fact: cifmw_external_dns_vip_int: "{{ cifmw_external_dns_vip_ext | default({}) | - combine({ (cifmw_cephadm_vip): 'rgw-internal.ceph.local' }) }}" + combine({ (cifmw_cephadm_rgw_vip | ansible.utils.ipaddr('address')): 'rgw-internal.ceph.local' }) }}" - name: Create DNS domain and certificate ansible.builtin.include_role: diff --git a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 index dbf42ae222..0c0b2f52c4 100644 --- a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 +++ b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 @@ -31,7 +31,7 @@ spec: monitor_port: 8999 virtual_interface_networks: - {{ cifmw_cephadm_rgw_network }} - virtual_ip: {{ cifmw_cephadm_vip }} + virtual_ip: {{ cifmw_cephadm_rgw_vip }}/{{ cidr }} {% if rgw_frontend_cert is defined %} ssl_cert: | {{ rgw_frontend_cert | indent( width=6 ) }} From ebbd1ae1e28cd7996aff5d8fc3e0503570f9305c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 26 Sep 2024 09:30:42 +0200 Subject: [PATCH 34/39] Make `^nodes?(_[0-9]+)?$` keys removal optional When creating the ConfigMap baseline any keys matching regular expression `^nodes?(_[0-9]+)?$` is removed from architecuture source values.yaml files. 
This change introduces a new parameter to make this behaviour configurable, `cifmw_ci_gen_kustomize_values_remove_keys_expressions` takes a list of regular expressions. By defualt these are: - "^nodes$" - "^node(_[0-9]+)?$" Disabling the keys removal is useful in the case when the values used in architecture repo example files is a 1-to-1 match with the deployed infrastructure - i.e there is no need to jinja template the values based on the network_mapper data. --- roles/ci_gen_kustomize_values/README.md | 2 ++ roles/ci_gen_kustomize_values/defaults/main.yml | 3 +++ roles/ci_gen_kustomize_values/tasks/generate_snippets.yml | 5 +++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/roles/ci_gen_kustomize_values/README.md b/roles/ci_gen_kustomize_values/README.md index 73d19daa34..4fd81bcb4f 100644 --- a/roles/ci_gen_kustomize_values/README.md +++ b/roles/ci_gen_kustomize_values/README.md @@ -35,6 +35,8 @@ with a message. * `ci_gen_kustomize_fetch_ocp_state`: (Boolean) If true it enables generating CI templates based on the OCP state. Defaults to `true`. * `cifmw_ci_gen_kustomize_values_storage_class_prefix`: (String) Prefix for `storageClass` in generated values.yaml files. Defaults to `"lvms-"` only if `cifmw_use_lvms` is True, otherwise it defaults to `""`. The prefix is prepended to the `cifmw_ci_gen_kustomize_values_storage_class`. It is not recommended to override this value, instead set `cifmw_use_lvms` True or False. * `cifmw_ci_gen_kustomize_values_storage_class`: (String) Value for `storageClass` in generated values.yaml files. Defaults to `"lvms-local-storage"` only if `cifmw_use_lvms` is True, otherwise it defaults to `"local-storage"`. +* `cifmw_ci_gen_kustomize_values_remove_keys_expressions`: (List) Remove keys matching the regular expressions from source ConfigMaps (values.yaml). + Defaults to `["^nodes$", "^node(_[0-9]+)?$"]`. 
### Specific parameters for edpm-values This configMap needs some more parameters in order to properly override the `architecture` provided one. diff --git a/roles/ci_gen_kustomize_values/defaults/main.yml b/roles/ci_gen_kustomize_values/defaults/main.yml index 8e1bd7ec35..25b81fc785 100644 --- a/roles/ci_gen_kustomize_values/defaults/main.yml +++ b/roles/ci_gen_kustomize_values/defaults/main.yml @@ -71,6 +71,9 @@ cifmw_ci_gen_kustomize_values_storage_class_prefix: "{{ 'lvms-' if cifmw_use_lvm cifmw_ci_gen_kustomize_values_storage_class: "{{ cifmw_ci_gen_kustomize_values_storage_class_prefix }}local-storage" cifmw_ci_gen_kustomize_values_primary_ip_version: 4 +cifmw_ci_gen_kustomize_values_remove_keys_expressions: + - ^nodes$ + - ^node(_[0-9]+)?$ # Those parameter must be set if you want to edit an "edpm-values" # cifmw_ci_gen_kustomize_values_ssh_authorizedkeys diff --git a/roles/ci_gen_kustomize_values/tasks/generate_snippets.yml b/roles/ci_gen_kustomize_values/tasks/generate_snippets.yml index b42ba876d2..fe6d535786 100644 --- a/roles/ci_gen_kustomize_values/tasks/generate_snippets.yml +++ b/roles/ci_gen_kustomize_values/tasks/generate_snippets.yml @@ -80,8 +80,9 @@ _cifmw_gen_kustomize_values_base_cm_content: >- {{ _config_map_content | - ansible.utils.remove_keys(target=['^nodes?(_[0-9]+)?$'], - matching_parameter='regex') + ansible.utils.remove_keys( + target=cifmw_ci_gen_kustomize_values_remove_keys_expressions, + matching_parameter='regex') }} cacheable: false From 55679909bb1b9d40717fad98fa3d7f10bd120444 Mon Sep 17 00:00:00 2001 From: Pablo Rodriguez Nava Date: Fri, 27 Sep 2024 10:38:31 +0200 Subject: [PATCH 35/39] [ci-boostrap] Add second call to the mapper to ingest interface-info data Renaming the playbook that called the mapper as it's no longer only called once in pre and to avoid it to be used for purposes that are different from calling the networking mapper. 
--- .../{pre-ci-bootstrap.yml => bootstrap-networking-mapper.yml} | 1 + zuul.d/base.yaml | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) rename ci/playbooks/{pre-ci-bootstrap.yml => bootstrap-networking-mapper.yml} (94%) diff --git a/ci/playbooks/pre-ci-bootstrap.yml b/ci/playbooks/bootstrap-networking-mapper.yml similarity index 94% rename from ci/playbooks/pre-ci-bootstrap.yml rename to ci/playbooks/bootstrap-networking-mapper.yml index ccd448c7d4..e4120002db 100644 --- a/ci/playbooks/pre-ci-bootstrap.yml +++ b/ci/playbooks/bootstrap-networking-mapper.yml @@ -31,6 +31,7 @@ ~/test-python/bin/ansible-playbook {{ ansible_user_dir }}/networking_mapper.yml -i {{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml -e @scenarios/centos-9/base.yml + -e cifmw_networking_mapper_ifaces_info_path=/etc/ci/env/interfaces-info.yml -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/parameters/zuul-params.yml" {% if nodepool is defined %} -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/nodepool_params.yml" diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 0a34670ca6..14c69c9fd3 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -216,6 +216,7 @@ required-projects: *multinode_edpm_rp roles: *multinode_edpm_roles pre-run: + - ci/playbooks/bootstrap-networking-mapper.yml - ci/playbooks/crc/reconfigure-kubelet.yml - ci/playbooks/multinode-customizations.yml post-run: *multinode_edpm_post_run @@ -236,6 +237,7 @@ required-projects: *multinode_edpm_rp roles: *multinode_edpm_roles pre-run: + - ci/playbooks/bootstrap-networking-mapper.yml - ci/playbooks/crc/reconfigure-kubelet.yml - ci/playbooks/multinode-customizations.yml post-run: *multinode_edpm_post_run @@ -305,4 +307,4 @@ pre-run: - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml - - ci/playbooks/pre-ci-bootstrap.yml + - ci/playbooks/bootstrap-networking-mapper.yml From 0f4a43e0d367fb236ed53b32d68e8d6134d4da82 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Wed, 25 Sep 
2024 11:10:32 +0200 Subject: [PATCH 36/39] Add tempest cleanup opt Allow users to enable tempest cleanup feature defined in the tempest image. https://github.com/openstack-k8s-operators/tcib/pull/208 https://github.com/openstack-k8s-operators/test-operator/pull/207 --- roles/test_operator/README.md | 1 + roles/test_operator/defaults/main.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 83adf6cf09..7b1726b2b5 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -11,6 +11,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_logs_image`: (String) Image that should be used to collect logs from the pods spawned by the test-operator. Default value: `quay.io/quay/busybox` * `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. Default value: `8` * `cifmw_test_operator_cleanup`: (Bool) Delete all resources created by the role at the end of the testing. Default value: `false` +* `cifmw_test_operator_tempest_cleanup`: (Bool) Run tempest cleanup after test execution (tempest run) to delete any resources created by tempest that may have been left out. * `cifmw_test_operator_default_groups`: (List) List of groups in the include list to search for tests to be executed. Default value: `[ 'default' ]` * `cifmw_test_operator_default_jobs`: (List) List of jobs in the exclude list to search for tests to be excluded. Default value: `[ 'default' ]` * `cifmw_test_operator_dry_run`: (Boolean) Whether test-operator should run or not. 
Default value: `false` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index da88f8ab44..5e396c8c9a 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -48,6 +48,7 @@ cifmw_test_operator_tempest_network_attachments: [] cifmw_test_operator_tempest_tests_include_override_scenario: false cifmw_test_operator_tempest_tests_exclude_override_scenario: false cifmw_test_operator_tempest_workflow: [] +cifmw_test_operator_tempest_cleanup: false # Enabling SRBAC by default, in jobs where this does not make sense should be turned off explicitly # @@ -117,6 +118,7 @@ cifmw_test_operator_tempest_config: extraRPMs: "{{ cifmw_test_operator_tempest_extra_rpms | default([]) }}" extraImages: "{{ cifmw_test_operator_tempest_extra_images | default([]) }}" tempestconfRun: "{{ cifmw_tempest_tempestconf_config_defaults | combine(cifmw_tempest_tempestconf_config | default({})) }}" + cleanup: "{{ cifmw_test_operator_tempest_cleanup }}" workflow: "{{ cifmw_test_operator_tempest_workflow }}" # Section 3: tobiko parameters - used when run_test_fw is 'tobiko' From 82ccefea4363fbad713e627c882ccf56374d681c Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Wed, 25 Sep 2024 16:26:13 +0530 Subject: [PATCH 37/39] Donot run EDPM/adoption job zuui.d/trigger_jobs.yaml changes zuui.d/trigger_jobs.yaml file contains downstream trigger jobs. It does not make sense to run EDPM/adoption job on these jobs. Let's add it to irrelevant files list to fix that. 
Signed-off-by: Chandan Kumar (raukadah) --- zuul.d/adoption.yaml | 1 + zuul.d/base.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index 7293d0b283..0fd6b569da 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -172,6 +172,7 @@ - roles/virtualbmc - tests?\/functional - zuul.d/molecule.* + - zuui.d/trigger_jobs.yaml # NOTE(marios): need to keep this old job because of zuul branches, see comments at OSPRH-8452 - job: diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 14c69c9fd3..441b296e22 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -63,6 +63,7 @@ - roles/virtualbmc - roles/validations - zuul.d/molecule.* + - zuui.d/trigger_jobs.yaml # Other openstack operators - containers/ci - .ci-operator.yaml From 468a940dae7ccbab1271617f0d262c9873021008 Mon Sep 17 00:00:00 2001 From: jgilaber Date: Tue, 1 Oct 2024 15:15:31 +0200 Subject: [PATCH 38/39] Only consider ocps networks for network-values template The common template for the network-values in ci_gen_kustomize_values iterates over all the hosts in the cifmw_networking_env_definition and stores in a map the relationship between a network and the interface. For this to work, the ocp nodes must be the last ones, since if there are other nodes after them in the cifmw_networking_env_definition, for example nodes named osp-*, and they use a different network interface, the resulting nncp will fail when applied. 
--- .../templates/bgp/network-values/values.yaml.j2 | 12 +++++------- .../bgp_dt01/network-values/values.yaml.j2 | 14 +++++--------- .../templates/common/network-values/values.yaml.j2 | 14 ++++++-------- .../shiftstack/network-values/values.yaml.j2 | 14 ++++++-------- .../uni01alpha/network-values/values.yaml.j2 | 14 ++++++-------- .../uni04delta-ipv6/network-values/values.yaml.j2 | 14 ++++++-------- .../uni05epsilon/network-values/values.yaml.j2 | 14 ++++++-------- .../uni06zeta/network-values/values.yaml.j2 | 14 ++++++-------- .../uni07eta/network-values/values.yaml.j2 | 14 ++++++-------- 9 files changed, 52 insertions(+), 72 deletions(-) diff --git a/roles/ci_gen_kustomize_values/templates/bgp/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp/network-values/values.yaml.j2 index 2c023b48f7..a900c08826 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp/network-values/values.yaml.j2 @@ -5,18 +5,16 @@ lb_tools={}) %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: + name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} {% set ns.interfaces = ns.interfaces | combine({network.network_name: (network.parent_interface | default(network.interface_name) ) }, - recursive=true) -%} -{% endfor -%} -{% if host is match('^(ocp|crc).*') %} - node_{{ ns.ocp_index }}: - name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} + recursive=true) %} {{ network.network_name }}_ip: {{ network.ip_v4 }} {% endfor %} {% set node_net_orig_content = 
original_content.data.bgp['net-attach-def']['node' ~ ns.ocp_index] %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 index b3642eb868..2c6caa6823 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 @@ -6,20 +6,16 @@ data: {% for host in cifmw_networking_env_definition.instances.keys() -%} {% set hostname = cifmw_networking_env_definition.instances[host]['hostname'] %} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% if 'worker-3' != hostname %} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: + name: {{ hostname }} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} {% set ns.interfaces = ns.interfaces | combine({network.network_name: (network.parent_interface | default(network.interface_name) ) }, - recursive=true) -%} -{% endif %} -{% endfor -%} -{% if host is match('^(ocp|crc).*') %} - node_{{ ns.ocp_index }}: - name: {{ hostname }} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} + recursive=true) %} {{ network.network_name }}_ip: {{ network.ip_v4 }} {% if 'worker-3' == hostname and 'ctlplane' == network.network_name %} base_if: {{ network.interface_name }} diff --git a/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 index 1c1df833ea..5f3cf99b28 100644 --- a/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 @@ -6,19 +6,17 @@ lb_tools={}) %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in 
cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% set ns.interfaces = ns.interfaces | - combine({network.network_name: (network.parent_interface | - default(network.interface_name) - ) - }, - recursive=true) -%} -{% endfor -%} {% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} {% endfor %} {% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 index 685e773bbb..f9b4a4933a 100644 --- a/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 @@ -4,19 +4,17 @@ data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% set ns.interfaces = ns.interfaces | - combine({network.network_name: (network.parent_interface | - default(network.interface_name) - ) - }, - recursive=true) -%} -{% endfor -%} {% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} {{ network.network_name }}_ip: {{ network.ip_v4 
}} {% endfor %} {% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 index db5cf61228..11b7f5230a 100644 --- a/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 @@ -6,19 +6,17 @@ lb_tools={}) %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% set ns.interfaces = ns.interfaces | - combine({network.network_name: (network.parent_interface | - default(network.interface_name) - ) - }, - recursive=true) -%} -{% endfor -%} {% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} {% endfor %} {% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 index dfd093af96..3b48d68758 100644 --- a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 @@ -6,14 +6,6 @@ lb_tools={}) %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% set ns.interfaces = ns.interfaces | - combine({network.network_name: 
(network.parent_interface | - default(network.interface_name) - ) - }, - recursive=true) -%} -{% endfor -%} {% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} @@ -22,6 +14,12 @@ data: {# Because devscripts use fqdn for node names when ipv6 #} node_name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }}.ocp.openstack.lab {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} {% endfor %} {% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 index df35482f9c..526b09deb6 100644 --- a/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 @@ -6,19 +6,17 @@ lb_tools={}) %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% set ns.interfaces = ns.interfaces | - combine({network.network_name: (network.parent_interface | - default(network.interface_name) - ) - }, - recursive=true) -%} -{% endfor -%} {% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} {% 
endfor %} {% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 index 7e61b53a8b..a88987a16d 100644 --- a/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 @@ -6,19 +6,17 @@ lb_tools={}) %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% set ns.interfaces = ns.interfaces | - combine({network.network_name: (network.parent_interface | - default(network.interface_name) - ) - }, - recursive=true) -%} -{% endfor -%} {% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} {% endfor %} {% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 index 6c6f68cc16..2df6024b6c 100644 --- a/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 @@ -6,19 +6,17 @@ lb_tools={}) %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} -{% set ns.interfaces = ns.interfaces | - combine({network.network_name: (network.parent_interface | - 
default(network.interface_name) - ) - }, - recursive=true) -%} -{% endfor -%} {% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} {% endfor %} {% endif %} From eb3e0d9d4294b45b297203a60555373f941f120c Mon Sep 17 00:00:00 2001 From: Lukas Piwowarski Date: Mon, 30 Sep 2024 09:43:16 -0400 Subject: [PATCH 39/39] Enable running the test-operator role multiple times It was not possible to run the test-operator role multiple times without cleaning up all resources created by the first run, such as the logs pod created for the logs collection. This patch makes it possible to run the test operator role multiple times without cleaning up the resources (e.g., after the update in an update job). It is possible because: - test-operator-logs pod now has unique name for each job run - The variable cifmw_test_operator_*_name is now exposed. By specifying a unique name, we can guarantee no name collisions between the test-operator related resources (logs pods, instances of CRs) If you want to run test-operator role twice within a single job then make sure that for the second run you specify a value for the cifmw_test_operator_*_name. 
For example: cifmw_test_operator_tempest_name: "post-update-tempest-tests" --- roles/test_operator/README.md | 24 +++++++++++++++---- roles/test_operator/defaults/main.yml | 4 ++++ .../tasks/run-test-operator-job.yml | 8 +++---- roles/test_operator/vars/main.yml | 4 ---- 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 7b1726b2b5..1d3016a38b 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -26,6 +26,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_privileged`: (Boolean) Spawn the test pods with `allowPrivilegedEscalation: true` and default linux capabilities. This is required for certain test-operator functionalities to work properly (e.g.: `extraRPMs`, certain set of tobiko tests). Default value: `true` ## Tempest specific parameters +* `cifmw_test_operator_tempest_name`: (String) Value used in the `Tempest.Metadata.Name` field. The value specifies the name of some resources spawned by the test-operator role. Default value: `tempest-tests` * `cifmw_test_operator_tempest_registry`: (String) The registry where to pull tempest container. Default value: `quay.io` * `cifmw_test_operator_tempest_namespace`: (String) Registry's namespace where to pull tempest container. Default value: `podified-antelope-centos9` * `cifmw_test_operator_tempest_container`: (String) Name of the tempest container. 
Default value: `openstack-tempest` @@ -49,7 +50,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ apiVersion: test.openstack.org/v1beta1 kind: Tempest metadata: - name: tempest-tests + name: "{{ cifmw_test_operator_tempest_name }}" namespace: "{{ cifmw_test_operator_namespace }}" spec: containerImage: "{{ cifmw_test_operator_tempest_image }}:{{ cifmw_test_operator_tempest_image_tag }}" @@ -70,6 +71,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ ``` ## Tobiko specific parameters +* `cifmw_test_operator_tobiko_name`: (String) Value used in the `Tobiko.Metadata.Name` field. The value specifies the name of some resources spawned by the test-operator role. Default value: `tobiko-tests` * `cifmw_test_operator_tobiko_registry`: (String) The registry where to pull tobiko container. Default value: `quay.io` * `cifmw_test_operator_tobiko_namespace`: (String) Registry's namespace where to pull tobiko container. Default value: `podified-antelope-centos9` * `cifmw_test_operator_tobiko_container`: (String) Name of the tobiko container. Default value: `openstack-tobiko` @@ -91,7 +93,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ apiVersion: test.openstack.org/v1beta1 kind: Tobiko metadata: - name: tobiko-tests + name: "{{ cifmw_test_operator_tobiko_name }}" namespace: "{{ cifmw_test_operator_namespace }}" spec: kubeconfigSecretName: "{{ cifmw_test_operator_tobiko_kubeconfig_secret }}" @@ -112,6 +114,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ ``` ## AnsibleTest specific parameters +* `cifmw_test_operator_ansibletest_name`: (String) Value used in the `Ansibletest.Metadata.Name` field. The value specifies the name of some resources spawned by the test-operator role. Default value: `ansibletest` * `cifmw_test_operator_ansibletest_registry`: (String) The registry where to pull ansibletests container. 
Default value: `quay.io` * `cifmw_test_operator_ansibletest_namespace`: (String) Registry's namespace where to pull ansibletests container. Default value:podified-antelope-centos9 * `cifmw_test_operator_ansibletest_container`: (String) Name of the ansibletests container. Default value: `openstack-ansible-tests` @@ -135,7 +138,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ apiVersion: test.openstack.org/v1beta1 kind: AnsibleTest metadata: - name: horizontest-sample + name: "{{ cifmw_test_operator_ansibletest_name }}" namespace: "{{ cifmw_test_operator_namespace }}" spec: containerImage: "{{ cifmw_test_operator_ansibletest_image }}:{{ cifmw_test_operator_ansibletest_image_tag }}" @@ -156,6 +159,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ ``` ## Horizontest specific parameters +* `cifmw_test_operator_horizontest_name`: (String) Value used in the `Horizontest.Metadata.Name` field. The value specifies the name of some resources spawned by the test-operator role. Default value: `horizontest-tests` * `cifmw_test_operator_horizontest_registry`: (String) The registry where to pull horizontest container. Default value: `quay.io` * `cifmw_test_operator_horizontest_namespace`: (String) Registry's namespace where to pull horizontest container. Default value: `podified-antelope-centos9` * `cifmw_test_operator_horizontest_container`: (String) Name of the horizontest container. 
Default value: `openstack-horizontest` @@ -179,7 +183,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ apiVersion: test.openstack.org/v1beta1 kind: HorizonTest metadata: - name: horizontest + name: "{{ cifmw_test_operator_horizontest_name }}" namespace: "{{ cifmw_test_operator_namespace }}" spec: containerImage: "{{ cifmw_test_operator_horizontest_image }}:{{ cifmw_test_operator_horizontest_image_tag }}" @@ -197,3 +201,15 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ logsDirectoryName: "{{ cifmw_test_operator_horizontest_logs_directory_name }}" horizonTestDir: "{{ cifmw_test_operator_horizontest_horizon_test_dir }}" ``` + +## Examples + +### Execute the `test-operator` role multiple times within a single job + +If you want to run the `test-operator` role twice within a single job, make sure +that for the second run, you specify a value for the `cifmw_test_operator_*_name` +other than the default one (e.g., `tempest-tests`, `tobiko-tests`, ...): + +``` +cifmw_test_operator_tempest_name: "post-update-tempest-tests" +``` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 5e396c8c9a..d0a2be95a3 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -39,6 +39,7 @@ cifmw_test_operator_delete_logs_pod: false cifmw_test_operator_privileged: true # Section 2: tempest parameters - used when run_test_fw is 'tempest' +cifmw_test_operator_tempest_name: "tempest-tests" cifmw_test_operator_tempest_registry: quay.io cifmw_test_operator_tempest_namespace: podified-antelope-centos9 cifmw_test_operator_tempest_container: openstack-tempest-all @@ -122,6 +123,7 @@ cifmw_test_operator_tempest_config: workflow: "{{ cifmw_test_operator_tempest_workflow }}" # Section 3: tobiko parameters - used when run_test_fw is 'tobiko' +cifmw_test_operator_tobiko_name: "tobiko-tests" cifmw_test_operator_tobiko_registry: quay.io 
cifmw_test_operator_tobiko_namespace: podified-antelope-centos9 cifmw_test_operator_tobiko_container: openstack-tobiko @@ -163,6 +165,7 @@ cifmw_test_operator_tobiko_config: workflow: "{{ cifmw_test_operator_tobiko_workflow }}" # Section 4: ansibletest parameters - used when run_test_fw is 'ansibletest' +cifmw_test_operator_ansibletest_name: "ansibletest" cifmw_test_operator_ansibletest_registry: quay.io cifmw_test_operator_ansibletest_namespace: podified-antelope-centos9 cifmw_test_operator_ansibletest_container: openstack-ansible-tests @@ -206,6 +209,7 @@ cifmw_test_operator_ansibletest_config: debug: "{{ cifmw_test_operator_ansibletest_debug }}" # Section 5: horizontest parameters - used when run_test_fw is 'horizontest' +cifmw_test_operator_horizontest_name: "horizontest-tests" cifmw_test_operator_horizontest_registry: quay.io cifmw_test_operator_horizontest_namespace: podified-antelope-centos9 cifmw_test_operator_horizontest_container: openstack-horizontest diff --git a/roles/test_operator/tasks/run-test-operator-job.yml b/roles/test_operator/tasks/run-test-operator-job.yml index 05d60de071..2299b250b3 100644 --- a/roles/test_operator/tasks/run-test-operator-job.yml +++ b/roles/test_operator/tasks/run-test-operator-job.yml @@ -115,7 +115,7 @@ apiVersion: v1 kind: Pod metadata: - name: "test-operator-logs-pod-{{ run_test_fw }}" + name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_job_name }}" namespace: "{{ cifmw_test_operator_namespace }}" spec: containers: @@ -134,7 +134,7 @@ context: "{{ cifmw_openshift_context | default(omit) }}" namespace: "{{ cifmw_test_operator_namespace }}" kind: Pod - name: "test-operator-logs-pod-{{ run_test_fw }}" + name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_job_name }}" wait: true register: logs_pod until: logs_pod.resources[0].status.phase == "Running" @@ -149,7 +149,7 @@ pod_path: mnt/logs-{{ test_operator_job_name }}-step-{{ index }} ansible.builtin.shell: > oc cp -n {{ 
cifmw_test_operator_namespace }} - openstack/test-operator-logs-pod-{{ run_test_fw }}:{{ pod_path }} + openstack/test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_job_name }}:{{ pod_path }} {{ cifmw_test_operator_artifacts_basedir }} loop: "{{ logsPVCs.resources }}" loop_control: @@ -251,7 +251,7 @@ kind: Pod state: absent api_version: v1 - name: "test-operator-logs-pod-{{ run_test_fw }}" + name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_job_name }}" namespace: "{{ cifmw_test_operator_namespace }}" wait: true wait_timeout: 600 diff --git a/roles/test_operator/vars/main.yml b/roles/test_operator/vars/main.yml index e56ae73a72..d97639b5e7 100644 --- a/roles/test_operator/vars/main.yml +++ b/roles/test_operator/vars/main.yml @@ -14,10 +14,6 @@ # under the License. cifmw_test_operator_controller_priv_key_secret_name: "test-operator-controller-priv-key" -cifmw_test_operator_tempest_name: "tempest-tests" -cifmw_test_operator_tobiko_name: "tobiko-tests" -cifmw_test_operator_ansibletest_name: "ansibletest" -cifmw_test_operator_horizontest_name: "horizontest-tests" cifmw_test_operator_tempest_kind_name: "Tempest" cifmw_test_operator_tobiko_kind_name: "Tobiko" cifmw_test_operator_ansibletest_kind_name: "AnsibleTest"