diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 1aadd145..20c255ba 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -20,7 +20,7 @@ concurrency: cancel-in-progress: true env: - AWS_REGION: us-east-1 + AWS_REGION: us-west-2 jobs: pr-info: @@ -57,14 +57,14 @@ jobs: ref: ${{ fromJson(steps.pr-api.outputs.data).head.ref }} repo_url: ${{ fromJson(steps.pr-api.outputs.data).head.repo.html_url }} - rhel94-integration: + integration: needs: pr-info if: ${{ needs.pr-info.outputs.allowed_user == 'true' && !contains(github.event.pull_request.labels.*.name, 'control/skip-ci') }} continue-on-error: true strategy: matrix: arch: [x86_64, aarch64] - platform: [aws] + distro: [rhel-9-4, rhel-9-5, centos-stream-9, fedora-40, fedora-41] runs-on: ubuntu-latest steps: @@ -75,46 +75,17 @@ jobs: fetch-depth: 0 - name: Run the tests - uses: sclorg/testing-farm-as-github-action@v1 + uses: sclorg/testing-farm-as-github-action@v2 with: - compose: CentOS-Stream-9 + compose: Fedora-40 api_key: ${{ secrets.TF_API_KEY }} git_url: ${{ needs.pr-info.outputs.repo_url }} git_ref: ${{ needs.pr-info.outputs.ref }} arch: ${{ matrix.arch }} + update_pull_request_status: true + pull_request_status_name: "bootc-${{ matrix.distro }}-${{ matrix.arch }}" tmt_context: "arch=${{ matrix.arch }}" - tmt_plan_regex: "${{ matrix.platform }}" + tmt_plan_regex: "/install-upgrade/" tf_scope: private secrets: "QUAY_USERNAME=${{ secrets.QUAY_USERNAME }};QUAY_PASSWORD=${{ secrets.QUAY_PASSWORD }};QUAY_SECRET=${{ secrets.QUAY_SECRET }};RHEL_REGISTRY_URL=${{ secrets.RHEL_REGISTRY_URL }};DOWNLOAD_NODE=${{ secrets.DOWNLOAD_NODE }};AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }};AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}" - variables: "TEST_OS=rhel-9-4;PLATFORM=${{ matrix.platform }};ARCH=${{ matrix.arch }};AWS_REGION=${{ env.AWS_REGION }}" - - cs9-dev-integration: - needs: pr-info - if: ${{ needs.pr-info.outputs.allowed_user 
== 'true' && !contains(github.event.pull_request.labels.*.name, 'control/skip-ci') }} - continue-on-error: true - strategy: - matrix: - arch: [x86_64, aarch64] - platform: [aws] - runs-on: ubuntu-latest - - steps: - - name: Clone repository - uses: actions/checkout@v4 - with: - ref: ${{ needs.pr-info.outputs.sha }} - fetch-depth: 0 - - - name: Run the tests - uses: sclorg/testing-farm-as-github-action@v1 - with: - compose: CentOS-Stream-9 - api_key: ${{ secrets.TF_API_KEY }} - git_url: ${{ needs.pr-info.outputs.repo_url }} - git_ref: ${{ needs.pr-info.outputs.ref }} - arch: ${{ matrix.arch }} - tmt_context: "arch=${{ matrix.arch }}" - tmt_plan_regex: "${{ matrix.platform }}" - tf_scope: private - secrets: "QUAY_USERNAME=${{ secrets.QUAY_USERNAME }};QUAY_PASSWORD=${{ secrets.QUAY_PASSWORD }};QUAY_SECRET=${{ secrets.QUAY_SECRET }};AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }};AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}" - variables: "TEST_OS=centos-stream-9;PLATFORM=${{ matrix.platform }};ARCH=${{ matrix.arch }};AWS_REGION=${{ env.AWS_REGION }}" + variables: "TEST_OS=${{ matrix.distro }};ARCH=${{ matrix.arch }};AWS_REGION=${{ env.AWS_REGION }}" diff --git a/plans/install-upgrade.fmf b/plans/install-upgrade.fmf index 200db3d9..0de4cf89 100644 --- a/plans/install-upgrade.fmf +++ b/plans/install-upgrade.fmf @@ -5,11 +5,9 @@ prepare: - how: install package: - ansible-core - - gcc - podman - skopeo - jq - - python3-devel - unzip - how: shell script: ansible-galaxy collection install https://ansible-collection.s3.amazonaws.com/ansible-posix-1.5.4.tar.gz https://ansible-collection.s3.amazonaws.com/community-general-8.5.0.tar.gz @@ -18,7 +16,6 @@ execute: /aws: summary: Run bootc install and upgrade test on aws - tag: aws environment+: PLATFORM: aws discover+: @@ -31,3 +28,74 @@ execute: prepare+: - how: shell script: curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" && unzip awscliv2.zip && sudo ./aws/install + 
+/libvirt: + summary: Run bootc install and upgrade test locally (nested) + environment+: + PLATFORM: libvirt + AIR_GAPPED: 1 + discover+: + test: + - /rpm-build + - /bootc-install-upgrade + prepare+: + - how: shell + script: | + source /etc/os-release + if [[ "$ID" == "rhel" ]] || [[ "$ID" == "centos" ]]; then + # EPEL for genisoimage + dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + fi + - how: install + package: + - qemu-kvm + - libvirt + - virt-install + - genisoimage + adjust+: + - when: arch == ppc64le + enabled: false + - when: arch == x86_64 or arch == aarch64 + provision+: + hardware: + cpu: + processors: ">= 2" + memory: ">= 6 GB" + virtualization: + is-supported: true + +/to-disk: + summary: Use bootc install to-disk to generate raw image and test locally (nested) + environment+: + PLATFORM: libvirt + IMAGE_TYPE: to-disk + discover+: + test: + - /rpm-build + - /image-install-upgrade + prepare+: + - how: shell + script: | + source /etc/os-release + if [[ "$ID" == "rhel" ]] || [[ "$ID" == "centos" ]]; then + # EPEL for genisoimage + dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + fi + - how: install + package: + - qemu-img + - qemu-kvm + - libvirt + - virt-install + - genisoimage + adjust+: + - when: arch == ppc64le + enabled: false + - when: arch == x86_64 or arch == aarch64 + provision+: + hardware: + cpu: + processors: ">= 2" + memory: ">= 6 GB" + virtualization: + is-supported: true diff --git a/tests/integration/install-upgrade.sh b/tests/integration/bootc-install-upgrade.sh similarity index 59% rename from tests/integration/install-upgrade.sh rename to tests/integration/bootc-install-upgrade.sh index 02a0e87f..8c5888e2 100755 --- a/tests/integration/install-upgrade.sh +++ b/tests/integration/bootc-install-upgrade.sh @@ -1,24 +1,8 @@ #!/bin/bash set -exuo pipefail -# Colorful timestamped output. 
-function greenprint { - echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m" -} - -function redprint { - echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m" -} - -function retry { - n=0 - until [ "$n" -ge 3 ] - do - "$@" && break - n=$((n+1)) - sleep 10 - done -} +source ./shared_lib.sh +dump_runner TEMPDIR=$(mktemp -d) trap 'rm -rf -- "$TEMPDIR"' EXIT @@ -34,14 +18,25 @@ QUAY_REPO_TAG="${QUAY_REPO_TAG:-$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; ech INVENTORY_FILE="${TEMPDIR}/inventory" REPLACE_CLOUD_USER="" +TEST_IMAGE_NAME="bootc-workflow-test" +TEST_IMAGE_URL="quay.io/redhat_emp1/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" + case "$TEST_OS" in - "rhel-9-4") - IMAGE_NAME="rhel9-rhel_bootc" - TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/${IMAGE_NAME}:rhel-9.4" + "rhel-9"*) + if [[ "$TEST_OS" == "rhel-9-4" ]]; then + TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.4" + BATCH_COMPOSE="updates/" + LATEST_COMPOSE_ID="latest-RHEL-9.4.0" + REDHAT_VERSION_ID="9.4" + else + TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.5" + BATCH_COMPOSE="" + LATEST_COMPOSE_ID="latest-RHEL-9.5.0" + REDHAT_VERSION_ID="9.5" + fi SSH_USER="cloud-user" - CURRENT_COMPOSE_RHEL94=$(skopeo inspect --tls-verify=false "docker://${TIER1_IMAGE_URL}" | jq -r '.Labels."redhat.compose-id"') - sed "s/REPLACE_ME/${DOWNLOAD_NODE}/; s/REPLACE_COMPOSE_ID/${CURRENT_COMPOSE_RHEL94}/" files/rhel-9.template | tee rhel-9.repo > /dev/null - ADD_REPO="COPY rhel-9.repo /etc/yum.repos.d/rhel-9.repo" + sed "s/REPLACE_ME/${DOWNLOAD_NODE}/; s|REPLACE_BATCH_COMPOSE|${BATCH_COMPOSE}|; s/REPLACE_COMPOSE_ID/${LATEST_COMPOSE_ID}/" files/rhel-9-y.template | tee rhel-9-y.repo > /dev/null + ADD_REPO="COPY rhel-9-y.repo /etc/yum.repos.d/rhel-9-y.repo" if [[ "$PLATFORM" == "aws" ]]; then SSH_USER="ec2-user" REPLACE_CLOUD_USER='RUN sed -i "s/name: cloud-user/name: ec2-user/g" /etc/cloud/cloud.cfg' @@ -50,27 +45,38 @@ case "$TEST_OS" in tee -a "playbooks/user-data" > /dev/null << EOF #cloud-config yum_repos: - 
rhel-9x-baseos: - name: rhel-9x-baseos - baseurl: http://${DOWNLOAD_NODE}/rhel-9/composes/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/BaseOS/${ARCH}/os/ + rhel-9y-baseos: + name: rhel-9y-baseos + baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/BaseOS/\$basearch/os/ enabled: true gpgcheck: false - rhel-9x-appstream: - name: rhel-9x-appstream - baseurl: http://${DOWNLOAD_NODE}/rhel-9/composes/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/AppStream/${ARCH}/os/ + rhel-9y-appstream: + name: rhel-9y-appstream + baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/AppStream/\$basearch/os/ enabled: true gpgcheck: false EOF ;; "centos-stream-9") - IMAGE_NAME="centos-bootc-dev" - TIER1_IMAGE_URL="quay.io/centos-bootc/${IMAGE_NAME}:stream9" + TIER1_IMAGE_URL="quay.io/centos-bootc/centos-bootc-dev:stream9" SSH_USER="cloud-user" ADD_REPO="" if [[ "$PLATFORM" == "aws" ]]; then SSH_USER="ec2-user" REPLACE_CLOUD_USER='RUN sed -i "s/name: cloud-user/name: ec2-user/g" /etc/cloud/cloud.cfg' fi + REDHAT_VERSION_ID="9" + ;; + "fedora"*) + if [[ "$TEST_OS" == "fedora-40" ]]; then + TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:40" + REDHAT_VERSION_ID="40" + else + TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:41" + REDHAT_VERSION_ID="41" + fi + SSH_USER="fedora" + ADD_REPO="" ;; *) redprint "Variable TEST_OS has to be defined" @@ -78,10 +84,8 @@ EOF ;; esac -TEST_IMAGE_NAME="${IMAGE_NAME}-test" -TEST_IMAGE_URL="quay.io/redhat_emp1/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" - sed "s/REPLACE_ME/${QUAY_SECRET}/g" files/auth.template | tee auth.json > /dev/null + greenprint "Create $TEST_OS installation Containerfile" tee "$INSTALL_CONTAINERFILE" > /dev/null << EOF FROM "$TIER1_IMAGE_URL" @@ -89,11 +93,31 @@ $ADD_REPO COPY build/bootc-2*.${ARCH}.rpm . 
RUN dnf -y update ./bootc-2*.${ARCH}.rpm && \ rm -f ./bootc-2*.${ARCH}.rpm +COPY auth.json /etc/ostree/auth.json +EOF + +case "$PLATFORM" in + "aws") + tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF RUN dnf -y install python3 cloud-init && \ dnf -y clean all -COPY auth.json /etc/ostree/auth.json $REPLACE_CLOUD_USER EOF + ;; + "libvirt") + SSH_USER="root" + SSH_KEY_PUB_CONTENT=$(cat "${SSH_KEY_PUB}") + tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF +RUN mkdir -p /usr/etc-system/ && \ + echo 'AuthorizedKeysFile /usr/etc-system/%u.keys' >> /etc/ssh/sshd_config.d/30-auth-system.conf && \ + echo "$SSH_KEY_PUB_CONTENT" > /usr/etc-system/root.keys && \ + chmod 0600 /usr/etc-system/root.keys && \ + dnf -y install qemu-guest-agent && \ + dnf clean all && \ + systemctl enable qemu-guest-agent +EOF + ;; +esac greenprint "Check $TEST_OS installation Containerfile" cat "$INSTALL_CONTAINERFILE" @@ -129,23 +153,38 @@ EOF greenprint "Prepare ansible.cfg" export ANSIBLE_CONFIG="playbooks/ansible.cfg" +# AIR_GAPPED=1 means add passthough mount to test bootc swtich to local disk +if [[ ${AIR_GAPPED-} -eq 1 ]];then + AIR_GAPPED_DIR="$TEMPDIR"/virtiofs + mkdir "$AIR_GAPPED_DIR" +else + AIR_GAPPED=0 + AIR_GAPPED_DIR="" +fi + greenprint "Deploy $PLATFORM instance" ansible-playbook -v \ -i "$INVENTORY_FILE" \ + -e test_os="$TEST_OS" \ + -e ssh_user="$SSH_USER" \ -e ssh_key_pub="$SSH_KEY_PUB" \ -e inventory_file="$INVENTORY_FILE" \ + -e air_gapped_dir="$AIR_GAPPED_DIR" \ "playbooks/deploy-${PLATFORM}.yaml" greenprint "Install $TEST_OS bootc system" ansible-playbook -v \ -i "$INVENTORY_FILE" \ + -e test_os="$TEST_OS" \ -e test_image_url="$TEST_IMAGE_URL" \ playbooks/install.yaml greenprint "Run ostree checking test on $PLATFORM instance" ansible-playbook -v \ -i "$INVENTORY_FILE" \ + -e test_os="$TEST_OS" \ -e bootc_image="$TEST_IMAGE_URL" \ + -e image_label_version_id="$REDHAT_VERSION_ID" \ playbooks/check-system.yaml greenprint "Create upgrade Containerfile" @@ -161,21 +200,32 @@ 
podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME greenprint "Push $TEST_OS upgrade container image" retry podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL" +if [[ ${AIR_GAPPED-} -eq 1 ]]; then + retry skopeo copy docker://"$TEST_IMAGE_URL" dir://"$AIR_GAPPED_DIR" + BOOTC_IMAGE="/mnt" +else + BOOTC_IMAGE="$TEST_IMAGE_URL" +fi + greenprint "Upgrade $TEST_OS system" ansible-playbook -v \ -i "$INVENTORY_FILE" \ + -e air_gapped_dir="$AIR_GAPPED_DIR" \ playbooks/upgrade.yaml greenprint "Run ostree checking test after upgrade on $PLATFORM instance" ansible-playbook -v \ -i "$INVENTORY_FILE" \ - -e bootc_image="$TEST_IMAGE_URL" \ + -e test_os="$TEST_OS" \ + -e bootc_image="$BOOTC_IMAGE" \ + -e image_label_version_id="$REDHAT_VERSION_ID" \ -e upgrade="true" \ playbooks/check-system.yaml greenprint "Rollback $TEST_OS system" ansible-playbook -v \ -i "$INVENTORY_FILE" \ + -e air_gapped_dir="$AIR_GAPPED_DIR" \ playbooks/rollback.yaml greenprint "Remove $PLATFORM instance" @@ -185,7 +235,7 @@ ansible-playbook -v \ playbooks/remove.yaml greenprint "Clean up" -rm -rf auth.json rhel-9-4.repo +rm -rf auth.json rhel-9-y.repo unset ANSIBLE_CONFIG greenprint "🎉 All tests passed." 
diff --git a/tests/integration/files/rhel-9-y.template b/tests/integration/files/rhel-9-y.template new file mode 100644 index 00000000..0e4c853f --- /dev/null +++ b/tests/integration/files/rhel-9-y.template @@ -0,0 +1,10 @@ +[rhel-9y-baseos] +baseurl=http://REPLACE_ME/rhel-9/nightly/REPLACE_BATCH_COMPOSERHEL-9/REPLACE_COMPOSE_ID/compose/BaseOS/$basearch/os/ +enabled=1 +gpgcheck=0 + +[rhel-9y-appstream] +baseurl=http://REPLACE_ME/rhel-9/nightly/REPLACE_BATCH_COMPOSERHEL-9/REPLACE_COMPOSE_ID/compose/AppStream/$basearch/os/ +enabled=1 +gpgcheck=0 + diff --git a/tests/integration/files/rhel-9.template b/tests/integration/files/rhel-9.template deleted file mode 100644 index 6b735b92..00000000 --- a/tests/integration/files/rhel-9.template +++ /dev/null @@ -1,10 +0,0 @@ -[rhel-9x-baseos] -baseurl=http://REPLACE_ME/rhel-9/composes/RHEL-9/REPLACE_COMPOSE_ID/compose/BaseOS/$basearch/os/ -enabled=1 -gpgcheck=0 - -[rhel-9x-appstream] -baseurl=http://REPLACE_ME/rhel-9/composes/RHEL-9/REPLACE_COMPOSE_ID/compose/AppStream/$basearch/os/ -enabled=1 -gpgcheck=0 - diff --git a/tests/integration/image-install-upgrade.sh b/tests/integration/image-install-upgrade.sh new file mode 100755 index 00000000..24688023 --- /dev/null +++ b/tests/integration/image-install-upgrade.sh @@ -0,0 +1,241 @@ +#!/bin/bash +set -exuo pipefail + +source ./shared_lib.sh +dump_runner + +TEMPDIR=$(mktemp -d) +trap 'rm -rf -- "$TEMPDIR"' EXIT + +# SSH configurations +SSH_KEY=${TEMPDIR}/id_rsa +ssh-keygen -f "${SSH_KEY}" -N "" -q -t rsa-sha2-256 -b 2048 +SSH_KEY_PUB="${SSH_KEY}.pub" + +INSTALL_CONTAINERFILE=${TEMPDIR}/Containerfile.install +UPGRADE_CONTAINERFILE=${TEMPDIR}/Containerfile.upgrade +QUAY_REPO_TAG="${QUAY_REPO_TAG:-$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')}" +INVENTORY_FILE="${TEMPDIR}/inventory" + +TEST_IMAGE_NAME="bootc-workflow-test" + +case "$TEST_OS" in + "rhel-9"*) + if [[ "$TEST_OS" == "rhel-9-4" ]]; then + TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.4" + 
BATCH_COMPOSE="updates/" + LATEST_COMPOSE_ID="latest-RHEL-9.4.0" + REDHAT_VERSION_ID="9.4" + else + TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.5" + BATCH_COMPOSE="" + LATEST_COMPOSE_ID="latest-RHEL-9.5.0" + REDHAT_VERSION_ID="9.5" + fi + TEST_IMAGE_URL="quay.io/redhat_emp1/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" + SSH_USER="cloud-user" + sed "s/REPLACE_ME/${DOWNLOAD_NODE}/; s|REPLACE_BATCH_COMPOSE|${BATCH_COMPOSE}|; s/REPLACE_COMPOSE_ID/${LATEST_COMPOSE_ID}/" files/rhel-9-y.template | tee rhel-9-y.repo > /dev/null + ADD_REPO="COPY rhel-9-y.repo /etc/yum.repos.d/rhel-9-y.repo" + greenprint "Prepare cloud-init file" + tee -a "playbooks/user-data" > /dev/null << EOF +#cloud-config +yum_repos: + rhel-9y-baseos: + name: rhel-9y-baseos + baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/BaseOS/\$basearch/os/ + enabled: true + gpgcheck: false + rhel-9y-appstream: + name: rhel-9y-appstream + baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/AppStream/\$basearch/os/ + enabled: true + gpgcheck: false +EOF + BOOT_ARGS="uefi" + ;; + "centos-stream-9") + TIER1_IMAGE_URL="quay.io/centos-bootc/centos-bootc-dev:stream9" + ADD_REPO="" + SSH_USER="cloud-user" + REDHAT_VERSION_ID="9" + TEST_IMAGE_URL="quay.io/bootc-test/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" + BOOT_ARGS="uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no" + ;; + "fedora"*) + if [[ "$TEST_OS" == "fedora-40" ]]; then + TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:40" + REDHAT_VERSION_ID="40" + else + TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:41" + REDHAT_VERSION_ID="41" + fi + SSH_USER="fedora" + ADD_REPO="" + TEST_IMAGE_URL="quay.io/bootc-test/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" + BOOT_ARGS="uefi" + ;; + *) + redprint "Variable TIER1_IMAGE_URL is not supported" + exit 1 + ;; +esac + +sed "s/REPLACE_ME/${QUAY_SECRET}/g" files/auth.template | tee auth.json > /dev/null +greenprint 
"Create $TEST_OS installation Containerfile" +tee "$INSTALL_CONTAINERFILE" > /dev/null << EOF +FROM "$TIER1_IMAGE_URL" +$ADD_REPO +COPY build/bootc-2*.${ARCH}.rpm . +RUN dnf -y update ./bootc-2*.${ARCH}.rpm && \ + rm -f ./bootc-2*.${ARCH}.rpm +RUN dnf -y install python3 cloud-init && \ + dnf -y clean all +COPY auth.json /etc/ostree/auth.json +EOF + +greenprint "Check $TEST_OS installation Containerfile" +cat "$INSTALL_CONTAINERFILE" + +greenprint "Login quay.io" +sudo podman login -u "${QUAY_USERNAME}" -p "${QUAY_PASSWORD}" quay.io + +greenprint "Build $TEST_OS installation container image" +sudo podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$INSTALL_CONTAINERFILE" . + +greenprint "Push $TEST_OS installation container image" +sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL" + +greenprint "Prepare inventory file" +tee -a "$INVENTORY_FILE" > /dev/null << EOF +[cloud] +localhost + +[guest] + +[cloud:vars] +ansible_connection=local + +[guest:vars] +ansible_user="$SSH_USER" +ansible_private_key_file="$SSH_KEY" +ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + +[all:vars] +ansible_python_interpreter=/usr/bin/python3 +EOF + +greenprint "Prepare ansible.cfg" +export ANSIBLE_CONFIG="${PWD}/playbooks/ansible.cfg" + +case "$IMAGE_TYPE" in + "to-disk") + greenprint "Configure rootfs randomly" + ROOTFS_LIST=( \ + "ext4" \ + "xfs" \ + ) + RND_LINE=$((RANDOM % 2)) + ROOTFS="${ROOTFS_LIST[$RND_LINE]}" + + if [[ "$TEST_OS" == "fedora"* ]]; then + ROOTFS="btrfs" + fi + + greenprint "💾 Create disk.raw" + sudo truncate -s 10G disk.raw + + greenprint "bootc install to disk.raw" + sudo podman run \ + --rm \ + --privileged \ + --pid=host \ + --security-opt label=type:unconfined_t \ + -v /var/lib/containers:/var/lib/containers \ + -v /dev:/dev \ + -v .:/output \ + "$TEST_IMAGE_URL" \ + bootc install to-disk --filesystem "$ROOTFS" 
--generic-image --via-loopback /output/disk.raw + + sudo qemu-img convert -f raw ./disk.raw -O qcow2 "/var/lib/libvirt/images/disk.qcow2" + rm -f disk.raw + + if [[ "$ARCH" == "x86_64" ]]; then + BIB_FIRMWARE_LIST=( \ + "bios" \ + "uefi" \ + ) + RND_LINE=$((RANDOM % 2)) + BIB_FIRMWARE="${BIB_FIRMWARE_LIST[$RND_LINE]}" + else + BIB_FIRMWARE="uefi" + fi + + greenprint "Deploy $IMAGE_TYPE instance" + ansible-playbook -v \ + -i "$INVENTORY_FILE" \ + -e test_os="$TEST_OS" \ + -e ssh_key_pub="$SSH_KEY_PUB" \ + -e ssh_user="$SSH_USER" \ + -e inventory_file="$INVENTORY_FILE" \ + -e bib="true" \ + -e boot_args="$BOOT_ARGS" \ + -e bib_firmware="$BIB_FIRMWARE" \ + "playbooks/deploy-libvirt.yaml" + ;; + *) + redprint "Variable IMAGE_TYPE has to be defined" + exit 1 + ;; +esac + +greenprint "Run ostree checking test on $PLATFORM instance" +ansible-playbook -v \ + -i "$INVENTORY_FILE" \ + -e test_os="$TEST_OS" \ + -e bootc_image="$TEST_IMAGE_URL" \ + -e image_label_version_id="$REDHAT_VERSION_ID" \ + playbooks/check-system.yaml + +greenprint "Create upgrade Containerfile" +tee "$UPGRADE_CONTAINERFILE" > /dev/null << EOF +FROM "$TEST_IMAGE_URL" +RUN dnf -y install wget && \ + dnf -y clean all +EOF + +greenprint "Build $TEST_OS upgrade container image" +sudo podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$UPGRADE_CONTAINERFILE" . 
+greenprint "Push $TEST_OS upgrade container image" +sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL" + +greenprint "Upgrade $TEST_OS system" +ansible-playbook -v \ + -i "$INVENTORY_FILE" \ + playbooks/upgrade.yaml + +greenprint "Run ostree checking test after upgrade on $PLATFORM instance" +ansible-playbook -v \ + -i "$INVENTORY_FILE" \ + -e test_os="$TEST_OS" \ + -e bootc_image="$TEST_IMAGE_URL" \ + -e image_label_version_id="$REDHAT_VERSION_ID" \ + -e upgrade="true" \ + playbooks/check-system.yaml + +greenprint "Rollback $TEST_OS system" +ansible-playbook -v \ + -i "$INVENTORY_FILE" \ + playbooks/rollback.yaml + +greenprint "Terminate $PLATFORM instance and deregister AMI" +ansible-playbook -v \ + -i "$INVENTORY_FILE" \ + -e platform="$PLATFORM" \ + playbooks/remove.yaml + +greenprint "Clean up" +rm -rf auth.json rhel-9-y.repo +unset ANSIBLE_CONFIG + +greenprint "🎉 All tests passed." +exit 0 diff --git a/tests/integration/install-upgrade.fmf b/tests/integration/install-upgrade.fmf index 66a33b54..5ccbc12d 100644 --- a/tests/integration/install-upgrade.fmf +++ b/tests/integration/install-upgrade.fmf @@ -5,5 +5,10 @@ /bootc-install-upgrade: summary: bootc install and upgrade test - test: ./install-upgrade.sh + test: ./bootc-install-upgrade.sh + duration: 90m + +/image-install-upgrade: + summary: bootc install to-disk and upgrade test + test: ./image-install-upgrade.sh duration: 90m diff --git a/tests/integration/mockbuild.sh b/tests/integration/mockbuild.sh index 8435b51e..62045061 100755 --- a/tests/integration/mockbuild.sh +++ b/tests/integration/mockbuild.sh @@ -1,6 +1,8 @@ #!/bin/bash set -exuo pipefail +ARCH=$(uname -m) + # Colorful output. 
function greenprint { echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m" @@ -21,34 +23,38 @@ shopt -s extglob TARGET_FOLDER=(target/.tmp*) case "$TEST_OS" in - "rhel-9-4") + "rhel-9"*) TEMPLATE="rhel-9.tpl" greenprint "📝 update mock rhel-9 template" # disable subscription for nightlies sed -i "s/config_opts\['redhat_subscription_required'\] = True/config_opts['redhat_subscription_required'] = False/" /etc/mock/templates/"$TEMPLATE" # delete default cdn compose and add nightly compose - IMAGE_NAME="rhel9-rhel_bootc" - TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/${IMAGE_NAME}:rhel-9.4" - CURRENT_COMPOSE_RHEL94=$(skopeo inspect --tls-verify=false "docker://${TIER1_IMAGE_URL}" | jq -r '.Labels."redhat.compose-id"') sed -i '/user_agent/q' /etc/mock/templates/"$TEMPLATE" + if [[ "$TEST_OS" == "rhel-9-4" ]]; then + BATCH_COMPOSE="updates/" + LATEST_COMPOSE_ID="latest-RHEL-9.4.0" + else + BATCH_COMPOSE="" + LATEST_COMPOSE_ID="latest-RHEL-9.5.0" + fi tee -a /etc/mock/templates/"$TEMPLATE" > /dev/null << EOF [BaseOS] name=Red Hat Enterprise Linux - BaseOS -baseurl=http://${DOWNLOAD_NODE}/rhel-9/composes/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/BaseOS/\$basearch/os/ +baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/BaseOS/\$basearch/os/ enabled=1 gpgcheck=0 [AppStream] name=Red Hat Enterprise Linux - AppStream -baseurl=http://${DOWNLOAD_NODE}/rhel-9/composes/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/AppStream/\$basearch/os/ +baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/AppStream/\$basearch/os/ enabled=1 gpgcheck=0 [CRB] -name = Red Hat Enterprise Linux - CRB -baseurl = http://${DOWNLOAD_NODE}/rhel-9/composes/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/CRB/\$basearch/os/ -enabled = 1 -gpgcheck = 0 +name=Red Hat Enterprise Linux - CRB +baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/CRB/\$basearch/os/ +enabled=1 +gpgcheck=0 """ EOF 
MOCK_CONFIG="rhel-9-${ARCH}" @@ -56,6 +62,12 @@ EOF "centos-stream-9") MOCK_CONFIG="centos-stream-9-${ARCH}" ;; + "fedora-40") + MOCK_CONFIG="fedora-40-${ARCH}" + ;; + "fedora-41") + MOCK_CONFIG="fedora-41-${ARCH}" + ;; *) redprint "Variable TEST_OS has to be defined" exit 1 diff --git a/tests/integration/playbooks/check-system.yaml b/tests/integration/playbooks/check-system.yaml index 66854526..ede5a3ae 100644 --- a/tests/integration/playbooks/check-system.yaml +++ b/tests/integration/playbooks/check-system.yaml @@ -100,12 +100,19 @@ failed_counter: "{{ failed_counter | int + 1 }}" - name: set mount point device name - command: findmnt -r -o SOURCE -n /sysroot + command: findmnt -r -v -o SOURCE -n /sysroot register: result_sysroot_source - set_fact: device_name: "{{ result_sysroot_source.stdout }}" + - name: get filesystem type + shell: df --output=fstype -v /sysroot | grep -v Type + register: result_fstype + + - set_fact: + fstype: "{{ result_fstype.stdout }}" + - name: get ostree osname shell: rpm-ostree status --json | jq -r '.deployments[0].osname' register: result_osname @@ -162,6 +169,28 @@ failed_counter: "{{ failed_counter | int + 1 }}" vars: var_mount_path: "{{ device_name }}[/ostree/deploy/{{ osname }}/var]" + when: fstype != "btrfs" + + # btrfs defines subvolume /root in fedora + # but for bootc install to-disk will set btrfs subvolume / + - name: /var mount point checking - btrfs + block: + - assert: + that: + - result_var_mount_point.stdout == var_mount_path_1 or result_var_mount_point.stdout == var_mount_path_2 + fail_msg: "/var does not mount on {{ var_mount_path_1 }} or {{ var_mount_path_2 }}" + success_msg: "/var mounts on {{ var_mount_path_1 }} or {{ var_mount_path_2 }}" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + vars: + var_mount_path_1: "{{ device_name }}[/root/ostree/deploy/{{ osname }}/var]" + var_mount_path_2: 
"{{ device_name }}[/ostree/deploy/{{ osname }}/var]" + when: fstype == "btrfs" # case: check /var mount status - name: check /var mount status @@ -203,6 +232,44 @@ set_fact: failed_counter: "{{ failed_counter | int + 1 }}" + - name: check VERSION_ID in /etc/os-release + shell: awk -F '=' '/^VERSION_ID/ {print $2}' /etc/os-release | tr -d '"' + register: result_os_release_version_id + + - name: redhat.version-id == VERSION_ID + block: + - assert: + that: + - image_label_version_id == result_os_release_version_id.stdout + fail_msg: "version_id in label != version_id in /etc/os-release" + success_msg: "version_id in label == version_id in /etc/os-release" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + - name: check selinux status + shell: getenforce + register: result_selinux + + - name: selinux is Enforcing + block: + - assert: + that: + - result_selinux.stdout == "Enforcing" + fail_msg: "SELinux status is not Enforcing" + success_msg: "SELinux is Enforcing" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + - name: check bootc-fetch-apply-updates.timer left time shell: systemctl list-timers bootc-fetch-apply-updates.timer --output json | jq -r '.[].left' register: result_bootc_timer_left @@ -278,6 +345,17 @@ register: result_dmesg_error become: true + - name: check journal error and fail log + shell: journalctl | grep -i "error\|fail" | grep -v "skipped" | grep -v "failover" | grep -v "ignition" | grep -v "Driver 'pcspkr'" || true + register: result_journalctl_error + become: true + + - name: check selinux deny log + shell: journalctl | grep -i denied + register: result_selinux_denied + become: true + ignore_errors: true + # case: check running container with podman in root - name: run CentOS Stream 9 image with 
podman in root command: podman run --rm quay.io/centos/centos:stream9 cat /etc/redhat-release @@ -371,6 +449,8 @@ - name: failed count + 1 set_fact: failed_counter: "{{ failed_counter | int + 1 }}" + when: ansible_facts['distribution_version'] != "41" + # workaround for issue https://gitlab.com/fedora/bootc/base-images/-/issues/10 # case: check reboot times - name: check reboot times diff --git a/tests/integration/playbooks/deploy-aws.yaml b/tests/integration/playbooks/deploy-aws.yaml index 779dae23..d6b238b7 100644 --- a/tests/integration/playbooks/deploy-aws.yaml +++ b/tests/integration/playbooks/deploy-aws.yaml @@ -3,18 +3,12 @@ gather_facts: false become: false vars: - test_os: "{{ lookup('env', 'TEST_OS') | default('centos-stream-9', true) }}" + test_os: "" arch: "{{ lookup('env', 'ARCH') | default('x86_64', true) }}" ssh_key_pub: "" inventory_file: "" download_node: "{{ lookup('env', 'DOWNLOAD_NODE') | default('', true) }}" - ami: - x86_64: - rhel-9-4: ami-044442bf4326c9933 - centos-stream-9: ami-08f2fe20b72b2ffa7 - aarch64: - rhel-9-4: ami-0eb96688e8d46c2af - centos-stream-9: ami-09866c25c2d97b6cc + spot_max_price: "0.1" instance_type: x86_64: "0": t2.medium @@ -39,53 +33,16 @@ set_fact: random_instance_type: "{{ lookup('env', 'instance_type') | default(instance_type[arch][instance_type_index], true) }}" - - name: "get available zone for instance {{ random_instance_type }}" - shell: | - aws ec2 describe-instance-type-offerings \ - --location-type availability-zone \ - --filters="Name=instance-type,Values={{ random_instance_type }}" \ - --query InstanceTypeOfferings | jq -r '.[0].Location' - register: ec2_zone - when: "'rhel' not in test_os" - - - name: get subnet - shell: | - aws ec2 describe-subnets \ - --output json \ - --filters "Name=tag:Name,Values=kite-ci" "Name=availabilityZone,Values={{ ec2_zone.stdout }}" | \ - jq -r ".Subnets[0].SubnetId" - register: ec2_vpc_subnet - when: "'rhel' not in test_os" - - - set_fact: - subnet_id: "{{ 
ec2_vpc_subnet.stdout }}" - when: "'rhel' not in test_os" - - name: get virtqe subnet shell: | aws ec2 describe-subnets \ --output json \ - --filters "Name=tag:Name,Values=InternalA-virtqe" | \ + --filters "Name=tag:Name,Values=virtqe_test_prod_us-west-2_internal-a" | \ jq -r ".Subnets[0].SubnetId" register: ec2_vpc_subnet - when: '"rhel" in test_os' - set_fact: subnet_id: "{{ ec2_vpc_subnet.stdout }}" - when: "'rhel' in test_os" - - - name: get security group - shell: | - aws ec2 describe-security-groups \ - --filters="Name=tag:Name,Values=kite-ci" \ - --output json | \ - jq -r ".SecurityGroups[0].GroupId" - register: ec2_security_group - when: "'rhel' not in test_os" - - - set_fact: - group_id: "{{ ec2_security_group.stdout }}" - when: "'rhel' not in test_os" - name: get virtqe security group shell: | @@ -94,11 +51,9 @@ --output json | \ jq -r ".SecurityGroups[0].GroupId" register: ec2_security_group - when: "'rhel' in test_os" - set_fact: group_id: "{{ ec2_security_group.stdout }}" - when: "'rhel' in test_os" - name: config ssh keypair used by test shell: | @@ -107,6 +62,37 @@ --public-key-material "fileb://{{ ssh_key_pub }}" \ --tag-specification 'ResourceType=key-pair,Tags=[{Key=Name,Value=bootc-test}]' + - name: get ami id from aws ssm + shell: | + aws ssm get-parameter \ + --name "bootc-{{ test_os }}-{{ arch }}" | jq -r '.Parameter.Value' + register: result_ami_id + + - set_fact: + ami_id: "{{ result_ami_id.stdout }}" + + - name: "{{ random_instance_type }} is available in zone us-west-2a" + shell: + aws ec2 describe-instance-type-offerings \ + --location-type availability-zone \ + --filters "Name=instance-type,Values={{ random_instance_type }}" "Name=location,Values=us-west-2a" \ + --query "InstanceTypeOfferings" --region us-west-2 + register: result_instance_type_available + + - name: get available instance type in zone us-west-2a + shell: + aws ec2 describe-instance-type-offerings \ + --location-type availability-zone \ + --filters 
"Name=instance-type,Values={{ instance_type[arch]["0"] }},{{ instance_type[arch]["1"] }},{{ instance_type[arch]["2"] }}" "Name=location,Values=us-west-2a" \ + --query "InstanceTypeOfferings" \ + --region us-west-2 | jq -r '.[0].InstanceType' + register: result_instance_type + when: result_instance_type_available.stdout == '[]' + + - set_fact: + random_instance_type: "{{ result_instance_type.stdout }}" + when: result_instance_type_available.stdout == '[]' + - name: generate ec2_run_instance script template: src: ec2_run_instance.j2 @@ -126,19 +112,6 @@ aws ec2 wait instance-running \ --instance-ids {{ instance_json.Instances[0].InstanceId }} - - name: get instance public ip - shell: | - aws ec2 describe-instances \ - --instance-ids {{ instance_json.Instances[0].InstanceId }} \ - --query 'Reservations[*].Instances[*].PublicIpAddress' \ - --output text - register: ip_result - when: "'rhel' not in test_os" - - - set_fact: - instance_ip: "{{ ip_result.stdout }}" - when: "'rhel' not in test_os" - - name: get instance private ip shell: | aws ec2 describe-instances \ @@ -146,11 +119,9 @@ --query 'Reservations[*].Instances[*].PrivateIpAddress' \ --output text register: ip_result - when: "'rhel' in test_os" - set_fact: instance_ip: "{{ ip_result.stdout }}" - when: "'rhel' in test_os" - name: waits until instance is reachable wait_for: diff --git a/tests/integration/playbooks/deploy-libvirt.yaml b/tests/integration/playbooks/deploy-libvirt.yaml new file mode 100644 index 00000000..f90df989 --- /dev/null +++ b/tests/integration/playbooks/deploy-libvirt.yaml @@ -0,0 +1,239 @@ +--- +- hosts: cloud + become: false + vars: + test_os: "" + arch: "{{ lookup('env', 'ARCH') | default('x86_64', true) }}" + ssh_key_pub: "" + ssh_user: "cloud-user" + inventory_file: "" + download_node: "{{ lookup('env', 'DOWNLOAD_NODE') | default('', true) }}" + instance_name: "bootc-{{ test_os }}" + image_path: "/var/lib/libvirt/images" + bib: "false" + bib_firmware: "" + boot_args: "" + os_variant: + 
centos-stream-9: centos-stream9
+    rhel-9-5: rhel9-unknown
+    rhel-9-4: rhel9-unknown
+    fedora-40: fedora-unknown
+    fedora-41: fedora-unknown
+
+  tasks:
+    - name: Get temp folder
+      command: dirname "{{ inventory_file }}"
+      register: result_temp_folder
+
+    - set_fact:
+        temp_folder: "{{ result_temp_folder.stdout }}"
+
+    - set_fact:
+        random_num: "{{ 9999 | random(start=1001) }}"
+
+    - name: Get CentOS-Stream-GenericCloud image filename
+      block:
+        - name: Get CentOS-Stream-GenericCloud image filename
+          shell: curl -s https://composes.stream.centos.org/production/latest-CentOS-Stream/compose/BaseOS/{{ arch }}/images/ | grep -oP '(?<=href=")CentOS-Stream-GenericCloud-(9|10)-[^"]+.qcow2(?=")'
+          register: out
+
+        - set_fact:
+            download_image_name: "{{ out.stdout }}"
+            rhel_guest_image_fname: "{{ instance_name }}.qcow2"
+      when:
+        - "'centos' in test_os"
+        - bib == "false"
+
+    - name: Get rhel-guest-image filename
+      block:
+        - name: Get version from test_os
+          shell: echo {{ test_os }} | sed 's/rhel-//;s/-/\./'
+          register: result_os_version
+
+        - set_fact:
+            test_os_dot_version: "{{ result_os_version.stdout }}"
+
+        - name: Get rhel-guest-image filename
+          shell: curl -s http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/BaseOS/{{ arch }}/images/ | grep -oP '(?<=href=")rhel-guest-image-[^"]+.qcow2(?=")'
+          register: out
+
+        - set_fact:
+            download_image_name: "{{ out.stdout }}"
+            rhel_guest_image_fname: "{{ instance_name }}.qcow2"
+      when:
+        - "'rhel' in test_os"
+        - bib == "false"
+
+    - name: Get Fedora-Cloud-Base-Generic 40 image filename
+      block:
+        - name: Get Fedora-Cloud-Base-Generic 40 image filename
+          shell: curl -s https://dl.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/{{ arch }}/images/ | grep -ioE '>Fedora-Cloud-Base-Generic.*.qcow2' | tr -d '><'
+          register: out
+
+        - set_fact:
+            download_image_name: "{{ out.stdout }}"
+            rhel_guest_image_fname: "{{ instance_name }}.qcow2"
+      when:
+        - test_os == "fedora-40"
+        - bib == 
"false"
+
+    - name: Get Fedora-Cloud-Base-Generic 41 image filename
+      block:
+        - name: Get Fedora-Cloud-Base-Generic 41 image filename
+          shell: curl -s https://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/Cloud/{{ arch }}/images/ | grep -ioE '>Fedora-Cloud-Base-Generic.*.qcow2' | tr -d '><'
+          register: out
+
+        - set_fact:
+            download_image_name: "{{ out.stdout }}"
+            rhel_guest_image_fname: "{{ instance_name }}.qcow2"
+      when:
+        - test_os == "fedora-41"
+        - bib == "false"
+
+    - name: Download CentOS-Stream-GenericCloud image
+      get_url:
+        url: "https://composes.stream.centos.org/production/latest-CentOS-Stream/compose/BaseOS/{{ arch }}/images/{{ download_image_name }}"
+        dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
+        validate_certs: false
+      become: true
+      when:
+        - "'centos' in test_os"
+        - bib == "false"
+
+    - name: Download rhel-guest-image
+      get_url:
+        url: "http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/BaseOS/{{ arch }}/images/{{ download_image_name }}"
+        dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
+        validate_certs: false
+      become: true
+      when:
+        - "'rhel' in test_os"
+        - bib == "false"
+
+    - name: Download Fedora-Cloud-Base-Generic 40
+      get_url:
+        url: "https://dl.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/{{ arch }}/images/{{ download_image_name }}"
+        dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
+        validate_certs: false
+      become: true
+      when:
+        - test_os == "fedora-40"
+        - bib == "false"
+
+    - name: Download Fedora-Cloud-Base-Generic 41
+      get_url:
+        url: "https://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/Cloud/{{ arch }}/images/{{ download_image_name }}"
+        dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
+        validate_certs: false
+      become: true
+      when:
+        - test_os == "fedora-41"
+        - bib == "false"
+
+    - set_fact:
+        rhel_guest_image_fname: "disk.qcow2"
+      when: bib == "true"
+
+    - name: Start libvirtd service
+      command: systemctl start 
libvirtd.service + become: true + + - name: Generate user-data and meta-data + template: + src: "{{ item }}.j2" + dest: "{{ temp_folder }}/{{ item }}" + loop: + - user-data + - meta-data + + # virt-install with uefi, the system has a "system reset" in the first boot and shutdown + # Then the --cloud-init will be dropped when start after system reset + - name: Generate seed.iso for NoCloud cloud-init + command: | + genisoimage -output "{{ image_path }}/seed.iso" \ + -volid cidata \ + -joliet \ + -rock \ + "{{ temp_folder }}/user-data" "{{ temp_folder }}/meta-data" + become: true + + - name: Generate virt-install script + template: + src: virt-install.bash.j2 + dest: "{{ temp_folder }}/virt-install.bash" + mode: 0755 + + - name: Run virt-install + command: "{{ temp_folder }}/virt-install.bash" + become: true + + - name: Wait until VM is shut off + command: virsh domstate {{ instance_name }} + become: true + register: result_domestate + retries: 10 + until: result_domestate.stdout == "shut off" + + - name: Start vm + shell: | + virsh start {{ instance_name }} + become: true + + - name: Get VM xml + command: virsh dumpxml {{ instance_name }} + become: true + + - name: Get VM IP address + shell: + virsh domifaddr {{ instance_name }} | grep -oP '(?:\d+\.){3}\d+' + register: result + until: result.stdout != "" + retries: 30 + delay: 10 + become: true + + - set_fact: + instance_ip: "{{ result.stdout }}" + + - name: Remove seed.iso + command: rm -f "{{ image_path }}/seed.iso" + become: true + + - name: Waits until instance is reachable + wait_for: + host: "{{ instance_ip }}" + port: 22 + search_regex: OpenSSH + delay: 10 + retries: 30 + register: result_ssh_check + until: result_ssh_check is success + + - name: Add instance ip into host group guest + add_host: + name: "{{ instance_ip }}" + groups: guest + + - name: Write instance ip to inventory file + community.general.ini_file: + path: "{{ inventory_file }}" + section: guest + option: guest ansible_host + value: "{{ 
instance_ip }}" + no_extra_spaces: true + + - name: Write random number to inventory file + community.general.ini_file: + path: "{{ inventory_file }}" + section: cloud:vars + option: rhel_guest_image_fname + value: "{{ rhel_guest_image_fname }}" + no_extra_spaces: true + + - name: Write instance name to inventory file + community.general.ini_file: + path: "{{ inventory_file }}" + section: cloud:vars + option: instance_name + value: "{{ instance_name }}" + no_extra_spaces: true diff --git a/tests/integration/playbooks/install.yaml b/tests/integration/playbooks/install.yaml index 1e64714e..1fdb93d1 100644 --- a/tests/integration/playbooks/install.yaml +++ b/tests/integration/playbooks/install.yaml @@ -2,7 +2,7 @@ - hosts: guest become: false vars: - test_os: "{{ lookup('env', 'TEST_OS') | default('centos-stream-9', true) }}" + test_os: "" platform: "{{ lookup('env', 'PLATFORM') | default('aws', true) }}" test_image_url: "" @@ -25,18 +25,27 @@ command: findmnt ignore_errors: true + # installing SELinux-enabled targets from SELinux-disabled hosts + # https://github.com/containers/bootc/issues/419 + - name: disable selinux for libvirt only + command: setenforce 0 + become: true + ignore_errors: true + when: + - platform == "libvirt" + - name: Install podman dnf: name: - podman state: present become: true - when: ('rhel' not in test_os) or (platform != 'aws') + when: ('rhel' not in test_os and test_os != 'fedora-41') or (platform != 'aws' and test_os != 'fedora-41') - name: Install podman from internal dnf: disablerepo: "*" - enablerepo: "rhel-9x-*" + enablerepo: "rhel-9y-*" name: - podman state: present @@ -45,6 +54,12 @@ - "'rhel' in test_os" - platform == "aws" + # ansible dnf5 module needs python3-libdnf5 + - name: Install podman on fedora-41(dnf5) + command: dnf -y install podman + become: true + when: test_os == 'fedora-41' + - name: Auth for RHEL private image command: podman login \ diff --git a/tests/integration/playbooks/remove.yaml 
b/tests/integration/playbooks/remove.yaml index e21dd40b..cedd0f65 100644 --- a/tests/integration/playbooks/remove.yaml +++ b/tests/integration/playbooks/remove.yaml @@ -22,3 +22,25 @@ aws ec2 delete-key-pair \ --key-name "kp-bootc-{{ random_num }}" when: platform == "aws" + + - name: Destroy and undefine libvirt vm + block: + - name: "Destroy vm" + command: virsh destroy {{ instance_name }} + become: true + ignore_errors: true + - name: "Undefine vm" + command: virsh undefine {{ instance_name }} + become: true + register: result_undefine + ignore_errors: true + - name: "Undefine vm with --nvram" + command: virsh undefine {{ instance_name }} --nvram + become: true + ignore_errors: true + when: result_undefine is failed + - name: "Delete disk file" + command: virsh vol-delete --pool images "{{ rhel_guest_image_fname }}" + become: true + ignore_errors: true + when: platform == "libvirt" diff --git a/tests/integration/playbooks/rollback.yaml b/tests/integration/playbooks/rollback.yaml index e193ff50..7512659f 100644 --- a/tests/integration/playbooks/rollback.yaml +++ b/tests/integration/playbooks/rollback.yaml @@ -47,6 +47,8 @@ - name: failed count + 1 set_fact: failed_counter: "{{ failed_counter | int + 1 }}" + when: + - air_gapped_dir | default('') == "" - name: check installed package shell: rpm -qa | sort diff --git a/tests/integration/playbooks/templates/ec2_run_instance.j2 b/tests/integration/playbooks/templates/ec2_run_instance.j2 index f196cd11..ec91f3aa 100644 --- a/tests/integration/playbooks/templates/ec2_run_instance.j2 +++ b/tests/integration/playbooks/templates/ec2_run_instance.j2 @@ -9,8 +9,8 @@ {% if test_os.startswith('rhel') %} --user-data file://user-data \ {% endif %} - --image-id {{ ami[arch][test_os] }} \ - --instance-market-options MarketType=spot,SpotOptions=\{MaxPrice=0.1,SpotInstanceType=one-time,InstanceInterruptionBehavior=terminate\} \ + --image-id {{ ami_id }} \ + --instance-market-options MarketType=spot,SpotOptions=\{MaxPrice={{ 
spot_max_price }},SpotInstanceType=one-time,InstanceInterruptionBehavior=terminate\} \ --instance-type {{ random_instance_type }} \ --key-name kp-bootc-{{ random_num }} \ --security-group-ids {{ group_id }} \ diff --git a/tests/integration/playbooks/templates/meta-data.j2 b/tests/integration/playbooks/templates/meta-data.j2 new file mode 100644 index 00000000..d8f30877 --- /dev/null +++ b/tests/integration/playbooks/templates/meta-data.j2 @@ -0,0 +1,2 @@ +instance-id: libvirt-{{ random_num }} +local-hostname: libvirt-{{ test_os }} diff --git a/tests/integration/playbooks/templates/user-data.j2 b/tests/integration/playbooks/templates/user-data.j2 new file mode 100644 index 00000000..0c51a9f9 --- /dev/null +++ b/tests/integration/playbooks/templates/user-data.j2 @@ -0,0 +1,27 @@ +#cloud-config +users: + - default + - name: {{ ssh_user }} + groups: wheel + sudo: ALL=(ALL) NOPASSWD:ALL + lock_passwd: true + ssh_authorized_keys: + - {{ lookup('ansible.builtin.file', ssh_key_pub) }} +{% if bib == 'false' and test_os.startswith('rhel') %} +yum_repos: + rhel-9y-baseos: + name: rhel-9y-baseos + baseurl: http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/BaseOS/$basearch/os/ + enabled: true + gpgcheck: false + rhel-9y-appstream: + name: rhel-9y-appstream + baseurl: http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/AppStream/$basearch/os/ + enabled: true + gpgcheck: false +{% endif %} +power_state: + delay: now + mode: poweroff + message: Cloud Init Finalized - Shutting down machine + timeout: 30 diff --git a/tests/integration/playbooks/templates/virt-install.bash.j2 b/tests/integration/playbooks/templates/virt-install.bash.j2 new file mode 100644 index 00000000..73a4e87e --- /dev/null +++ b/tests/integration/playbooks/templates/virt-install.bash.j2 @@ -0,0 +1,20 @@ +#!/bin/bash + +virt-install \ + --name {{ instance_name }} \ + --ram 3072 \ + --vcpus 2 \ + --os-variant {{ 
os_variant[test_os] }} \ + --network default \ + --disk size=10,path="{{ image_path }}/{{ rhel_guest_image_fname }}" \ +{% if bib_firmware == 'uefi' %} + --boot {{ boot_args }} \ +{% endif %} + --cdrom "{{ image_path }}/seed.iso" \ + --install no_install=yes \ +{% if bib == 'false' %} + --filesystem={{ air_gapped_dir }},mount_tag,driver.type=virtiofs,accessmode=passthrough \ + --memorybacking=source.type=memfd,access.mode=shared \ +{% endif %} + --noautoconsole \ + --wait diff --git a/tests/integration/playbooks/upgrade.yaml b/tests/integration/playbooks/upgrade.yaml index 09697455..27ab7ba4 100644 --- a/tests/integration/playbooks/upgrade.yaml +++ b/tests/integration/playbooks/upgrade.yaml @@ -1,11 +1,34 @@ --- - hosts: guest become: false + vars: tasks: + - name: Air-gapped upgrade + block: + - name: Mount virtiofs + mount: + path: /mnt + src: mount_tag + fstype: virtiofs + state: ephemeral + become: true + + - name: ls + command: ls /mnt + become: true + + - name: bootc switch + command: bootc switch --transport dir /mnt + become: true + when: + - air_gapped_dir | default('') != "" + - name: bootc upgrade command: bootc upgrade become: true + when: + - air_gapped_dir | default('') == "" - name: Reboot to deploy new system reboot: @@ -17,3 +40,7 @@ - name: Wait for connection to become reachable/usable wait_for_connection: delay: 30 + + - name: bootc booted status + command: bootc status --booted + become: true diff --git a/tests/integration/shared_lib.sh b/tests/integration/shared_lib.sh new file mode 100755 index 00000000..0ef30a3f --- /dev/null +++ b/tests/integration/shared_lib.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Dumps details about the instance running the CI job. 
+function dump_runner { + RUNNER_CPUS=$(nproc) + RUNNER_MEM=$(free -m | grep -oP '\d+' | head -n 1) + RUNNER_DISK=$(df --output=size -h / | sed '1d;s/[^0-9]//g') + RUNNER_HOSTNAME=$(uname -n) + RUNNER_USER=$(whoami) + RUNNER_ARCH=$(uname -m) + RUNNER_KERNEL=$(uname -r) + + echo -e "\033[0;36m" + cat << EOF +------------------------------------------------------------------------------ +CI MACHINE SPECS +------------------------------------------------------------------------------ + Hostname: ${RUNNER_HOSTNAME} + User: ${RUNNER_USER} + CPUs: ${RUNNER_CPUS} + RAM: ${RUNNER_MEM} MB + DISK: ${RUNNER_DISK} GB + ARCH: ${RUNNER_ARCH} + KERNEL: ${RUNNER_KERNEL} +------------------------------------------------------------------------------ +EOF +} + +# Colorful timestamped output. +function greenprint { + echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m" +} + +function redprint { + echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m" +} + +# Retry container image pull and push +function retry { + n=0 + until [ "$n" -ge 3 ] + do + "$@" && break + n=$((n+1)) + sleep 10 + done +}