diff --git a/hack/ci/cloud-init/controller.yaml.tpl b/hack/ci/cloud-init/controller.yaml.tpl
index 174bf04154..b34e62b4ce 100644
--- a/hack/ci/cloud-init/controller.yaml.tpl
+++ b/hack/ci/cloud-init/controller.yaml.tpl
@@ -81,6 +81,12 @@
     # query_placement_for_availability_zone is the default from Xena
     query_placement_for_availability_zone = True
 
+    [workarounds]
+    # FIXME(stephenfin): This is temporary while we get to the bottom of
+    # https://bugs.launchpad.net/nova/+bug/2091114 It should not be kept after
+    # we bump to 2025.1
+    disable_deep_image_inspection = True
+
     [[post-config|$CINDER_CONF]]
     [DEFAULT]
     storage_availability_zone = ${PRIMARY_AZ}
diff --git a/hack/ci/cloud-init/worker.yaml.tpl b/hack/ci/cloud-init/worker.yaml.tpl
index ccbf79411d..6e4a5b44e6 100644
--- a/hack/ci/cloud-init/worker.yaml.tpl
+++ b/hack/ci/cloud-init/worker.yaml.tpl
@@ -47,6 +47,12 @@
     [DEFAULT]
     cpu_allocation_ratio = 2.0
 
+    [workarounds]
+    # FIXME(stephenfin): This is temporary while we get to the bottom of
+    # https://bugs.launchpad.net/nova/+bug/2091114 It should not be kept after
+    # we bump to 2025.1
+    disable_deep_image_inspection = True
+
     [[post-config|$CINDER_CONF]]
     [DEFAULT]
     storage_availability_zone = ${SECONDARY_AZ}
diff --git a/hack/ci/create_devstack.sh b/hack/ci/create_devstack.sh
index 31c18ebfb1..60c8ce7f62 100755
--- a/hack/ci/create_devstack.sh
+++ b/hack/ci/create_devstack.sh
@@ -31,7 +31,7 @@ source "${scriptdir}/${RESOURCE_TYPE}.sh"
 
 CLUSTER_NAME=${CLUSTER_NAME:-"capo-e2e"}
 
-OPENSTACK_RELEASE=${OPENSTACK_RELEASE:-"2024.1"}
+OPENSTACK_RELEASE=${OPENSTACK_RELEASE:-"2024.2"}
 OPENSTACK_ENABLE_HORIZON=${OPENSTACK_ENABLE_HORIZON:-"false"}
 
 # Devstack will create a provider network using this range
@@ -47,6 +47,9 @@ PRIVATE_NETWORK_CIDR=${PRIVATE_NETWORK_CIDR:-"10.0.3.0/24"}
 CONTROLLER_IP=${CONTROLLER_IP:-"10.0.3.15"}
 WORKER_IP=${WORKER_IP:-"10.0.3.16"}
 
+SKIP_INIT_INFRA=${SKIP_INIT_INFRA:-}
+SKIP_SECONDARY_AZ=${SKIP_SECONDARY_AZ:-}
+
 PRIMARY_AZ=testaz1
 SECONDARY_AZ=testaz2
 
@@ -273,7 +276,11 @@ function main() {
     # is available, and wait if it is not.
     #
     # For efficiency, tests which require multi-AZ SHOULD run as late as possible.
-    create_worker
+    if [[ -n "${SKIP_SECONDARY_AZ:-}" ]]; then
+        echo "Skipping worker creation..."
+    else
+        create_worker
+    fi
 
     public_ip=$(get_public_ip)
     cat << EOF > "${REPO_ROOT_ABSOLUTE}/clouds.yaml"
diff --git a/hack/ci/gce-project.sh b/hack/ci/gce-project.sh
index 1aae2de9b9..c915f40ce0 100755
--- a/hack/ci/gce-project.sh
+++ b/hack/ci/gce-project.sh
@@ -95,7 +95,7 @@ function create_vm {
     --zone "$GCP_ZONE" \
     --enable-nested-virtualization \
     --image-project ubuntu-os-cloud \
-    --image-family ubuntu-2204-lts \
+    --image-family ubuntu-2404-lts-amd64 \
     --boot-disk-size 200G \
     --boot-disk-type pd-ssd \
     --can-ip-forward \
diff --git a/hack/ci/libvirt.sh b/hack/ci/libvirt.sh
new file mode 100755
index 0000000000..d6b8d1ab64
--- /dev/null
+++ b/hack/ci/libvirt.sh
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+
+# Copyright 2024 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# hack script for preparing libvirt to run cluster-api-provider-openstack e2e
+
+set -x -o errexit -o nounset -o pipefail
+
+# Required environment variables:
+# SSH_PUBLIC_KEY_FILE
+# SSH_PRIVATE_KEY_FILE
+# LIBVIRT_NETWORK_NAME
+
+function cloud_init {
+    LIBVIRT_NETWORK_NAME=${LIBVIRT_NETWORK_NAME:-${CLUSTER_NAME}-network}
+    LIBVIRT_IMAGE_NAME=${LIBVIRT_IMAGE_NAME:-ubuntu-2404-lts}
+
+    LIBVIRT_MEMORY=${LIBVIRT_MEMORY:-8192}
+    LIBVIRT_MEMORY_controller=${LIBVIRT_MEMORY_controller:-$LIBVIRT_MEMORY}
+    LIBVIRT_MEMORY_worker=${LIBVIRT_MEMORY_worker:-$LIBVIRT_MEMORY}
+
+    LIBVIRT_VCPU=${LIBVIRT_VCPU:-4}
+    LIBVIRT_VCPU_controller=${LIBVIRT_VCPU_controller:-$LIBVIRT_VCPU}
+    LIBVIRT_VCPU_worker=${LIBVIRT_VCPU_worker:-$LIBVIRT_VCPU}
+
+    LIBVIRT_MAC_controller="00:60:2f:32:81:00"
+    LIBVIRT_MAC_worker="00:60:2f:32:81:01"
+}
+
+function init_infrastructure() {
+    if ! virsh net-info "${LIBVIRT_NETWORK_NAME}" &>/dev/null; then
+        virsh net-define <(cat <<EOF
+<network>
+  <name>${LIBVIRT_NETWORK_NAME}</name>
+  <forward mode='nat'>
+    <nat>
+      <port start='1024' end='65535'/>
+    </nat>
+  </forward>
+  <bridge stp='on' delay='0'/>
+  <ip address='10.0.3.1' netmask='255.255.255.0'>
+    <dhcp>
+      <range start='10.0.3.100' end='10.0.3.254'/>
+      <host mac='${LIBVIRT_MAC_controller}' ip='${CONTROLLER_IP}'/>
+      <host mac='${LIBVIRT_MAC_worker}' ip='${WORKER_IP}'/>
+    </dhcp>
+  </ip>
+</network>
+EOF
+)
+        virsh net-start "${LIBVIRT_NETWORK_NAME}"
+        virsh net-autostart "${LIBVIRT_NETWORK_NAME}"
+    fi
+
+    if [ ! -f "/tmp/${LIBVIRT_IMAGE_NAME}.qcow2" ]; then
+        curl -o "/tmp/${LIBVIRT_IMAGE_NAME}.qcow2" https://cloud-images.ubuntu.com/releases/noble/release/ubuntu-24.04-server-cloudimg-amd64.img
+    fi
+}
+
+function create_vm {
+    local name=$1 && shift
+    local ip=$1 && shift
+    local userdata=$1 && shift
+    local public=$1 && shift
+
+    local memory=LIBVIRT_MEMORY_${name}
+    memory=${!memory}
+    local vcpu=LIBVIRT_VCPU_${name}
+    vcpu=${!vcpu}
+    local servername="${CLUSTER_NAME}-${name}"
+    local mac=LIBVIRT_MAC_${name}
+    mac=${!mac}
+
+    # Values which weren't initialised if we skipped init_infrastructure. Use names instead.
+    networkid=${networkid:-${LIBVIRT_NETWORK_NAME}}
+    volumeid=${volumeid:-${LIBVIRT_IMAGE_NAME}_${name}.qcow2}
+
+    sudo cp "/tmp/${LIBVIRT_IMAGE_NAME}.qcow2" "/var/lib/libvirt/images/${volumeid}"
+    sudo qemu-img resize "/var/lib/libvirt/images/${volumeid}" +200G
+
+    local serverid
+    local serverid
+    if ! virsh dominfo "${servername}" &>/dev/null; then
+        sudo virt-install \
+            --name "${servername}" \
+            --memory "${memory}" \
+            --vcpus "${vcpu}" \
+            --import \
+            --disk "/var/lib/libvirt/images/${volumeid},format=qcow2,bus=virtio" \
+            --network network="${networkid}",mac="${mac}" \
+            --os-variant=ubuntu22.04 \
+            --graphics none \
+            --cloud-init user-data="${userdata}" \
+            --noautoconsole
+    fi
+}
+
+function get_public_ip {
+    echo "${CONTROLLER_IP}"
+}
+
+function get_mtu {
+    # Set MTU statically for libvirt
+    echo 1500
+}
+
+function get_ssh_public_key_file {
+    echo "${SSH_PUBLIC_KEY_FILE}"
+}
+
+function get_ssh_private_key_file {
+    # Allow this to be unbound. This is handled in create_devstack.sh
+    echo "${SSH_PRIVATE_KEY_FILE:-}"
+}
+
+function cloud_cleanup {
+    for serverid in $(virsh list --all --name | grep -E "${CLUSTER_NAME}-controller|${CLUSTER_NAME}-worker"); do
+        virsh destroy "${serverid}"
+        virsh undefine "${serverid}" --remove-all-storage
+    done
+
+    for networkid in $(virsh net-list --name | grep -E "${CLUSTER_NAME}"); do
+        virsh net-destroy "${networkid}"
+        virsh net-undefine "${networkid}"
+    done
+
+    true
+}
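
For local testing, something like the following should exercise the new backend, assuming the existing RESOURCE_TYPE plumbing in create_devstack.sh (which sources "${scriptdir}/${RESOURCE_TYPE}.sh") and the environment variables listed at the top of libvirt.sh. The SSH key paths and the direct invocation below are illustrative placeholders, not part of this change:

    # Hypothetical local run of the libvirt backend; key paths are placeholders.
    export RESOURCE_TYPE=libvirt
    export SSH_PUBLIC_KEY_FILE="${HOME}/.ssh/id_ed25519.pub"
    export SSH_PRIVATE_KEY_FILE="${HOME}/.ssh/id_ed25519"
    # Optional knobs introduced by this change:
    # export SKIP_SECONDARY_AZ=1   # skip creating the worker VM (second AZ)
    # export SKIP_INIT_INFRA=1     # declared here; appears to skip init_infrastructure on re-runs
    ./hack/ci/create_devstack.sh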