diff --git a/images/capi/packer/azure/.pipelines/build-sig.yaml b/images/capi/packer/azure/.pipelines/build-sig.yaml new file mode 100644 index 0000000000..b1c64f064a --- /dev/null +++ b/images/capi/packer/azure/.pipelines/build-sig.yaml @@ -0,0 +1,78 @@ +# Required pipeline variables: +# - BUILD_POOL - Azure DevOps build pool to use +# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.31.1` +# - OS - operating system distro, such as 'Ubuntu', 'AzureLinux', or `Windows` +# - OS_VERSION - version of distro, such as `24.04` or `2022-containerd` +# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI +# Optional pipeline variables: +# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries +# - STAGING_GALLERY_NAME - name of the Azure compute gallery for initial image publishing + +jobs: +- job: build_sig + timeoutInMinutes: 120 + strategy: + maxParallel: 0 + pool: + name: $(BUILD_POOL) + steps: + - template: k8s-config.yaml + - script: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && set -o xtrace + + kube_proxy_url="sigwindowstools/kube-proxy:v${KUBERNETES_VERSION/+/_}-calico-hostprocess" + echo "Checking for Windows kube-proxy image $kube_proxy_url" + if ! 
stderr="$(docker pull $kube_proxy_url 2>&1 > /dev/null)"; then + # It's a Windows image, so expect an error after pulling it on Linux + if [[ $stderr != *"cannot be used on this platform"* ]]; then + echo "Failed to pull kube-proxy image: $stderr" + exit 1 + fi + fi + displayName: Check for Windows kube-proxy image + condition: and(eq(variables['PREFLIGHT_CHECKS'], 'true'), eq(variables['OS'], 'Windows')) + - task: AzureCLI@2 + displayName: Build SIG Image + inputs: + azureSubscription: '$(SERVICE_CONNECTION)' + scriptLocation: inlineScript + scriptType: bash + workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' + inlineScript: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && set -o xtrace + + # Generate locales properly on Azure Linux or ansible will complain + sudo tdnf -y install glibc-i18n + sudo locale-gen.sh + export LC_ALL=en_US.UTF-8 + + export PATH=$PATH:$HOME/.local/bin + os=$(echo "${OS}" | tr '[:upper:]' '[:lower:]') + version=$(echo "${OS_VERSION}" | tr '[:upper:]' '[:lower:]' | tr -d .) 
+ export RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" + export RESOURCE_GROUP_NAME="${RESOURCE_GROUP}" + export SIG_OFFER="reference-images" + + # timestamp is in RFC-3339 format to match kubetest + export TIMESTAMP="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" + export JOB_NAME="${JOB_NAME:-"image-builder-sig-${os}-${version}"}" + export TAGS="${TAGS:-creationTimestamp=${TIMESTAMP} jobName=${JOB_NAME} DO-NOT-DELETE=UpstreamInfra}" + printf "${TAGS}" | tee packer/azure/tags.out + export GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" + DISTRO="${os}-${version}" + echo "##vso[task.setvariable variable=DISTRO]$DISTRO" + + # Add build tags in ADO + echo "##vso[build.addbuildtag]$KUBERNETES_VERSION" + echo "##vso[build.addbuildtag]$DISTRO" + + export PACKER_FLAGS="${PACKER_FLAGS} --var sig_image_version=${KUBERNETES_VERSION}" + export USE_AZURE_CLI_AUTH="True" + make build-azure-sig-$os-$version | tee packer/azure/packer.out + - template: sig-publishing-info.yaml + - task: PublishPipelineArtifact@1 + inputs: + artifact: 'publishing-info' + path: '$(system.defaultWorkingDirectory)/images/capi/packer/azure/sig-publishing-info.json' diff --git a/images/capi/packer/azure/.pipelines/build-vhd.yaml b/images/capi/packer/azure/.pipelines/build-vhd.yaml deleted file mode 100644 index 7ec1e919fb..0000000000 --- a/images/capi/packer/azure/.pipelines/build-vhd.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZURE_TENANT_ID - tenant ID -# - AZURE_CLIENT_ID - Service principal ID -# - AZURE_CLIENT_SECRET - Service principal secret -# - AZURE_SUBSCRIPTION_ID - Subscription ID used by the pipeline -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.16.2` -# - OS - target of build e.g. `Ubuntu/Windows` -# - OS_VERSION - target of build e.g. 
`22.04/2019/2022-containerd` - -jobs: -- job: build_vhd - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - template: k8s-config.yaml - - script: | - set -euo pipefail - kube_proxy_url="sigwindowstools/kube-proxy:v${KUBERNETES_VERSION/+/_}-calico-hostprocess" - echo "Checking for Windows kube-proxy image $kube_proxy_url" - if ! stderr="$(docker pull $kube_proxy_url 2>&1 > /dev/null)"; then - # It's a Windows image, so expect an error after pulling it on Linux - if [[ $stderr != *"cannot be used on this platform"* ]]; then - echo "Failed to pull kube-proxy image: $stderr" - exit 1 - fi - fi - displayName: Checking for Windows kube-proxy image - condition: and(eq(variables['PREFLIGHT_CHECKS'], 'true'), eq(variables['OS'], 'Windows')) - - script: | - set -o pipefail - export PATH=$PATH:$HOME/.local/bin LC_ALL=en_US.UTF-8 - make deps-azure - os=$(echo "${OS}" | tr '[:upper:]' '[:lower:]') - version=$(echo "${OS_VERSION}" | tr '[:upper:]' '[:lower:]' | tr -d .) - export RESOURCE_GROUP_NAME="cluster-api-images" - - # timestamp is in RFC-3339 format to match kubetest - export TIMESTAMP="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - export JOB_NAME="${JOB_NAME:-"image-builder-vhd"}" - export TAGS="creationTimestamp=${TIMESTAMP} jobName=${JOB_NAME}" - printf "${TAGS}" | tee packer/azure/tags.out - make build-azure-vhd-$os-$version | tee packer/azure/packer.out - displayName: Building VHD - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - template: generate-sas.yaml - - template: vhd-publishing-info.yaml - - task: PublishPipelineArtifact@1 - inputs: - artifact: 'publishing-info' - path: '$(system.defaultWorkingDirectory)/images/capi/packer/azure/vhd-publishing-info.json' - condition: eq(variables.CLEANUP, 'False') - - template: delete-storage-account.yaml - - script: | - chown -R $USER:$USER . 
- displayName: cleanup - chown all files in work directory - condition: always() diff --git a/images/capi/packer/azure/.pipelines/clean-sig.yaml b/images/capi/packer/azure/.pipelines/clean-sig.yaml new file mode 100644 index 0000000000..b04ba58fce --- /dev/null +++ b/images/capi/packer/azure/.pipelines/clean-sig.yaml @@ -0,0 +1,48 @@ +# Required pipeline variables: +# - BUILD_POOL - Azure DevOps build pool to use +# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI +# Optional pipeline variables: +# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries +# - STAGING_GALLERY_NAME - name of the Azure compute gallery for initial image publishing + +jobs: +- job: clean_sig + timeoutInMinutes: 120 + strategy: + maxParallel: 0 + pool: + name: $(BUILD_POOL) + steps: + - task: DownloadPipelineArtifact@2 + inputs: + source: current + artifact: publishing-info + path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ + - script: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && set -o xtrace + + SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME]$SHARED_IMAGE_GALLERY_IMAGE_NAME" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" + displayName: Import variables from build SIG job + - task: AzureCLI@2 + displayName: Clean up staging resources + inputs: + azureSubscription: '$(SERVICE_CONNECTION)' + scriptLocation: inlineScript + scriptType: bash + workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' + inlineScript: | + set -euo pipefail + + 
GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" + RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" + + az sig image-version delete \ + --resource-group ${RESOURCE_GROUP} \ + --gallery-name ${GALLERY_NAME} \ + --gallery-image-definition ${SHARED_IMAGE_GALLERY_IMAGE_NAME} \ + --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} diff --git a/images/capi/packer/azure/.pipelines/cleanup.yaml b/images/capi/packer/azure/.pipelines/cleanup.yaml deleted file mode 100644 index aac80f4a01..0000000000 --- a/images/capi/packer/azure/.pipelines/cleanup.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZURE_CLIENT_ID_SKU - Service principal ID to PUT the SKU -# - AZURE_CLIENT_SECRET_SKU - Service principal secret to PUT the SKU -# - AZURE_TENANT_ID_SKU - tenant ID to PUT the SKU -# - AZURE_CLIENT_ID_VHD - Service principal ID to build the vhd -# - AZURE_CLIENT_SECRET_VHD - Service principal secret to build the vhd -# - AZURE_SUBSCRIPTION_ID_VHD - Subscription ID to build the vhd -# - AZURE_TENANT_ID_VHD - tenant ID to build the vhd -# - DAYS_OLD - consider deleting resources older than this many days -# - DEBUG - whether or not to print script debug output -# - DRY_RUN - whether or not to actually delete resources -# - PUB_VERSION - version of pub command-line tool to use - -trigger: none - -schedules: - - cron: "0 2 * * *" - displayName: "nightly cleanup" - always: true - branches: - include: - - main - -stages: - - stage: clean - jobs: - - job: - timeoutInMinutes: 120 - pool: - name: $(BUILD_POOL) - steps: - - script: | - ./scripts/delete-unused-storage.sh - displayName: Cleaning up unused storage - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi/packer/azure' - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - AZURE_CLIENT_SECRET_VHD: $(AZURE_CLIENT_SECRET_VHD) - variables: - AZURE_CLIENT_ID: $(AZURE_CLIENT_ID_SKU) - AZURE_CLIENT_SECRET: 
$(AZURE_CLIENT_SECRET_SKU) - AZURE_TENANT_ID: $(AZURE_TENANT_ID_SKU) diff --git a/images/capi/packer/azure/.pipelines/create-disk-version.yaml b/images/capi/packer/azure/.pipelines/create-disk-version.yaml deleted file mode 100644 index 49070ef0e6..0000000000 --- a/images/capi/packer/azure/.pipelines/create-disk-version.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZURE_TENANT_ID - tenant ID -# - AZURE_CLIENT_ID - Service principal ID -# - AZURE_CLIENT_SECRET - Service principal secret -# - OS - target of build e.g. `Ubuntu/Windows` -# - OS_VERSION - target of build e.g. `22.04/2019` - -jobs: -- job: create_disk_version - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/vhd/ - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: sku-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sku/ - - script: | - ./scripts/new-disk-version.sh - displayName: Create a new marketplace SKU - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi/packer/azure' - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - task: PublishPipelineArtifact@1 - inputs: - artifact: 'version_info' - path: '$(system.defaultWorkingDirectory)/images/capi/packer/azure/version.json' diff --git a/images/capi/packer/azure/.pipelines/create-sku.yaml b/images/capi/packer/azure/.pipelines/create-sku.yaml deleted file mode 100644 index 16c9497a4c..0000000000 --- a/images/capi/packer/azure/.pipelines/create-sku.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZURE_CLIENT_ID - Service principal ID -# - AZURE_CLIENT_SECRET - Service principal secret -# - AZURE_TENANT_ID - tenant ID -# - KUBERNETES_VERSION - 
version of Kubernetes to create the sku for, e.g. `1.16.2` -# - OFFER - the name of the offer to create the sku for -# - OS - target of build e.g. `Ubuntu/Windows` -# - OS_VERSION - target of build e.g. `22.04/2019/2022-containerd` -# - PUBLISHER - the name of the publisher to create the sku for -# - SKU_TEMPLATE_FILE - the base template file to use for the sku -# - VM_GENERATION - VM generation to use, e.g. `gen2` - -jobs: -- job: create_sku - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - script: | - ./scripts/new-sku.sh - displayName: Create a new marketplace SKU - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi/packer/azure' - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - task: PublishPipelineArtifact@1 - inputs: - artifact: 'sku-info' - path: '$(system.defaultWorkingDirectory)/images/capi/packer/azure/sku-publishing-info.json' diff --git a/images/capi/packer/azure/.pipelines/delete-storage-account.yaml b/images/capi/packer/azure/.pipelines/delete-storage-account.yaml deleted file mode 100644 index 7cf48465a1..0000000000 --- a/images/capi/packer/azure/.pipelines/delete-storage-account.yaml +++ /dev/null @@ -1,17 +0,0 @@ -steps: -- script: | - set -o pipefail - RESOURCE_GROUP_NAME=$(jq -r '.builds[-1].custom_data.resource_group_name' manifest.json | cut -d ":" -f2) - STORAGE_ACCOUNT_NAME=$(jq -r '.builds[-1].custom_data.storage_account_name' manifest.json | cut -d ":" -f2) - if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" - else - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" -p "${AZURE_CLIENT_SECRET}" - fi - az account set -s ${AZURE_SUBSCRIPTION_ID} - az storage account delete -n ${STORAGE_ACCOUNT_NAME} -g ${RESOURCE_GROUP_NAME} --yes - displayName: cleanup - delete storage account - workingDirectory: 
'$(system.defaultWorkingDirectory)/images/capi' - condition: eq(variables.CLEANUP, 'True') - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) diff --git a/images/capi/packer/azure/.pipelines/generate-sas.yaml b/images/capi/packer/azure/.pipelines/generate-sas.yaml deleted file mode 100644 index 2df956dbb1..0000000000 --- a/images/capi/packer/azure/.pipelines/generate-sas.yaml +++ /dev/null @@ -1,26 +0,0 @@ -steps: -- script: | - set -o pipefail - RESOURCE_GROUP_NAME=$(jq -r '.builds[-1].custom_data.resource_group_name' manifest.json | cut -d ":" -f2) - STORAGE_ACCOUNT_NAME=$(jq -r '.builds[-1].custom_data.storage_account_name' manifest.json | cut -d ":" -f2) - OS_DISK_URI=$(cat packer/azure/packer.out | grep "OSDiskUri:" -m 1 | cut -d " " -f 2) - printf "${STORAGE_ACCOUNT_NAME}" | tee packer/azure/storage-account-name.out - printf "${OS_DISK_URI}" | tee packer/azure/vhd-base-url.out - printf "${OS_DISK_URI}?" | tee packer/azure/vhd-url.out - printf "${RESOURCE_GROUP_NAME}" | tee packer/azure/resource-group-name.out - if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" - export AZURE_STORAGE_AUTH_MODE="login" # Use auth mode "login" in az storage commands. 
- else - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" -p "${AZURE_CLIENT_SECRET}" - fi - az account set -s ${AZURE_SUBSCRIPTION_ID} - ACCOUNT_KEY=$(az storage account keys list -g ${RESOURCE_GROUP_NAME} --subscription ${AZURE_SUBSCRIPTION_ID} --account-name ${STORAGE_ACCOUNT_NAME} --query '[0].value') - start_date=$(date +"%Y-%m-%dT00:00Z" -d "-1 day") - expiry_date=$(date +"%Y-%m-%dT00:00Z" -d "+1 year") - az storage container generate-sas --name system --permissions lr --account-name ${STORAGE_ACCOUNT_NAME} --account-key ${ACCOUNT_KEY} --start $start_date --expiry $expiry_date | tr -d '\"' | tee -a packer/azure/vhd-url.out - displayName: Getting OS VHD URL - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - condition: eq(variables.CLEANUP, 'False') - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) diff --git a/images/capi/packer/azure/.pipelines/promote-sig.yaml b/images/capi/packer/azure/.pipelines/promote-sig.yaml new file mode 100644 index 0000000000..2fb9819795 --- /dev/null +++ b/images/capi/packer/azure/.pipelines/promote-sig.yaml @@ -0,0 +1,137 @@ +# Required pipeline variables: +# - BUILD_POOL - Azure DevOps build pool to use +# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI +# - KUBERNETES_VERSION - version of Kubernetes to create the sku for, e.g. `1.16.2` +# - OFFER - the name of the offer to create the sku for +# - OS - target of build e.g. `Ubuntu/Windows` +# - OS_VERSION - target of build e.g. 
`22.04/2019/2022-containerd` +# - PUBLISHER - the name of the publisher to create the sku for +# - STAGING_GALLERY_NAME - name of the Azure Compute Gallery for initial image publishing + +jobs: +- job: publish_to_sig + timeoutInMinutes: 120 + strategy: + maxParallel: 0 + pool: + name: $(BUILD_POOL) + steps: + - task: DownloadPipelineArtifact@2 + inputs: + source: current + artifact: publishing-info + path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ + - script: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && set -o xtrace + + DISTRO=$(jq -r .distro $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + HYPERV_GEN=$(jq -r .hyperv_gen $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + OS_TYPE=$(jq -r .os_type $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(jq -r .managed_image_resource_group_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_NAME=$(jq -r .managed_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_LOCATION=$(jq -r .managed_image_location $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(jq -r .managed_image_shared_image_gallery_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(jq -r .shared_image_gallery_resource_group $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_NAME=$(jq -r .shared_image_gallery_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + 
SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + TAGS=$(jq -r .tags $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + + set +o xtrace + echo "##vso[task.setvariable variable=DISTRO]$DISTRO" + echo "##vso[task.setvariable variable=HYPERV_GEN]$HYPERV_GEN" + echo "##vso[task.setvariable variable=OS_TYPE]$OS_TYPE" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_RESOURCE_GROUP_NAME]$MANAGED_IMAGE_RESOURCE_GROUP_NAME" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_NAME]$MANAGED_IMAGE_NAME" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID]$MANAGED_IMAGE_ID" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_LOCATION]$MANAGED_IMAGE_LOCATION" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID]$MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_RESOURCE_GROUP]$SHARED_IMAGE_GALLERY_RESOURCE_GROUP" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_NAME]$SHARED_IMAGE_GALLERY_NAME" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME]$SHARED_IMAGE_GALLERY_IMAGE_NAME" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" + echo "##vso[task.setvariable variable=TAGS]$TAGS" + displayName: Import variables from build SIG job + - task: AzureCLI@2 + displayName: Publish to community gallery + inputs: + azureSubscription: '$(SERVICE_CONNECTION)' + scriptLocation: inlineScript + scriptType: bash + workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' + inlineScript: | + set -euo pipefail + + GALLERY_NAME=${GALLERY_NAME:-community_gallery} + 
SIG_OFFER="${SIG_OFFER:-reference-images}" + PUBLIC_NAME_PREFIX=${PUBLIC_NAME_PREFIX:-cluster-api} + PUBLISHER_URI=${PUBLISHER_URI:-https://github.com/kubernetes-sigs/cluster-api-provider-azure} + PUBLISHER_EMAIL=${PUBLISHER_EMAIL:-az-k8s-up-infra@microsoft.com} + EULA_LINK=${EULA_LINK:-https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/LICENSE} + EOL_DATE=$(date --date='+6 months' +"%Y-%m-%dT00:00:00+00:00") + RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" + REPLICATED_REGIONS="${REPLICATED_REGIONS:-${MANAGED_IMAGE_LOCATION} australiaeast canadacentral francecentral germanywestcentral northeurope switzerlandnorth uksouth}" + + # Create the resource group if needed + if ! az group show -n ${RESOURCE_GROUP} -o none 2>/dev/null; then + az group create -n ${RESOURCE_GROUP} -l ${MANAGED_IMAGE_LOCATION} --tags ${TAGS:-} + fi + + # Create the public community shared image gallery if it doesn't exist + if ! az sig show --gallery-name ${GALLERY_NAME} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then + az sig create \ + --gallery-name ${GALLERY_NAME} \ + --resource-group ${RESOURCE_GROUP} \ + --description "Shared image gallery for Cluster API Provider Azure" \ + --eula ${EULA_LINK} \ + --location ${MANAGED_IMAGE_LOCATION} \ + --public-name-prefix ${PUBLIC_NAME_PREFIX} \ + --publisher-email ${PUBLISHER_EMAIL} \ + --publisher-uri ${PUBLISHER_URI} \ + --tags ${TAGS} \ + --permissions Community + fi + + # translate prohibited words to alternatives in the image definition name + GALLERY_IMAGE_DEFINITION=${SHARED_IMAGE_GALLERY_IMAGE_NAME//ubuntu/ubun2} + GALLERY_IMAGE_DEFINITION=${GALLERY_IMAGE_DEFINITION//windows/win} + # TODO: add --features ${5:-''} + # Create image definition if it doesn't exist + if ! 
az sig image-definition show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then + az sig image-definition create --debug \ + --resource-group ${RESOURCE_GROUP} \ + --gallery-name ${GALLERY_NAME} \ + --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ + --publisher ${SIG_PUBLISHER:-capz} \ + --offer ${SIG_OFFER:-reference-images} \ + --sku ${DISTRO} \ + --hyper-v-generation ${HYPERV_GEN} \ + --os-type ${OS_TYPE} \ + | tee -a sig-publishing.json + fi + + # Delete the image version if it exists (assuming a force-update) + if az sig image-version show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then + az sig image-version delete \ + --resource-group ${RESOURCE_GROUP} \ + --gallery-name ${GALLERY_NAME} \ + --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ + --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} + fi + + # Create the image version + az sig image-version create \ + --resource-group ${RESOURCE_GROUP} \ + --gallery-name ${GALLERY_NAME} \ + --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ + --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} \ + --target-regions ${REPLICATED_REGIONS} \ + --managed-image "${MANAGED_IMAGE_ID}" \ + --end-of-life-date ${EOL_DATE} \ + | tee -a sig-publishing.json + - task: PublishPipelineArtifact@1 + inputs: + artifact: 'sig-publishing' + path: '$(system.defaultWorkingDirectory)/sig-publishing.json' diff --git a/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml b/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml new file mode 100644 index 0000000000..6f805f3fe6 --- /dev/null +++ b/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml @@ -0,0 +1,42 @@ +steps: +- script: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] 
&& set -o xtrace + + PACKER_OUTPUT=packer/azure/packer.out + OS_TYPE=$(sed -n 's/^OSType: \(.*\)/\1/p' $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(sed -n "s/^ManagedImageResourceGroupName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_NAME=$(sed -n "s/^ManagedImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_ID=$(sed -n "s/^ManagedImageId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_LOCATION=$(sed -n "s/^ManagedImageLocation: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(sed -n "s/^ManagedImageSharedImageGalleryId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(sed -n "s/^SharedImageGalleryResourceGroup: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_NAME=$(sed -n "s/^SharedImageGalleryName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_IMAGE_NAME=$(sed -n "s/^SharedImageGalleryImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(sed -n "s/^SharedImageGalleryImageVersion: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + TAGS=$(cat packer/azure/tags.out) + if [[ ${SHARED_IMAGE_GALLERY_IMAGE_NAME} == *gen2 ]]; then + HYPERV_GEN="V2" + else + HYPERV_GEN="V1" + fi + + cat << EOF > packer/azure/sig-publishing-info.json + { + "distro": "${DISTRO}", + "hyperv_gen": "${HYPERV_GEN}", + "os_type": "${OS_TYPE}", + "managed_image_resource_group_name": "${MANAGED_IMAGE_RESOURCE_GROUP_NAME}", + "managed_image_name": "${MANAGED_IMAGE_NAME}", + "managed_image_id": "${MANAGED_IMAGE_ID}", + "managed_image_location": "${MANAGED_IMAGE_LOCATION}", + "managed_image_shared_image_gallery_id": "${MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID}", + "shared_image_gallery_resource_group": "${SHARED_IMAGE_GALLERY_RESOURCE_GROUP}", + "shared_image_gallery_name": "${SHARED_IMAGE_GALLERY_NAME}", + "shared_image_gallery_image_name": "${SHARED_IMAGE_GALLERY_IMAGE_NAME}", + "shared_image_gallery_image_version":
"${SHARED_IMAGE_GALLERY_IMAGE_VERSION}", + "tags": "${TAGS}" + } + EOF + displayName: Generate SIG publishing info + workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' diff --git a/images/capi/packer/azure/.pipelines/smoke-test.yaml b/images/capi/packer/azure/.pipelines/smoke-test.yaml deleted file mode 100644 index 33f746da0c..0000000000 --- a/images/capi/packer/azure/.pipelines/smoke-test.yaml +++ /dev/null @@ -1,61 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZURE_TENANT_ID_VHD - tenant ID to build the vhd -# - AZURE_CLIENT_ID_VHD - Service principal ID to build the vhd -# - AZURE_CLIENT_SECRET_VHD - Service principal secret to build the vhd -# - AZURE_SUBSCRIPTION_ID_VHD - Subscription ID to build the vhd -# - KUBERNETES_VERSION - version of Kubernetes to create the sku for, e.g. `1.21.3` -# - LINUX_OS_VERSION - version of Ubuntu Linux to test with, e.g. `22.04` -# - WINDOWS_OS_VERSION - version of Windows Server to test with, e.g. `2022-containerd` -# - CLEANUP - whether or not to clean up resources created in the run - -trigger: none - -schedules: - - cron: "0 1 * * *" - displayName: "nightly build" - always: true - branches: - include: - - master - - main - -stages: - - stage: vhd - jobs: - - job: - timeoutInMinutes: 120 - pool: - name: $(BUILD_POOL) - steps: - - template: k8s-config.yaml - - script: | - set -o pipefail - export PATH=$PATH:$HOME/.local/bin LC_ALL=en_US.UTF-8 - make deps-azure - os=$(echo "$OS" | tr '[:upper:]' '[:lower:]') - version=$(echo "$OS_VERSION" | tr '[:upper:]' '[:lower:]' | tr -d .) - make build-azure-vhd-$os-$version | tee packer/azure/packer.out - displayName: Building VHD - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - template: delete-storage-account.yaml - - script: | - chown -R $USER:$USER . 
- displayName: cleanup - chown all files in work directory - condition: always() - strategy: - maxParallel: 0 - matrix: - Windows: - OS: Windows - OS_VERSION: $(WINDOWS_OS_VERSION) - Linux: - OS: Ubuntu - OS_VERSION: $(LINUX_OS_VERSION) - variables: - AZURE_TENANT_ID: $(AZURE_TENANT_ID_VHD) - AZURE_CLIENT_ID: $(AZURE_CLIENT_ID_VHD) - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET_VHD) - AZURE_SUBSCRIPTION_ID: $(AZURE_SUBSCRIPTION_ID_VHD) diff --git a/images/capi/packer/azure/.pipelines/stages.yaml b/images/capi/packer/azure/.pipelines/stages.yaml index e952f4197f..91be64e4ed 100644 --- a/images/capi/packer/azure/.pipelines/stages.yaml +++ b/images/capi/packer/azure/.pipelines/stages.yaml @@ -1,56 +1,30 @@ # Required pipeline variables: # - BUILD_POOL - Azure DevOps build pool to use -# - AZURE_TENANT_ID_VHD - tenant ID to build the vhd -# - AZURE_CLIENT_ID_VHD - Service principal ID to build the vhd -# - AZURE_CLIENT_SECRET_VHD - Service principal secret to build the vhd -# - AZURE_SUBSCRIPTION_ID_VHD - Subscription ID to build the vhd -# - AZURE_TENANT_ID_SKU - tenant ID to PUT the SKU -# - AZURE_CLIENT_ID_SKU - Service principal ID to PUT the SKU -# - AZURE_CLIENT_SECRET_SKU - Service principal secret to PUT the SKU -# - KUBERNETES_VERSION - version of Kubernetes to create the sku for, e.g. `1.16.2` -# - PUBLISHER - the name of the publisher to create the sku for +# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.31.1` # - OFFER - the name of the offer to create the sku for -# - SKU_TEMPLATE_FILE - the base template file to use for the sku -# - OS - target of build e.g. `Ubuntu/Windows` -# - OS_VERSION - target of build e.g. 
`22.04/2004/2019` +# - OS - target of build, one of `Ubuntu` or `Windows` +# - OS_VERSION - target of build, one of `24.04`, `22.04`, `2022-containerd`, or `2019-containerd` +# - PUBLISHER - the name of the publisher to create the sku for +# - RESOURCE_GROUP - name of the Azure resource group to use for the Compute galleries +# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI +# - STAGING_GALLERY_NAME - name of the Azure Compute Gallery for initial image publishing trigger: none pr: none stages: - - stage: vhd + - stage: build jobs: - - template: build-vhd.yaml - variables: - AZURE_TENANT_ID: $(AZURE_TENANT_ID_VHD) - AZURE_CLIENT_ID: $(AZURE_CLIENT_ID_VHD) - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET_VHD) - AZURE_SUBSCRIPTION_ID: $(AZURE_SUBSCRIPTION_ID_VHD) + - template: build-sig.yaml - stage: test - condition: and(succeeded(), eq(variables.CLEANUP, 'False')) jobs: - - template: test-vhd.yaml - variables: - AZURE_TENANT_ID: $(AZURE_TENANT_ID_VHD) - AZURE_CLIENT_ID: $(AZURE_CLIENT_ID_VHD) - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET_VHD) - AZURE_SUBSCRIPTION_ID: $(AZURE_SUBSCRIPTION_ID_VHD) + - template: test-sig.yaml - - stage: sku - condition: and(succeeded(), eq(variables.CLEANUP, 'False')) + - stage: promote jobs: - - template: create-sku.yaml - variables: - AZURE_TENANT_ID: $(AZURE_TENANT_ID_SKU) - AZURE_CLIENT_ID: $(AZURE_CLIENT_ID_SKU) - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET_SKU) + - template: promote-sig.yaml - - stage: disk_version - condition: and(succeeded(), eq(variables.CLEANUP, 'False')) + - stage: clean jobs: - - template: create-disk-version.yaml - variables: - AZURE_TENANT_ID: $(AZURE_TENANT_ID_SKU) - AZURE_CLIENT_ID: $(AZURE_CLIENT_ID_SKU) - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET_SKU) + - template: clean-sig.yaml diff --git a/images/capi/packer/azure/.pipelines/test-sig.yaml b/images/capi/packer/azure/.pipelines/test-sig.yaml new file mode 100644 index 0000000000..2b89276cea --- /dev/null +++ 
b/images/capi/packer/azure/.pipelines/test-sig.yaml @@ -0,0 +1,142 @@ +# Required pipeline variables: +# - BUILD_POOL - Azure DevOps build pool to use +# - AZ_CAPI_EXTENSION_URL - URL to the Azure CAPI extension build. +# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.16.2` +# - OS - target of build e.g. `Ubuntu/Windows` +# - OS_VERSION - target of build e.g. `22.04/2004/2019` +# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI + +jobs: +- job: test_sig + timeoutInMinutes: 120 + strategy: + maxParallel: 0 + pool: + name: $(BUILD_POOL) + steps: + - task: DownloadPipelineArtifact@2 + inputs: + source: current + artifact: publishing-info + path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ + - script: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && set -o xtrace + + OS_TYPE=$(jq -r .os_type $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(jq -r .managed_image_resource_group_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_NAME=$(jq -r .managed_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_LOCATION=$(jq -r .managed_image_location $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(jq -r .managed_image_shared_image_gallery_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(jq -r .shared_image_gallery_resource_group $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_NAME=$(jq -r .shared_image_gallery_name 
$(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + SHARED_IMAGE_GALLERY_REPLICATED_REGIONS=$(jq -r .shared_image_gallery_replicated_regions $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + TAGS=$(jq -r .tags $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) + + echo "##vso[task.setvariable variable=OS_TYPE;]$OS_TYPE" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_RESOURCE_GROUP_NAME;]$MANAGED_IMAGE_RESOURCE_GROUP_NAME" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_NAME;]$MANAGED_IMAGE_NAME" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID;]$MANAGED_IMAGE_ID" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_LOCATION;]$MANAGED_IMAGE_LOCATION" + echo "##vso[task.setvariable variable=MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID;]$MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_RESOURCE_GROUP;]$SHARED_IMAGE_GALLERY_RESOURCE_GROUP" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_NAME;]$SHARED_IMAGE_GALLERY_NAME" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME;]$SHARED_IMAGE_GALLERY_IMAGE_NAME" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION;]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" + echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_REPLICATED_REGIONS;]$SHARED_IMAGE_GALLERY_REPLICATED_REGIONS" + echo "##vso[task.setvariable variable=TAGS;]$TAGS" + displayName: Import variables from build SIG job + - template: k8s-config.yaml + - script: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && 
set -o xtrace + + export PATH=${PATH}:.local/bin + ./packer/azure/scripts/ensure-kustomize.sh + + # Generate cluster template with kustomize + if [ "$OS_TYPE" == "Windows" ]; then + kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/windows/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml + else + kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/linux/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml + fi + TEST_TEMPLATE=$(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml + echo "##vso[task.setvariable variable=TEST_TEMPLATE;]$TEST_TEMPLATE" + displayName: generate cluster template + workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' + - task: PipAuthenticate@1 + inputs: + artifactFeeds: 'AzureContainerUpstream' + onlyAddExtraIndex: true + - script: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && set -o xtrace + + os=$(echo "$OS_TYPE" | tr '[:upper:]' '[:lower:]') + + # Set up the Azure CLI Cluster API extension + # For example, https://github.com/Azure/azure-capi-cli-extension/releases/download/az-capi-nightly/capi-0.0.vnext-py2.py3-none-any.whl + az extension add --yes --source "${AZ_CAPI_EXTENSION_URL}" + + # Install required binaries + mkdir ~/test-binaries + export PATH=${PATH}:~/test-binaries + az capi install -a -ip ~/test-binaries + + echo "##vso[task.setvariable variable=PATH;]$PATH" + displayName: Install and configure az capi extension + - task: AzureCLI@2 + displayName: Create a cluster + inputs: + azureSubscription: '$(SERVICE_CONNECTION)' + scriptLocation: inlineScript + scriptType: bash + workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' + inlineScript: | + set -euo pipefail + [[ -n 
${DEBUG:-} ]] && set -o xtrace + + params=() + if [ "$OS_TYPE" == "Windows" ]; then + params+=(--windows) + fi + + RESOURCE_GROUP=${MANAGED_IMAGE_RESOURCE_GROUP_NAME} + AZURE_LOCATION=${MANAGED_IMAGE_LOCATION} + # Create a cluster + az capi create \ + --yes \ + --debug \ + --name testvm \ + --kubernetes-version="${KUBERNETES_VERSION}" \ + --location="${AZURE_LOCATION}" \ + --resource-group="${RESOURCE_GROUP}" \ + --management-cluster-resource-group-name="${RESOURCE_GROUP}" \ + --control-plane-machine-count=1 \ + --node-machine-count=1 \ + --template="${TEST_TEMPLATE}" \ + --tags="${TAGS}" \ + --wait-for-nodes=2 \ + "${params[@]}" + + # test if the vm's provisionState is "Succeeded" otherwise fail + # even though the node is reporting Ready, it still takes a moment for the Azure VM to go to Succeeded + timeout 60s bash -c "while ! az vm list -g ${RESOURCE_GROUP} | jq -e 'all(.provisioningState == \"Succeeded\")'; do sleep 1; done" + - task: AzureCLI@2 + displayName: Clean up test resource group + inputs: + azureSubscription: '$(SERVICE_CONNECTION)' + scriptLocation: inlineScript + scriptType: bash + workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' + inlineScript: | + set -euo pipefail + [[ -n ${DEBUG:-} ]] && set -o xtrace + + # Clean up the test resource group + RESOURCE_GROUP=${MANAGED_IMAGE_RESOURCE_GROUP_NAME} + echo az group delete -n "${RESOURCE_GROUP}" --yes --no-wait + condition: always() diff --git a/images/capi/packer/azure/.pipelines/test-vhd.yaml b/images/capi/packer/azure/.pipelines/test-vhd.yaml deleted file mode 100644 index 22aa50ef38..0000000000 --- a/images/capi/packer/azure/.pipelines/test-vhd.yaml +++ /dev/null @@ -1,158 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZ_CAPI_EXTENSION_URL - URL to the Azure CAPI extension build. 
-# - AZURE_TENANT_ID - tenant ID -# - AZURE_CLIENT_ID - Service principal ID -# - AZURE_CLIENT_SECRET - Service principal secret -# - AZURE_SUBSCRIPTION_ID - Subscription ID used by the pipeline -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.16.2` -# - OS - target of build e.g. `Ubuntu/Windows` -# - OS_VERSION - target of build e.g. `22.04/2004/2019` - -jobs: -- job: test_vhd - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/vhd/ - - script: | - set -x - set -e -o pipefail - - VHD_RESOURCE_ID=$(jq -r .vhd_base_url $(system.defaultWorkingDirectory)/images/capi/packer/azure/vhd/vhd-publishing-info.json) - STORAGE_ACCOUNT_NAME=$(jq -r .storage_account_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/vhd/vhd-publishing-info.json) - TAGS=$(jq -r .tags $(system.defaultWorkingDirectory)/images/capi/packer/azure/vhd/vhd-publishing-info.json) - - echo "##vso[task.setvariable variable=VHD_RESOURCE_ID]$VHD_RESOURCE_ID" - echo "##vso[task.setvariable variable=STORAGE_ACCOUNT_NAME]$STORAGE_ACCOUNT_NAME" - echo "##vso[task.setvariable variable=TAGS;]$TAGS" - displayName: Import variables from build vhd job - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - script: | - set -x - set -e -o pipefail - - RANDOM=$(bash -c 'echo $RANDOM') - RESOURCE_GROUP="capi-testvmimage-${RANDOM}" - echo "${RESOURCE_GROUP}" is the group - - # Azure CLI login - if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" - else - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" -p "${AZURE_CLIENT_SECRET}" - fi - - # Find the VHD blob location from its storage account - AZURE_LOCATION=$(az 
storage account show --name "${STORAGE_ACCOUNT_NAME}" --query '[location]' -o tsv) - - # Create the resource group - # Note: the tags parameter is not surrounded by quotes for the Azure CLI to parse it correctly. - az group create --name "${RESOURCE_GROUP}" --location "${AZURE_LOCATION}" --tags ${TAGS} - - # Create a managed image from the VHD blob - OS_TYPE="Linux" - if [ "$OS" == "Windows" ]; then - OS_TYPE="Windows" - fi - az image create -n testvmimage -g "${RESOURCE_GROUP}" --os-type "${OS_TYPE}" --source "${VHD_RESOURCE_ID}" - - # Pass the managed image resource ID on to the next step - IMAGE_ID=$(az image show -g "${RESOURCE_GROUP}" -n testvmimage --query '[id]' --output tsv) - echo "##vso[task.setvariable variable=RESOURCE_GROUP;]$RESOURCE_GROUP" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID;]$IMAGE_ID" - echo "##vso[task.setvariable variable=AZURE_LOCATION;]$AZURE_LOCATION" - displayName: promote VHD blob to managed image - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - template: k8s-config.yaml - - script: | - set -x - set -e -o pipefail - - export PATH=${PATH}:.local/bin - ./packer/azure/scripts/ensure-kustomize.sh - - # Generate cluster template with kustomize - if [ "$OS" == "Windows" ]; then - kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/windows/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - else - kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/linux/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - fi - TEST_TEMPLATE=$(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - echo "##vso[task.setvariable variable=TEST_TEMPLATE;]$TEST_TEMPLATE" - displayName: generate cluster template - 
workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - task: PipAuthenticate@1 - inputs: - artifactFeeds: 'AzureContainerUpstream' - onlyAddExtraIndex: true - - script: | - set -x - set -e -o pipefail - - os=$(echo "$OS" | tr '[:upper:]' '[:lower:]') - - # Set up the Azure CLI Cluster API extension - # https://github.com/Azure/azure-capi-cli-extension/releases/download/az-capi-nightly/capi-0.0.vnext-py2.py3-none-any.whl - python3 -m pip install --upgrade pip - az extension add --yes --source "${AZ_CAPI_EXTENSION_URL}" - - # Install required binaries - mkdir ~/test-binaries - export PATH=${PATH}:~/test-binaries - az capi install -a -p ~/test-binaries - - echo "##vso[task.setvariable variable=PATH;]$PATH" - displayName: Install and configure az capi extension - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - script: | - set -x - set -e -o pipefail - - params=() - if [ "$OS" == "Windows" ]; then - params+=(--windows) - fi - - # Create a cluster - az capi create \ - --yes \ - --debug \ - --name testvm \ - --kubernetes-version="${KUBERNETES_VERSION}" \ - --location="${AZURE_LOCATION}" \ - --resource-group="${RESOURCE_GROUP}" \ - --management-cluster-resource-group-name="${RESOURCE_GROUP}" \ - --control-plane-machine-count=1 \ - --node-machine-count=1 \ - --template="${TEST_TEMPLATE}" \ - --tags="${TAGS}" \ - --wait-for-nodes=2 \ - "${params[@]}" - - # test if the vm's provisionState is "Succeeded" otherwise fail - # even though the node is reporting Ready, it still takes a moment for the Azure VM to go to Succeeded - timeout 60s bash -c "while ! 
az vm list -g ${RESOURCE_GROUP} | jq -e 'all(.provisioningState == \"Succeeded\")'; do sleep 1; done" - displayName: Create a cluster - env: - AZURE_CLIENT_SECRET: $(AZURE_CLIENT_SECRET) - - script: | - set -x - set -e -o pipefail - - # Clean up the test resource group - az group delete -n "${RESOURCE_GROUP}" --yes --no-wait - displayName: Clean up test resource group - condition: always() diff --git a/images/capi/packer/azure/.pipelines/vhd-publishing-info.yaml b/images/capi/packer/azure/.pipelines/vhd-publishing-info.yaml deleted file mode 100644 index d0928c34dd..0000000000 --- a/images/capi/packer/azure/.pipelines/vhd-publishing-info.yaml +++ /dev/null @@ -1,19 +0,0 @@ -steps: -- script: | - VHD_BASE_URL="$(cat packer/azure/vhd-base-url.out)" - VHD_URL="$(cat packer/azure/vhd-url.out)" - STORAGE_ACCOUNT_NAME="$(cat packer/azure/storage-account-name.out)" - RESOURCE_GROUP_NAME="$(cat packer/azure/resource-group-name.out)" - TAGS="$(cat packer/azure/tags.out)" - cat < packer/azure/vhd-publishing-info.json - { - "vhd_base_url": "${VHD_BASE_URL}", - "vhd_url": "${VHD_URL}", - "storage_account_name": "${STORAGE_ACCOUNT_NAME}", - "resource_group_name": "${RESOURCE_GROUP_NAME}", - "tags": "${TAGS}" - } - EOF - displayName: Generating publishing info for VHD - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - condition: eq(variables.CLEANUP, 'False') diff --git a/images/capi/packer/azure/config.pkr.hcl b/images/capi/packer/azure/config.pkr.hcl index ce3d534991..4247e2c506 100644 --- a/images/capi/packer/azure/config.pkr.hcl +++ b/images/capi/packer/azure/config.pkr.hcl @@ -1,7 +1,7 @@ packer { required_plugins { azure = { - version = "< 2.0.0" + version = ">= 2.1.8" source = "github.com/hashicorp/azure" } } diff --git a/images/capi/packer/azure/packer.json b/images/capi/packer/azure/packer.json index 2899187b1f..5966d9ca42 100644 --- a/images/capi/packer/azure/packer.json +++ b/images/capi/packer/azure/packer.json @@ -67,6 +67,7 @@ "plan_product": 
"{{user `plan_image_offer`}}", "plan_publisher": "{{user `plan_image_publisher`}}" }, + "public_ip_sku": "Standard", "private_virtual_network_with_public_ip": "{{user `private_virtual_network_with_public_ip`}}", "shared_gallery_image_version_exclude_from_latest": "{{ user `exclude_from_latest` }}", "shared_image_gallery": { diff --git a/images/capi/packer/azure/scripts/delete-unused-storage.sh b/images/capi/packer/azure/scripts/delete-unused-storage.sh deleted file mode 100755 index 380a76c5a8..0000000000 --- a/images/capi/packer/azure/scripts/delete-unused-storage.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/bin/bash -# Copyright 2021 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script deletes unused Azure storage accounts created in the process of -# building CAPZ reference images. It also archives existing accounts into one -# main storage account to reduce the limited number of accounts in use. -# Usage: -# delete-unused-storage.sh -# -# The `pub` tool (https://github.com/devigned/pub) and the `az` CLI tool -# (https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) must be found -# in the PATH. -# -# In order to run this script, log in to the publishing account with the -# `az account set -s ` command. 
Then export these environment -# variables to enable access to the storage accounts: -# AZURE_CLIENT_ID -# AZURE_CLIENT_SECRET -# AZURE_SUBSCRIPTION_ID -# AZURE_TENANT_ID -# -# By default, the script will not modify any resources. Pass the environment variable -# DRY_RUN=false to enable the script to archive and to delete the storage accounts. - -set -o errexit -set -o pipefail - -[[ -n ${DEBUG:-} ]] && set -o xtrace - -RESOURCE_GROUP=${RESOURCE_GROUP:-cluster-api-images} -PUBLISHER=${PUBLISHER:-cncf-upstream} -OFFERS=${OFFERS:-capi capi-windows} -PREFIX=${PREFIX:-capi} -LONG_PREFIX=${LONG_PREFIX:-${PREFIX}[0-9]{10\}} -ARCHIVE_STORAGE_ACCOUNT=${ARCHIVE_STORAGE_ACCOUNT:-${PREFIX}archive} -DAYS_OLD=${DAYS_OLD:-30} -DRY_RUN=${DRY_RUN:-true} -PUB_VERSION=${PUB_VERSION:-"v0.3.3"} -RED='\033[0;31m' -NC='\033[0m' - -required_env_vars=( - "AZURE_CLIENT_ID" - "AZURE_CLIENT_SECRET" - "AZURE_TENANT_ID" - "AZURE_CLIENT_ID_VHD" - "AZURE_CLIENT_SECRET_VHD" - "AZURE_SUBSCRIPTION_ID_VHD" - "AZURE_TENANT_ID_VHD" -) - -for v in "${required_env_vars[@]}" -do - if [ -z "${!v}" ]; then - echo "$v was not set!" - exit 1 - fi -done - -set -o nounset - -if ${DRY_RUN}; then - echo "DRY_RUN: This script will not copy or delete any resources." - ECHO=echo -else - ECHO= -fi - -echo "Getting pub..." -curl -fsSL https://github.com/devigned/pub/releases/download/${PUB_VERSION}/pub_${PUB_VERSION}_linux_amd64.tar.gz -o pub.tgz; tar -xzf pub.tgz; mv ./pub_linux_amd64 ./pub -export PATH=$PATH:$(pwd) -which pub &> /dev/null || (echo "Please install pub from https://github.com/devigned/pub/releases" && exit 1) - -if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" - export AZURE_STORAGE_AUTH_MODE="login" # Use auth mode "login" in az storage commands. 
-else - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" -p "${AZURE_CLIENT_SECRET}" -fi -az account set -s ${AZURE_SUBSCRIPTION_ID_VHD} - -# Get URLs in use by the marketplace offers -URLS="" -for name in ${OFFERS}; do - echo "Getting URLs for ${name}..." - offer=$(pub offers show -p "$PUBLISHER" -o "$name") - # Capture "label" as well as "osVhdUrl" so we can archive storage accounts with something readable. - urls=$(echo "${offer}" | jq -r '.definition["plans"][]."microsoft-azure-corevm.vmImagesPublicAzure"[] | [.label, .osVhdUrl] | @csv') - if [[ -z $URLS ]]; then - URLS=${urls} - else - URLS=${URLS}$'\n'${urls} - fi -done -NOW=$(date +%s) - -# ensure the existence of the archive storage account -if ! az storage account show -g "${RESOURCE_GROUP}" -n "${ARCHIVE_STORAGE_ACCOUNT}" &> /dev/null; then - echo "Creating archive storage account ${ARCHIVE_STORAGE_ACCOUNT}..." - $ECHO az storage account create -g "${RESOURCE_GROUP}" -n "${ARCHIVE_STORAGE_ACCOUNT}" --access-tier Cool --allow-blob-public-access false -fi - -IFS=$'\n' -archived=0 -deleted=0 -# For each storage account in the subscription, -for account in $(az storage account list -g "${RESOURCE_GROUP}" -o tsv --query "[?starts_with(name, '${PREFIX}')].[name,creationTime]"); do - IFS=$'\t' read -r storage_account creation_time <<< "$account" - created=$(date -d "${creation_time}" +%s 2>/dev/null || date -j -f "%F" "${creation_time}" +%s 2>/dev/null) - age=$(( (NOW - created) / 86400 )) - # if it's too old - if [[ $age -gt ${DAYS_OLD} ]]; then - # and it has the right naming pattern - if [[ ${storage_account} =~ ^${LONG_PREFIX} ]]; then - # but isn't referenced in the offer osVhdUrls - if [[ ! ${URLS} =~ ${storage_account} ]]; then - # delete it. - echo "Deleting unreferenced storage account ${storage_account} that is ${age} days old" - ${ECHO} az storage account delete -g "${RESOURCE_GROUP}" -n "${storage_account}" -y - deleted=$((deleted+1)) - else - # archive it. 
- for URL in ${URLS}; do - IFS=$',' read -r label url <<< "${URL}" - # container names are somewhat strict, so transform the label into a valid container name - # See https://github.com/MicrosoftDocs/azure-docs/blob/master/includes/storage-container-naming-rules-include.md - dest_label=${label//[ .]/-} - dest_label=${dest_label//[^a-zA-Z0-9-]/} - dest_label=$(echo "${dest_label}" | tr '[:upper:]' '[:lower:]') - if [[ ${url} =~ ${storage_account} ]]; then - echo "Archiving storage account ${storage_account} (${label}) that is ${age} days old" - # create a destination container - if [[ $(az storage container exists --account-name "${ARCHIVE_STORAGE_ACCOUNT}" -n "${dest_label}" -o tsv 2>/dev/null) != "True" ]]; then - ${ECHO} az storage container create --only-show-errors --public-access=container \ - -n ${dest_label} -g "${RESOURCE_GROUP}" --account-name "${ARCHIVE_STORAGE_ACCOUNT}" 2>/dev/null - fi - # for each source container - for container in $(az storage container list --only-show-errors --account-name ${storage_account} --query "[].name" -o tsv 2>/dev/null); do - # copy it to the destination container - ${ECHO} az storage blob copy start-batch \ - --account-name ${ARCHIVE_STORAGE_ACCOUNT} \ - --destination-container ${dest_label} \ - --destination-path ${container} \ - --source-container ${container} \ - --source-account-name ${storage_account} \ - --pattern '*capi-*' \ - 2>/dev/null - done - # poll the target container until all blobs have "succeeded" copy status - for target in $(az storage blob list --account-name ${ARCHIVE_STORAGE_ACCOUNT} -c ${dest_label} --query '[].name' -o tsv 2>/dev/null); do - while true; do - status=$(az storage blob show --account-name ${ARCHIVE_STORAGE_ACCOUNT} --container-name ${dest_label} --name $target -o tsv --query 'properties.copy.status' 2>/dev/null) - if [[ ${status} == "success" ]]; then - echo "Copied ${dest_label}/${target}" - break - else - echo "Copying ${dest_label}/${target} ..." 
- sleep 20 - fi - done - done - echo "Deleting source storage account ${storage_account}..." - ${ECHO} az storage account delete -g "${RESOURCE_GROUP}" -n "${storage_account}" -y - archived=$((archived+1)) - fi - done - echo -e "Pausing for 10 seconds. ${RED}Hit Ctrl-C to stop.${NC}" - sleep 10 - echo - fi - fi - fi -done - -echo "Deleted ${deleted} storage accounts." -echo "Archived ${archived} storage accounts." diff --git a/images/capi/packer/azure/scripts/init-sig.sh b/images/capi/packer/azure/scripts/init-sig.sh index 991fec82a1..c2ed66fd36 100755 --- a/images/capi/packer/azure/scripts/init-sig.sh +++ b/images/capi/packer/azure/scripts/init-sig.sh @@ -1,10 +1,26 @@ #!/bin/bash +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ [[ -n ${DEBUG:-} ]] && set -o xtrace tracestate="$(shopt -po xtrace)" set +o xtrace -if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then +if [[ "${USE_AZURE_CLI_AUTH:-}" == "True" ]]; then + : # Assume we did "az login" before running this script +elif [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" >/dev/null 2>&1 else az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" -p "${AZURE_CLIENT_SECRET}" >/dev/null 2>&1 @@ -12,7 +28,7 @@ fi az account set -s ${AZURE_SUBSCRIPTION_ID} >/dev/null 2>&1 eval "$tracestate" -export RESOURCE_GROUP_NAME="${RESOURCE_GROUP_NAME:-cluster-api-images}" +export RESOURCE_GROUP_NAME="${RESOURCE_GROUP_NAME:-cluster-api-gallery}" export AZURE_LOCATION="${AZURE_LOCATION:-northcentralus}" if ! az group show -n ${RESOURCE_GROUP_NAME} -o none 2>/dev/null; then az group create -n ${RESOURCE_GROUP_NAME} -l ${AZURE_LOCATION} --tags ${TAGS:-} @@ -83,17 +99,19 @@ fi ############################################################################## create_image_definition() { - az sig image-definition create \ - --resource-group ${RESOURCE_GROUP_NAME} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${SIG_IMAGE_DEFINITION:-capi-${SIG_SKU:-$1}} \ - --publisher ${SIG_PUBLISHER:-capz} \ - --offer ${SIG_OFFER:-capz-demo} \ - --sku ${SIG_SKU:-$2} \ - --hyper-v-generation ${3} \ - --os-type ${4} \ - --features ${5:-''} \ - "${plan_args[@]}" # TODO: Delete this line after the image is GA + if ! 
az sig image-definition show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${SIG_IMAGE_DEFINITION:-capi-${SIG_SKU:-$1}} --resource-group ${RESOURCE_GROUP_NAME} -o none 2>/dev/null; then + az sig image-definition create \ + --resource-group ${RESOURCE_GROUP_NAME} \ + --gallery-name ${GALLERY_NAME} \ + --gallery-image-definition ${SIG_IMAGE_DEFINITION:-capi-${SIG_SKU:-$1}} \ + --publisher ${SIG_PUBLISHER:-capz} \ + --offer ${SIG_OFFER:-capz-demo} \ + --sku ${SIG_SKU:-$2} \ + --hyper-v-generation ${3} \ + --os-type ${4} \ + --features ${5:-''} \ + "${plan_args[@]}" # TODO: Delete this line after the image is GA + fi } case ${SIG_TARGET} in diff --git a/images/capi/packer/azure/scripts/init-vhd.sh b/images/capi/packer/azure/scripts/init-vhd.sh deleted file mode 100755 index a88c17b896..0000000000 --- a/images/capi/packer/azure/scripts/init-vhd.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -[[ -n ${DEBUG:-} ]] && set -o xtrace - -echo "Sign into Azure" -tracestate="$(shopt -po xtrace)" -set +o xtrace - -if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" > /dev/null 2>&1 - export AZURE_STORAGE_AUTH_MODE="login" # Use auth mode "login" in az storage commands. -else - az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" -p ${AZURE_CLIENT_SECRET} >/dev/null 2>&1 -fi -az account set -s ${AZURE_SUBSCRIPTION_ID} >/dev/null 2>&1 -eval "$tracestate" - -echo "Create storage account" -export RESOURCE_GROUP_NAME="${RESOURCE_GROUP_NAME:-cluster-api-images}" -export AZURE_LOCATION="${AZURE_LOCATION:-northcentralus}" -if ! 
az group show -n ${RESOURCE_GROUP_NAME} -o none 2>/dev/null; then - az group create -n ${RESOURCE_GROUP_NAME} -l ${AZURE_LOCATION} --tags ${TAGS:-} -fi -CREATE_TIME="$(date +%s)" -RANDOM_SUFFIX="$(head /dev/urandom | LC_ALL=C tr -dc a-z | head -c 4 ; echo '')" -get_random_region() { - local REGIONS=("canadacentral" "eastus" "eastus2" "northeurope" "uksouth" "westeurope" "westus2" "westus3") - echo "${REGIONS[${RANDOM} % ${#REGIONS[@]}]}" -} -RANDOMIZE_STORAGE_ACCOUNT="${RANDOMIZE_STORAGE_ACCOUNT:-"false"}" -if [ "$RANDOMIZE_STORAGE_ACCOUNT" == "true" ]; then - export AZURE_LOCATION="$(get_random_region)" -fi -export STORAGE_ACCOUNT_NAME="${STORAGE_ACCOUNT_NAME:-capi${CREATE_TIME}${RANDOM_SUFFIX}}" -az storage account check-name --name ${STORAGE_ACCOUNT_NAME} -az storage account create -n ${STORAGE_ACCOUNT_NAME} -g ${RESOURCE_GROUP_NAME} -l ${AZURE_LOCATION} --allow-blob-public-access false - -echo "done" diff --git a/images/capi/packer/azure/scripts/new-disk-version.sh b/images/capi/packer/azure/scripts/new-disk-version.sh deleted file mode 100755 index e057f3dcd0..0000000000 --- a/images/capi/packer/azure/scripts/new-disk-version.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -e - -[[ -n ${DEBUG:-} ]] && set -o xtrace - -echo "PWD: $PWD" - -OS=${OS:-"Ubuntu"} -OS_VERSION=${OS_VERSION:-"22.04"} -PUB_VERSION=${PUB_VERSION:-"v0.3.3"} - -required_env_vars=( - "AZURE_CLIENT_ID" - "AZURE_CLIENT_SECRET" - "AZURE_TENANT_ID" - "OS" - "OS_VERSION" - "PUB_VERSION" -) - -for v in "${required_env_vars[@]}" -do - if [ -z "${!v}" ]; then - echo "$v was not set!" - exit 1 - fi -done - -SKU_INFO="sku/sku-publishing-info.json" -VHD_INFO="vhd/vhd-publishing-info.json" - -required_files=( - "SKU_INFO" - "VHD_INFO" -) - -for f in "${required_files[@]}" -do - if [ ! -f "${!f}" ]; then - echo "could not find file: ${!f}" - exit 1 - fi -done - -echo "Getting pub..." 
-(set -x ; curl -fsSL https://github.com/devigned/pub/releases/download/${PUB_VERSION}/pub_${PUB_VERSION}_linux_amd64.tar.gz -o pub; tar -xzf pub) - -echo "SKU publishing info:" -cat $SKU_INFO -echo - -echo "VHD publishing info:" -cat $VHD_INFO -echo - - -# get Kubernetes version and split into major, minor, and patch -k8s_version=$(< $SKU_INFO jq -r ".k8s_version") -IFS='.' # set period (.) as delimiter -read -ra ADDR <<< "${k8s_version}" # str is read into an array as tokens separated by IFS -IFS=' ' # reset to default value after usage -major=${ADDR[0]} -minor=${ADDR[1]} -patch=${ADDR[2]} - -# generate image version -image_version=${major}${minor}.${patch}.$(date +"%Y%m%d") - -# generate media name -sku_id=$(< $SKU_INFO jq -r ".sku_id") -media_name="${sku_id}-${image_version}" - -# generate published date -published_date=$(date +"%m/%d/%Y") - -# get vhd url -vhd_url=$(< $VHD_INFO jq -r ".vhd_url") - -label="Kubernetes $k8s_version $OS $OS_VERSION" -description="Kubernetes $k8s_version $OS $OS_VERSION" - -# create version.json -cat < version.json -{ - "$image_version" : { - "mediaName": "$media_name", - "showInGui": false, - "publishedDate": "$published_date", - "label": "$label", - "description": "$description", - "osVHdUrl": "$vhd_url" - } -} -EOF - -echo "Version info:" -cat version.json - -publisher=$(< $SKU_INFO jq -r ".publisher") -offer=$(< $SKU_INFO jq -r ".offer") -sku=$(< $SKU_INFO jq -r ".sku_id") - -# TODO: Update pub versions put to take in version.json as a file -echo "Create new disk version" -set -x -./pub_linux_amd64 versions put corevm -p $publisher -o $offer -s $sku --version $image_version --vhd-uri $vhd_url --media-name $media_name --label "$label" --desc "$description" --published-date "$published_date" -set +x -echo -e "\nCreated disk version" diff --git a/images/capi/packer/azure/scripts/new-sku.sh b/images/capi/packer/azure/scripts/new-sku.sh deleted file mode 100755 index 75c1b2cb2f..0000000000 --- 
a/images/capi/packer/azure/scripts/new-sku.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -e - -OS=${OS:-"Ubuntu"} -OS_VERSION=${OS_VERSION:-"22.04"} -PUB_VERSION=${PUB_VERSION:-"v0.3.3"} -VM_GENERATION=${VM_GENERATION:-"gen1"} -[[ -n ${DEBUG:-} ]] && set -o xtrace - -required_env_vars=( - "AZURE_CLIENT_ID" - "AZURE_CLIENT_SECRET" - "AZURE_TENANT_ID" - "KUBERNETES_VERSION" - "OFFER" - "OS" - "OS_VERSION" - "PUB_VERSION" - "PUBLISHER" - "SKU_TEMPLATE_FILE" - "VM_GENERATION" -) - -for v in "${required_env_vars[@]}" -do - if [ -z "${!v}" ]; then - echo "$v was not set!" - exit 1 - fi -done - -if [ ! -f "$SKU_TEMPLATE_FILE" ]; then - echo "Could not find sku template file: ${SKU_TEMPLATE_FILE}!" - exit 1 -fi - -os=$(echo "$OS" | tr '[:upper:]' '[:lower:]') -version=$(echo "$OS_VERSION" | tr '[:upper:]' '[:lower:]' | tr -d .) -sku_id="${os}-${version}-${VM_GENERATION}" - -if [ "$OS" == "Ubuntu" ]; then - os_type="Ubuntu" - os_family="Linux" -elif [ "$OS" == "AzureLinux" ] || [ "$OS" == "Mariner" ]; then - os_type="CBL-Mariner" - os_family="Linux" -elif [ "$OS" == "Windows" ]; then - os_type="Other" - os_family="Windows" -else - echo "Cannot configure unknown OS: ${OS}!" - exit 1 -fi - -< $SKU_TEMPLATE_FILE sed s/{{ID}}/"$sku_id"/ \ - | sed s/{{KUBERNETES_VERSION}}/"$KUBERNETES_VERSION/" \ - | sed s/{{OS}}/"$OS/" \ - | sed s/{{OS_VERSION}}/"$OS_VERSION/" \ - | sed s/{{OS_TYPE}}/"$os_type/" \ - | sed s/{{OS_FAMILY}}/"$os_family/" \ - > sku.json -cat sku.json - -echo -echo "Getting pub..." 
-(set -x ; curl -fsSL https://github.com/devigned/pub/releases/download/${PUB_VERSION}/pub_${PUB_VERSION}_linux_amd64.tar.gz -o pub; tar -xzf pub) - -echo "Creating new SKU" -set -x -./pub_linux_amd64 skus put -p $PUBLISHER -o "$OFFER" -f sku.json -set +x -echo -e "\nCreated sku" - -echo "Writing publishing info" -cat < sku-publishing-info.json -{ - "publisher" : "$PUBLISHER", - "offer" : "$OFFER", - "sku_id" : "$sku_id", - "k8s_version" : "$KUBERNETES_VERSION" -} -EOF - -cat sku-publishing-info.json