diff --git a/charmcraft.yaml b/charmcraft.yaml index 1adcbf2ae..e850c9221 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -13,7 +13,8 @@ parts: - rust-all # for cryptography - pkg-config # for cryptography prime: - - scripts/build-image.sh + - scripts/build-lxd-image.sh + - scripts/build-openstack-image.sh - scripts/repo_policy_compliance_service.py bases: - build-on: diff --git a/scripts/build-image.sh b/scripts/build-lxd-image.sh similarity index 100% rename from scripts/build-image.sh rename to scripts/build-lxd-image.sh diff --git a/scripts/build-openstack-image.sh b/scripts/build-openstack-image.sh new file mode 100755 index 000000000..8b97486ca --- /dev/null +++ b/scripts/build-openstack-image.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash + +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +set -euo pipefail + +# GitHub runner bin args +RUNNER_TAR_URL="$1" + +# Proxy args +HTTP_PROXY="$2" +HTTPS_PROXY="$3" +NO_PROXY="$4" +DOCKER_PROXY_SERVICE_CONF="$5" +DOCKER_PROXY_CONF="$6" + +# retry function +retry() { + local command="$1" + local wait_message="$2" + local max_try="$3" + + local attempt=0 + + while ! $command + do + attempt=$((attempt + 1)) + if [[ attempt -ge $max_try ]]; then + return + fi + + echo "$wait_message" + sleep 10 + done +} + +# cleanup any existing mounts +cleanup() { + sudo umount /mnt/ubuntu-image/dev/ || true + sudo umount /mnt/ubuntu-image/proc/ || true + sudo umount /mnt/ubuntu-image/sys/ || true + sudo umount /mnt/ubuntu-image || true + sudo qemu-nbd --disconnect /dev/nbd0 +} + +# Check if proxy variables set, doesn't exist or is a different value then update. +if [[ -n "$HTTP_PROXY" ]]; then + if ! grep -q "HTTP_PROXY=" /etc/environment || ! grep -q "HTTP_PROXY=$HTTP_PROXY" /etc/environment; then + sed -i "/^HTTP_PROXY=/d" /etc/environment + echo "HTTP_PROXY=$HTTP_PROXY" >> /etc/environment + fi + if ! grep -q "http_proxy=" /etc/environment || ! grep -q "http_proxy=$HTTP_PROXY" /etc/environment; then + sed -i "/^http_proxy=/d" /etc/environment + echo "http_proxy=$HTTP_PROXY" >> /etc/environment + fi + if ! grep -q "Acquire::http::Proxy" /etc/apt/apt.conf || ! grep -q "Acquire::http::Proxy \"$HTTP_PROXY\";" /etc/apt/apt.conf; then + sed -i "/^Acquire::http::Proxy/d" /etc/apt/apt.conf + echo "Acquire::http::Proxy \"$HTTP_PROXY\";" >> /etc/apt/apt.conf + fi +fi + +if [[ -n "$HTTPS_PROXY" ]]; then + if ! grep -q "HTTPS_PROXY=" /etc/environment || ! grep -q "HTTPS_PROXY=$HTTPS_PROXY" /etc/environment; then + sed -i "/^HTTPS_PROXY=/d" /etc/environment + echo "HTTPS_PROXY=$HTTPS_PROXY" >> /etc/environment + fi + if ! grep -q "https_proxy=" /etc/environment || ! grep -q "https_proxy=$HTTPS_PROXY" /etc/environment; then + sed -i "/^https_proxy=/d" /etc/environment + echo "https_proxy=$HTTPS_PROXY" >> /etc/environment + fi + if ! grep -q "Acquire::https::Proxy" /etc/apt/apt.conf || ! grep -q "Acquire::https::Proxy \"$HTTPS_PROXY\";" /etc/apt/apt.conf; then + sed -i "/^Acquire::https::Proxy/d" /etc/apt/apt.conf + echo "Acquire::https::Proxy \"$HTTPS_PROXY\";" >> /etc/apt/apt.conf + fi +fi + +if [[ -n "$NO_PROXY" ]]; then + if ! grep -q "NO_PROXY=" /etc/environment || ! grep -q "NO_PROXY=$NO_PROXY" /etc/environment; then + sed -i "/^NO_PROXY=/d" /etc/environment + echo "NO_PROXY=$NO_PROXY" >> /etc/environment + fi + if ! grep -q "no_proxy=" /etc/environment || ! 
grep -q "no_proxy=$NO_PROXY" /etc/environment; then + sed -i "/^no_proxy=/d" /etc/environment + echo "no_proxy=$NO_PROXY" >> /etc/environment + fi +fi + +# Architecture args +ARCH=$(uname -m) +if [[ $ARCH == 'aarch64' ]]; then + BIN_ARCH="arm64" +elif [[ $ARCH == 'arm64' ]]; then + BIN_ARCH="arm64" +elif [[ $ARCH == 'x86_64' ]]; then + BIN_ARCH="amd64" +else + echo "Unsupported CPU architecture: $ARCH" + return 1 +fi + +# qemu-utils required to unpack qcow image +sudo DEBIAN_FRONTEND=noninteractive apt-get install qemu-utils libguestfs-tools -y + +# enable network block device +sudo modprobe nbd + +# cleanup any existing mounts +cleanup + +retry "sudo wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-$BIN_ARCH.img \ + -O jammy-server-cloudimg-$BIN_ARCH.img" "Downloading cloud image" 3 + +# resize image - installing dependencies requires more disk space +sudo qemu-img resize jammy-server-cloudimg-$BIN_ARCH.img +1.5G + +# mount nbd +echo "Connecting network block device to image" +sudo qemu-nbd --connect=/dev/nbd0 jammy-server-cloudimg-$BIN_ARCH.img +sudo mkdir -p /mnt/ubuntu-image +retry "sudo mount -o rw /dev/nbd0p1 /mnt/ubuntu-image" "Mounting nbd0p1 device" 3 + +# mount required system dirs +echo "Mounting sys dirs" +retry "sudo mount --bind /dev/ /mnt/ubuntu-image/dev/" "Mounting /dev/" 3 +retry "sudo mount --bind /proc/ /mnt/ubuntu-image/proc/" "Mounting /proc/" 3 +retry "sudo mount --bind /sys/ /mnt/ubuntu-image/sys/" "Mounting /sys/" 3 +sudo rm /mnt/ubuntu-image/etc/resolv.conf -f +sudo cp /etc/resolv.conf /mnt/ubuntu-image/etc/resolv.conf + +# resize mount +echo "Resizing mounts" +sudo growpart /dev/nbd0 1 # grow partition size to available space +sudo resize2fs /dev/nbd0p1 # resize fs accordingly + +# chroot and install dependencies +echo "Installing dependencies in chroot env" +sudo chroot /mnt/ubuntu-image/ < /etc/systemd/system/docker.service.d/http-proxy.conf +fi +if [[ -n "$DOCKER_PROXY_CONF" ]]; then + mkdir -p /root/.docker + echo "$DOCKER_PROXY_CONF" > /root/.docker/config.json + mkdir -p /home/ubuntu/.docker + echo "$DOCKER_PROXY_CONF" > /home/ubuntu/.docker/config.json +fi + +# Reduce image size +/usr/bin/npm cache clean --force +DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get clean + +# Download and verify checksum of yq +/usr/bin/wget "https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$BIN_ARCH" -O "yq_linux_$BIN_ARCH" +/usr/bin/wget https://github.com/mikefarah/yq/releases/latest/download/checksums -O checksums +/usr/bin/wget https://github.com/mikefarah/yq/releases/latest/download/checksums_hashes_order -O checksums_hashes_order +/usr/bin/wget https://github.com/mikefarah/yq/releases/latest/download/extract-checksum.sh -O extract-checksum.sh +/usr/bin/bash extract-checksum.sh SHA-256 "yq_linux_$BIN_ARCH" | /usr/bin/awk '{print \$2,\$1}' | /usr/bin/sha256sum -c | /usr/bin/grep OK +rm checksums checksums_hashes_order extract-checksum.sh +/usr/bin/chmod 755 yq_linux_$BIN_ARCH +/usr/bin/mv yq_linux_$BIN_ARCH /usr/bin/yq + +# Download runner bin and verify checksum +mkdir -p /home/ubuntu/actions-runner && cd /home/ubuntu/actions-runner +/usr/bin/curl -o /home/ubuntu/actions-runner.tar.gz -L $RUNNER_TAR_URL +/usr/bin/tar xzf /home/ubuntu/actions-runner.tar.gz +rm /home/ubuntu/actions-runner.tar.gz +chown -R ubuntu /home/ubuntu/ +EOF + +# sync & cleanup +echo "Syncing" +sudo sync +cleanup + +# Reduce image size by removing sparse space & compressing +sudo virt-sparsify --compress jammy-server-cloudimg-$BIN_ARCH.img 
jammy-server-cloudimg-$BIN_ARCH-compressed.img diff --git a/scripts/setup-microstack.sh b/scripts/setup-microstack.sh index 3fe775aa7..e62a49df3 100644 --- a/scripts/setup-microstack.sh +++ b/scripts/setup-microstack.sh @@ -39,10 +39,12 @@ sunbeam prepare-node-script | bash -x sleep 10 # The following can take a while.... retry 'sudo -g snap_daemon sunbeam cluster bootstrap --accept-defaults' 'Waiting for cluster bootstrap to complete' 3 -clouds_yaml="${PWD}/admin-clouds.yaml" -sg snap_daemon -c "sunbeam cloud-config -a" | tee "$clouds_yaml" +# 2024/03/11 Demo user setup should be removed after openstack server creation PR. +retry 'sudo -g snap_daemon sunbeam configure --accept-defaults --openrc demo-openrc' 'Configuring sunbeam cluster' 3 +clouds_yaml="${PWD}/clouds.yaml" +sg snap_daemon -c "sunbeam cloud-config" | tee "$clouds_yaml" # Test connection -OS_CLIENT_CONFIG_FILE="$clouds_yaml" openstack --os-cloud sunbeam user list +OS_CLIENT_CONFIG_FILE="$clouds_yaml" openstack --os-cloud sunbeam user show demo juju clouds || echo "Failed to list clouds" juju bootstrap localhost lxd diff --git a/src-docs/charm.py.md b/src-docs/charm.py.md index 474302d3e..ffa94edac 100644 --- a/src-docs/charm.py.md +++ b/src-docs/charm.py.md @@ -13,7 +13,7 @@ Charm for creating and managing GitHub self-hosted runner instances. --- - + ## function `catch_charm_errors` @@ -39,7 +39,7 @@ Catch common errors in charm. --- - + ## function `catch_action_errors` @@ -68,7 +68,7 @@ Catch common errors in actions. ## class `GithubRunnerCharm` Charm for managing GitHub self-hosted runners. - + ### function `__init__` diff --git a/src-docs/charm_state.py.md b/src-docs/charm_state.py.md index 401811ba1..84577b6b4 100644 --- a/src-docs/charm_state.py.md +++ b/src-docs/charm_state.py.md @@ -16,7 +16,7 @@ State of the Charm. --- - + ## function `parse_github_path` @@ -70,7 +70,7 @@ Some charm configurations are grouped into other configuration models. --- - + ### classmethod `check_fields` @@ -93,7 +93,7 @@ Validate the general charm configuration. --- - + ### classmethod `from_charm` @@ -126,7 +126,7 @@ Raised when charm config is invalid. - `msg`: Explanation of the error. - + ### function `__init__` @@ -166,7 +166,7 @@ The charm state. --- - + ### classmethod `from_charm` @@ -205,7 +205,7 @@ Represent GitHub organization. --- - + ### function `path` @@ -238,7 +238,7 @@ Represent GitHub repository. --- - + ### function `path` @@ -279,7 +279,7 @@ Return the aproxy address. --- - + ### classmethod `check_fields` @@ -302,7 +302,7 @@ Validate the proxy configuration. --- - + ### classmethod `from_charm` @@ -342,7 +342,7 @@ Runner configurations for the charm. --- - + ### classmethod `check_fields` @@ -365,7 +365,7 @@ Validate the runner configuration. --- - + ### classmethod `from_charm` @@ -415,7 +415,7 @@ SSH connection information for debug workflow. --- - + ### classmethod `from_charm` @@ -443,7 +443,7 @@ Raised when given machine charm architecture is unsupported. - `arch`: The current machine architecture. - + ### function `__init__` diff --git a/src-docs/cos.py.md b/src-docs/cos.py.md deleted file mode 100644 index 3b18e5cd4..000000000 --- a/src-docs/cos.py.md +++ /dev/null @@ -1,237 +0,0 @@ - - - - -# module `cos.py` -The COS integration observer. - -**Global Variables** ---------------- -- **METRICS_LOGGING_INTEGRATION_NAME** -- **PROMTAIL_HEALTH_CHECK_INTERVAL_MINUTES** - - ---- - -## class `LokiEndpoint` -Information about the Loki endpoint. - -Attrs: url: The URL of the Loki endpoint. 
- - ---- - -#### property model_computed_fields - -Get the computed fields of this model instance. - - - -**Returns:** - A dictionary of computed field names and their corresponding `ComputedFieldInfo` objects. - ---- - -#### property model_extra - -Get extra fields set during validation. - - - -**Returns:** - A dictionary of extra fields, or `None` if `config.extra` is not set to `"allow"`. - ---- - -#### property model_fields_set - -Returns the set of fields that have been set on this model instance. - - - -**Returns:** - A set of strings representing the fields that have been set, i.e. that were not filled from defaults. - - - - ---- - -## class `LokiIntegrationData` -Represents Loki integration data. - -Attrs: endpoints: The Loki endpoints. promtail_binaries: The Promtail binaries. - - ---- - -#### property model_computed_fields - -Get the computed fields of this model instance. - - - -**Returns:** - A dictionary of computed field names and their corresponding `ComputedFieldInfo` objects. - ---- - -#### property model_extra - -Get extra fields set during validation. - - - -**Returns:** - A dictionary of extra fields, or `None` if `config.extra` is not set to `"allow"`. - ---- - -#### property model_fields_set - -Returns the set of fields that have been set on this model instance. - - - -**Returns:** - A set of strings representing the fields that have been set, i.e. that were not filled from defaults. - - - - ---- - -## class `LokiIntegrationDataIncompleteError` -Indicates an error if the Loki integration data is not complete for Promtail startup. - - - -### function `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the LokiIntegrationDataNotComplete exception. - - - -**Args:** - - - `msg`: Explanation of the error. - - - - - ---- - -## class `Observer` -COS integration observer. - - - -### function `__init__` - -```python -__init__(charm: CharmBase, state: State) -``` - -Initialize the COS observer and register event handlers. - - - -**Args:** - - - `charm`: The parent charm to attach the observer to. - - `state`: The charm state. - - ---- - -#### property model - -Shortcut for more simple access the model. - - - ---- - - - -### function `metrics_logging_available` - -```python -metrics_logging_available() → bool -``` - -Check that the metrics logging integration is set up correctly. - - - -**Returns:** - True if the integration is established, False otherwise. - - ---- - -## class `PromtailBinary` -Information about the Promtail binary. - -Attrs: url: The URL to download the Promtail binary from. zipsha: The SHA256 hash of the Promtail zip file. binsha: The SHA256 hash of the Promtail binary. - - ---- - -#### property model_computed_fields - -Get the computed fields of this model instance. - - - -**Returns:** - A dictionary of computed field names and their corresponding `ComputedFieldInfo` objects. - ---- - -#### property model_extra - -Get extra fields set during validation. - - - -**Returns:** - A dictionary of extra fields, or `None` if `config.extra` is not set to `"allow"`. - ---- - -#### property model_fields_set - -Returns the set of fields that have been set on this model instance. - - - -**Returns:** - A set of strings representing the fields that have been set, i.e. that were not filled from defaults. - - - - ---- - -## class `PromtailHealthCheckEvent` -Event representing a periodic check to ensure Promtail is running. - - - - - ---- - -## class `PromtailNotRunningError` -Indicates an error if Promtail is not running. 
- - - - - diff --git a/src-docs/errors.py.md b/src-docs/errors.py.md index 09bc34084..6a3a19b4a 100644 --- a/src-docs/errors.py.md +++ b/src-docs/errors.py.md @@ -151,6 +151,24 @@ Represents an unauthorized connection to OpenStack. +--- + +## class `OpenstackImageBuildError` +Exception representing an error during image build process. + + + + + +--- + +## class `OpenstackInstanceLaunchError` +Exception representing an error during instance launch process. + + + + + --- ## class `QuarantineSharedFilesystemError` diff --git a/src-docs/github_client.py.md b/src-docs/github_client.py.md index e8a8e4d71..af58b2311 100644 --- a/src-docs/github_client.py.md +++ b/src-docs/github_client.py.md @@ -10,7 +10,7 @@ Migrate to PyGithub in the future. PyGithub is still lacking some API such as re --- - + ## function `catch_http_errors` @@ -26,7 +26,7 @@ Catch HTTP errors and raise custom exceptions. ## class `GithubClient` GitHub API client. - + ### function `__init__` @@ -48,7 +48,7 @@ Instantiate the GiHub API client. --- - + ### function `delete_runner` @@ -67,7 +67,7 @@ Delete the self-hosted runner from GitHub. --- - + ### function `get_job_info` @@ -96,28 +96,42 @@ Get information about a job for a specific workflow run. --- - + -### function `get_runner_applications` +### function `get_runner_application` ```python -get_runner_applications(path: GithubOrg | GithubRepo) → List[RunnerApplication] +get_runner_application( + path: GithubOrg | GithubRepo, + arch: Arch, + os: str = 'linux' +) → RunnerApplication ``` -Get list of runner applications available for download. +Get runner application available for download for given arch. **Args:** - `path`: GitHub repository path in the format '/', or the GitHub organization name. + - `arch`: The runner architecture. + - `os`: The operating system that the runner binary should run on. + + + +**Raises:** + + - `RunnerBinaryError`: If the runner application for given architecture and OS is not found. + + **Returns:** - List of runner applications. + The runner application. --- - + ### function `get_runner_github_info` @@ -140,7 +154,7 @@ Get runner information on GitHub under a repo or org. --- - + ### function `get_runner_registration_token` @@ -163,7 +177,7 @@ Get token from GitHub used for registering runners. --- - + ### function `get_runner_remove_token` diff --git a/src-docs/openstack_manager.md b/src-docs/openstack_manager.md new file mode 100644 index 000000000..ef228b256 --- /dev/null +++ b/src-docs/openstack_manager.md @@ -0,0 +1,169 @@ + + + + +# module `openstack_manager` +Module for handling interactions with OpenStack. + +**Global Variables** +--------------- +- **IMAGE_PATH_TMPL** +- **IMAGE_NAME** +- **BUILD_OPENSTACK_IMAGE_SCRIPT_FILENAME** + +--- + + + +## function `build_image` + +```python +build_image( + arch: Arch, + cloud_config: dict[str, dict], + github_client: GithubClient, + path: GithubOrg | GithubRepo, + proxies: Optional[ProxyConfig] = None +) → str +``` + +Build and upload an image to OpenStack. + + + +**Args:** + + - `cloud_config`: The cloud configuration to connect OpenStack with. + - `github_client`: The Github client to interact with Github API. + - `path`: Github organisation or repository path. + - `proxies`: HTTP proxy settings. + + + +**Raises:** + + - `ImageBuildError`: If there were errors building/creating the image. + + + +**Returns:** + The created OpenStack image id. 
+ + +--- + + + +## function `create_instance_config` + +```python +create_instance_config( + unit_name: str, + openstack_image: Image, + path: GithubOrg | GithubRepo, + github_client: GithubClient +) → InstanceConfig +``` + +Create an instance config from charm data. + + + +**Args:** + + - `unit_name`: The charm unit name. + - `image`: Ubuntu image flavor. + - `path`: Github organisation or repository path. + - `github_client`: The Github client to interact with Github API. + + +--- + + + +## function `create_instance` + +```python +create_instance( + cloud_config: dict[str, dict], + instance_config: InstanceConfig, + proxies: Optional[ProxyConfig] = None, + dockerhub_mirror: Optional[str] = None, + ssh_debug_connections: list[SSHDebugConnection] | None = None +) → None +``` + +Create an OpenStack instance. + + + +**Args:** + + - `cloud_config`: The cloud configuration to connect Openstack with. + - `instance_config`: The configuration values for Openstack instance to launch. + + + +**Raises:** + + - `OpenstackInstanceLaunchError`: if any errors occurred while launching Openstack instance. + + +--- + + + +## class `ProxyStringValues` +Wrapper class to proxy values to string. + + + +**Attributes:** + + - `http`: HTTP proxy address. + - `https`: HTTPS proxy address. + - `no_proxy`: Comma-separated list of hosts that should not be proxied. + + + + + +--- + + + +## class `InstanceConfig` +The configuration values for creating a single runner instance. + + + +**Args:** + + - `name`: Name of the image to launch the GitHub runner instance with. + - `labels`: The runner instance labels. + - `registration_token`: Token for registering the runner on GitHub. + - `github_path`: The GitHub repo/org path + - `openstack_image`: The Openstack image to use to boot the instance with. + + + +### method `__init__` + +```python +__init__( + name: str, + labels: Iterable[str], + registration_token: str, + github_path: GithubOrg | GithubRepo, + openstack_image: Image +) → None +``` + + + + + + + + + diff --git a/src-docs/openstack_manager.py.md b/src-docs/openstack_manager.py.md deleted file mode 100644 index a311094fc..000000000 --- a/src-docs/openstack_manager.py.md +++ /dev/null @@ -1,57 +0,0 @@ - - - - -# module `openstack_manager.py` -Module for handling interactions with OpenStack. - - ---- - - - -## function `initialize` - -```python -initialize(cloud_config: dict) → None -``` - -Initialize Openstack integration. - -Validates config and writes it to disk. - - - -**Args:** - - - `cloud_config`: The configuration in clouds.yaml format to apply. - - - -**Raises:** - - - `InvalidConfigError`: if the format of the config is invalid. - - ---- - - - -## function `list_projects` - -```python -list_projects(cloud_config: dict) → list[Project] -``` - -List all projects in the OpenStack cloud. - -The purpose of the method is just to try out openstack integration and it may be removed in the future. - -It currently returns objects directly from the sdk, which may not be ideal (mapping to domain objects may be preferable). - - - -**Returns:** - A list of projects. - - diff --git a/src-docs/promtail.py.md b/src-docs/promtail.py.md deleted file mode 100644 index 10ea70f8c..000000000 --- a/src-docs/promtail.py.md +++ /dev/null @@ -1,128 +0,0 @@ - - - - -# module `promtail.py` -Functions for operating Promtail. 
- -**Global Variables** ---------------- -- **PROMTAIL_BASE_URL** -- **SYSTEMCTL_PATH_STR** -- **PROMTAIL_BINARY_FILE_MODE** -- **JINJA2_TEMPLATE_PATH** - ---- - - - -## function `setup` - -```python -setup(config: Config) → None -``` - -Set up Promtail. - -Installs, configures and starts Promtail. - -If Promtail has not already been installed, it will be installed and configured to send logs to Loki. If Promtail is already running, it will be reconfigured and restarted. - - - -**Args:** - - - `config`: The configuration for Promtail. - - ---- - - - -## function `restart` - -```python -restart() → None -``` - -Restart Promtail. - - ---- - - - -## function `stop` - -```python -stop() → None -``` - -Stop Promtail. - - ---- - - - -## function `is_running` - -```python -is_running() → bool -``` - -Check if Promtail is running. - - - -**Returns:** - True if Promtail is running, False otherwise. - - ---- - -## class `Config` -Configuration options for Promtail. - -Attrs: loki_endpoint: The Loki endpoint to send logs to. proxies: Proxy settings. promtail_download_info: Information about the Promtail download. - - - - - ---- - -## class `PromtailDownloadInfo` -Information about the Promtail download. - -Attrs: url: The URL to download Promtail from. zip_sha256: The SHA256 hash of the Promtail zip file. bin_sha256: The SHA256 hash of the Promtail binary. - - - - - ---- - -## class `PromtailInstallationError` -Represents an error during installation of Promtail. - - - -### function `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the PromtailInstallationError exception. - - - -**Args:** - - - `msg`: Explanation of the error. - - - - - diff --git a/src-docs/runner_manager.py.md b/src-docs/runner_manager.py.md index eed328733..1b4c074bd 100644 --- a/src-docs/runner_manager.py.md +++ b/src-docs/runner_manager.py.md @@ -43,7 +43,7 @@ Construct RunnerManager object for creating and managing runners. --- - + ### function `build_runner_image` @@ -80,7 +80,7 @@ Check if runner binary exists. --- - + ### function `flush` @@ -103,7 +103,7 @@ Remove existing runners. --- - + ### function `get_github_info` @@ -140,12 +140,18 @@ The runner binary URL changes when a new version is available. +**Raises:** + + - `RunnerBinaryError`: If an error occurred while fetching runner application info. + + + **Returns:** Information on the runner application. --- - + ### function `reconcile` @@ -169,7 +175,7 @@ Bring runners in line with target. --- - + ### function `schedule_build_runner_image` @@ -181,7 +187,7 @@ Install cron job for building runner image. --- - + ### function `update_runner_bin` diff --git a/src/charm.py b/src/charm.py index eb8091d78..e595d195a 100755 --- a/src/charm.py +++ b/src/charm.py @@ -3,6 +3,9 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. +# 2024/03/12 The module contains too many lines which are scheduled for refactoring. 
+# pylint: disable=too-many-lines + """Charm for creating and managing GitHub self-hosted runner instances.""" import functools @@ -32,7 +35,6 @@ from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus import metrics -import openstack_manager from charm_state import ( DEBUG_SSH_INTEGRATION_NAME, LABELS_CONFIG_NAME, @@ -56,7 +58,9 @@ ) from event_timer import EventTimer, TimerStatusError from firewall import Firewall, FirewallEntry +from github_client import GithubClient from github_type import GitHubRunnerStatus +from openstack_cloud import openstack_manager from runner import LXD_PROFILE_YAML from runner_manager import RunnerManager, RunnerManagerConfig from runner_manager_type import FlushMode @@ -337,6 +341,21 @@ def _get_runner_manager( ), ) + def _block_on_openstack_config(self, state: CharmState) -> bool: + """Set unit to blocked status on openstack configuration set. + + Returns: + Whether openstack configuration is enabled. + """ + if state.charm_config.openstack_clouds_yaml: + # Go into BlockedStatus as Openstack is not supported yet + self.unit.status = BlockedStatus( + "OpenStack integration is not supported yet. " + "Please remove the openstack-clouds-yaml config." + ) + return True + return False + @catch_charm_errors def _on_install(self, _event: InstallEvent) -> None: """Handle the installation of charm. @@ -346,6 +365,36 @@ def _on_install(self, _event: InstallEvent) -> None: """ state = self._setup_state() + if state.charm_config.openstack_clouds_yaml: + # Only build it in test mode since it may interfere with users systems. + if self.config.get("test-mode") == "insecure": + self.unit.status = MaintenanceStatus("Building Openstack image") + github = GithubClient(token=state.charm_config.token) + image = openstack_manager.build_image( + arch=state.arch, + cloud_config=state.charm_config.openstack_clouds_yaml, + github_client=github, + path=state.charm_config.path, + proxies=state.proxy_config, + ) + instance_config = openstack_manager.create_instance_config( + unit_name=self.unit.name, + openstack_image=image, + path=state.charm_config.path, + github_client=github, + ) + self.unit.status = MaintenanceStatus("Creating Openstack test instance") + instance = openstack_manager.create_instance( + cloud_config=state.charm_config.openstack_clouds_yaml, + instance_config=instance_config, + proxies=state.proxy_config, + dockerhub_mirror=state.charm_config.dockerhub_mirror, + ssh_debug_connections=state.ssh_debug_connections, + ) + logger.info("OpenStack instance: %s", instance) + self._block_on_openstack_config(state) + return + self.unit.status = MaintenanceStatus("Installing packages") try: # The `_start_services`, `_install_deps` includes retry. @@ -394,6 +443,10 @@ def _on_start(self, _event: StartEvent) -> None: event: Event of starting the charm. 
""" state = self._setup_state() + + if self._block_on_openstack_config(state): + return + runner_manager = self._get_runner_manager(state) self._check_and_update_dependencies( @@ -501,6 +554,9 @@ def _on_config_changed(self, _event: ConfigChangedEvent) -> None: state = self._setup_state() self._set_reconcile_timer() + if self._block_on_openstack_config(state): + return + prev_config_for_flush: dict[str, str] = {} should_flush_runners = False if state.charm_config.token != self._stored.token: @@ -525,17 +581,6 @@ def _on_config_changed(self, _event: ConfigChangedEvent) -> None: state = self._setup_state() self._refresh_firewall(state) - if state.charm_config.openstack_clouds_yaml: - # Test out openstack integration and then go - # into BlockedStatus as it is not supported yet - projects = openstack_manager.list_projects(state.charm_config.openstack_clouds_yaml) - logger.info("OpenStack projects: %s", projects) - self.unit.status = BlockedStatus( - "OpenStack integration is not supported yet. " - "Please remove the openstack-clouds-yaml config." - ) - return - runner_manager = self._get_runner_manager(state) self._reconcile_runners( runner_manager, @@ -614,6 +659,10 @@ def _on_reconcile_runners(self, _event: ReconcileRunnersEvent) -> None: event: Event of reconciling the runner state. """ state = self._setup_state() + + if self._block_on_openstack_config(state): + return + runner_manager = self._get_runner_manager(state) self._check_and_update_dependencies( @@ -737,8 +786,11 @@ def _on_stop(self, _: StopEvent) -> None: event: Event of stopping the charm. """ self._event_timer.disable_event_timer("reconcile-runners") - state = self._setup_state() + + if self._block_on_openstack_config(state): + return + runner_manager = self._get_runner_manager(state) runner_manager.flush(FlushMode.FLUSH_BUSY) diff --git a/src/charm_state.py b/src/charm_state.py index f0f511870..fd475f1b8 100644 --- a/src/charm_state.py +++ b/src/charm_state.py @@ -18,7 +18,7 @@ from pydantic import AnyHttpUrl, BaseModel, Field, ValidationError, root_validator from pydantic.networks import IPvAnyAddress -import openstack_manager +import openstack_cloud from errors import OpenStackInvalidConfigError from firewall import FirewallEntry from utilities import get_env_var @@ -26,7 +26,6 @@ logger = logging.getLogger(__name__) ARCHITECTURES_ARM64 = {"aarch64", "arm64"} - ARCHITECTURES_X86 = {"x86_64"} CHARM_STATE_PATH = Path("charm_state.json") @@ -217,7 +216,7 @@ class CharmConfig(BaseModel): denylist: list[FirewallEntry] dockerhub_mirror: str | None labels: tuple[str, ...] - openstack_clouds_yaml: dict | None + openstack_clouds_yaml: dict[str, dict] | None path: GithubPath reconcile_interval: int token: str @@ -285,16 +284,16 @@ def from_charm(cls, charm: CharmBase) -> "CharmConfig": try: openstack_clouds_yaml = yaml.safe_load(openstack_clouds_yaml_str) except yaml.YAMLError as exc: - logger.error("Invalid openstack-clouds-yaml config: %s.", exc) + logger.error("Invalid experimental-openstack-clouds-yaml config: %s.", exc) raise CharmConfigInvalidError( - "Invalid openstack-clouds-yaml config. Invalid yaml." + "Invalid experimental-openstack-clouds-yaml config. Invalid yaml." 
) from exc if (config_type := type(openstack_clouds_yaml)) is not dict: raise CharmConfigInvalidError( f"Invalid openstack config format, expected dict, got {config_type}" ) try: - openstack_manager.initialize(openstack_clouds_yaml) + openstack_cloud.initialize(openstack_clouds_yaml) except OpenStackInvalidConfigError as exc: logger.error("Invalid openstack config, %s.", exc) raise CharmConfigInvalidError( diff --git a/src/errors.py b/src/errors.py index 672222a14..bf8d4b794 100644 --- a/src/errors.py +++ b/src/errors.py @@ -158,3 +158,11 @@ class OpenStackInvalidConfigError(OpenStackError): class OpenStackUnauthorizedError(OpenStackError): """Represents an unauthorized connection to OpenStack.""" + + +class OpenstackImageBuildError(Exception): + """Exception representing an error during image build process.""" + + +class OpenstackInstanceLaunchError(Exception): + """Exception representing an error during instance launch process.""" diff --git a/src/github_client.py b/src/github_client.py index 1a719edb9..d3a2896a8 100644 --- a/src/github_client.py +++ b/src/github_client.py @@ -16,11 +16,12 @@ from typing_extensions import assert_never import errors -from charm_state import GithubOrg, GithubPath, GithubRepo +from charm_state import Arch, GithubOrg, GithubPath, GithubRepo from github_type import ( JobStats, RegistrationToken, RemoveToken, + RunnerApplication, RunnerApplicationList, SelfHostedRunner, ) @@ -61,24 +62,44 @@ def __init__(self, token: str): self._client = GhApi(token=self._token) @catch_http_errors - def get_runner_applications(self, path: GithubPath) -> RunnerApplicationList: - """Get list of runner applications available for download. + def get_runner_application( + self, path: GithubPath, arch: Arch, os: str = "linux" + ) -> RunnerApplication: + """Get runner application available for download for given arch. Args: path: GitHub repository path in the format '/', or the GitHub organization name. + arch: The runner architecture. + os: The operating system that the runner binary should run on. + + Raises: + RunnerBinaryError: If the runner application for given architecture and OS is not + found. + Returns: - List of runner applications. + The runner application. 
""" - runner_bins: RunnerApplicationList = [] + runner_applications: RunnerApplicationList = [] if isinstance(path, GithubRepo): - runner_bins = self._client.actions.list_runner_applications_for_repo( + runner_applications = self._client.actions.list_runner_applications_for_repo( owner=path.owner, repo=path.repo ) if isinstance(path, GithubOrg): - runner_bins = self._client.actions.list_runner_applications_for_org(org=path.org) - - return runner_bins + runner_applications = self._client.actions.list_runner_applications_for_org( + org=path.org + ) + logger.debug("Response of runner applications list: %s", runner_applications) + try: + return next( + bin + for bin in runner_applications + if bin["os"] == os and bin["architecture"] == arch + ) + except StopIteration as err: + raise errors.RunnerBinaryError( + f"Unable query GitHub runner binary information for {os} {arch}" + ) from err @catch_http_errors def get_runner_github_info(self, path: GithubPath) -> list[SelfHostedRunner]: diff --git a/src/github_type.py b/src/github_type.py index 4d71dd20a..8f232b2ce 100644 --- a/src/github_type.py +++ b/src/github_type.py @@ -8,7 +8,7 @@ from datetime import datetime from enum import Enum -from typing import List, Optional, TypedDict +from typing import List, Literal, Optional, TypedDict from pydantic import BaseModel from typing_extensions import NotRequired @@ -21,6 +21,8 @@ class GitHubRunnerStatus(Enum): OFFLINE = "offline" +# See response schema for +# https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#list-runner-applications-for-an-organization class RunnerApplication(TypedDict, total=False): """Information on the runner application. @@ -34,8 +36,8 @@ class RunnerApplication(TypedDict, total=False): sha256_check_sum: SHA256 Checksum of the runner application. """ - os: str - architecture: str + os: Literal["linux", "win", "osx"] + architecture: Literal["arm", "arm64", "x64"] download_url: str filename: str temp_download_token: NotRequired[str] diff --git a/src/openstack_cloud/__init__.py b/src/openstack_cloud/__init__.py new file mode 100644 index 000000000..3e87cf695 --- /dev/null +++ b/src/openstack_cloud/__init__.py @@ -0,0 +1,67 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Module for managing Openstack cloud.""" + +from pathlib import Path +from typing import TypedDict, cast + +import yaml + +from errors import OpenStackInvalidConfigError + +CLOUDS_YAML_PATH = Path(Path.home() / ".config/openstack/clouds.yaml") + + +class CloudConfig(TypedDict): + """The parsed clouds.yaml configuration dictionary. + + Attributes: + clouds: A mapping of key "clouds" to cloud name mapped to cloud configuration. + """ + + clouds: dict[str, dict] + + +def _validate_cloud_config(cloud_config: dict) -> CloudConfig: + """Validate the format of the cloud configuration. + + Args: + cloud_config: The configuration in clouds.yaml format to validate. + + Raises: + InvalidConfigError: if the format of the config is invalid. + """ + # dict of format: {clouds: : } + try: + clouds = list(cloud_config["clouds"].keys()) + except KeyError as exc: + raise OpenStackInvalidConfigError("Missing key 'clouds' from config.") from exc + if not clouds: + raise OpenStackInvalidConfigError("No clouds defined in clouds.yaml.") + return cast(CloudConfig, cloud_config) + + +def _write_config_to_disk(cloud_config: CloudConfig) -> None: + """Write the cloud configuration to disk. + + Args: + cloud_config: The configuration in clouds.yaml format to write to disk. 
+ """ + CLOUDS_YAML_PATH.parent.mkdir(parents=True, exist_ok=True) + CLOUDS_YAML_PATH.write_text(encoding="utf-8", data=yaml.dump(cloud_config)) + + +def initialize(cloud_config: dict) -> None: + """Initialize Openstack integration. + + Validates config and writes it to disk. + + Args: + cloud_config: The configuration in clouds.yaml format to apply. + + Raises: + InvalidConfigError: if the format of the config is invalid. + """ + valid_config = _validate_cloud_config(cloud_config) + _write_config_to_disk(valid_config) diff --git a/src/openstack_cloud/openstack_manager.py b/src/openstack_cloud/openstack_manager.py new file mode 100644 index 000000000..58bd30ad8 --- /dev/null +++ b/src/openstack_cloud/openstack_manager.py @@ -0,0 +1,401 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Module for handling interactions with OpenStack.""" +import json +import logging +import secrets +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Generator, Iterable, Literal, NamedTuple, Optional + +import jinja2 +import openstack +import openstack.compute.v2.server +import openstack.connection +import openstack.exceptions +import openstack.image.v2.image +from openstack.exceptions import OpenStackCloudException + +from charm_state import Arch, ProxyConfig, SSHDebugConnection, UnsupportedArchitectureError +from errors import ( + OpenstackImageBuildError, + OpenstackInstanceLaunchError, + OpenStackUnauthorizedError, + RunnerBinaryError, + SubprocessError, +) +from github_client import GithubClient +from github_type import RunnerApplication +from runner_type import GithubPath +from utilities import execute_command, retry + +logger = logging.getLogger(__name__) + +IMAGE_PATH_TMPL = "jammy-server-cloudimg-{architecture}-compressed.img" +IMAGE_NAME = "jammy" +BUILD_OPENSTACK_IMAGE_SCRIPT_FILENAME = "scripts/build-openstack-image.sh" + + +@contextmanager +def _create_connection( + cloud_config: dict[str, dict] +) -> Generator[openstack.connection.Connection, None, None]: + """Create a connection context managed object, to be used within with statements. + + This method should be called with a valid cloud_config. See _validate_cloud_config. + Also, this method assumes that the clouds.yaml exists on ~/.config/openstack/clouds.yaml. + See charm_state.py _write_openstack_config_to_disk. + + Args: + cloud_config: The configuration in clouds.yaml format to apply. + + Raises: + OpenStackUnauthorizedError: if the credentials provided is not authorized. + + Returns: + An openstack.connection.Connection object. + """ + clouds = list(cloud_config["clouds"].keys()) + if len(clouds) > 1: + logger.warning("Multiple clouds defined in clouds.yaml. Using the first one to connect.") + cloud_name = clouds[0] + + # api documents that keystoneauth1.exceptions.MissingRequiredOptions can be raised but + # I could not reproduce it. Therefore, no catch here for such exception. + try: + with openstack.connect(cloud=cloud_name) as conn: + conn.authorize() + yield conn + # pylint thinks this isn't an exception, but does inherit from Exception class. + except openstack.exceptions.HttpException as exc: # pylint: disable=bad-exception-cause + raise OpenStackUnauthorizedError("Unauthorized credentials.") from exc + + +class ProxyStringValues(NamedTuple): + """Wrapper class to proxy values to string. + + Attributes: + http: HTTP proxy address. + https: HTTPS proxy address. + no_proxy: Comma-separated list of hosts that should not be proxied. 
+ """ + + http: str + https: str + no_proxy: str + + +def _get_default_proxy_values(proxies: Optional[ProxyConfig] = None) -> ProxyStringValues: + """Get default proxy string values, empty string if None. + + Used to parse proxy values for file configurations, empty strings if None. + + Args: + proxies: The proxy configuration information. + + Returns: + Proxy strings if set, empty string otherwise. + """ + if not proxies: + return ProxyStringValues(http="", https="", no_proxy="") + return ProxyStringValues( + http=str(proxies.http or ""), + https=str(proxies.https or ""), + no_proxy=proxies.no_proxy or "", + ) + + +def _generate_docker_proxy_unit_file(proxies: Optional[ProxyConfig] = None) -> str: + """Generate docker proxy systemd unit file. + + Args: + proxies: HTTP proxy settings. + + Returns: + Contents of systemd-docker-proxy unit file. + """ + environment = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"), autoescape=True) + return environment.get_template("systemd-docker-proxy.j2").render(proxies=proxies) + + +def _generate_docker_client_proxy_config_json(http_proxy: str, https_proxy: str, no_proxy: str): + """Generate proxy config.json for docker client. + + Args: + http_proxy: HTTP proxy URL. + https_proxy: HTTPS proxy URL. + no_proxy: URLs to not proxy through. + + Returns: + Contents of docker config.json file. + """ + return json.dumps( + { + "proxies": { + "default": { + key: value + for key, value in ( + ("httpProxy", http_proxy), + ("httpsProxy", https_proxy), + ("noProxy", no_proxy), + ) + if value + } + } + } + ) + + +def _build_image_command( + runner_info: RunnerApplication, proxies: Optional[ProxyConfig] = None +) -> list[str]: + """Get command for building runner image. + + Args: + runner_info: The runner application to fetch runner tar download url. + proxies: HTTP proxy settings. + + Returns: + Command to execute to build runner image. + """ + docker_proxy_service_conf_content = _generate_docker_proxy_unit_file(proxies=proxies) + + proxy_values = _get_default_proxy_values(proxies=proxies) + + docker_client_proxy_content = _generate_docker_client_proxy_config_json( + http_proxy=proxy_values.http, + https_proxy=proxy_values.https, + no_proxy=proxy_values.no_proxy, + ) + + cmd = [ + "/usr/bin/bash", + BUILD_OPENSTACK_IMAGE_SCRIPT_FILENAME, + runner_info["download_url"], + proxy_values.http, + proxy_values.https, + proxy_values.no_proxy, + docker_proxy_service_conf_content, + docker_client_proxy_content, + ] + + return cmd + + +@dataclass +class InstanceConfig: + """The configuration values for creating a single runner instance. + + Args: + name: Name of the image to launch the GitHub runner instance with. + labels: The runner instance labels. + registration_token: Token for registering the runner on GitHub. + github_path: The GitHub repo/org path + openstack_image: The Openstack image to use to boot the instance with. + """ + + name: str + labels: Iterable[str] + registration_token: str + github_path: GithubPath + openstack_image: openstack.image.v2.image.Image + + +def _get_supported_runner_arch(arch: str) -> Literal["amd64", "arm64"]: + """Validate and return supported runner architecture. + + The supported runner architecture takes in arch value from Github supported architecture and + outputs architectures supported by ubuntu cloud images. 
+ See: https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners\ +/about-self-hosted-runners#architectures + and https://cloud-images.ubuntu.com/jammy/current/ + + Args: + arch: str + + Raises: + UnsupportedArchitectureError: If an unsupported architecture was passed. + + Returns: + The supported architecture. + """ + match arch: + case "x64": + return "amd64" + case "arm64": + return "arm64" + case _: + raise UnsupportedArchitectureError(arch) + + +def build_image( + arch: Arch, + cloud_config: dict[str, dict], + github_client: GithubClient, + path: GithubPath, + proxies: Optional[ProxyConfig] = None, +) -> str: + """Build and upload an image to OpenStack. + + Args: + cloud_config: The cloud configuration to connect OpenStack with. + github_client: The Github client to interact with Github API. + path: Github organisation or repository path. + proxies: HTTP proxy settings. + + Raises: + ImageBuildError: If there were errors building/creating the image. + + Returns: + The created OpenStack image id. + """ + try: + runner_application = github_client.get_runner_application(path=path, arch=arch) + except RunnerBinaryError as exc: + raise OpenstackImageBuildError("Failed to fetch runner application.") from exc + + try: + execute_command(_build_image_command(runner_application, proxies), check_exit=True) + except SubprocessError as exc: + raise OpenstackImageBuildError("Failed to build image.") from exc + + try: + runner_arch = runner_application["architecture"] + image_arch = _get_supported_runner_arch(arch=runner_arch) + except UnsupportedArchitectureError as exc: + raise OpenstackImageBuildError(f"Unsupported architecture {runner_arch}") from exc + + try: + with _create_connection(cloud_config) as conn: + existing_image: openstack.image.v2.image.Image + for existing_image in conn.search_images(name_or_id=IMAGE_NAME): + # images with same name (different ID) can be created and will error during server + # instantiation. + if not conn.delete_image(name_or_id=existing_image.id, wait=True): + raise OpenstackImageBuildError( + "Failed to delete duplicate image on Openstack." + ) + image: openstack.image.v2.image.Image = conn.create_image( + name=IMAGE_NAME, + filename=IMAGE_PATH_TMPL.format(architecture=image_arch), + wait=True, + ) + return image.id + except OpenStackCloudException as exc: + raise OpenstackImageBuildError("Failed to upload image.") from exc + + +def create_instance_config( + unit_name: str, + openstack_image: openstack.image.v2.image.Image, + path: GithubPath, + github_client: GithubClient, +) -> InstanceConfig: + """Create an instance config from charm data. + + Args: + unit_name: The charm unit name. + image: Ubuntu image flavor. + path: Github organisation or repository path. + github_client: The Github client to interact with Github API. + """ + app_name, unit_num = unit_name.rsplit("/", 1) + suffix = secrets.token_hex(12) + registration_token = github_client.get_runner_registration_token(path=path) + return InstanceConfig( + name=f"{app_name}-{unit_num}-{suffix}", + labels=(app_name, "jammy"), + registration_token=registration_token, + github_path=path, + openstack_image=openstack_image, + ) + + +def _generate_runner_env( + templates_env: jinja2.Environment, + proxies: Optional[ProxyConfig] = None, + dockerhub_mirror: Optional[str] = None, + ssh_debug_connections: list[SSHDebugConnection] | None = None, +) -> str: + """Generate Github runner .env file contents. + + Args: + templates_env: The jinja template environment. 
+ proxies: Proxy values to enable on the Github runner. + dockerhub_mirror: The url to Dockerhub to reduce rate limiting. + ssh_debug_connections: Tmate SSH debug connection information to load as environment vars. + + Returns: + The .env contents to be loaded by Github runner. + """ + return templates_env.get_template("env.j2").render( + proxies=proxies, + pre_job_script="", + dockerhub_mirror=dockerhub_mirror or "", + ssh_debug_info=(secrets.choice(ssh_debug_connections) if ssh_debug_connections else None), + ) + + +def _generate_cloud_init_userdata( + templates_env: jinja2.Environment, instance_config: InstanceConfig, runner_env: str +) -> str: + """Generate cloud init userdata to launch at startup. + + Args: + templates_env: The jinja template environment. + instance_config: The configuration values for Openstack instance to launch. + runner_env: The contents of .env to source when launching Github runner. + + Returns: + The cloud init userdata script. + """ + return templates_env.get_template("openstack-userdata.sh.j2").render( + github_url=f"https://github.com/{instance_config.github_path.path()}", + token=instance_config.registration_token, + instance_labels=",".join(instance_config.labels), + instance_name=instance_config.name, + env_contents=runner_env, + ) + + +@retry(tries=5, delay=5, max_delay=60, backoff=2, local_logger=logger) +def create_instance( + cloud_config: dict[str, dict], + instance_config: InstanceConfig, + proxies: Optional[ProxyConfig] = None, + dockerhub_mirror: Optional[str] = None, + ssh_debug_connections: list[SSHDebugConnection] | None = None, +) -> None: + """Create an OpenStack instance. + + Args: + cloud_config: The cloud configuration to connect Openstack with. + instance_config: The configuration values for Openstack instance to launch. + + Raises: + OpenstackInstanceLaunchError: if any errors occurred while launching Openstack instance. + """ + environment = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"), autoescape=True) + + env_contents = _generate_runner_env( + templates_env=environment, + proxies=proxies, + dockerhub_mirror=dockerhub_mirror, + ssh_debug_connections=ssh_debug_connections, + ) + cloud_userdata = _generate_cloud_init_userdata( + templates_env=environment, instance_config=instance_config, runner_env=env_contents + ) + + try: + with _create_connection(cloud_config) as conn: + conn.create_server( + name=instance_config.name, + image=instance_config.openstack_image, + flavor="m1.small", + userdata=cloud_userdata, + wait=True, + ) + except OpenStackCloudException as exc: + raise OpenstackInstanceLaunchError("Failed to launch instance.") from exc diff --git a/src/openstack_manager.py b/src/openstack_manager.py deleted file mode 100644 index 06e6203b5..000000000 --- a/src/openstack_manager.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -"""Module for handling interactions with OpenStack.""" -import logging -from pathlib import Path - -import keystoneauth1.exceptions.http -import openstack -import openstack.connection -import openstack.exceptions -import yaml -from openstack.identity.v3.project import Project - -from errors import OpenStackInvalidConfigError, OpenStackUnauthorizedError - -logger = logging.getLogger(__name__) - -CLOUDS_YAML_PATH = Path(Path.home() / ".config/openstack/clouds.yaml") - - -def _validate_cloud_config(cloud_config: dict) -> None: - """Validate the format of the cloud configuration. 
- - Args: - cloud_config: The configuration in clouds.yaml format to validate. - - Raises: - InvalidConfigError: if the format of the config is invalid. - """ - # dict of format: {clouds: : } - try: - clouds = list(cloud_config["clouds"].keys()) - except KeyError as exc: - raise OpenStackInvalidConfigError("Invalid clouds.yaml.") from exc - - if not clouds: - raise OpenStackInvalidConfigError("No clouds defined in clouds.yaml.") - - -def _write_config_to_disk(cloud_config: dict) -> None: - """Write the cloud configuration to disk. - - Args: - cloud_config: The configuration in clouds.yaml format to write to disk. - """ - CLOUDS_YAML_PATH.parent.mkdir(parents=True, exist_ok=True) - CLOUDS_YAML_PATH.write_text(encoding="utf-8", data=yaml.dump(cloud_config)) - - -def _create_connection(cloud_config: dict) -> openstack.connection.Connection: - """Create a connection object. - - This method should be called with a valid cloud_config. See def _validate_cloud_config. - Also, this method assumes that the clouds.yaml exists on CLOUDS_YAML_PATH. See def - _write_config_to_disk. - - Args: - cloud_config: The configuration in clouds.yaml format to apply. - - Raises: - InvalidConfigError: if the config has not all required information. - - Returns: - An openstack.connection.Connection object. - """ - clouds = list(cloud_config["clouds"].keys()) - if len(clouds) > 1: - logger.warning("Multiple clouds defined in clouds.yaml. Using the first one to connect.") - cloud_name = clouds[0] - - # api documents that keystoneauth1.exceptions.MissingRequiredOptions can be raised but - # I could not reproduce it. Therefore, no catch here. - return openstack.connect(cloud_name) - - -def initialize(cloud_config: dict) -> None: - """Initialize Openstack integration. - - Validates config and writes it to disk. - - Args: - cloud_config: The configuration in clouds.yaml format to apply. - - Raises: - InvalidConfigError: if the format of the config is invalid. - """ - _validate_cloud_config(cloud_config) - _write_config_to_disk(cloud_config) - - -def list_projects(cloud_config: dict) -> list[Project]: - """List all projects in the OpenStack cloud. - - The purpose of the method is just to try out openstack integration and - it may be removed in the future. - - It currently returns objects directly from the sdk, - which may not be ideal (mapping to domain objects may be preferable). - - Returns: - A list of projects. - """ - conn = _create_connection(cloud_config) - try: - projects = conn.list_projects() - logger.debug("OpenStack connection successful.") - logger.debug("Projects: %s", projects) - # pylint thinks this isn't an exception - except keystoneauth1.exceptions.http.Unauthorized as exc: - raise OpenStackUnauthorizedError( # pylint: disable=bad-exception-cause - "Unauthorized to connect to OpenStack." - ) from exc - - return projects diff --git a/src/runner_manager.py b/src/runner_manager.py index ef6042249..8a8992b1c 100644 --- a/src/runner_manager.py +++ b/src/runner_manager.py @@ -42,7 +42,7 @@ logger = logging.getLogger(__name__) -BUILD_IMAGE_SCRIPT_FILENAME = Path("scripts/build-image.sh") +BUILD_IMAGE_SCRIPT_FILENAME = Path("scripts/build-lxd-image.sh") IssuedMetricEventsStats = dict[Type[metrics.Event], int] @@ -122,22 +122,19 @@ def get_latest_runner_bin_url(self, os_name: str = "linux") -> RunnerApplication Args: os_name: Name of operating system. + Raises: + RunnerBinaryError: If an error occurred while fetching runner application info. + Returns: Information on the runner application. 
""" - runner_bins = self._clients.github.get_runner_applications(self.config.path) - - logger.debug("Response of runner binary list: %s", runner_bins) - try: - arch = self.config.charm_state.arch.value - return next( - bin for bin in runner_bins if bin["os"] == os_name and bin["architecture"] == arch + return self._clients.github.get_runner_application( + path=self.config.path, arch=self.config.charm_state.arch.value, os=os_name ) - except StopIteration as err: - raise RunnerBinaryError( - f"Unable query GitHub runner binary information for {os_name} {arch}" - ) from err + except RunnerBinaryError: + logger.error("Failed to get runner application info.") + raise @retry(tries=5, delay=30, local_logger=logger) def update_runner_bin(self, binary: RunnerApplication) -> None: diff --git a/templates/openstack-userdata.sh.j2 b/templates/openstack-userdata.sh.j2 new file mode 100644 index 000000000..fb7f0b7f6 --- /dev/null +++ b/templates/openstack-userdata.sh.j2 @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +# Write .env contents +su - ubuntu -c 'cd ~/actions-runner && echo "{{ env_contents }}" > .env' + +# Create the runner and start the configuration experience +su - ubuntu -c "cd ~/actions-runner && ./config.sh \ + --url {{ github_url }} \ + --token {{ token }} --ephemeral --unattended \ + --labels {{ instance_labels }} --name {{ instance_name }}" + +# Run runner +su - ubuntu -c "cd ~/actions-runner && /home/ubuntu/actions-runner/run.sh" diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index f5a263a27..e9f631c94 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -10,6 +10,8 @@ from time import sleep from typing import Any, AsyncIterator, Iterator, Optional +import openstack +import openstack.connection import pytest import pytest_asyncio import yaml @@ -22,6 +24,7 @@ from juju.model import Model from pytest_operator.plugin import OpsTest +from charm_state import OPENSTACK_CLOUDS_YAML_CONFIG_NAME from github_client import GithubClient from tests.integration.helpers import ( deploy_github_runner_charm, @@ -48,11 +51,16 @@ def app_name() -> str: @pytest.fixture(scope="module") -def charm_file(pytestconfig: pytest.Config, loop_device: Optional[str]) -> str: +def charm_file( + pytestconfig: pytest.Config, loop_device: Optional[str], openstack_clouds_yaml: Optional[str] +) -> str: """Path to the built charm.""" charm = pytestconfig.getoption("--charm-file") assert charm, "Please specify the --charm-file command line option" + if openstack_clouds_yaml: + return f"./{charm}" + lxd_profile_str = """config: security.nesting: true security.privileged: true @@ -151,6 +159,20 @@ def openstack_clouds_yaml(pytestconfig: pytest.Config) -> Optional[str]: return Path(clouds_yaml).read_text(encoding="utf-8") if clouds_yaml else None +@pytest.fixture(scope="module", name="openstack_connection") +def openstack_connection_fixture( + openstack_clouds_yaml: Optional[str], +) -> openstack.connection.Connection: + """The openstack connection instance.""" + assert openstack_clouds_yaml, "Openstack clouds yaml was not provided." 
+ + openstack_clouds_yaml_yaml = yaml.safe_load(openstack_clouds_yaml) + clouds_yaml_path = Path.cwd() / "clouds.yaml" + clouds_yaml_path.write_text(data=openstack_clouds_yaml, encoding="utf-8") + first_cloud = next(iter(openstack_clouds_yaml_yaml["clouds"].keys())) + return openstack.connect(first_cloud) + + @pytest.fixture(scope="module") def model(ops_test: OpsTest) -> Model: """Juju model used in the test.""" @@ -191,6 +213,42 @@ async def app_no_runner( return application +@pytest_asyncio.fixture(scope="module") +async def app_openstack_runner( + model: Model, + charm_file: str, + app_name: str, + path: str, + token: str, + http_proxy: str, + https_proxy: str, + no_proxy: str, + openstack_clouds_yaml: str, +) -> AsyncIterator[Application]: + """Application launching VMs and no runners.""" + application = await deploy_github_runner_charm( + model=model, + charm_file=charm_file, + app_name=app_name, + path=path, + token=token, + runner_storage="juju-storage", + http_proxy=http_proxy, + https_proxy=https_proxy, + no_proxy=no_proxy, + reconcile_interval=60, + constraints={ + "root-disk": 20 * 1024, + "cores": 4, + "mem": 16 * 1024, + "virt-type": "virtual-machine", + }, + config={OPENSTACK_CLOUDS_YAML_CONFIG_NAME: openstack_clouds_yaml}, + wait_idle=False, + ) + return application + + @pytest_asyncio.fixture(scope="module") async def app_one_runner(model: Model, app_no_runner: Application) -> AsyncIterator[Application]: """Application with a single runner. diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index c4342448b..b8dd5b93f 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -10,7 +10,8 @@ import typing from asyncio import sleep from datetime import datetime, timezone -from typing import Any, Awaitable, Callable, Union +from functools import partial +from typing import Any, Awaitable, Callable, ParamSpec, TypeVar, cast import github import juju.version @@ -34,6 +35,9 @@ DISPATCH_CRASH_TEST_WORKFLOW_FILENAME = "workflow_dispatch_crash_test.yaml" DISPATCH_FAILURE_TEST_WORKFLOW_FILENAME = "workflow_dispatch_failure_test.yaml" DISPATCH_WAIT_TEST_WORKFLOW_FILENAME = "workflow_dispatch_wait_test.yaml" +DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME = "e2e_test_run.yaml" + +DEFAULT_RUNNER_CONSTRAINTS = {"root-disk": 15} async def check_runner_binary_exists(unit: Unit) -> bool: @@ -320,6 +324,8 @@ async def deploy_github_runner_charm( https_proxy: str, no_proxy: str, reconcile_interval: int, + constraints: dict | None = None, + config: dict | None = None, wait_idle: bool = True, ) -> Application: """Deploy github-runner charm. @@ -334,6 +340,9 @@ async def deploy_github_runner_charm( https_proxy: HTTPS proxy for the application to use. no_proxy: No proxy configuration for the application. reconcile_interval: Time between reconcile for the application. + constraints: The custom machine constraints to use. See DEFAULT_RUNNER_CONSTRAINTS + otherwise. + config: Additional custom config to use. wait_idle: wait for model to become idle. 
""" subprocess.run(["sudo", "modprobe", "br_netfilter"]) @@ -351,20 +360,25 @@ async def deploy_github_runner_charm( if runner_storage == "juju-storage": storage["runner"] = {"pool": "rootfs", "size": 11} + default_config = { + "path": path, + "token": token, + "virtual-machines": 0, + "denylist": "10.10.0.0/16", + "test-mode": "insecure", + "reconcile-interval": reconcile_interval, + "runner-storage": runner_storage, + } + + if config: + default_config.update(config) + application = await model.deploy( charm_file, application_name=app_name, series="jammy", - config={ - "path": path, - "token": token, - "virtual-machines": 0, - "denylist": "10.10.0.0/16", - "test-mode": "insecure", - "reconcile-interval": reconcile_interval, - "runner-storage": runner_storage, - }, - constraints={"root-disk": 15}, + config=default_config, + constraints=constraints or DEFAULT_RUNNER_CONSTRAINTS, storage=storage, ) @@ -455,25 +469,35 @@ async def _assert_workflow_run_conclusion( ) -async def _wait_for_workflow_to_complete( - unit: Unit, workflow: Workflow, conclusion: str, start_time: datetime -): - """Wait for the workflow to complete. +def _get_latest_run( + workflow: Workflow, start_time: datetime, branch: Branch | None = None +) -> WorkflowRun | None: + """Get the latest run after start_time. Args: - unit: The unit which contains the runner. - workflow: The workflow to wait for. - conclusion: The workflow conclusion to wait for. - start_time: The start time of the workflow. + workflow: The workflow to get the latest run for. + start_time: The minimum start time of the run. + + Returns: + The latest workflow run if the workflow has started. None otherwise. """ - runner_name = await get_runner_name(unit) - await _wait_until_runner_is_used_up(runner_name, unit) - # Wait for the workflow log to contain the conclusion - await sleep(120) + try: + return workflow.get_runs( + branch=branch, created=f">={start_time.isoformat(timespec='seconds')}" + )[0] + except IndexError: + return None - await _assert_workflow_run_conclusion( - runner_name=runner_name, conclusion=conclusion, workflow=workflow, start_time=start_time - ) + +def _is_workflow_run_complete(run: WorkflowRun) -> bool: + """Wait for the workflow status to turn to complete. + + Args: + run: The workflow run to check status for. + """ + if run.update(): + return run.status == "completed" + return False async def dispatch_workflow( @@ -482,6 +506,7 @@ async def dispatch_workflow( github_repository: Repository, conclusion: str, workflow_id_or_name: str, + dispatch_input: dict | None = None, ): """Dispatch a workflow on a branch for the runner to run. @@ -503,18 +528,34 @@ async def dispatch_workflow( workflow = github_repository.get_workflow(id_or_file_name=workflow_id_or_name) # The `create_dispatch` returns True on success. - assert workflow.create_dispatch(branch, {"runner": app.name}) - await _wait_for_workflow_to_complete( - unit=app.units[0], workflow=workflow, conclusion=conclusion, start_time=start_time + assert workflow.create_dispatch( + branch, dispatch_input or {"runner": app.name} + ), "Failed to create workflow" + + # There is a very small chance of selecting a run not created by the dispatch above. 
+ run = await wait_for( + partial(_get_latest_run, workflow=workflow, start_time=start_time, branch=branch) ) + assert run, f"Run not found for workflow: {workflow.name} ({workflow.id})" + await wait_for(partial(_is_workflow_run_complete, run=run), timeout=60 * 30, check_interval=60) + + # The run object is updated by _is_workflow_run_complete function above. + assert ( + run.conclusion == conclusion + ), f"Unexpected run conclusion, expected: {conclusion}, got: {run.conclusion}" + return workflow +P = ParamSpec("P") +R = TypeVar("R") + + async def wait_for( - func: Callable[[], Union[Awaitable, Any]], + func: Callable[P, R], timeout: int = 300, check_interval: int = 10, -) -> Any: +) -> R: """Wait for function execution to become truthy. Args: @@ -532,7 +573,7 @@ async def wait_for( is_awaitable = inspect.iscoroutinefunction(func) while time.time() < deadline: if is_awaitable: - if result := await func(): + if result := await cast(Awaitable, func()): return result else: if result := func(): @@ -541,7 +582,7 @@ async def wait_for( # final check before raising TimeoutError. if is_awaitable: - if result := await func(): + if result := await cast(Awaitable, func()): return result else: if result := func(): diff --git a/tests/integration/test_openstack.py b/tests/integration/test_openstack.py index 370f7f89e..a7b088ab0 100644 --- a/tests/integration/test_openstack.py +++ b/tests/integration/test_openstack.py @@ -3,38 +3,55 @@ """Integration tests for OpenStack integration.""" - -import yaml +import openstack.connection +import pytest +from github.Branch import Branch +from github.Repository import Repository +from github.WorkflowRun import WorkflowRun from juju.application import Application from juju.model import Model +from openstack.compute.v2.server import Server -from charm_state import OPENSTACK_CLOUDS_YAML_CONFIG_NAME -from tests.integration.helpers import run_in_unit +from tests.integration.helpers import DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME, dispatch_workflow +# 2024/03/19 - The firewall configuration on openstack will be implemented by follow up PR on +# launching openstack instances. +@pytest.mark.xfail(reason="Firewall to be implemented") async def test_openstack_integration( - model: Model, app_no_runner: Application, openstack_clouds_yaml: str + model: Model, + app_openstack_runner: Application, + openstack_connection: openstack.connection.Connection, + github_repository: Repository, + test_github_branch: Branch, ): """ - arrange: Load the OpenStack clouds.yaml config. Parse project name from the config. - act: Set the openstack-clouds-yaml config in the charm - assert: Check the unit log for successful OpenStack connection and that the project is listed. + arrange: given a runner with openstack cloud configured. + act: + 1. when the e2e_test_run workflow is created. + 2. when the servers are listed. + assert: + 1. the workflow run completes successfully. + 2. a server with image name jammy is created. 
""" - openstack_clouds_yaml_yaml = yaml.safe_load(openstack_clouds_yaml) - first_cloud = next(iter(openstack_clouds_yaml_yaml["clouds"].values())) - project_name = first_cloud["auth"]["project_name"] - - await app_no_runner.set_config({OPENSTACK_CLOUDS_YAML_CONFIG_NAME: openstack_clouds_yaml}) - await model.wait_for_idle(apps=[app_no_runner.name]) - - unit = app_no_runner.units[0] - unit_name_with_dash = unit.name.replace("/", "-") - ret_code, unit_log = await run_in_unit( - unit=unit, - command=f"cat /var/log/juju/unit-{unit_name_with_dash}.log", + await model.wait_for_idle(apps=[app_openstack_runner.name], status="blocked", timeout=40 * 60) + + # 1. when the e2e_test_run workflow is created. + workflow = await dispatch_workflow( + app=app_openstack_runner, + branch=test_github_branch, + github_repository=github_repository, + conclusion="success", + workflow_id_or_name=DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME, + dispatch_input={"runner-tag": app_openstack_runner.name}, ) - assert ret_code == 0, "Failed to read the unit log" - assert unit_log is not None, "Failed to read the unit log, no stdout message" - assert "OpenStack connection successful." in unit_log - assert "OpenStack projects:" in unit_log - assert project_name in unit_log + # 1. the workflow run completes successfully. + workflow_run: WorkflowRun = workflow.get_runs()[0] + assert workflow_run.status == "success" + + # 2. when the servers are listed. + servers = openstack_connection.list_servers(detailed=True) + assert len(servers) == 1, f"Unexpected number of servers: {len(servers)}" + server: Server = servers[0] + # 2. a server with image name jammy is created. + assert server.image.name == "jammy" diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 8798cb524..fb5a97efd 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -1,12 +1,14 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
+import copy +import secrets import unittest.mock from pathlib import Path import pytest -import openstack_manager +import openstack_cloud from tests.unit.mock import MockGhapiClient, MockLxdClient, MockRepoPolicyComplianceClient @@ -34,7 +36,7 @@ def disk_usage_mock(total_disk): @pytest.fixture(autouse=True) def mocks(monkeypatch, tmp_path, exec_command, lxd_exec_command, runner_binary_path): - openstack_manager_mock = unittest.mock.MagicMock(spec=openstack_manager) + openstack_manager_mock = unittest.mock.MagicMock(spec=openstack_cloud) cron_path = tmp_path / "cron.d" cron_path.mkdir() @@ -56,7 +58,6 @@ def mocks(monkeypatch, tmp_path, exec_command, lxd_exec_command, runner_binary_p monkeypatch.setattr("charm.shutil", unittest.mock.MagicMock()) monkeypatch.setattr("charm.shutil.disk_usage", disk_usage_mock(30 * 1024 * 1024 * 1024)) monkeypatch.setattr("charm_state.CHARM_STATE_PATH", Path(tmp_path / "charm_state.json")) - monkeypatch.setattr("charm_state.openstack_manager", openstack_manager_mock) monkeypatch.setattr("event_timer.jinja2", unittest.mock.MagicMock()) monkeypatch.setattr("event_timer.execute_command", exec_command) monkeypatch.setattr( @@ -83,3 +84,57 @@ def mocks(monkeypatch, tmp_path, exec_command, lxd_exec_command, runner_binary_p "runner_manager.RepoPolicyComplianceClient", MockRepoPolicyComplianceClient ) monkeypatch.setattr("utilities.time", unittest.mock.MagicMock()) + + +@pytest.fixture(autouse=True, name="cloud_name") +def cloud_name_fixture() -> str: + """The testing cloud name.""" + return "microstack" + + +@pytest.fixture(autouse=True, name="clouds_yaml_path") +def clouds_yaml_path(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> Path: + """Mocked clouds.yaml path. + + Returns: + Path: Mocked clouds.yaml path. + """ + clouds_yaml_path = tmp_path / "clouds.yaml" + monkeypatch.setattr("openstack_cloud.CLOUDS_YAML_PATH", clouds_yaml_path) + return clouds_yaml_path + + +@pytest.fixture(name="clouds_yaml") +def clouds_yaml_fixture(cloud_name: str) -> dict: + """Testing clouds.yaml.""" + return { + "clouds": { + cloud_name: { + "auth": { + "auth_url": secrets.token_hex(16), + "project_name": secrets.token_hex(16), + "project_domain_name": secrets.token_hex(16), + "username": secrets.token_hex(16), + "user_domain_name": secrets.token_hex(16), + "password": secrets.token_hex(16), + } + } + } + } + + +@pytest.fixture(name="multi_clouds_yaml") +def multi_clouds_yaml_fixture(clouds_yaml: dict) -> dict: + """Testing clouds.yaml with multiple clouds.""" + multi_clouds_yaml = copy.deepcopy(clouds_yaml) + multi_clouds_yaml["clouds"]["unused_cloud"] = { + "auth": { + "auth_url": secrets.token_hex(16), + "project_name": secrets.token_hex(16), + "project_domain_name": secrets.token_hex(16), + "username": secrets.token_hex(16), + "user_domain_name": secrets.token_hex(16), + "password": secrets.token_hex(16), + } + } + return multi_clouds_yaml diff --git a/tests/unit/test_charm_state.py b/tests/unit/test_charm_state.py index fa2bf7a2a..3559bb7da 100644 --- a/tests/unit/test_charm_state.py +++ b/tests/unit/test_charm_state.py @@ -306,7 +306,7 @@ def test_openstack_config_invalid_yaml(): with pytest.raises(CharmConfigInvalidError) as exc: CharmState.from_charm(mock_charm) - assert "Invalid openstack-clouds-yaml config. Invalid yaml." in str(exc.value) + assert "Invalid experimental-openstack-clouds-yaml config. Invalid yaml." 
in str(exc.value) @pytest.mark.parametrize( diff --git a/tests/unit/test_openstack_cloud.py b/tests/unit/test_openstack_cloud.py new file mode 100644 index 000000000..e3b9870ca --- /dev/null +++ b/tests/unit/test_openstack_cloud.py @@ -0,0 +1,42 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +from pathlib import Path + +import pytest +import yaml + +import openstack_cloud +from errors import OpenStackInvalidConfigError + + +def test_initialize(clouds_yaml_path: Path, clouds_yaml: dict): + """ + arrange: Mocked clouds.yaml data and path. + act: Call initialize. + assert: The clouds.yaml file is written to disk. + """ + openstack_cloud.initialize(clouds_yaml) + + assert yaml.safe_load(clouds_yaml_path.read_text(encoding="utf-8")) == clouds_yaml + + +@pytest.mark.parametrize( + "invalid_yaml, expected_err_msg", + [ + pytest.param( + {"wrong-key": {"cloud_name": {"auth": {}}}}, "Missing key 'clouds' from config." + ), + pytest.param({}, "Missing key 'clouds' from config."), + pytest.param({"clouds": {}}, "No clouds defined in clouds.yaml."), + ], +) +def test_initialize_validation_error(invalid_yaml: dict, expected_err_msg): + """ + arrange: Mocked clouds.yaml data with invalid data. + act: Call initialize. + assert: InvalidConfigError is raised. + """ + + with pytest.raises(OpenStackInvalidConfigError) as exc: + openstack_cloud.initialize(invalid_yaml) + assert expected_err_msg in str(exc) diff --git a/tests/unit/test_openstack_manager.py b/tests/unit/test_openstack_manager.py index 8f15196e3..88f26b436 100644 --- a/tests/unit/test_openstack_manager.py +++ b/tests/unit/test_openstack_manager.py @@ -1,149 +1,483 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import secrets -from pathlib import Path +from typing import Optional from unittest.mock import MagicMock -import keystoneauth1.exceptions +import jinja2 +import openstack.exceptions import pytest -import yaml -from openstack.identity.v3 import project -from openstack.test import fakes -import openstack_manager -from errors import OpenStackInvalidConfigError, OpenStackUnauthorizedError - -INVALID_CLOUDS_YAML_ERR_MSG = "Invalid clouds.yaml." +from errors import OpenStackUnauthorizedError +from openstack_cloud import openstack_manager CLOUD_NAME = "microstack" -@pytest.fixture(autouse=True, name="clouds_yaml_path") -def clouds_yaml_path(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> Path: - """Mocked clouds.yaml path. +@pytest.fixture(autouse=True, name="openstack_connect_mock") +def mock_openstack_connect_fixture(monkeypatch: pytest.MonkeyPatch) -> MagicMock: + """Mock openstack.connect.""" + mock_connect = MagicMock(spec=openstack_manager.openstack.connect) + monkeypatch.setattr("openstack_cloud.openstack_manager.openstack.connect", mock_connect) + + return mock_connect - Returns: - Path: Mocked clouds.yaml path. 
- """ - clouds_yaml_path = tmp_path / "clouds.yaml" - monkeypatch.setattr("openstack_manager.CLOUDS_YAML_PATH", clouds_yaml_path) - return clouds_yaml_path +@pytest.fixture(name="mock_github_client") +def mock_github_client_fixture() -> MagicMock: + """Mocked github client that returns runner application.""" + mock_github_client = MagicMock(spec=openstack_manager.GithubClient) + mock_github_client.get_runner_application.return_value = openstack_manager.RunnerApplication( + os="linux", + architecture="x64", + download_url="http://test_url", + filename="test_filename", + temp_download_token="test_token", + ) + return mock_github_client -@pytest.fixture(name="projects") -def projects_fixture() -> list: - """Mocked list of projects.""" - return list(fakes.generate_fake_resources(project.Project, count=3)) +@pytest.fixture(name="patch_execute_command") +def patch_execute_command_fixture(monkeypatch: pytest.MonkeyPatch): + """Patch execute command to a MagicMock instance.""" + monkeypatch.setattr( + openstack_manager, + "execute_command", + MagicMock(spec=openstack_manager.execute_command), + ) -@pytest.fixture(autouse=True, name="openstack_connect_mock") -def mock_openstack(monkeypatch: pytest.MonkeyPatch, projects) -> MagicMock: - """Mock openstack.connect.""" - mock_connect = MagicMock(spec=openstack_manager.openstack.connect) - mock_connect.return_value.list_projects.return_value = projects - monkeypatch.setattr("openstack_manager.openstack.connect", mock_connect) - return mock_connect +@pytest.fixture(name="patched_create_connection_context") +def patched_create_connection_context_fixture(monkeypatch: pytest.MonkeyPatch): + """Return a mocked openstack connection context manager and patch create_connection.""" + mock_connection = MagicMock(spec=openstack_manager.openstack.connection.Connection) + monkeypatch.setattr( + openstack_manager, + "_create_connection", + MagicMock(spec=openstack_manager._create_connection, return_value=mock_connection), + ) + return mock_connection.__enter__() -def _create_clouds_yaml() -> dict: - """Create a fake clouds.yaml.""" - return { - "clouds": { - CLOUD_NAME: { - "auth": { - "auth_url": secrets.token_hex(16), - "project_name": secrets.token_hex(16), - "project_domain_name": secrets.token_hex(16), - "username": secrets.token_hex(16), - "user_domain_name": secrets.token_hex(16), - "password": secrets.token_hex(16), - } - } - } - } +def test__create_connection_error(clouds_yaml: dict, openstack_connect_mock: MagicMock): + """ + arrange: given a monkeypatched connection.authorize() function that raises an error. + act: when _create_connection is called. + assert: OpenStackUnauthorizedError is raised. + """ + connection_mock = MagicMock() + connection_context = MagicMock() + connection_context.authorize.side_effect = openstack.exceptions.HttpException + connection_mock.__enter__.return_value = connection_context + openstack_connect_mock.return_value = connection_mock + + with pytest.raises(OpenStackUnauthorizedError) as exc: + with openstack_manager._create_connection(cloud_config=clouds_yaml): + pass + assert "Unauthorized credentials" in str(exc) -def test_initialize(clouds_yaml_path: Path): + +def test__create_connection( + multi_clouds_yaml: dict, clouds_yaml: dict, cloud_name: str, openstack_connect_mock: MagicMock +): """ - arrange: Mocked clouds.yaml data and path. - act: Call initialize. - assert: The clouds.yaml file is written to disk. + arrange: given a cloud config yaml dict with 1. multiple clouds 2. single cloud. 
+    act: when _create_connection is called.
+    assert: connection with first cloud in the config is used.
     """
-    clouds_yaml = _create_clouds_yaml()
+    # 1. multiple clouds
+    with openstack_manager._create_connection(cloud_config=multi_clouds_yaml):
+        openstack_connect_mock.assert_called_with(cloud=CLOUD_NAME)
+
+    # 2. single cloud
+    with openstack_manager._create_connection(cloud_config=clouds_yaml):
+        openstack_connect_mock.assert_called_with(cloud=cloud_name)
 
-    openstack_manager.initialize(clouds_yaml)
 
-    assert yaml.safe_load(clouds_yaml_path.read_text(encoding="utf-8")) == clouds_yaml
+@pytest.mark.parametrize(
+    "arch",
+    [
+        pytest.param("s390x", id="s390x"),
+        pytest.param("riscv64", id="riscv64"),
+        pytest.param("ppc64el", id="ppc64el"),
+        pytest.param("armhf", id="armhf"),
+        pytest.param("test", id="test"),
+    ],
+)
+def test__get_supported_runner_arch_invalid_arch(arch: str):
+    """
+    arrange: given an unsupported architecture.
+    act: when _get_supported_runner_arch is called.
+    assert: UnsupportedArchitectureError is raised.
+    """
+    with pytest.raises(openstack_manager.UnsupportedArchitectureError) as exc:
+        openstack_manager._get_supported_runner_arch(arch=arch)
+
+    assert arch in str(exc)
 
 
 @pytest.mark.parametrize(
-    "invalid_yaml, expected_err_msg",
+    "arch, image_arch",
     [
-        pytest.param({"wrong-key": _create_clouds_yaml()["clouds"]}, INVALID_CLOUDS_YAML_ERR_MSG),
-        pytest.param({}, INVALID_CLOUDS_YAML_ERR_MSG),
-        pytest.param({"clouds": {}}, "No clouds defined in clouds.yaml."),
+        pytest.param("x64", "amd64", id="x64"),
+        pytest.param("arm64", "arm64", id="arm64"),
     ],
 )
+def test__get_supported_runner_arch(arch: str, image_arch: str):
+    """
+    arrange: given supported architectures.
+    act: when _get_supported_runner_arch is called.
+    assert: supported cloud image architecture type is returned.
+ """ + assert openstack_manager._get_supported_runner_arch(arch=arch) == image_arch + + +@pytest.mark.parametrize( + "proxy_config, dockerhub_mirror, ssh_debug_connections, expected_env_contents", + [ + pytest.param( + None, + None, + None, + """PATH=/home/ubuntu/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin + + + + + +LANG=C.UTF-8 +ACTIONS_RUNNER_HOOK_JOB_STARTED= +""", + id="all values empty", + ), + pytest.param( + openstack_manager.ProxyConfig( + http="http://test.internal", + https="https://test.internal", + no_proxy="http://no_proxy.internal", + ), + None, + None, + """PATH=/home/ubuntu/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin + +HTTP_PROXY=http://test.internal +http_proxy=http://test.internal + + +HTTPS_PROXY=https://test.internal +https_proxy=https://test.internal + + + +NO_PROXY=http://no_proxy.internal +no_proxy=http://no_proxy.internal + + +LANG=C.UTF-8 +ACTIONS_RUNNER_HOOK_JOB_STARTED= +""", + id="proxy value set", + ), + pytest.param( + None, + "http://dockerhub_mirror.test", + None, + """PATH=/home/ubuntu/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin + + + + + +DOCKERHUB_MIRROR=http://dockerhub_mirror.test +CONTAINER_REGISTRY_URL=http://dockerhub_mirror.test + +LANG=C.UTF-8 +ACTIONS_RUNNER_HOOK_JOB_STARTED= +""", + id="dockerhub mirror set", + ), + pytest.param( + None, + None, + [ + openstack_manager.SSHDebugConnection( + host="127.0.0.1", + port=10022, + rsa_fingerprint="SHA256:testrsa", + ed25519_fingerprint="SHA256:tested25519", + ) + ], + """PATH=/home/ubuntu/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin + + + + + +LANG=C.UTF-8 +ACTIONS_RUNNER_HOOK_JOB_STARTED= + +TMATE_SERVER_HOST=127.0.0.1 +TMATE_SERVER_PORT=10022 +TMATE_SERVER_RSA_FINGERPRINT=SHA256:testrsa +TMATE_SERVER_ED25519_FINGERPRINT=SHA256:tested25519 +""", + id="ssh debug connection set", + ), + pytest.param( + openstack_manager.ProxyConfig( + http="http://test.internal", + https="https://test.internal", + no_proxy="http://no_proxy.internal", + ), + "http://dockerhub_mirror.test", + [ + openstack_manager.SSHDebugConnection( + host="127.0.0.1", + port=10022, + rsa_fingerprint="SHA256:testrsa", + ed25519_fingerprint="SHA256:tested25519", + ) + ], + """PATH=/home/ubuntu/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin + +HTTP_PROXY=http://test.internal +http_proxy=http://test.internal + + +HTTPS_PROXY=https://test.internal +https_proxy=https://test.internal + + + +NO_PROXY=http://no_proxy.internal +no_proxy=http://no_proxy.internal + + +DOCKERHUB_MIRROR=http://dockerhub_mirror.test +CONTAINER_REGISTRY_URL=http://dockerhub_mirror.test + +LANG=C.UTF-8 +ACTIONS_RUNNER_HOOK_JOB_STARTED= + +TMATE_SERVER_HOST=127.0.0.1 +TMATE_SERVER_PORT=10022 +TMATE_SERVER_RSA_FINGERPRINT=SHA256:testrsa +TMATE_SERVER_ED25519_FINGERPRINT=SHA256:tested25519 +""", + id="all values set", + ), ], ) -def test_initialize_validation_error(invalid_yaml: dict, expected_err_msg): +def test__generate_runner_env( + proxy_config: Optional[openstack_manager.ProxyConfig], + dockerhub_mirror: Optional[str], + ssh_debug_connections: Optional[list[openstack_manager.SSHDebugConnection]], + expected_env_contents: str, +): + """ + arrange: given configuration values for runner environment. + act: when _generate_runner_env is called. + assert: expected .env contents are generated. 
+ """ + environment = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"), autoescape=True) + assert ( + openstack_manager._generate_runner_env( + templates_env=environment, + proxies=proxy_config, + dockerhub_mirror=dockerhub_mirror, + ssh_debug_connections=ssh_debug_connections, + ) + == expected_env_contents + ) + + +def test__build_image_command(): """ - arrange: Mocked clouds.yaml data with invalid data. - act: Call initialize. - assert: InvalidConfigError is raised. + arrange: given a mock Github runner application and proxy config. + act: when _build_image_command is called. + assert: command for build image bash script with args are returned. """ + test_runner_info = openstack_manager.RunnerApplication( + os="linux", + architecture="x64", + download_url=(test_download_url := "https://testdownloadurl.com"), + filename="test_filename", + temp_download_token=secrets.token_hex(16), + ) + test_proxy_config = openstack_manager.ProxyConfig( + http=(test_http_proxy := "http://proxy.test"), + https=(test_https_proxy := "https://proxy.test"), + no_proxy=(test_no_proxy := "http://no.proxy"), + use_aproxy=False, + ) + + command = openstack_manager._build_image_command( + runner_info=test_runner_info, proxies=test_proxy_config + ) + assert command == [ + "/usr/bin/bash", + openstack_manager.BUILD_OPENSTACK_IMAGE_SCRIPT_FILENAME, + test_download_url, + test_http_proxy, + test_https_proxy, + test_no_proxy, + f"""[Service] + +Environment="HTTP_PROXY={test_http_proxy}" + - with pytest.raises(OpenStackInvalidConfigError) as exc: - openstack_manager.initialize(invalid_yaml) - assert expected_err_msg in str(exc) +Environment="HTTPS_PROXY={test_https_proxy}" -def test_list_projects(clouds_yaml_path: Path, openstack_connect_mock: MagicMock, projects): +Environment="NO_PROXY={test_no_proxy}" +""", + f"""{{"proxies": {{"default": {{"httpProxy": "{test_http_proxy}", \ +"httpsProxy": "{test_https_proxy}", "noProxy": "{test_no_proxy}"}}}}}}""", + ], "Unexpected build image command." + + +def test_build_image_runner_binary_error(): + """ + arrange: given a mocked github client get_runner_application function that raises an error. + act: when build_image is called. + assert: ImageBuildError is raised. + """ + mock_github_client = MagicMock(spec=openstack_manager.GithubClient) + mock_github_client.get_runner_application.side_effect = openstack_manager.RunnerBinaryError + + with pytest.raises(openstack_manager.OpenstackImageBuildError) as exc: + openstack_manager.build_image( + arch=openstack_manager.Arch.X64, + cloud_config=MagicMock(), + github_client=mock_github_client, + path=MagicMock(), + ) + + assert "Failed to fetch runner application." in str(exc) + + +def test_build_image_script_error(monkeypatch: pytest.MonkeyPatch): """ - arrange: Mocked clouds.yaml data. - act: Call initialize and list_projects. - assert: openstack.connect and list_projects is called and the projects are returned. + arrange: given a monkeypatched execute_command function that raises an error. + act: when build_image is called. + assert: ImageBuildError is raised. 
""" - clouds_yaml = _create_clouds_yaml() + monkeypatch.setattr( + openstack_manager, + "execute_command", + MagicMock( + side_effect=openstack_manager.SubprocessError( + cmd=[], return_code=1, stdout="", stderr="" + ) + ), + ) - openstack_manager.initialize(clouds_yaml) - actual_projects = openstack_manager.list_projects(clouds_yaml) + with pytest.raises(openstack_manager.OpenstackImageBuildError) as exc: + openstack_manager.build_image( + arch=openstack_manager.Arch.X64, + cloud_config=MagicMock(), + github_client=MagicMock(), + path=MagicMock(), + ) - openstack_connect_mock.assert_called_once_with(CLOUD_NAME) - assert actual_projects == projects + assert "Failed to build image." in str(exc) -def test_list_projects_openstack_uses_first_cloud( - clouds_yaml_path: Path, openstack_connect_mock: MagicMock +@pytest.mark.usefixtures("patch_execute_command") +def test_build_image_runner_arch_error( + monkeypatch: pytest.MonkeyPatch, mock_github_client: MagicMock ): """ - arrange: Mocked clouds.yaml data with multiple clouds. - act: Call initialize and list_projects. - assert: openstack.connect is called with the first cloud name. + arrange: given _get_supported_runner_arch that raises unsupported architecture error. + act: when build_image is called. + assert: ImageBuildError error is raised with unsupported arch message. """ - clouds_yaml = _create_clouds_yaml() - clouds_yaml["clouds"]["microstack2"] = clouds_yaml["clouds"][CLOUD_NAME] + mock_get_supported_runner_arch = MagicMock( + spec=openstack_manager._get_supported_runner_arch, + side_effect=openstack_manager.UnsupportedArchitectureError(arch="x64"), + ) + monkeypatch.setattr( + openstack_manager, "_get_supported_runner_arch", mock_get_supported_runner_arch + ) - openstack_manager.initialize(clouds_yaml) - openstack_manager.list_projects(clouds_yaml) + with pytest.raises(openstack_manager.OpenstackImageBuildError) as exc: + openstack_manager.build_image( + arch=openstack_manager.Arch.X64, + cloud_config=MagicMock(), + github_client=mock_github_client, + path=MagicMock(), + ) - openstack_connect_mock.assert_called_once_with(CLOUD_NAME) + assert "Unsupported architecture" in str(exc) -def test_list_projects_missing_credentials_error(openstack_connect_mock: MagicMock): +@pytest.mark.usefixtures("patch_execute_command") +def test_build_image_delete_image_error( + mock_github_client: MagicMock, patched_create_connection_context: MagicMock +): """ - arrange: Mocked clouds.yaml data and openstack.list_projects raising keystone...Unauthorized. - act: Call initialize and list_projects. - assert: UnauthorizedError is raised. + arrange: given a mocked openstack connection that returns existing images and delete_image + that returns False (failed to delete image). + act: when build_image is called. + assert: ImageBuildError is raised. """ - cloud_yaml = _create_clouds_yaml() - openstack_connect_mock.return_value.list_projects.side_effect = ( - keystoneauth1.exceptions.http.Unauthorized + patched_create_connection_context.search_images.return_value = ( + MagicMock(spec=openstack_manager.openstack.image.v2.image.Image), ) + patched_create_connection_context.delete_image.return_value = False - openstack_manager.initialize(cloud_yaml) + with pytest.raises(openstack_manager.OpenstackImageBuildError) as exc: + openstack_manager.build_image( + arch=openstack_manager.Arch.X64, + cloud_config=MagicMock(), + github_client=mock_github_client, + path=MagicMock(), + ) + + assert "Failed to delete duplicate image on Openstack." 
in str(exc) - with pytest.raises(OpenStackUnauthorizedError) as exc: - openstack_manager.list_projects(cloud_yaml) - assert "Unauthorized to connect to OpenStack." in str(exc) - openstack_connect_mock.assert_called_once_with(CLOUD_NAME) +@pytest.mark.usefixtures("patch_execute_command") +def test_build_image_create_image_error( + patched_create_connection_context: MagicMock, mock_github_client: MagicMock +): + """ + arrange: given a mocked connection that raises OpenStackCloudException on create_image. + act: when build_image is called. + assert: ImageBuildError is raised. + """ + patched_create_connection_context.create_image.side_effect = ( + openstack_manager.OpenStackCloudException + ) + + with pytest.raises(openstack_manager.OpenstackImageBuildError) as exc: + openstack_manager.build_image( + arch=openstack_manager.Arch.X64, + cloud_config=MagicMock(), + github_client=mock_github_client, + path=MagicMock(), + proxies=None, + ) + + assert "Failed to upload image." in str(exc) + + +@pytest.mark.usefixtures("patch_execute_command") +def test_build_image(patched_create_connection_context: MagicMock, mock_github_client: MagicMock): + """ + arrange: given monkeypatched execute_command and mocked openstack connection. + act: when build_image is called. + assert: Openstack image is successfully created. + """ + patched_create_connection_context.search_images.return_value = ( + MagicMock(spec=openstack_manager.openstack.image.v2.image.Image), + MagicMock(spec=openstack_manager.openstack.image.v2.image.Image), + ) + + openstack_manager.build_image( + arch=openstack_manager.Arch.X64, + cloud_config=MagicMock(), + github_client=mock_github_client, + path=MagicMock(), + ) diff --git a/tests/unit/test_runner_manager.py b/tests/unit/test_runner_manager.py index c383bec76..ea6620ae6 100644 --- a/tests/unit/test_runner_manager.py +++ b/tests/unit/test_runner_manager.py @@ -143,8 +143,7 @@ def test_get_latest_runner_bin_url(runner_manager: RunnerManager, arch: Arch, ch download_url=(download_url := "https://www.example.com"), filename=(filename := "test_runner_binary"), ) - mock_gh_client.get_runner_applications.return_value = (app,) - mock_gh_client.get_runner_applications.return_value = (app,) + mock_gh_client.get_runner_application.return_value = app runner_manager._clients.github = mock_gh_client runner_bin = runner_manager.get_latest_runner_bin_url(os_name="linux") @@ -161,8 +160,7 @@ def test_get_latest_runner_bin_url_missing_binary(runner_manager: RunnerManager) assert: Error related to runner bin raised. """ runner_manager._clients.github = MagicMock() - runner_manager._clients.github.get_runner_applications.return_value = [] - runner_manager._clients.github.get_runner_applications.return_value = [] + runner_manager._clients.github.get_runner_application.side_effect = RunnerBinaryError with pytest.raises(RunnerBinaryError): runner_manager.get_latest_runner_bin_url(os_name="not_exist")