diff --git a/.codespellignore b/.codespellignore new file mode 100644 index 000000000..e69de29bb diff --git a/.copier-answers.yml b/.copier-answers.yml new file mode 100644 index 000000000..2f40b5bf7 --- /dev/null +++ b/.copier-answers.yml @@ -0,0 +1,5 @@ +# Changes here will be overwritten by Copier +_commit: c1ca71c +_src_path: gh:charmed-kubernetes/pytest-operator-template +charm_type: machine +class_name: GithubRunnerOperator diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..8ef84fcd4 --- /dev/null +++ b/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..466be6c0c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,57 @@ +name: Bug Report +description: File a bug report +labels: ["Type: Bug", "Status: Triage"] +body: + - type: markdown + attributes: + value: > + Thanks for taking the time to fill out this bug report! Before submitting your issue, please make + sure you are using the latest version of the charm. If not, please switch to this image prior to + posting your report to make sure it's not already solved. + - type: textarea + id: bug-description + attributes: + label: Bug Description + description: > + If applicable, add screenshots to help explain the problem you are facing. + validations: + required: true + - type: textarea + id: reproduction + attributes: + label: To Reproduce + description: > + Please provide a step-by-step instruction of how to reproduce the behavior. + placeholder: | + 1. `juju deploy ...` + 2. `juju relate ...` + 3. `juju status --relations` + validations: + required: true + - type: textarea + id: environment + attributes: + label: Environment + description: > + We need to know a bit more about the context in which you run the charm. 
+ - Are you running Juju locally, on lxd, in multipass or on some other platform? + - What track and channel you deployed the charm from (i.e. `latest/edge` or similar). + - Version of any applicable components, like the juju snap, the model controller, lxd, microk8s, and/or multipass. + validations: + required: true + - type: textarea + id: logs + attributes: + label: Relevant log output + description: > + Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. + Fetch the logs using `juju debug-log --replay` and `kubectl logs ...`. Additional details available in the juju docs + at https://juju.is/docs/olm/juju-logs + render: shell + validations: + required: true + - type: textarea + id: additional-context + attributes: + label: Additional context + diff --git a/.github/ISSUE_TEMPLATE/enhancement_proposal.yml b/.github/ISSUE_TEMPLATE/enhancement_proposal.yml new file mode 100644 index 000000000..b2348b9fb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement_proposal.yml @@ -0,0 +1,17 @@ +name: Enhancement Proposal +description: File an enhancement proposal +labels: ["Type: Enhancement", "Status: Triage"] +body: + - type: markdown + attributes: + value: > + Thanks for taking the time to fill out this enhancement proposal! Before submitting your issue, please make + sure there isn't already a prior issue concerning this. If there is, please join that discussion instead. + - type: textarea + id: enhancement-proposal + attributes: + label: Enhancement Proposal + description: > + Describe the enhancement you would like to see in as much detail as needed. 
+ validations: + required: true diff --git a/.github/workflows/build_charm.yaml b/.github/workflows/build_charm.yaml new file mode 100644 index 000000000..41122d32d --- /dev/null +++ b/.github/workflows/build_charm.yaml @@ -0,0 +1,32 @@ +name: Build charm + +on: + pull_request: + workflow_call: + workflow_dispatch: + +jobs: + get-runner-image: + name: Get runner image + uses: canonical/operator-workflows/.github/workflows/get_runner_image.yaml@main + build-charm: + name: Build the charm + needs: get-runner-image + runs-on: ${{ needs.get-runner-image.outputs.runs-on }} + steps: + - uses: actions/checkout@v3 + - name: Enable network in LXD + run: sudo iptables -I DOCKER-USER -j ACCEPT + - name: Add user to lxd group + run: sudo adduser runner lxd + - name: Initialize LXD + run: sudo -u runner lxd init --auto + - name: Install charmcraft + run: sudo snap install charmcraft --classic + - name: Pack charm + run: sudo -u runner charmcraft pack + - uses: actions/upload-artifact@v3 + with: + name: github-runner-charm + path: github-runner_*.charm + retention-days: 5 diff --git a/.github/workflows/comment.yaml b/.github/workflows/comment.yaml new file mode 100644 index 000000000..26ac226df --- /dev/null +++ b/.github/workflows/comment.yaml @@ -0,0 +1,12 @@ +name: Comment on the pull request + +on: + workflow_run: + workflows: ["Tests"] + types: + - completed + +jobs: + comment-on-pr: + uses: canonical/operator-workflows/.github/workflows/comment.yaml@main + secrets: inherit diff --git a/.github/workflows/end_to_end_test.yaml b/.github/workflows/end_to_end_test.yaml new file mode 100644 index 000000000..98510a44b --- /dev/null +++ b/.github/workflows/end_to_end_test.yaml @@ -0,0 +1,61 @@ +name: End to end tests + +on: + pull_request: + workflow_call: + workflow_dispatch: + +jobs: + e2e-test: + name: end to end test + runs-on: [self-hosted, linux, x64, e2e-runner] + steps: + - name: Echo hello world + run: echo "hello world" + - name: File permission for /usr/local/bin + run: 
ls -ld /usr/local/bin | grep drwxrwxrwx + - name: Test file permission for /usr/local/bin + run: touch /usr/local/bin/test_file + # "Install microk8s" step will test if the proxies settings are correct. + - name: Proxy set in /etc/environment + run: cat /etc/environment | grep HTTP_PROXY + # "Update apt in python docker container" step will test docker default proxy settings due to + # pulling the python image. + - name: Proxy set in docker daemon + run: sudo cat /etc/systemd/system/docker.service.d/http-proxy.conf | grep HTTP_PROXY + # "Update apt in python docker container" step will test docker client default proxy settings. + - name: Proxy set in docker client + run: cat /home/ubuntu/.docker/config.json | grep httpProxy + - name: Install microk8s + run: sudo snap install microk8s --classic + - name: Wait for microk8s + run: sudo microk8s status --wait-ready + - name: Deploy nginx for testing + run: sudo microk8s kubectl create deployment nginx --image=nginx + - name: Wait for nginx to be ready + run: sudo microk8s kubectl rollout status deployment/nginx --timeout=30m + - name: Update apt in python docker container + run: docker run python:3.10-slim apt update + dep-test: + # Test the dependencies in one job to avoid using too many runners at once. + name: dependency test + needs: e2e-test + runs-on: [self-hosted, linux, x64, e2e-runner] + steps: + - name: Docker version + run: docker version + - name: pip version + run: python3 -m pip --version + - name: npm version + run: npm --version + - name: shellcheck version + run: shellcheck --version + - name: jq version + run: jq --version + - name: yq version + run: yq --version + - name: install check-jsonschema + run: python3 -m pip install check-jsonschema + # Test program installed by pip. The directory `~/.local/bin` need to be added to PATH. 
+ - name: test check-jsonschema + run: check-jsonschema --version diff --git a/.github/workflows/promote_charm.yaml b/.github/workflows/promote_charm.yaml new file mode 100644 index 000000000..390b82d4e --- /dev/null +++ b/.github/workflows/promote_charm.yaml @@ -0,0 +1,26 @@ +name: Promote charm + +on: + workflow_dispatch: + inputs: + origin-channel: + type: choice + description: 'Origin Channel' + options: + - latest/edge + destination-channel: + type: choice + description: 'Destination Channel' + options: + - latest/stable + secrets: + CHARMHUB_TOKEN: + required: true + +jobs: + promote-charm: + uses: canonical/operator-workflows/.github/workflows/promote_charm.yaml@promote-charm-base + with: + origin-channel: ${{ github.event.inputs.origin-channel }} + destination-channel: ${{ github.event.inputs.destination-channel }} + secrets: inherit diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..ee5fa3eee --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,9 @@ +name: Tests + +on: + pull_request: + +jobs: + unit-tests: + uses: canonical/operator-workflows/.github/workflows/test.yaml@main + secrets: inherit diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..7a0647f3d --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +.tox/ +__pycache__/ +*.pyc +placeholders/ +*.charm +build/ +.coverage diff --git a/.jujuignore b/.jujuignore new file mode 100644 index 000000000..65f441027 --- /dev/null +++ b/.jujuignore @@ -0,0 +1,4 @@ +/venv +*.py[cod] +*.charm +/.github diff --git a/.licenserc.yaml b/.licenserc.yaml new file mode 100644 index 000000000..54f318b80 --- /dev/null +++ b/.licenserc.yaml @@ -0,0 +1,24 @@ +header: + license: + spdx-id: Apache-2.0 + copyright-owner: Canonical Ltd. + content: | + Copyright [year] [owner] + See LICENSE file for licensing details. 
+ paths: + - '**' + paths-ignore: + - '.github/**' + - '**/*.j2' + - '**/*.md' + - '**/*.txt' + - '.codespellignore' + - '.copier-answers.yml' + - '.flake8' + - '.jujuignore' + - '.gitignore' + - '.licenserc.yaml' + - 'CODEOWNERS' + - 'icon.svg' + - 'LICENSE' + comment: on-failure diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..844a0e558 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @canonical/is-charms diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..f82b34421 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 Canonical Ltd. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 000000000..2545f65e5 --- /dev/null +++ b/README.md @@ -0,0 +1,51 @@ +[![CharmHub Badge](https://charmhub.io/github-runner-operator/badge.svg)](https://charmhub.io/github-runner-operator) +[![Promote charm](https://github.com/canonical/github-runner-operator/actions/workflows/promote_charm.yaml/badge.svg)](https://github.com/canonical/github-runner-operator/actions/workflows/promote_charm.yaml) +[![Discourse Status](https://img.shields.io/discourse/status?server=https%3A%2F%2Fdiscourse.charmhub.io&style=flat&label=CharmHub%20Discourse)](https://discourse.charmhub.io) + +# GitHub runner + +## Description + +This machine charm creates self-hosted GitHub runners. Each unit of this charm will start a configurable number of LXD based containers and virtual +machines to host them. Each runner performs only one job, after which it unregisters from GitHub to ensure that each job runs in +a clean environment. + +The charm will periodically check the number of idle runners and spawn or destroy runners as necessary to match the number provided by configuration of +runners. Both the reconciliation interval and the number of runners to maintain are configurable. + +## Usage + +There are two mandatory configuration options - `path` and `token`. 
+* `path` determines the organization or repository that the runner will be registered with; +* `token` is a [GitHub Personal Access Token (PAT)](https://github.com/settings/tokens) (note: this is not the same as the token given in the Add a Runner instructions). The PAT token requires either: + * the **`repo`** ("Full control of private repositories") permission for +use with repositories or; + * both the **`repo`** and **`admin:org`** ("Full control of orgs and teams, read and write org projects") permissions for use with an organization. This is necessary because the charm will create and remove runners as needed to ensure that each runner executes only one job to protect jobs from leaking information to other jobs running on the same runner. + +The number of runners on a single unit is configured using two configuration options that can be both used at the same time: +* the `containers` option configures the number of LXD container runners; +* the `virtual-machines` option configures the number of LXD virtual machine runners. + +For example, if the charm is deployed with 2 units `juju deploy -n 2` and the `containers` value of 3 is in use, +there will be a total of 6 container based runners, three on each unit. + +## Reconciliation + +Each unit will periodically check the number of idle runners at the interval specified by `check-interval` to maintain the appropriate number. During the check, all the offline runners are unregistered from GitHub and corresponding containers or virtual machines are destroyed. + +If there are more idle runners than configured, the oldest idle runners are unregistered and destroyed. If there are less idle runners than configured, new runners are spawn and registered with GitHub. + +This means that each interval, each unit will make one or more API calls to GitHub. 
The interval may need to be adjusted if the number of units is large enough to trigger [Rate Limiting](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting). + +## Development + +This charm uses black and flake8 for formatting. Both run with the lint stage of tox. + + +## Testing + +Testing is run via tox and pytest. To run the full test run: + + tox + +Dependencies are installed in virtual environments. Integration testing requires a juju controller to execute. These tests will use the existing controller, creating an ephemeral model for the tests which is removed after the testing. If you do not already have a controller setup, you can configure a local instance via LXD, see the [upstream documentation](https://juju.is/docs/lxd-cloud) for details. \ No newline at end of file diff --git a/actions.yaml b/actions.yaml new file mode 100644 index 000000000..1556a9798 --- /dev/null +++ b/actions.yaml @@ -0,0 +1,11 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +check-runners: + description: Get info on active and registered runners +reconcile-runners: + description: Remove offline runners and replace any missing runners +flush-runners: + description: Clear out all runners and start a new set +update-runner-bin: + description: Update GitHub self-hosted runner binary diff --git a/charmcraft.yaml b/charmcraft.yaml new file mode 100644 index 000000000..90b86d4e5 --- /dev/null +++ b/charmcraft.yaml @@ -0,0 +1,20 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +type: charm +parts: + charm: + charm-python-packages: + - setuptools # for jinja2 + build-packages: + - git # for installing git source of pylxd + - libffi-dev # for cffi + - libssl-dev # for cryptography + - rust-all # for cryptography +bases: + - build-on: + - name: "ubuntu" + channel: "22.04" + run-on: + - name: "ubuntu" + channel: "22.04" diff --git a/config.yaml b/config.yaml new file mode 100644 index 000000000..e238e5381 --- /dev/null +++ b/config.yaml @@ -0,0 +1,54 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +options: + path: + type: string + default: "" + description: > + The organization or the repository to register the self-hosted runners. For repository the + path should be in the "{owner}/{repo}" format. + group: + type: string + default: "default" + description: > + The organization runner group to register the self-hosted runner under. This has no effect on + runners under a repository. + token: + type: string + default: "" + description: The GitHub Personal Access Token for registering the self-hosted runners. + virtual-machines: + type: int + default: 0 + description: > + The number of virtual machine runners. This charm will spawn or destroy virtual machines + runners to match this setting. + vm-cpu: + type: int + default: 2 + description: > + The number of CPUs used per virtual machine runner. + vm-memory: + type: string + default: 7GiB + description: > + Amount of memory to allocate per virtual machine runner. Positive integers with GiB suffix. + vm-disk: + type: string + default: 10GiB + description: > + Amount of disk space to allocate to root disk for virtual machine runner. Positive integers + with GiB suffix. + reconcile-interval: + type: int + default: 10 + description: > + Minutes between each reconciliation between the current state and the target state of the + runners. 
On reconciliation the charm polls the state of runners, such as, the number of + container runners, and see if actions are needed. + update-interval: + type: int + default: 60 + description: > + Minutes between each check for new versions of the runner binary. diff --git a/docs/how-to-guides/contribute.md b/docs/how-to-guides/contribute.md new file mode 100644 index 000000000..16c01c61f --- /dev/null +++ b/docs/how-to-guides/contribute.md @@ -0,0 +1,41 @@ +# How to contribute + +## Overview + +This document explains the processes and practices recommended for contributing enhancements to the GitHub Runner operator. + +* Generally, before developing enhancements to this charm, you should consider [opening an issue](https://github.com/canonical/github-runner-operator/issues) explaining your use case. +* If you would like to chat with us about your use-cases or proposed implementation, you can reach us at [Canonical Mattermost public channel](https://chat.charmhub.io/charmhub/channels/charm-dev) or [Discourse](https://discourse.charmhub.io/). +* Familiarizing yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library will help you a lot when working on new features or bug fixes. +* All enhancements require review before being merged. Code review typically examines + * code quality + * test coverage + * user experience for Juju administrators of this charm. +For more details, check our [contributing guide](https://github.com/canonical/is-charms-contributing-guide/blob/main/CONTRIBUTING.md). + +## Developing + +For any problems with this charm, please [report bugs here](https://github.com/canonical/github-runner-operator/issues). + +The code for this charm can be downloaded as follows: + +``` +git clone https://github.com/canonical/github-runner-operator.git +``` + +To run tests, run `tox` from within the charm code directory. 
+ +To build and deploy a local version of the charm, simply run: + +``` +charmcraft pack +# Ensure you're connected to a juju k8s model +# Configure the machine resource created by the model +juju set-model-constraints mem=8G cores=2 root-disk=50G +# Assuming you're on amd64 +juju deploy ./github-runner_ubuntu-22.04-amd64_ubuntu-20.04-amd64.charm +``` + +## Canonical Contributor Agreement + +Canonical welcomes contributions to the GitHub Runner Operator. Please check out our [contributor agreement](https://ubuntu.com/legal/contributors) if you’re interested in contributing to the solution. diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..064511f9e --- /dev/null +++ b/docs/index.md @@ -0,0 +1,5 @@ +GitHub self-hosted runner offer a way to run GitHub action workloads on non-GitHub servers. + +For information on GitHub Actions, see [this page](https://docs.github.com/en/actions). + +For information on self-hosted runner, see [this page](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). diff --git a/icon.svg b/icon.svg new file mode 100644 index 000000000..2a4d3f75c --- /dev/null +++ b/icon.svg @@ -0,0 +1,230 @@ + + + + + + + + + + diff --git a/metadata.yaml b/metadata.yaml new file mode 100644 index 000000000..9f912f1e5 --- /dev/null +++ b/metadata.yaml @@ -0,0 +1,23 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +name: github-runner +display-name: GitHub runner +maintainers: + - launchpad.net/~canonical-is-devops +docs: https://discourse.charmhub.io/t/github-runner-documentation-overview/7817 +issues: https://github.com/canonical/github-runner-operator/issues +source: https://github.com/canonical/github-runner-operator +summary: Creates a cluster of self-hosted github runners. +description: | + A [Juju](https://juju.is/) [charm](https://juju.is/docs/olm/charmed-operators) + deploying self-hosted GitHub runners. 
+ + Each unit of this charm will start a configurable number of LXD based containers + and virtual machines to host them. Each runner performs only one job, after which + it unregisters from GitHub to ensure that each job runs in a clean environment. + The charm will periodically check the number of idle runners and spawn or destroy them as + necessary to maintain the configured number of runners. Both the reconciliation interval and the + number of runners to maintain are configurable. +series: + - jammy diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..c72f8d0ef --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,47 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +[tool.bandit] +exclude_dirs = ["/venv/"] +[tool.bandit.assert_used] +skips = ["*/*test.py", "*/test_*.py"] + +# Testing tools configuration +[tool.coverage.run] +branch = true +omit = [ + # Contains interface for calling LXD. Tested in integration tests and end to end tests. + "src/lxd.py", + # Contains interface for calling repo policy compliance service. Tested in integration test and end to end tests. 
+    "src/repo_policy_compliance_client.py",
+]
+
+[tool.coverage.report]
+fail_under = 38
+show_missing = true
+
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+log_cli_level = "INFO"
+
+# Formatting tools configuration
+[tool.black]
+line-length = 99
+
+[tool.isort]
+line_length = 99
+profile = "black"
+
+# Linting tools configuration
+[tool.flake8]
+max-line-length = 99
+max-doc-length = 99
+max-complexity = 10
+exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
+docstring-convention = "google"
+
+[tool.mypy]
+ignore_missing_imports = true
+explicit_package_bases = true
+namespace_packages = true
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 000000000..76bbb1da0
--- /dev/null
+++ b/requirements.txt
+ghapi
+jinja2
+ops
+pylxd @ git+https://github.com/lxc/pylxd
+requests
+typing-extensions
+# Newer version does not work with default OpenSSL version on jammy.
+cryptography <= 38.0.4
diff --git a/script/deploy_runner.sh b/script/deploy_runner.sh
new file mode 100644
index 000000000..58ba5b0c8
--- /dev/null
+++ b/script/deploy_runner.sh
+#!/usr/bin/env bash
+
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+set -e
+
+rm -f github_runner.zip
+
+# Request a download URL for the artifact.
+echo "Requesting github runner charm download link..."
+DOWNLOAD_LOCATION=$(curl \
+    --head \
+    -H "Accept: application/vnd.github+json" \
+    -H "Authorization: Bearer ${GITHUB_TOKEN}"\
+    -H "X-GitHub-Api-Version: 2022-11-28" \
+    "https://api.github.com/repos/canonical/github-runner-operator/actions/artifacts/${GITHUB_RUNNER_ARTIFACT_ID}/zip" \
+    | grep location)
+# Parse out the URL from the format "Location: URL\r".
+read -ra LOCATION_ARRAY <<< "$DOWNLOAD_LOCATION"
+URL=$(echo "${LOCATION_ARRAY[1]}" | tr -d '\r')
+
+# Download the github runner charm.
+echo "Downloading github runner charm..."
+curl -o github_runner.zip "$URL"
+
+# Decompress the zip.
+echo "Decompressing github runner charm..." +unzip -p github_runner.zip > github-runner.charm +rm github_runner.zip + +# Deploy the charm. +juju deploy ./github-runner.charm --series=jammy e2e-runner +juju config e2e-runner token="$GITHUB_TOKEN" path=canonical/github-runner-operator virtual-machines=1 diff --git a/script/remove_offline_runners.py b/script/remove_offline_runners.py new file mode 100644 index 000000000..9df78d365 --- /dev/null +++ b/script/remove_offline_runners.py @@ -0,0 +1,76 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import sys + +import requests + +ORG = "" +TOKEN = "" + + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def get_runners(): + try: + response = requests.get( + f"https://api.github.com/orgs/{ORG}/actions/runners?per_page=100", + # "https://api.github.com/repos/canonical/github-runner-operator/actions/runners", + headers={ + "X-GitHub-Api-Version": "2022-11-28", + "Authorization": "Bearer " + TOKEN, + "Accept": "application/vnd.github+json", + }, + timeout=60, + ) + + response.raise_for_status() + runners = response.json() + logger.info("Runners found: %s", runners) + return runners + except requests.HTTPError as http_err: + sys.exit(f"HTTP error occurred: {http_err}") + except Exception as err: + sys.exit(f"Other error occurred: {err}") + + +def filter_offline_runners(runners): + offline_runners = [] + + runner_list = runners["runners"] + for runner in runner_list: + if runner["status"] == "offline": + offline_runners.append(runner) + + return offline_runners + + +def delete_runner(runner): + logger.info("Deleting runner with id %s", runner["id"]) + + try: + response = requests.delete( + f"https://api.github.com/orgs/{ORG}/actions/runners/{runner['id']}", + # f"https://api.github.com/repos/canonical/github-runner-operator/actions/runners/{runner['id']}", + headers={ + "X-GitHub-Api-Version": "2022-11-28", + "Authorization": "Bearer " + TOKEN, 
+                "Accept": "application/vnd.github+json",
+            },
+            timeout=60,
+        )
+
+        response.raise_for_status()
+    except requests.HTTPError as http_err:
+        sys.exit(f"HTTP error occurred: {http_err}")
+    except Exception as err:
+        sys.exit(f"Other error occurred: {err}")
+
+
+if __name__ == "__main__":
+    while offline_runners := filter_offline_runners(get_runners()):
+        for runner in offline_runners:
+            delete_runner(runner)
diff --git a/script/remove_runner.sh b/script/remove_runner.sh
new file mode 100644
index 000000000..635d6beca
--- /dev/null
+++ b/script/remove_runner.sh
+#!/usr/bin/env bash
+
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+juju remove-application e2e-runner --force --destroy-storage --no-wait
diff --git a/src/charm.py b/src/charm.py
new file mode 100755
index 000000000..9cc18d0f2
--- /dev/null
+++ b/src/charm.py
+#!/usr/bin/env python3
+
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Charm for creating and managing GitHub self-hosted runner instances."""
+
+import functools
+import logging
+import os
+import secrets
+import shutil
+import urllib.error
+from pathlib import Path
+from typing import TYPE_CHECKING, Callable, Dict, Optional, TypeVar
+
+import jinja2
+from ops.charm import (
+    ActionEvent,
+    CharmBase,
+    ConfigChangedEvent,
+    InstallEvent,
+    StopEvent,
+    UpgradeCharmEvent,
+)
+from ops.framework import EventBase, StoredState
+from ops.main import main
+from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
+
+from errors import RunnerError, SubprocessError
+from event_timer import EventTimer, TimerDisableError, TimerEnableError
+from github_type import GitHubRunnerStatus
+from runner_manager import RunnerManager, RunnerManagerConfig
+from runner_type import GitHubOrg, GitHubRepo, ProxySetting, VirtualMachineResources
+from utilities import execute_command, get_env_var, retry
+
+if TYPE_CHECKING:
+    from ops.model import JsonObject  # 
pragma: no cover + +logger = logging.getLogger(__name__) + + +class ReconcileRunnersEvent(EventBase): + """Event representing a periodic check to ensure runners are ok.""" + + +class UpdateRunnerBinEvent(EventBase): + """Event representing a periodic check for new versions of the runner binary.""" + + +CharmT = TypeVar("CharmT") +EventT = TypeVar("EventT") + + +def catch_unexpected_charm_errors( + func: Callable[[CharmT, EventT], None] +) -> Callable[[CharmT, EventT], None]: + """Catch unexpected errors in charm. + + This decorator is for unrecoverable errors and sets the charm to + `BlockedStatus`. + + Args: + func: Charm function to be decorated. + + Returns: + Decorated charm function with catching unexpected errors. + """ + + @functools.wraps(func) + def func_with_catch_unexpected_errors(self, event: EventT) -> None: + # Safe guard against unexpected error. + try: + func(self, event) + except Exception as err: # pylint: disable=broad-exception-caught + logger.exception(err) + self.unit.status = BlockedStatus(str(err)) + + return func_with_catch_unexpected_errors + + +def catch_unexpected_action_errors( + func: Callable[[CharmT, ActionEvent], None] +) -> Callable[[CharmT, ActionEvent], None]: + """Catch unexpected errors in actions. + + Args: + func: Action function to be decorated. + + Returns: + Decorated charm function with catching unexpected errors. + """ + + @functools.wraps(func) + def func_with_catch_unexpected_errors(self, event: ActionEvent) -> None: + # Safe guard against unexpected error. 
+ try: + func(self, event) + except Exception as err: # pylint: disable=broad-exception-caught + logger.exception(err) + event.fail(f"Failed to get runner info: {err}") + + return func_with_catch_unexpected_errors + + +class GithubRunnerCharm(CharmBase): + """Charm for managing GitHub self-hosted runners.""" + + _stored = StoredState() + + service_token_path = Path("service_token") + repo_check_web_service_path = Path("/home/ubuntu/repo_policy_compliance_service") + repo_check_web_service_script = Path("src/repo_policy_compliance_service.py") + repo_check_systemd_service = Path("/etc/systemd/system/repo-policy-compliance.service") + + def __init__(self, *args, **kargs) -> None: + """Construct the charm. + + Args: + args: List of arguments to be passed to the `CharmBase` class. + kargs: List of keyword arguments to be passed to the `CharmBase` + class. + """ + super().__init__(*args, **kargs) + + self._event_timer = EventTimer(self.unit.name) + + self._stored.set_default( + path=self.config["path"], # for detecting changes + runner_bin_url=None, + ) + + self.proxies: ProxySetting = {} + if http_proxy := get_env_var("JUJU_CHARM_HTTP_PROXY"): + self.proxies["http"] = http_proxy + if https_proxy := get_env_var("JUJU_CHARM_HTTPS_PROXY"): + self.proxies["https"] = https_proxy + if no_proxy := get_env_var("JUJU_CHARM_NO_PROXY"): + self.proxies["no_proxy"] = no_proxy + + self.service_token = None + + self.on.define_event("reconcile_runners", ReconcileRunnersEvent) + self.on.define_event("update_runner_bin", UpdateRunnerBinEvent) + + self.framework.observe(self.on.install, self._on_install) + self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm) + self.framework.observe(self.on.config_changed, self._on_config_changed) + self.framework.observe(self.on.reconcile_runners, self._on_reconcile_runners) + self.framework.observe(self.on.update_runner_bin, self._on_update_runner_bin) + self.framework.observe(self.on.stop, self._on_stop) + + 
self.framework.observe(self.on.check_runners_action, self._on_check_runners_action) + self.framework.observe(self.on.reconcile_runners_action, self._on_reconcile_runners_action) + self.framework.observe(self.on.flush_runners_action, self._on_flush_runners_action) + self.framework.observe(self.on.update_runner_bin_action, self._on_update_runner_bin) + + def _get_runner_manager( + self, token: Optional[str] = None, path: Optional[str] = None + ) -> Optional[RunnerManager]: + """Get a RunnerManager instance, or None if missing config. + + Args: + token: GitHub personal access token to manager the runners with. + path: GitHub repository path in the format '/', or the GitHub organization + name. + + Returns: + A instance of RunnerManager if the token and path configuration can be found. + """ + if token is None: + token = self.config["token"] + if path is None: + path = self.config["path"] + + if not token or not path: + return None + + if self.service_token is None: + self.service_token = self._get_service_token() + + if "/" in path: + paths = path.split("/") + if len(paths) != 2: + logger.error("Invalid path %s", path) + return None + + owner, repo = paths + path = GitHubRepo(owner=owner, repo=repo) + else: + path = GitHubOrg(org=path, group=self.config["group"]) + + app_name, unit = self.unit.name.rsplit("/", 1) + return RunnerManager( + app_name, + unit, + RunnerManagerConfig(path, token, "jammy", self.service_token), + proxies=self.proxies, + ) + + @catch_unexpected_charm_errors + def _on_install(self, _event: InstallEvent) -> None: + """Handle the installation of charm. + + Args: + event: Event of installing charm. + """ + self.unit.status = MaintenanceStatus("Installing packages") + + try: + # The `_start_services`, `_install_deps` includes retry. + self._install_deps() + self._start_services() + except SubprocessError as err: + logger.exception(err) + # The charm cannot proceed without dependencies. 
+ self.unit.status = BlockedStatus("Failed to install dependencies") + return + + runner_manager = self._get_runner_manager() + if runner_manager: + self.unit.status = MaintenanceStatus("Downloading runner binary") + try: + runner_info = runner_manager.get_latest_runner_bin_url() + logger.info( + "Downloading %s from: %s", runner_info.filename, runner_info.download_url + ) + self._stored.runner_bin_url = runner_info.download_url + runner_manager.update_runner_bin(runner_info) + # Safe guard against transient unexpected error. + except Exception as err: # pylint: disable=broad-exception-caught + logger.exception("Failed to update runner binary") + # Failure to download runner binary is a transient error. + # The charm automatically update runner binary on a schedule. + self.unit.status = MaintenanceStatus(f"Failed to update runner binary: {err}") + return + self.unit.status = MaintenanceStatus("Starting runners") + try: + self._reconcile_runners(runner_manager) + self.unit.status = ActiveStatus() + except RunnerError as err: + logger.exception("Failed to start runners") + self.unit.status = MaintenanceStatus(f"Failed to start runners: {err}") + else: + self.unit.status = BlockedStatus("Missing token or org/repo path config") + + @catch_unexpected_charm_errors + def _on_upgrade_charm(self, _event: UpgradeCharmEvent) -> None: + """Handle the update of charm. + + Args: + event: Event of charm upgrade. + """ + logger.info("Reinstalling dependencies...") + self._install_deps() + self._start_services() + + logger.info("Flushing the runners...") + runner_manager = self._get_runner_manager() + if not runner_manager: + return + + runner_manager.flush() + self._reconcile_runners(runner_manager) + + @catch_unexpected_charm_errors + def _on_config_changed(self, _event: ConfigChangedEvent) -> None: + """Handle the configuration change. + + Args: + event: Event of configuration change. 
+ """ + try: + self._event_timer.ensure_event_timer( + "update-runner-bin", self.config["update-interval"] + ) + self._event_timer.ensure_event_timer( + "reconcile-runners", self.config["reconcile-interval"] + ) + except TimerEnableError as ex: + logger.exception("Failed to start the event timer") + self.unit.status = BlockedStatus( + f"Failed to start timer for regular reconciliation and binary update checks: {ex}" + ) + + if self.config["path"] != self._stored.path: + prev_runner_manager = self._get_runner_manager( + path=str(self._stored.path) + ) # Casting for mypy checks. + if prev_runner_manager: + self.unit.status = MaintenanceStatus("Removing runners from old org/repo") + prev_runner_manager.flush() + self._stored.path = self.config["path"] + + runner_manager = self._get_runner_manager() + if runner_manager: + self.unit.status = ActiveStatus() + else: + self.unit.status = BlockedStatus("Missing token or org/repo path config") + + @catch_unexpected_charm_errors + def _on_update_runner_bin(self, _event: UpdateRunnerBinEvent) -> None: + """Handle checking update of runner binary event. + + Args: + event: Event of checking update of runner binary. + """ + runner_manager = self._get_runner_manager() + if not runner_manager: + return + try: + self.unit.status = MaintenanceStatus("Checking for runner updates") + runner_info = runner_manager.get_latest_runner_bin_url() + except urllib.error.URLError as err: + logger.exception("Failed to check for runner updates") + # Failure to download runner binary is a transient error. + # The charm automatically update runner binary on a schedule. + self.unit.status = MaintenanceStatus(f"Failed to check for runner updates: {err}") + return + + if runner_info.download_url != self._stored.runner_bin_url: + self.unit.status = MaintenanceStatus("Updating runner binary") + try: + runner_manager.update_runner_bin(runner_info) + # Safe guard against transient unexpected error. 
+ except Exception as err: # pylint: disable=broad-exception-caught + logger.exception("Failed to update runner binary") + # Failure to download runner binary is a transient error. + # The charm automatically update runner binary on a schedule. + self.unit.status = MaintenanceStatus(f"Failed to update runner binary: {err}") + return + self._stored.runner_bin_url = runner_info.download_url + + # Flush the non-busy runner and reconcile. + runner_manager.flush(flush_busy=False) + self._reconcile_runners(runner_manager) + + self.unit.status = ActiveStatus() + + @catch_unexpected_charm_errors + def _on_reconcile_runners(self, _event: ReconcileRunnersEvent) -> None: + """Handle the reconciliation of runners. + + Args: + event: Event of reconciling the runner state. + """ + if not RunnerManager.runner_bin_path.is_file(): + logger.warning("Unable to reconcile due to missing runner binary") + return + + runner_manager = self._get_runner_manager() + if not runner_manager: + self.unit.status = BlockedStatus("Missing token or org/repo path config") + return + self.unit.status = MaintenanceStatus("Reconciling runners") + try: + self._reconcile_runners(runner_manager) + # Safe guard against transient unexpected error. + except Exception as err: # pylint: disable=broad-exception-caught + logger.exception("Failed to reconcile runners") + self.unit.status = MaintenanceStatus(f"Failed to reconcile runners: {err}") + return + + self.unit.status = ActiveStatus() + + @catch_unexpected_action_errors + def _on_check_runners_action(self, event: ActionEvent) -> None: + """Handle the action of checking of runner state. + + Args: + event: Action event of checking runner states. 
+ """ + runner_manager = self._get_runner_manager() + if not runner_manager: + event.fail("Missing token or org/repo path config") + return + if runner_manager.runner_bin_path is None: + event.fail("Missing runner binary") + return + + online = 0 + offline = 0 + unknown = 0 + runner_names = [] + + runner_info = runner_manager.get_github_info() + + for runner in runner_info: + if runner.status == GitHubRunnerStatus.ONLINE.value: + online += 1 + runner_names.append(runner.name) + elif runner.status == GitHubRunnerStatus.OFFLINE.value: + offline += 1 + else: + # might happen if runner dies and GH doesn't notice immediately + unknown += 1 + event.set_results( + { + "online": online, + "offline": offline, + "unknown": unknown, + "runners": ", ".join(runner_names), + } + ) + + @catch_unexpected_action_errors + def _on_reconcile_runners_action(self, event: ActionEvent) -> None: + """Handle the action of reconcile of runner state. + + Args: + event: Action event of reconciling the runner. + """ + runner_manager = self._get_runner_manager() + if not runner_manager: + event.fail("Missing token or org/repo path config") + return + + delta = self._reconcile_runners(runner_manager) + + self._on_check_runners_action(event) + event.set_results(delta) + + @catch_unexpected_action_errors + def _on_flush_runners_action(self, event: ActionEvent) -> None: + """Handle the action of flushing all runner and reconciling afterwards. + + Args: + event: Action event of flushing all runners. + """ + runner_manager = self._get_runner_manager() + if not runner_manager: + event.fail("Missing token or org/repo path config") + return + + runner_manager.flush() + delta = self._reconcile_runners(runner_manager) + + self._on_check_runners_action(event) + event.set_results(delta) + + @catch_unexpected_charm_errors + def _on_stop(self, _: StopEvent) -> None: + """Handle the stopping of the charm. + + Args: + event: Event of stopping the charm. 
+ """ + try: + self._event_timer.disable_event_timer("update-runner-bin") + self._event_timer.disable_event_timer("reconcile-runners") + except TimerDisableError as ex: + logger.exception("Failed to stop the timer") + self.unit.status = BlockedStatus(f"Failed to stop charm event timer: {ex}") + + runner_manager = self._get_runner_manager() + if runner_manager: + try: + runner_manager.flush() + # Safe guard against unexpected error. + except Exception: # pylint: disable=broad-exception-caught + # Log but ignore error since we're stopping anyway. + logger.exception("Failed to clear runners") + + def _reconcile_runners(self, runner_manager: RunnerManager) -> Dict[str, "JsonObject"]: + """Reconcile the current runners state and intended runner state. + + Args: + runner_manager: For querying and managing the runner state. + + Returns: + Changes in runner number due to reconciling runners. + """ + virtual_machines_resources = VirtualMachineResources( + self.config["vm-cpu"], self.config["vm-memory"], self.config["vm-disk"] + ) + + virtual_machines = self.config["virtual-machines"] + + try: + delta_virtual_machines = runner_manager.reconcile( + virtual_machines, virtual_machines_resources + ) + return {"delta": {"virtual-machines": delta_virtual_machines}} + # Safe guard against transient unexpected error. + except Exception as err: # pylint: disable=broad-exception-caught + logger.exception("Failed to update runner binary") + # Failure to reconcile runners is a transient error. + # The charm automatically reconciles runners on a schedule. + self.unit.status = MaintenanceStatus(f"Failed to reconcile runners: {err}") + return {"delta": {"virtual-machines": 0}} + + @retry(tries=10, delay=15, max_delay=60, backoff=1.5) + def _install_deps(self) -> None: + """Install dependencies.""" + logger.info("Installing charm dependencies.") + + # Binding for snap, apt, and lxd init commands are not available so subprocess.run used. 
+ env = {} + if "http" in self.proxies: + env["HTTP_PROXY"] = self.proxies["http"] + env["http_proxy"] = self.proxies["http"] + if "https" in self.proxies: + env["HTTPS_PROXY"] = self.proxies["https"] + env["https_proxy"] = self.proxies["https"] + if "no_proxy" in self.proxies: + env["NO_PROXY"] = self.proxies["no_proxy"] + env["no_proxy"] = self.proxies["no_proxy"] + + execute_command(["/usr/bin/apt-get", "install", "-qy", "gunicorn", "python3-pip"]) + execute_command( + [ + "/usr/bin/pip", + "install", + "flask", + "git+https://github.com/canonical/repo-policy-compliance@main", + ], + env=env, + ) + + execute_command( + ["/usr/bin/apt-get", "remove", "-qy", "lxd", "lxd-client"], check_exit=False + ) + execute_command( + [ + "/usr/bin/apt-get", + "install", + "-qy", + "cpu-checker", + "libvirt-clients", + "libvirt-daemon-driver-qemu", + ], + ) + execute_command(["/usr/bin/snap", "install", "lxd", "--channel=latest/stable"]) + execute_command(["/usr/bin/snap", "refresh", "lxd", "--channel=latest/stable"]) + execute_command(["/snap/bin/lxd", "waitready"]) + execute_command(["/snap/bin/lxd", "init", "--auto"]) + execute_command(["/usr/bin/chmod", "a+wr", "/var/snap/lxd/common/lxd/unix.socket"]) + execute_command(["/snap/bin/lxc", "network", "set", "lxdbr0", "ipv6.address", "none"]) + logger.info("Finished installing charm dependencies.") + + @retry(tries=10, delay=15, max_delay=60, backoff=1.5) + def _start_services(self) -> None: + """Start services.""" + logger.info("Starting charm services...") + + if self.service_token is None: + self.service_token = self._get_service_token() + + # Move script to home directory + logger.info("Loading the repo policy compliance flask app...") + os.makedirs(self.repo_check_web_service_path, exist_ok=True) + shutil.copyfile( + self.repo_check_web_service_script, + self.repo_check_web_service_path / "app.py", + ) + + # Move the systemd service. 
+ logger.info("Loading the repo policy compliance gunicorn systemd service...") + environment = jinja2.Environment( + loader=jinja2.FileSystemLoader("templates"), autoescape=True + ) + + service_content = environment.get_template("repo-policy-compliance.service.j2").render( + working_directory=str(self.repo_check_web_service_path), + charm_token=self.service_token, + github_token=self.config["token"], + proxies=self.proxies, + ) + self.repo_check_systemd_service.write_text(service_content, encoding="utf-8") + + execute_command(["/usr/bin/systemctl", "start", "repo-policy-compliance"]) + execute_command(["/usr/bin/systemctl", "enable", "repo-policy-compliance"]) + + logger.info("Finished starting charm services") + + def _get_service_token(self) -> str: + """Get the service token. + + Returns: + The service token. + """ + logger.info("Getting the secret token...") + if self.service_token_path.exists(): + logger.info("Found existing token file.") + service_token = self.service_token_path.read_text(encoding="utf-8") + else: + logger.info("Generate new token.") + service_token = secrets.token_hex(16) + self.service_token_path.write_text(service_token, encoding="utf-8") + + return service_token + + +if __name__ == "__main__": + main(GithubRunnerCharm) diff --git a/src/errors.py b/src/errors.py new file mode 100644 index 000000000..7f8949410 --- /dev/null +++ b/src/errors.py @@ -0,0 +1,72 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Errors used by the charm.""" +from __future__ import annotations + +from typing import Union + + +class RunnerError(Exception): + """Generic runner error as base exception.""" + + +class RunnerExecutionError(RunnerError): + """Error for executing commands on runner.""" + + +class RunnerFileLoadError(RunnerError): + """Error for loading file on runner.""" + + +class RunnerCreateError(RunnerError): + """Error for runner creation failure.""" + + +class RunnerRemoveError(RunnerError): + """Error for runner removal failure.""" + + +class RunnerStartError(RunnerError): + """Error for runner start failure.""" + + +class RunnerBinaryError(RunnerError): + """Error of getting runner binary.""" + + +class LxdError(Exception): + """Error for executing LXD actions.""" + + +class SubprocessError(Exception): + """Error for Subprocess calls. + + Attrs: + cmd: Command in list form. + return_code: Return code of the subprocess. + stdout: Content of stdout of the subprocess. + stderr: Content of stderr of the subprocess. + """ + + def __init__( + self, + cmd: list[str], + return_code: int, + stdout: Union[bytes, str], + stderr: Union[bytes, str], + ): + """Construct the subprocess error. + + Args: + cmd: Command in list form. + return_code: Return code of the subprocess. + stdout: Content of stdout of the subprocess. + stderr: Content of stderr of the subprocess. + """ + super().__init__(f"[{' '.join(cmd)}] failed with return code {return_code!r}: {stderr!r}") + + self.cmd = cmd + self.return_code = return_code + self.stdout = stdout + self.stderr = stderr diff --git a/src/event_timer.py b/src/event_timer.py new file mode 100644 index 000000000..4b77fa1d0 --- /dev/null +++ b/src/event_timer.py @@ -0,0 +1,122 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""EventTimer for scheduling dispatch of juju event on regular intervals.""" + +import subprocess # nosec B404 +from pathlib import Path +from typing import Optional, TypedDict + +from jinja2 import Environment, FileSystemLoader + + +class TimerEnableError(Exception): + """Raised when unable to enable a event timer.""" + + +class TimerDisableError(Exception): + """Raised when unable to disable a event timer.""" + + +class EventConfig(TypedDict): + """Configuration used by service and timer templates.""" + + event: str + interval: float + jitter: float + timeout: float + unit: str + + +class EventTimer: + """Manages the timer to emit juju events at regular intervals. + + Attributes: + unit_name (str): Name of the juju unit to emit events to. + """ + + _systemd_path = Path("/etc/systemd/system") + + def __init__(self, unit_name: str): + """Construct the timer manager. + + Args: + unit_name: Name of the juju unit to emit events to. + """ + self.unit_name = unit_name + self._jinja = Environment(loader=FileSystemLoader("templates"), autoescape=True) + + def _render_event_template(self, template_type: str, event_name: str, context: EventConfig): + """Write event configuration files to systemd path. + + Args: + template_type: Name of the template type to use. Can be 'service' or 'timer'. + event_name: Name of the event to schedule. + context: Addition configuration for the event to schedule. + """ + template = self._jinja.get_template(f"dispatch-event.{template_type}.j2") + dest = self._systemd_path / f"ghro.{event_name}.{template_type}" + dest.write_text(template.render(context)) + + def ensure_event_timer( + self, event_name: str, interval: float, timeout: Optional[float] = None + ): + """Ensure that a systemd service and timer are registered to dispatch the given event. + + The interval is how frequently, in minutes, that the event should be dispatched. + + The timeout is the number of seconds before an event is timed out. 
If not given or 0, + it defaults to half the interval period. + + Args: + event_name: Name of the juju event to schedule. + interval: Number of minutes between emitting each event. + + Raises: + TimerEnableError: Timer cannot be started. Events will be not emitted. + """ + context: EventConfig = { + "event": event_name, + "interval": interval, + "jitter": interval / 4, + "timeout": timeout or (interval * 30), + "unit": self.unit_name, + } + self._render_event_template("service", event_name, context) + self._render_event_template("timer", event_name, context) + try: + # Binding for systemctl do no exist, so `subprocess.run` used. + subprocess.run(["/usr/bin/systemctl", "daemon-reload"], check=True) # nosec B603 + subprocess.run( # nosec B603 + ["/usr/bin/systemctl", "enable", f"ghro.{event_name}.timer"], check=True + ) + subprocess.run( # nosec B603 + ["/usr/bin/systemctl", "start", f"ghro.{event_name}.timer"], check=True + ) + except subprocess.CalledProcessError as ex: + raise TimerEnableError from ex + except subprocess.TimeoutExpired as ex: + raise TimerEnableError from ex + + def disable_event_timer(self, event_name: str): + """Disable the systemd timer for the given event. + + Args: + event_name: Name of the juju event to disable. + + Raises: + TimerDisableError: Timer cannot be stopped. Events will be emitted continuously. + """ + try: + # Don't check for errors in case the timer wasn't registered. + # Binding for systemctl do no exist, so `subprocess.run` used. 
+            subprocess.run(  # nosec B603
+                ["/usr/bin/systemctl", "stop", f"ghro.{event_name}.timer"], check=False
+            )
+            subprocess.run(  # nosec B603
+                ["/usr/bin/systemctl", "disable", f"ghro.{event_name}.timer"], check=False
+            )
+        except subprocess.CalledProcessError as ex:
+            raise TimerDisableError from ex
+        except subprocess.TimeoutExpired as ex:
+            raise TimerDisableError from ex
diff --git a/src/github_type.py b/src/github_type.py
new file mode 100644
index 000000000..61278abf7
--- /dev/null
+++ b/src/github_type.py
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Type of returned data from GitHub web API."""
+
+
+from __future__ import annotations
+
+from enum import Enum
+from typing import List, TypedDict
+
+from typing_extensions import NotRequired
+
+
+class GitHubRunnerStatus(Enum):
+    """Status of runner on GitHub."""
+
+    ONLINE = "online"
+    OFFLINE = "offline"
+
+
+class RunnerApplication(TypedDict, total=False):
+    """Information on a single runner application."""
+
+    os: str
+    architecture: str
+    download_url: str
+    filename: str
+    temp_download_token: NotRequired[str]
+    sha256_checksum: NotRequired[str]
+
+
+RunnerApplicationList = List[RunnerApplication]
+
+
+class SelfHostedRunnerLabel(TypedDict, total=False):
+    """A single label of self-hosted runners."""
+
+    id: NotRequired[int]
+    name: str
+    type: NotRequired[str]
+
+
+class SelfHostedRunner(TypedDict):
+    """Information on a single self-hosted runner."""
+
+    id: int
+    name: str
+    os: str
+    status: GitHubRunnerStatus
+    busy: bool
+    labels: list[SelfHostedRunnerLabel]
+
+
+class SelfHostedRunnerList(TypedDict):
+    """Information on a collection of self-hosted runners."""
+
+    total_count: int
+    runners: list[SelfHostedRunner]
+
+
+class RegistrationToken(TypedDict):
+    """Token used for registering github runners."""
+
+    token: str
+    expires_at: str
+
+
+class RemoveToken(TypedDict):
+    """Token used for removing github runners."""
+
+    token: str
+    expires_at: str
diff --git a/src/lxd.py b/src/lxd.py new file mode 100644 index 000000000..4d2aaaa49 --- /dev/null +++ b/src/lxd.py @@ -0,0 +1,388 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Low-level LXD client interface. + +The LxdClient class offer a low-level interface isolate the underlying implementation of LXD. +""" +from __future__ import annotations + +import io +import logging +import tempfile +from typing import IO, Optional, Tuple, Union + +import pylxd.models + +from errors import LxdError, SubprocessError +from lxd_type import ( + LxdInstanceConfig, + LxdNetwork, + LxdResourceProfileConfig, + LxdResourceProfileDevices, +) +from utilities import execute_command, secure_run_subprocess + +logger = logging.getLogger(__name__) + + +class LxdInstanceFileManager: + """File manager of a LXD instance. + + Attrs: + instance (LxdInstance): LXD instance where the files are located in. + """ + + def __init__(self, instance: LxdInstance): + """Construct the file manager. + + Args: + instance: LXD instance where the files are located in. + """ + self.instance = instance + + def mk_dir(self, dir_name: str) -> None: + """Create a directory in the LXD instance. + + Args: + dir: Name of the directory to create. + """ + self.instance.execute(["/usr/bin/mkdir", "-p", dir_name]) + + def push_file(self, source: str, destination: str, mode: Optional[str] = None) -> None: + """Push a file to the LXD instance. + + Args: + source: Path of the file to push to the LXD instance. + destination: Path in the LXD instance to load the file. + mode: File permission setting. + + Raises: + LxdException: Unable to load the file into the LXD instance. 
+ """ + lxc_cmd = [ + "/snap/bin/lxc", + "file", + "push", + "--create-dirs", + source, + f"{self.instance.name}/{destination.lstrip('/')}", + ] + + if mode: + lxc_cmd += ["--mode", mode] + + try: + execute_command(lxc_cmd) + except SubprocessError as err: + logger.exception("Failed to push file") + raise LxdError(f"Unable to push file into LXD instance {self.instance.name}") from err + + def write_file( + self, filepath: str, content: Union[str, bytes], mode: Optional[str] = None + ) -> None: + """Write a file with the given content in the LXD instance. + + Args: + filepath: Path in the LXD instance to load the file. + content: Content of the file. + mode: File permission setting. + + Raises: + LxdException: Unable to load the file to the LXD instance. + """ + if isinstance(content, str): + content = content.encode("utf-8") + + with tempfile.NamedTemporaryFile() as file: + file.write(content) + file.flush() + + self.push_file(file.name, filepath, mode) + + def pull_file(self, source: str, destination: str) -> None: + """Pull a file from the LXD instance. + + Args: + source: Path of the file to pull in the LXD instance. + destination: Path of load the file. + + Raises: + LxdException: Unable to load the file from the LXD instance. + """ + lxc_cmd = [ + "/snap/bin/lxc", + "file", + "pull", + f"{self.instance.name}/{source.lstrip('/')}", + destination, + ] + + try: + execute_command(lxc_cmd) + except SubprocessError as err: + logger.exception("Failed to pull file") + raise LxdError( + f"Unable to pull file {source} from LXD instance {self.instance.name}" + ) from err + + def read_file(self, filepath: str) -> str: + """Read content of a file in the LXD instance. + + Args: + filepath: Path of the file in the LXD instance. + + Raises: + LxdException: Unable to load the file from the LXD instance. + + Returns: + The content of the file. 
+ """ + with tempfile.NamedTemporaryFile() as file: + self.pull_file(filepath, file.name) + + return file.read().decode("utf-8") + + +class LxdInstance: + """A LXD instance. + + Attrs: + name (str): Name of LXD instance. + files (LxdInstanceFiles): Manager for the files on the LXD instance. + status (str): Status of the LXD instance. + """ + + def __init__(self, name: str, pylxd_instance: pylxd.models.Instance): + """Construct the LXD instance representation. + + Args: + name: Name of the LXD instance. + pylxd_instance: Instance of pylxd.models.Instance for the LXD instance. + """ + self.name = name + self._pylxd_instance = pylxd_instance + self.files = LxdInstanceFileManager(self._pylxd_instance) + + @property + def status(self) -> str: + """Status of the LXD instance. + + Returns: + Status of the LXD instance. + """ + return self._pylxd_instance.status + + def start(self, timeout: int = 30, force: bool = True, wait: bool = False) -> None: + """Start the LXD instance. + + Args: + timeout: Timeout for starting the LXD instance. + force: Whether to force start the LXD instance. + wait: Whether to wait until the LXD instance started before returning. + + Raises: + LxdException: Unable to start the LXD instance. + """ + try: + self._pylxd_instance.start(timeout, force, wait) + except pylxd.exceptions.LXDAPIException as err: + logger.exception("Failed to start LXD instance") + raise LxdError(f"Unable to start LXD instance {self.name}") from err + + def stop(self, timeout: int = 30, force: bool = True, wait: bool = False) -> None: + """Stop the LXD instance. + + Args: + timeout: Timeout for stopping the LXD instance. + force: Whether to force stop the LXD instance. + wait: Whether to wait until the LXD instance stopped before returning. + + Raises: + LxdException: Unable to stop the LXD instance. 
+ """ + try: + self._pylxd_instance.stop(timeout, force, wait) + except pylxd.exceptions.LXDAPIException as err: + logger.exception("Failed to stop LXD instance") + raise LxdError(f"Unable to stop LXD instance {self.name}") from err + + def delete(self, wait: bool = False) -> None: + """Delete the LXD instance. + + Args: + wait: Whether to wait until the LXD instance stopped before returning. + + Raises: + LxdException: Unable to delete the LXD instance. + """ + try: + self._pylxd_instance.delete(wait) + except pylxd.exceptions.LXDAPIException as err: + logger.exception("Failed to delete LXD instance") + raise LxdError(f"Unable to delete LXD instance {self.name}") from err + + def execute(self, cmd: list[str], cwd: Optional[str] = None) -> Tuple[int, IO, IO]: + """Execute a command within the LXD instance. + + Exceptions are not raise if command execution failed. Caller should check the exit code and + stderr for failures. + + Args: + cmd: Commands to be executed. + cwd: Working directory to execute the commands. + + Returns: + Tuple containing the exit code, stdout, stderr. + """ + lxc_cmd = ["/snap/bin/lxc", "exec", self.name] + if cwd: + lxc_cmd += ["--cwd", cwd] + + lxc_cmd += ["--"] + cmd + + result = secure_run_subprocess(lxc_cmd) + return (result.returncode, io.BytesIO(result.stdout), io.BytesIO(result.stderr)) + + +class LxdInstanceManager: + """LXD instance manager.""" + + def __init__(self, pylxd_client: pylxd.Client): + """Construct the LXD instance manager. + + Args: + pylxd_client: Instance of pylxd.Client. + """ + self._pylxd_client = pylxd_client + + def all(self) -> list[LxdInstance]: + """Get list of LXD instances. + + Raises: + LxdException: Unable to get all LXD instance. + + Returns: + List of LXD instances. 
+ """ + try: + return [ + LxdInstance(instance.name, instance) + for instance in self._pylxd_client.instances.all() + ] + except pylxd.exceptions.LXDAPIException as err: + logger.exception("Failed to get all LXD instance") + raise LxdError("Unable to get all LXD instances") from err + + def create(self, config: LxdInstanceConfig, wait: bool) -> LxdInstance: + """Create a LXD instance. + + Args: + config: Configuration for the LXD instance. + wait: Whether to wait until the LXD instance created before returning. + + Raises: + LxdException: Unable to get all LXD instance. + + Returns: + The created LXD instance. + """ + try: + pylxd_instance = self._pylxd_client.instances.create(config=config, wait=wait) + return LxdInstance(config["name"], pylxd_instance) + except pylxd.exceptions.LXDAPIException as err: + logger.exception("Failed to create LXD instance") + raise LxdError(f"Unable to create LXD instance {config['name']}") from err + + +class LxdProfileManager: + """LXD profile manager.""" + + def __init__(self, pylxd_client: pylxd.Client): + """Construct the LXD profile manager. + + Args: + pylxd_client: Instance of pylxd.Client. + """ + self._pylxd_client = pylxd_client + + def exists(self, name: str) -> bool: + """Check whether a LXD profile of a given name exists. + + Args: + name: Name for LXD profile to check. + + Raises: + LxdException: Unable to check the LXD profile existence. + + Returns: + Whether the LXD profile of the given name exists. + """ + try: + return self._pylxd_client.profiles.exists(name) + except pylxd.exceptions.LXDAPIException as err: + logger.exception("Failed to check if LXD profile exists") + raise LxdError(f"Unable to check if LXD profile {name} exists") from err + + def create( + self, name: str, config: LxdResourceProfileConfig, devices: LxdResourceProfileDevices + ) -> None: + """Create a LXD profile. + + Args: + name: Name of the LXD profile to create. + config: Configuration of the LXD profile. 
+ devices Devices configuration of the LXD profile. + + Raises: + LxdException: Unable to create the LXD profile. + """ + try: + self._pylxd_client.profiles.create(name, config, devices) + except pylxd.exceptions.LXDAPIException as err: + logger.exception("Failed to create LXD profile") + raise LxdError(f"Unable to create LXD profile {name}") from err + + +# Disable pylint as other method of this class can be extended in the future. +class LxdNetworkManager: # pylint: disable=too-few-public-methods + """LXD network manager.""" + + def __init__(self, pylxd_client: pylxd.Client): + """Construct the LXD profile manager. + + Args: + pylxd_client: Instance of pylxd.Client. + """ + self._pylxd_client = pylxd_client + + def get(self, name: str) -> LxdNetwork: + """Get a LXD network information. + + Args: + name: The name of the LXD network. + + Returns: + Information on the LXD network. + """ + network = self._pylxd_client.networks.get(name) + return LxdNetwork( + network.name, + network.description, + network.type, + network.config, + network.managed, + network.used_by, + ) + + +# Disable pylint as the public methods of this class in split into instances and profiles. +class LxdClient: # pylint: disable=too-few-public-methods + """LXD client.""" + + def __init__(self): + """Construct the LXD client.""" + pylxd_client = pylxd.Client() + self.instances = LxdInstanceManager(pylxd_client) + self.profiles = LxdProfileManager(pylxd_client) + self.networks = LxdNetworkManager(pylxd_client) diff --git a/src/lxd_type.py b/src/lxd_type.py new file mode 100644 index 000000000..7e6d0dbc4 --- /dev/null +++ b/src/lxd_type.py @@ -0,0 +1,77 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Types used by Lxd class.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TypedDict + +# The keys are not valid identifiers, hence this is defined with function-based syntax. 
+LxdResourceProfileConfig = TypedDict( + "LxdResourceProfileConfig", {"limits.cpu": str, "limits.memory": str} +) +LxdResourceProfileConfig.__doc__ = "Configuration LXD profile." + + +class LxdResourceProfileDevicesDisk(TypedDict): + """LXD device profile of disk. + + The details of the configuration of different types of devices can be found here: + https://linuxcontainers.org/lxd/docs/latest/reference/devices/ + + For example, configuration for disk: + https://linuxcontainers.org/lxd/docs/latest/reference/devices_disk/# + + The unit of storage and network limits can be found here: + https://linuxcontainers.org/lxd/docs/latest/reference/instance_units/#instances-limit-units + """ + + path: str + pool: str + type: str + size: str + + +LxdResourceProfileDevices = dict[str, LxdResourceProfileDevicesDisk] + + +class LxdInstanceConfigSource(TypedDict): + """Configuration for source image in LXD instance.""" + + type: str + mode: str + server: str + protocol: str + alias: str + + +class LxdInstanceConfig(TypedDict): + """Configuration for LXD instance.""" + + name: str + type: str + source: LxdInstanceConfigSource + ephemeral: bool + profiles: list[str] + + +# The keys are not valid identifiers, hence this is defined with function-based syntax. +LxdNetworkConfig = TypedDict( + "LxdNetworkConfig", + {"ipv4.address": str, "ipv4.nat": str, "ipv6.address": str, "ipv6.nat": str}, +) +LxdNetworkConfig.__doc__ = "Represent LXD network configuration." + + +@dataclass +class LxdNetwork: + """LXD network information.""" + + name: str + description: str + type: str + config: LxdNetworkConfig + managed: bool + used_by: tuple[str] diff --git a/src/repo_policy_compliance_client.py b/src/repo_policy_compliance_client.py new file mode 100644 index 000000000..2c3318983 --- /dev/null +++ b/src/repo_policy_compliance_client.py @@ -0,0 +1,48 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Client for requesting repo policy compliance service.""" + +import logging +from urllib.parse import urljoin + +import requests + +logger = logging.getLogger(__name__) + + +# Disable pylint as other method of this class can be extended in the future. +class RepoPolicyComplianceClient: # pylint: disable=too-few-public-methods + """Client for repo policy compliance service. + + Attrs: + base_url: Base url to the repo policy compliance service. + token: Charm token configured for the repo policy compliance service. + """ + + def __init__(self, session: requests.Session, url: str, charm_token: str) -> None: + """Construct the RepoPolicyComplianceClient. + + Args: + session: The request Session object for making HTTP requests. + url: Base URL to the repo policy compliance service. + charm_token: Charm token configured for the repo policy compliance service. + """ + self._session = session + self.base_url = url + self.token = charm_token + + def get_one_time_token(self) -> str: + """Get a single-use token for repo policy compliance check. + + Returns: + The one-time token to be used in a single request of repo policy compliance check. + """ + url = urljoin(self.base_url, "one-time-token") + try: + response = self._session.get(url, headers={"Authorization": f"Bearer {self.token}"}) + response.raise_for_status() + return response.content.decode("utf-8") + except requests.HTTPError: + logger.exception("Unable to get one time token from repo policy compliance service.") + raise diff --git a/src/repo_policy_compliance_service.py b/src/repo_policy_compliance_service.py new file mode 100644 index 000000000..b9c896098 --- /dev/null +++ b/src/repo_policy_compliance_service.py @@ -0,0 +1,14 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Flask application for repo policy compliance. + +This module is loaded into juju unit and run with gunicorn. +""" + +# This module is executed in a different environment. 
+from flask import Flask # pylint: disable=import-error +from repo_policy_compliance.blueprint import repo_policy_compliance # pylint: disable=import-error + +app = Flask(__name__) +app.register_blueprint(repo_policy_compliance) diff --git a/src/runner.py b/src/runner.py new file mode 100644 index 000000000..94b2c3ff5 --- /dev/null +++ b/src/runner.py @@ -0,0 +1,576 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Manage the lifecycle of runners. + +The `Runner` class stores the information on the runners and manages the +lifecycle of the runners on LXD and GitHub. + +The `RunnerManager` class from `runner_manager.py` creates and manages a +collection of `Runner` instances. +""" + +from __future__ import annotations + +import json +import logging +import time +from pathlib import Path +from typing import Iterable, Optional, Sequence + +from errors import LxdError, RunnerCreateError, RunnerError, RunnerFileLoadError, RunnerRemoveError +from lxd import LxdInstance +from lxd_type import LxdInstanceConfig +from runner_type import ( + GitHubOrg, + GitHubRepo, + RunnerClients, + RunnerConfig, + RunnerStatus, + VirtualMachineResources, +) +from utilities import retry + +logger = logging.getLogger(__name__) + + +class Runner: + """Single instance of GitHub self-hosted runner. + + Attrs: + app_name (str): Name of the charm. + path (GitHubPath): Path to GitHub repo or org. + proxies (ProxySetting): HTTP proxy setting for juju charm. + name (str): Name of the runner instance. + exist (bool): Whether the runner instance exists on LXD. + online (bool): Whether GitHub marks this runner as online. + busy (bool): Whether GitHub marks this runner as busy. 
+ """ + + runner_application = Path("/opt/github-runner") + env_file = runner_application / ".env" + config_script = runner_application / "config.sh" + runner_script = runner_application / "start.sh" + pre_job_script = runner_application / "pre-job.sh" + + def __init__( + self, + clients: RunnerClients, + runner_config: RunnerConfig, + runner_status: RunnerStatus, + instance: Optional[LxdInstance] = None, + ): + """Construct the runner instance. + + Args: + clients: Clients to access various services. + runner_config: Configuration of the runner instance. + instance: LXD instance of the runner if already created. + """ + # Dependency injection to share the instances across different `Runner` instance. + self._clients = clients + self.config = runner_config + self.status = runner_status + self.instance = instance + + def create( + self, + image: str, + resources: VirtualMachineResources, + binary_path: Path, + registration_token: str, + ): + """Create the runner instance on LXD and register it on GitHub. + + Args: + image: Name of the image to launch the LXD instance with. + resources: Resource setting for the LXD instance. + binary_path: Path to the runner binary. + registration_token: Token for registering the runner on GitHub. + + Raises: + RunnerCreateError: Unable to create a LXD instance for runner. + """ + logger.info("Creating runner: %s", self.config.name) + + try: + self.instance = self._create_instance(image, resources) + self._start_instance() + # Wait some initial time for the instance to boot up + time.sleep(60) + self._wait_boot_up() + self._install_binary(binary_path) + self._configure_runner() + self._register_runner(registration_token, labels=[self.config.app_name, image]) + self._start_runner() + except (RunnerError, LxdError) as err: + raise RunnerCreateError(f"Unable to create runner {self.config.name}") from err + + def remove(self, remove_token: str) -> None: + """Remove this runner instance from LXD and GitHub. 
+
+        Args:
+            remove_token: Token for removing the runner on GitHub.
+
+        Raises:
+            RunnerRemoveError: Failure in removing runner.
+        """
+        logger.info("Removing LXD instance of runner: %s", self.config.name)
+
+        if self.instance:
+            # Run script to remove the runner and cleanup.
+            self.instance.execute(
+                [
+                    "/usr/bin/sudo",
+                    "-u",
+                    "ubuntu",
+                    str(self.config_script),
+                    "remove",
+                    "--token",
+                    remove_token,
+                ],
+            )
+
+            if self.instance.status == "Running":
+                try:
+                    self.instance.stop(wait=True, timeout=60)
+                except LxdError:
+                    logger.exception(
+                        "Unable to gracefully stop runner %s within timeout.", self.config.name
+                    )
+                    logger.info("Force stopping of runner %s", self.config.name)
+                    try:
+                        self.instance.stop(force=True)
+                    except LxdError as err:
+                        raise RunnerRemoveError(f"Unable to remove {self.config.name}") from err
+            else:
+                # Delete ephemeral instances that are in error status or stopped status that LXD
+                # failed to clean up.
+                logger.warning(
+                    "Found runner %s in status %s, forcing deletion",
+                    self.config.name,
+                    self.instance.status,
+                )
+                try:
+                    self.instance.delete(wait=True)
+                except LxdError as err:
+                    raise RunnerRemoveError(f"Unable to remove {self.config.name}") from err
+
+        if self.status.runner_id is None:
+            return
+
+        # The runner should clean up itself. Cleanup on GitHub in case of runner cleanup error.
+ if isinstance(self.config.path, GitHubRepo): + logger.debug( + "Ensure runner %s with id %s is removed from GitHub repo %s/%s", + self.config.name, + self.status.runner_id, + self.config.path.owner, + self.config.path.repo, + ) + self._clients.github.actions.delete_self_hosted_runner_from_repo( + owner=self.config.path.owner, + repo=self.config.path.repo, + runner_id=self.status.runner_id, + ) + if isinstance(self.config.path, GitHubOrg): + logger.debug( + "Ensure runner %s with id %s is removed from GitHub org %s", + self.config.name, + self.status.runner_id, + self.config.path.org, + ) + self._clients.github.actions.delete_self_hosted_runner_from_org( + org=self.config.path.org, + runner_id=self.status.runner_id, + ) + + @retry(tries=5, delay=1, local_logger=logger) + def _create_instance( + self, image: str, resources: VirtualMachineResources, ephemeral: bool = True + ) -> LxdInstance: + """Create an instance of runner. + + Args: + image: Image used to launch the instance hosting the runner. + resources: Configuration of the virtual machine resources. + ephemeral: Whether the instance is ephemeral. + + Returns: + LXD instance of the runner. + """ + logger.info("Creating an LXD instance for runner: %s", self.config.name) + + self._ensure_runner_profile() + resource_profile = self._get_resource_profile(resources) + + # Create runner instance. + instance_config: LxdInstanceConfig = { + "name": self.config.name, + "type": "virtual-machine", + "source": { + "type": "image", + "mode": "pull", + "server": "https://cloud-images.ubuntu.com/daily", + "protocol": "simplestreams", + "alias": image, + }, + "ephemeral": ephemeral, + "profiles": ["default", "runner", resource_profile], + } + + instance = self._clients.lxd.instances.create(config=instance_config, wait=True) + self.status.exist = True + return instance + + @retry(tries=5, delay=1, local_logger=logger) + def _ensure_runner_profile(self) -> None: + """Ensure the runner profile is present on LXD. 
+ + Raises: + RunnerError: Unable to create the runner profile. + """ + if not self._clients.lxd.profiles.exists("runner"): + logger.info("Creating runner LXD profile") + profile_config = { + "security.nesting": "true", + } + self._clients.lxd.profiles.create("runner", profile_config, {}) + + # Verify the action is successful. + if not self._clients.lxd.profiles.exists("runner"): + raise RunnerError("Failed to create runner LXD profile") + else: + logger.info("Found existing runner LXD profile") + + @retry(tries=5, delay=1, local_logger=logger) + def _get_resource_profile(self, resources: VirtualMachineResources) -> str: + """Get the LXD profile name of given resource limit. + + Args: + resources: Resources limit of the runner instance. + + Raises: + RunnerError: Unable to create the profile on LXD. + + Returns: + str: Name of the profile for the given resource limit. + """ + # Ensure the resource profile exists. + profile_name = f"cpu-{resources.cpu}-mem-{resources.memory}-disk-{resources.disk}" + if not self._clients.lxd.profiles.exists(profile_name): + logger.info("Creating LXD profile for resource usage.") + try: + resource_profile_config = { + "limits.cpu": str(resources.cpu), + "limits.memory": resources.memory, + } + resource_profile_devices = { + "root": { + "path": "/", + "pool": "default", + "type": "disk", + "size": resources.disk, + } + } + self._clients.lxd.profiles.create( + profile_name, resource_profile_config, resource_profile_devices + ) + except LxdError as error: + logger.error(error) + raise RunnerError( + "Resources were not provided in the correct format, check the juju config for " + "cpu, memory and disk." + ) from error + + # Verify the action is successful. 
+            if not self._clients.lxd.profiles.exists(profile_name):
+                raise RunnerError(f"Unable to create {profile_name} LXD profile")
+        else:
+            logger.info("Found existing LXD profile for resource usage.")
+
+        return profile_name
+
+    @retry(tries=5, delay=1, local_logger=logger)
+    def _start_instance(self) -> None:
+        """Start an instance and wait for it to boot.
+
+        Raises:
+            RunnerError: Runner operation called prior to runner creation.
+        """
+        if self.instance is None:
+            raise RunnerError("Runner operation called prior to runner creation.")
+
+        logger.info("Starting LXD instance for runner: %s", self.config.name)
+
+        # Setting `wait=True` only ensures the instance has begun to boot up.
+        self.instance.start(wait=True)
+
+    @retry(tries=5, delay=30, local_logger=logger)
+    def _wait_boot_up(self) -> None:
+        if self.instance is None:
+            raise RunnerError("Runner operation called prior to runner creation.")
+
+        # Wait for the instance to finish to boot up and network to be up.
+        self.instance.execute(["/usr/bin/who"])
+        self.instance.execute(["/usr/bin/nslookup", "github.com"])
+
+        logger.info("Finished booting up LXD instance for runner: %s", self.config.name)
+
+    @retry(tries=5, delay=1, local_logger=logger)
+    def _install_binary(self, binary: Path) -> None:
+        """Load GitHub self-hosted runner binary on to the runner instance.
+
+        Args:
+            binary: Path to the compressed runner binary.
+
+        Raises:
+            RunnerFileLoadError: Unable to load the file into the runner instance.
+        """
+        if self.instance is None:
+            raise RunnerError("Runner operation called prior to runner creation.")
+
+        # TEMP: Install common tools used in GitHub Actions. This will be removed once virtual
+        # machines are created from custom images/GitHub runner image.
+
+        self._apt_install(["docker.io", "npm", "python3-pip", "shellcheck", "jq"])
+        self._snap_install(["yq"])
+
+        # Add the user to docker group.
+ self.instance.execute(["/usr/sbin/usermod", "-aG", "docker", "ubuntu"]) + self.instance.execute(["/usr/bin/newgrp", "docker"]) + + # The LXD instance is meant to run untrusted workload. Hardcoding the tmp directory should + # be fine. + binary_path = "/tmp/runner.tgz" # nosec B108 + + logger.info("Installing runner binary on LXD instance: %s", self.config.name) + + # Creating directory and putting the file are idempotent, and can be retried. + self.instance.files.mk_dir(str(self.runner_application)) + self.instance.files.push_file(str(binary), binary_path) + + self.instance.execute( + ["/usr/bin/tar", "-xzf", binary_path, "-C", str(self.runner_application)] + ) + self.instance.execute( + ["/usr/bin/chown", "-R", "ubuntu:ubuntu", str(self.runner_application)] + ) + + # Verify the config script is written to runner. + exit_code, _, stderr = self.instance.execute(["test", "-f", str(self.config_script)]) + if exit_code == 0: + logger.info("Runner binary loaded on runner instance %s.", self.config.name) + else: + logger.error( + "Unable to load runner binary on runner instance %s: %s", + self.config.name, + stderr.read(), + ) + raise RunnerFileLoadError(f"Failed to load runner binary on {self.config.name}") + + @retry(tries=5, delay=1, local_logger=logger) + def _configure_runner(self) -> None: + """Load configuration on to the runner. + + Raises: + RunnerFileLoadError: Unable to load configuration file on the runner. + """ + if self.instance is None: + raise RunnerError("Runner operation called prior to runner creation.") + + # Load the runner startup script. + startup_contents = self._clients.jinja.get_template("start.j2").render() + self._put_file(str(self.runner_script), startup_contents, mode="0755") + self.instance.execute(["/usr/bin/sudo", "chown", "ubuntu:ubuntu", str(self.runner_script)]) + self.instance.execute(["/usr/bin/sudo", "chmod", "u+x", str(self.runner_script)]) + + # Load the runner pre-job script. 
+ bridge_address_range = self._clients.lxd.networks.get("lxdbr0").config["ipv4.address"] + host_ip, _ = bridge_address_range.split("/") + one_time_token = self._clients.repo.get_one_time_token() + pre_job_contents = self._clients.jinja.get_template("pre-job.j2").render( + host_ip=host_ip, one_time_token=one_time_token + ) + self._put_file(str(self.pre_job_script), pre_job_contents) + self.instance.execute( + ["/usr/bin/sudo", "chown", "ubuntu:ubuntu", str(self.pre_job_script)] + ) + self.instance.execute(["/usr/bin/sudo", "chmod", "u+x", str(self.pre_job_script)]) + + # Set permission to the same as GitHub-hosted runner for this directory. + # Some GitHub Actions require this permission setting to run. + # As the user already has sudo access, this does not give the user any additional access. + self.instance.execute(["/usr/bin/sudo", "chmod", "777", "/usr/local/bin"]) + + # Load `/etc/environment` file. + environment_contents = self._clients.jinja.get_template("environment.j2").render( + proxies=self.config.proxies + ) + self._put_file("/etc/environment", environment_contents) + + # Load `.env` config file for GitHub self-hosted runner. + env_contents = self._clients.jinja.get_template("env.j2").render( + proxies=self.config.proxies, pre_job_script=str(self.pre_job_script) + ) + self._put_file(str(self.env_file), env_contents) + self.instance.execute(["/usr/bin/chown", "ubuntu:ubuntu", str(self.env_file)]) + + if self.config.proxies: + # Creating directory and putting the file are idempotent, and can be retried. 
+ logger.info("Adding proxy setting to the runner.") + + docker_proxy_contents = self._clients.jinja.get_template( + "systemd-docker-proxy.j2" + ).render(proxies=self.config.proxies) + + # Set docker daemon proxy config + docker_service_path = Path("/etc/systemd/system/docker.service.d") + docker_service_proxy = docker_service_path / "http-proxy.conf" + + self.instance.files.mk_dir(str(docker_service_path)) + self._put_file(str(docker_service_proxy), docker_proxy_contents) + + self.instance.execute(["systemctl", "daemon-reload"]) + self.instance.execute(["systemctl", "restart", "docker"]) + + # Set docker client proxy config + docker_client_proxy = { + "proxies": { + "default": { + "httpProxy": self.config.proxies["http"], + "httpsProxy": self.config.proxies["https"], + "noProxy": self.config.proxies["no_proxy"], + } + } + } + docker_client_proxy_content = json.dumps(docker_client_proxy) + # Configure the docker client for root user and ubuntu user. + self._put_file("/root/.docker/config.json", docker_client_proxy_content) + self._put_file("/home/ubuntu/.docker/config.json", docker_client_proxy_content) + self.instance.execute( + ["/usr/bin/chown", "-R", "ubuntu:ubuntu", "/home/ubuntu/.docker"] + ) + + @retry(tries=5, delay=30, local_logger=logger) + def _register_runner(self, registration_token: str, labels: Sequence[str]) -> None: + """Register the runner on GitHub. + + Args: + registration_token: Registration token request from GitHub. + labels: Labels to tag the runner with. 
+ """ + if self.instance is None: + raise RunnerError("Runner operation called prior to runner creation.") + + logger.info("Registering runner %s", self.config.name) + + register_cmd = [ + "/usr/bin/sudo", + "-u", + "ubuntu", + str(self.config_script), + "--url", + f"https://github.com/{self.config.path.path()}", + "--token", + registration_token, + "--ephemeral", + "--unattended", + "--labels", + ",".join(labels), + "--name", + self.instance.name, + ] + + if isinstance(self.config.path, GitHubOrg): + register_cmd += ["--runnergroup", self.config.path.group] + + self.instance.execute( + register_cmd, + cwd=str(self.runner_application), + ) + + @retry(tries=5, delay=30, local_logger=logger) + def _start_runner(self) -> None: + """Start the GitHub runner.""" + if self.instance is None: + raise RunnerError("Runner operation called prior to runner creation.") + + logger.info("Starting runner %s", self.config.name) + + self.instance.execute( + [ + "/usr/bin/sudo", + "-u", + "ubuntu", + str(self.runner_script), + ] + ) + + logger.info("Started runner %s", self.config.name) + + def _put_file(self, filepath: str, content: str, mode: Optional[str] = None) -> None: + """Put a file into the runner instance. + + Args: + filepath: Path to load the file in the runner instance. + content: Content of the file. + + Raises: + RunnerFileLoadError: Failed to load the file into the runner instance. 
+        """
+        if self.instance is None:
+            raise RunnerError("Runner operation called prior to runner creation.")
+
+        self.instance.files.write_file(filepath, content, mode)
+        content_on_runner = self.instance.files.read_file(filepath)
+        if content_on_runner != content:
+            logger.error(
+                "Loaded file %s in runner %s did not match expected content",
+                filepath,
+                self.instance.name,
+            )
+            logger.debug(
+                "Expected file content for file %s on runner %s: %s\nFound: %s",
+                filepath,
+                self.instance.name,
+                content,
+                content_on_runner,
+            )
+            raise RunnerFileLoadError(
+                f"Failed to load file {filepath} to runner {self.instance.name}"
+            )
+
+    def _apt_install(self, packages: Iterable[str]) -> None:
+        """Installs the given APT packages.
+
+        This is a temporary solution to provide tools not offered by the base ubuntu image. Custom
+        images based on the GitHub action runner image will be used in the future.
+
+        Args:
+            packages: Packages to be installed via apt.
+        """
+        if self.instance is None:
+            raise RunnerError("Runner operation called prior to runner creation.")
+
+        self.instance.execute(["/usr/bin/apt-get", "update"])
+
+        for pkg in packages:
+            logger.info("Installing %s via APT...", pkg)
+            self.instance.execute(["/usr/bin/apt-get", "install", "-yq", pkg])
+
+    def _snap_install(self, packages: Iterable[str]) -> None:
+        """Installs the given snap packages.
+
+        This is a temporary solution to provide tools not offered by the base ubuntu image. Custom
+        images based on the GitHub action runner image will be used in the future.
+
+        Args:
+            packages: Packages to be installed via snap.
+ """ + if self.instance is None: + raise RunnerError("Runner operation called prior to runner creation.") + + for pkg in packages: + logger.info("Installing %s via snap...", pkg) + self.instance.execute(["/usr/bin/snap", "install", pkg]) diff --git a/src/runner_manager.py b/src/runner_manager.py new file mode 100644 index 000000000..eebe7dddc --- /dev/null +++ b/src/runner_manager.py @@ -0,0 +1,493 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Runner Manager manages the runners on LXD and GitHub.""" + +from __future__ import annotations + +import hashlib +import logging +import tarfile +import urllib.request +import uuid +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, Iterator, Optional + +import fastcore.net +import jinja2 +import requests +import requests.adapters +import urllib3 +from ghapi.all import GhApi +from typing_extensions import assert_never + +from errors import RunnerBinaryError, RunnerCreateError +from github_type import ( + GitHubRunnerStatus, + RegistrationToken, + RemoveToken, + RunnerApplication, + RunnerApplicationList, + SelfHostedRunner, +) +from lxd import LxdClient, LxdInstance +from repo_policy_compliance_client import RepoPolicyComplianceClient +from runner import Runner, RunnerClients, RunnerConfig, RunnerStatus +from runner_type import GitHubOrg, GitHubPath, GitHubRepo, ProxySetting, VirtualMachineResources +from utilities import retry, set_env_var + +logger = logging.getLogger(__name__) + + +@dataclass +class RunnerManagerConfig: + """Configuration of runner manager. + + Attrs: + path: GitHub repository path in the format '/', or the GitHub organization + name. + token: GitHub personal access token to register runner to the repository or + organization. + """ + + path: GitHubPath + token: str + image: str + service_token: str + + +@dataclass +class RunnerInfo: + """Information from GitHub of a runner. 
+ + Used as a returned type to method querying runner information. + """ + + name: str + status: GitHubRunnerStatus + + +class RunnerManager: + """Manage a group of runners according to configuration.""" + + runner_bin_path = Path("/opt/github-runner-app") + + def __init__( + self, + app_name: str, + unit: int, + runner_manager_config: RunnerManagerConfig, + proxies: ProxySetting = ProxySetting(), + ) -> None: + """Construct RunnerManager object for creating and managing runners. + + Args: + app_name: An name for the set of runners. + unit: Unit number of the set of runners. + runner_manager_config: Configuration for the runner manager. + proxies: HTTP proxy settings. + """ + self.app_name = app_name + self.instance_name = f"{app_name}-{unit}" + self.config = runner_manager_config + self.proxies = proxies + + # Setting the env var to this process and any child process spawned. + if "no_proxy" in self.proxies: + set_env_var("NO_PROXY", self.proxies["no_proxy"]) + if "http" in self.proxies: + set_env_var("HTTP_PROXY", self.proxies["http"]) + if "https" in self.proxies: + set_env_var("HTTPS_PROXY", self.proxies["https"]) + + self.session = requests.Session() + adapter = requests.adapters.HTTPAdapter( + max_retries=urllib3.Retry( + total=10, backoff_factor=0.3, status_forcelist=[500, 502, 503, 504] + ) + ) + self.session.mount("http://", adapter) + self.session.mount("https://", adapter) + if self.proxies: + # setup proxy for requests + self.session.proxies.update(self.proxies) + # add proxy to fastcore which ghapi uses + proxy = urllib.request.ProxyHandler(self.proxies) + opener = urllib.request.build_opener(proxy) + fastcore.net._opener = opener + + # The repo policy compliance service is on localhost and should not have any proxies + # setting configured. The is a separated requests Session as the other one configured + # according proxies setting provided by user. 
+ local_session = requests.Session() + local_session.mount("http://", adapter) + local_session.mount("https://", adapter) + + self._clients = RunnerClients( + GhApi(token=self.config.token), + jinja2.Environment(loader=jinja2.FileSystemLoader("templates"), autoescape=True), + LxdClient(), + RepoPolicyComplianceClient( + local_session, "http://127.0.0.1:8080", self.config.service_token + ), + ) + + @retry(tries=5, delay=30, local_logger=logger) + def get_latest_runner_bin_url( + self, os_name: str = "linux", arch_name: str = "x64" + ) -> RunnerApplication: + """Get the URL for the latest runner binary. + + The runner binary URL changes when a new version is available. + + Args: + os_name: Name of operating system. + arch_name: Name of architecture. + + Returns: + Information on the runner application. + """ + runner_bins: RunnerApplicationList = [] + if isinstance(self.config.path, GitHubRepo): + runner_bins = self._clients.github.actions.list_runner_applications_for_repo( + owner=self.config.path.owner, repo=self.config.path.repo + ) + if isinstance(self.config.path, GitHubOrg): + runner_bins = self._clients.github.actions.list_runner_applications_for_org( + org=self.config.path.org + ) + + logger.debug("Response of runner binary list: %s", runner_bins) + + try: + return next( + bin + for bin in runner_bins + if bin["os"] == os_name and bin["architecture"] == arch_name + ) + except StopIteration as err: + raise RunnerBinaryError( + f"Unable query GitHub runner binary information for {os_name} {arch_name}" + ) from err + + @retry(tries=5, delay=30, local_logger=logger) + def update_runner_bin(self, binary: RunnerApplication) -> None: + """Download a runner file, replacing the current copy. + + Remove the existing runner binary to prevent it from being used. This + is done to prevent security issues arising from outdated runner binary + containing security flaws. The newest version of runner binary should + always be used. 
+ + Args: + binary: Information on the runner binary to download. + """ + logger.info("Downloading runner binary from: %s", binary["download_url"]) + + # Delete old version of runner binary. + RunnerManager.runner_bin_path.unlink(missing_ok=True) + + # Download the new file + response = self.session.get(binary["download_url"], stream=True) + + logger.info( + "Download of runner binary from %s return status code: %i", + binary["download_url"], + response.status_code, + ) + + if not binary["sha256_checksum"]: + logger.error("Checksum for runner binary is not found, unable to verify download.") + raise RunnerBinaryError("Checksum for runner binary is not found in GitHub response.") + + sha256 = hashlib.sha256() + + with RunnerManager.runner_bin_path.open(mode="wb") as file: + for chunk in response.iter_content(decode_unicode=False): + file.write(chunk) + + sha256.update(chunk) + + logger.info("Finished download of runner binary.") + + # Verify the checksum if checksum is present. + if binary["sha256_checksum"] != sha256.hexdigest(): + logger.error( + "Expected hash of runner binary (%s) doesn't match the calculated hash (%s)", + binary["sha256_checksum"], + sha256, + ) + RunnerManager.runner_bin_path.unlink(missing_ok=True) + raise RunnerBinaryError("Checksum mismatch for downloaded runner binary") + + # Verify the file integrity. + if not tarfile.is_tarfile(file.name): + logger.error("Failed to decompress downloaded GitHub runner binary.") + RunnerManager.runner_bin_path.unlink(missing_ok=True) + raise RunnerBinaryError("Downloaded runner binary cannot be decompressed.") + + logger.info("Validated newly downloaded runner binary and enabled it.") + + def get_github_info(self) -> Iterator[RunnerInfo]: + """Get information on the runners from GitHub. + + Returns: + List of information from GitHub on runners. 
+ """ + remote_runners = self._get_runner_github_info() + return iter(RunnerInfo(runner.name, runner.status) for runner in remote_runners.values()) + + def reconcile(self, quantity: int, resources: VirtualMachineResources) -> int: + """Bring runners in line with target. + + Args: + quantity: Number of intended runners. + resources: Configuration of the virtual machine resources. + + Returns: + Difference between intended runners and actual runners. + """ + runners = self._get_runners() + + # Add/Remove runners to match the target quantity + online_runners = [ + runner for runner in runners if runner.status.exist and runner.status.online + ] + + offline_runners = [runner for runner in runners if not runner.status.online] + + local_runners = { + instance.name: instance + # Pylint cannot find the `all` method. + for instance in self._clients.lxd.instances.all() # pylint: disable=no-member + if instance.name.startswith(f"{self.instance_name}-") + } + + logger.info( + ( + "Expected runner count: %i, Online runner count: %i, Offline runner count: %i, " + "LXD instance count: %i" + ), + quantity, + len(online_runners), + len(offline_runners), + len(local_runners), + ) + + # Clean up offline runners + if offline_runners: + logger.info("Cleaning up offline runners.") + + remove_token = self._get_github_remove_token() + + for runner in offline_runners: + runner.remove(remove_token) + logger.info("Removed runner: %s", runner.config.name) + + delta = quantity - len(online_runners) + # Spawn new runners + if delta > 0: + if RunnerManager.runner_bin_path is None: + raise RunnerCreateError("Unable to create runner due to missing runner binary.") + + logger.info("Getting registration token for GitHub runners.") + + registration_token = self._get_github_registration_token() + remove_token = self._get_github_remove_token() + + logger.info("Adding %i additional runner(s).", delta) + for _ in range(delta): + config = RunnerConfig( + self.app_name, + self.config.path, + self.proxies, + 
self._generate_runner_name(), + ) + runner = Runner(self._clients, config, RunnerStatus()) + try: + runner.create( + self.config.image, + resources, + RunnerManager.runner_bin_path, + registration_token, + ) + logger.info("Created runner: %s", runner.config.name) + except RunnerCreateError: + logger.error("Unable to create runner: %s", runner.config.name) + runner.remove(remove_token) + logger.info("Cleaned up runner: %s", runner.config.name) + raise + + elif delta < 0: + # Idle runners are online runners that has not taken a job. + idle_runners = [runner for runner in online_runners if not runner.status.busy] + offset = min(-delta, len(idle_runners)) + if offset != 0: + logger.info("Removing %i runner(s).", offset) + remove_runners = idle_runners[:offset] + + logger.info("Cleaning up idle runners.") + + remove_token = self._get_github_remove_token() + + for runner in remove_runners: + runner.remove(remove_token) + logger.info("Removed runner: %s", runner.config.name) + + else: + logger.info("There are no idle runner to remove.") + else: + logger.info("No changes to number of runner needed.") + + return delta + + def flush(self, flush_busy: bool = True) -> int: + """Remove existing runners. + + Args: + flush_busy: Whether to flush busy runners as well. + + Returns: + Number of runner removed. + """ + if flush_busy: + runners = [runner for runner in self._get_runners() if runner.status.exist] + else: + runners = [ + runner + for runner in self._get_runners() + if runner.status.exist and not runner.status.busy + ] + + logger.info("Removing existing %i local runners", len(runners)) + + remove_token = self._get_github_remove_token() + + for runner in runners: + runner.remove(remove_token) + logger.info("Removed runner: %s", runner.config.name) + + return len(runners) + + def _generate_runner_name(self) -> str: + """Generate a runner name based on charm name. + + Returns: + Generated name of runner. 
+ """ + suffix = str(uuid.uuid4()) + return f"{self.instance_name}-{suffix}" + + def _get_runner_github_info(self) -> Dict[str, SelfHostedRunner]: + remote_runners_list: list[SelfHostedRunner] = [] + if isinstance(self.config.path, GitHubRepo): + remote_runners_list = self._clients.github.actions.list_self_hosted_runners_for_repo( + owner=self.config.path.owner, repo=self.config.path.repo + )["runners"] + if isinstance(self.config.path, GitHubOrg): + remote_runners_list = self._clients.github.actions.list_self_hosted_runners_for_org( + org=self.config.path.org + )["runners"] + + logger.debug("List of runners found on GitHub:%s", remote_runners_list) + + return { + runner.name: runner + for runner in remote_runners_list + if runner.name.startswith(f"{self.instance_name}-") + } + + def _get_runners(self) -> list[Runner]: + """Query for the list of runners. + + Returns: + List of `Runner` from information on LXD or GitHub. + """ + + def create_runner_info( + name: str, + local_runner: Optional[LxdInstance], + remote_runner: Optional[SelfHostedRunner], + ) -> Runner: + """Create runner from information from GitHub and LXD.""" + logger.debug( + ( + "Found runner %s with GitHub info [status: %s, busy: %s, labels: %s] and LXD " + "info [status: %s]" + ), + name, + getattr(remote_runner, "status", None), + getattr(remote_runner, "busy", None), + getattr(remote_runner, "labels", None), + getattr(local_runner, "status", None), + ) + + runner_id = getattr(remote_runner, "id", None) + running = local_runner is not None + online = getattr(remote_runner, "status", None) == "online" + busy = getattr(remote_runner, "busy", None) + + config = RunnerConfig(self.app_name, self.config.path, self.proxies, name) + return Runner( + self._clients, + config, + RunnerStatus(runner_id, running, online, busy), + local_runner, + ) + + remote_runners = self._get_runner_github_info() + local_runners = { + instance.name: instance + # Pylint cannot find the `all` method. 
+ for instance in self._clients.lxd.instances.all() # pylint: disable=no-member + if instance.name.startswith(f"{self.instance_name}-") + } + + runners: list[Runner] = [] + for name in set(local_runners.keys()) | set(remote_runners.keys()): + runners.append( + create_runner_info(name, local_runners.get(name), remote_runners.get(name)) + ) + + return runners + + def _get_github_registration_token(self) -> str: + """Get token from GitHub used for registering runners. + + Returns: + The registration token. + """ + token: RegistrationToken + if isinstance(self.config.path, GitHubRepo): + token = self._clients.github.actions.create_registration_token_for_repo( + owner=self.config.path.owner, repo=self.config.path.repo + ) + elif isinstance(self.config.path, GitHubOrg): + token = self._clients.github.actions.create_registration_token_for_org( + org=self.config.path.org + ) + else: + assert_never(token) + + return token["token"] + + def _get_github_remove_token(self) -> str: + """Get token from GitHub used for removing runners. + + Returns: + The removing token. + """ + token: RemoveToken + if isinstance(self.config.path, GitHubRepo): + token = self._clients.github.actions.create_remove_token_for_repo( + owner=self.config.path.owner, repo=self.config.path.repo + ) + elif isinstance(self.config.path, GitHubOrg): + token = self._clients.github.actions.create_remove_token_for_org( + org=self.config.path.org + ) + else: + assert_never(token) + + return token["token"] diff --git a/src/runner_type.py b/src/runner_type.py new file mode 100644 index 000000000..83530a5b2 --- /dev/null +++ b/src/runner_type.py @@ -0,0 +1,99 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 

"""Types used by both RunnerManager and Runner classes."""


from dataclasses import dataclass
from typing import NamedTuple, Optional, TypedDict, Union

import jinja2
from ghapi.all import GhApi

from lxd import LxdClient
from repo_policy_compliance_client import RepoPolicyComplianceClient


class ProxySetting(TypedDict, total=False):
    """Represent HTTP-related proxy settings.

    Attrs:
        no_proxy: Comma-separated list of hosts that should bypass the proxy.
        http: Proxy URL for HTTP traffic.
        https: Proxy URL for HTTPS traffic.
    """

    no_proxy: str
    http: str
    https: str


@dataclass
class GitHubRepo:
    """Represent GitHub repository.

    Attrs:
        owner: Owner of the repository.
        repo: Name of the repository.
    """

    owner: str
    repo: str

    def path(self) -> str:
        """Return a string representing the path."""
        return f"{self.owner}/{self.repo}"


@dataclass
class GitHubOrg:
    """Represent GitHub organization.

    Attrs:
        org: Name of the organization.
        group: Runner group within the organization.
    """

    org: str
    group: str

    def path(self) -> str:
        """Return a string representing the path."""
        # NOTE(review): `group` is not part of the path — presumably consumed elsewhere
        # for runner-group selection; confirm with callers.
        return self.org


GitHubPath = Union[GitHubOrg, GitHubRepo]


class VirtualMachineResources(NamedTuple):
    """Virtual machine resource configuration.

    Attrs:
        cpu: Number of virtual CPUs.
        memory: Amount of memory (string form, e.g. as accepted by LXD).
        disk: Amount of disk space (string form, e.g. as accepted by LXD).
    """

    cpu: int
    memory: str
    disk: str


@dataclass
class RunnerClients:
    """Clients for access various services.

    Attrs:
        github: Used to query GitHub API.
        jinja: Used for templating.
        lxd: Used to interact with LXD API.
        repo: Used to talk to the repo-policy-compliance service.
    """

    github: GhApi
    jinja: jinja2.Environment
    lxd: LxdClient
    repo: RepoPolicyComplianceClient


@dataclass
class RunnerConfig:
    """Configuration for runner.

    Attrs:
        app_name: Name of the charm application the runner belongs to.
        path: GitHub repository or organization the runner registers to.
        proxies: HTTP proxy settings for the runner.
        name: Name of the runner instance.
    """

    app_name: str
    path: GitHubPath
    proxies: ProxySetting
    name: str


@dataclass
class RunnerStatus:
    """Status of runner.

    Attrs:
        runner_id: ID of the runner on GitHub, if registered.
        exist: Whether the runner instance exists on LXD.
        online: Whether GitHub marks this runner as online.
        busy: Whether GitHub marks this runner as busy.
    """

    runner_id: Optional[int] = None
    exist: bool = False
    online: bool = False
    busy: bool = False
diff --git a/src/utilities.py b/src/utilities.py
new file mode 100644
index 000000000..8cc8c7b2d
--- /dev/null
+++ b/src/utilities.py
@@ -0,0 +1,183 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.

"""Utilities used by the charm."""

import functools
import logging
import os
import subprocess  # nosec B404
import time
from typing import Callable, Optional, Sequence, Type, TypeVar

from typing_extensions import ParamSpec

from errors import SubprocessError

logger = logging.getLogger(__name__)


# Parameters of the function decorated with retry
ParamT = ParamSpec("ParamT")
# Return type of the function decorated with retry
ReturnT = TypeVar("ReturnT")


# This decorator has default arguments, one extra argument is not a problem.
def retry(  # pylint: disable=too-many-arguments
    exception: Type[Exception] = Exception,
    tries: int = 1,
    delay: float = 0,
    max_delay: Optional[float] = None,
    backoff: float = 1,
    local_logger: logging.Logger = logger,
) -> Callable[[Callable[ParamT, ReturnT]], Callable[ParamT, ReturnT]]:
    """Parameterize the decorator for adding retry to functions.

    Args:
        exception: Exception type to be retried.
        tries: Number of attempts at retry.
        delay: Time in seconds to wait between retry.
        max_delay: Max time in seconds to wait between retry.
        backoff: Factor to increase the delay by each retry.
        local_logger: Logger used to report retries; defaults to the module logger.

    Returns:
        The function decorator for retry.
    """

    def retry_decorator(
        func: Callable[ParamT, ReturnT],
    ) -> Callable[ParamT, ReturnT]:
        """Decorate function with retry.

        Args:
            func: The function to decorate.

        Returns:
            The resulting function with retry added.
        """

        @functools.wraps(func)
        def fn_with_retry(*args, **kwargs) -> ReturnT:
            """Wrap the function with retries."""
            remain_tries, current_delay = tries, delay

            for _ in range(tries):
                try:
                    return func(*args, **kwargs)
                # Error caught is set by the input of the function.
                except exception as err:  # pylint: disable=broad-exception-caught
                    remain_tries -= 1

                    # Out of attempts: log and re-raise the last error.
                    if remain_tries == 0:
                        if local_logger is not None:
                            local_logger.exception("Retry limit of %s exceed: %s", tries, err)
                        raise

                    if local_logger is not None:
                        local_logger.warning(
                            "Retrying error in %s seconds: %s", current_delay, err
                        )
                        local_logger.debug("Error to be retried:", stack_info=True)

                    time.sleep(current_delay)

                    # Exponential backoff, capped at max_delay when given.
                    current_delay *= backoff

                    if max_delay is not None:
                        current_delay = min(current_delay, max_delay)

            # Defensive: the loop above always returns or re-raises.
            raise RuntimeError("Unreachable code of retry logic.")

        return fn_with_retry

    return retry_decorator


def secure_run_subprocess(cmd: Sequence[str], **kwargs) -> subprocess.CompletedProcess[bytes]:
    """Run command in subprocess according to security recommendations.

    The argument `shell` is set to `False` for security reasons.

    The argument `check` is set to `False`, therefore, CalledProcessError will not be raised.
    Errors are handled by the caller by checking the exit code.

    Args:
        cmd: Command in a list.
        kwargs: Additional keyword arguments for the `subprocess.run` call.

    Returns:
        Object representing the completed process. The outputs of the subprocess can be
        accessed from it.
    """
    logger.info("Executing command %s", cmd)
    result = subprocess.run(  # nosec B603
        cmd,
        capture_output=True,
        shell=False,
        check=False,
        # Disable type check due to the support for unpacking arguments in mypy is experimental.
        **kwargs,  # type: ignore
    )
    logger.debug("Command %s returns: %s", cmd, result.stdout)
    return result


def execute_command(cmd: Sequence[str], check_exit: bool = True, **kwargs) -> str:
    """Execute a command on a subprocess.

    The command is executed with `subprocess.run`, additional arguments can be passed to it as
    keyword arguments. The following arguments to `subprocess.run` should not be set:
    `capture_output`, `shell`, `check`. As those arguments are used by this function.

    Args:
        cmd: Command in a list.
        check_exit: Whether to check for non-zero exit code and raise exceptions.
        kwargs: Additional keyword arguments for the `subprocess.run` call.

    Returns:
        Output on stdout.

    Raises:
        SubprocessError: If `check_exit` is set and the command exits non-zero.
    """
    result = secure_run_subprocess(cmd, **kwargs)

    if check_exit:
        try:
            result.check_returncode()
        except subprocess.CalledProcessError as err:
            logger.error(
                "Command %s failed with code %i: %s",
                " ".join(cmd),
                err.returncode,
                err.stderr,
            )

            raise SubprocessError(cmd, err.returncode, err.stdout, err.stderr) from err

    # NOTE(review): `result.stdout` is bytes (capture_output without text mode), so this
    # yields the "b'...'" repr rather than decoded text — confirm callers expect this.
    return str(result.stdout)


def get_env_var(env_var: str) -> Optional[str]:
    """Get the environment variable value.

    Looks for all upper-case and all lower-case of the `env_var`.

    Args:
        env_var: Name of the environment variable.

    Returns:
        Value of the environment variable. None if not found.
    """
    return os.environ.get(env_var.upper(), os.environ.get(env_var.lower(), None))


def set_env_var(env_var: str, value: str) -> None:
    """Set the environment variable value.

    Set the all upper case and all low case of the `env_var`.

    Args:
        env_var: Name of the environment variable.
        value: Value to set environment variable to.
+ """ + os.environ[env_var.upper()] = value + os.environ[env_var.lower()] = value diff --git a/templates/dispatch-event.service.j2 b/templates/dispatch-event.service.j2 new file mode 100644 index 000000000..312c0c648 --- /dev/null +++ b/templates/dispatch-event.service.j2 @@ -0,0 +1,9 @@ +[Unit] +Description=Dispatch the {{event}} event on {{unit}} + +[Service] +Type=oneshot +ExecStart=/usr/bin/juju-run "{{unit}}" "JUJU_DISPATCH_PATH={{event}} timeout {{timeout}} ./dispatch" + +[Install] +WantedBy=multi-user.target diff --git a/templates/dispatch-event.timer.j2 b/templates/dispatch-event.timer.j2 new file mode 100644 index 000000000..4a1402f75 --- /dev/null +++ b/templates/dispatch-event.timer.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=Timer to dispatch {{event}} event periodically +Requires=ghro.{{event}}.service + +[Timer] +Unit=ghro.{{event}}.service +OnUnitInactiveSec={{interval}}m +RandomizedDelaySec={{jitter}}m + +[Install] +WantedBy=timers.target diff --git a/templates/env.j2 b/templates/env.j2 new file mode 100644 index 000000000..8e99da4b4 --- /dev/null +++ b/templates/env.j2 @@ -0,0 +1,9 @@ +PATH=/home/ubuntu/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin +HTTP_PROXY={{proxies['http']}} +HTTPS_PROXY={{proxies['https']}} +NO_PROXY={{proxies['no_proxy']}} +http_proxy={{proxies['http']}} +https_proxy={{proxies['https']}} +no_proxy={{proxies['no_proxy']}} +LANG=C.UTF-8 +ACTIONS_RUNNER_HOOK_JOB_STARTED={{pre_job_script}} diff --git a/templates/environment.j2 b/templates/environment.j2 new file mode 100644 index 000000000..b0b6494e7 --- /dev/null +++ b/templates/environment.j2 @@ -0,0 +1,7 @@ +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin +HTTP_PROXY={{proxies['http']}} +HTTPS_PROXY={{proxies['https']}} +NO_PROXY={{proxies['no_proxy']}} +http_proxy={{proxies['http']}} +https_proxy={{proxies['https']}} +no_proxy={{proxies['no_proxy']}} diff --git a/templates/pre-job.j2 b/templates/pre-job.j2 new file 
mode 100644 index 000000000..968a77dbb --- /dev/null +++ b/templates/pre-job.j2 @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +GITHUB_SOURCE_REPOSITORY=$(cat "${GITHUB_EVENT_PATH}" | jq -r '.pull_request.head.repo.full_name') + +# Request repo-policy-compliance service check. +curl --noproxy '*' \ + --fail-with-body \ + -H 'Authorization: Bearer {{one_time_token}}' \ + -H 'Content-Type: application/json' \ + -d "{\"repository_name\": \"${GITHUB_REPOSITORY}\", \"source_repository_name\": \"${GITHUB_SOURCE_REPOSITORY}\", \"target_branch_name\": \"${GITHUB_BASE_REF}\", \"source_branch_name\": \"${GITHUB_HEAD_REF}\", \"commit_sha\": \"${GITHUB_SHA}\"}" \ + http://{{host_ip}}:8080/check-run diff --git a/templates/repo-policy-compliance.service.j2 b/templates/repo-policy-compliance.service.j2 new file mode 100644 index 000000000..afde32b2e --- /dev/null +++ b/templates/repo-policy-compliance.service.j2 @@ -0,0 +1,17 @@ +[Unit] +Description=Gunicorn instance to serve repo policy compliance endpoints +After=network.target + +[Service] +User=ubuntu +Group=www-data +WorkingDirectory={{working_directory}} +Environment="GITHUB_TOKEN={{github_token}}" +Environment="CHARM_TOKEN={{charm_token}}" +Environment="HTTP_PROXY={{proxies['http']}}" +Environment="HTTPS_PROXY={{proxies['https']}}" +Environment="NO_PROXY={{proxies['no_proxy']}}" +Environment="http_proxy={{proxies['http']}}" +Environment="https_proxy={{proxies['https']}}" +Environment="no_proxy={{proxies['no_proxy']}}" +ExecStart=/usr/bin/gunicorn --bind 0.0.0.0:8080 app:app diff --git a/templates/start.j2 b/templates/start.j2 new file mode 100644 index 000000000..7d5e4bdf5 --- /dev/null +++ b/templates/start.j2 @@ -0,0 +1,3 @@ +#!/bin/bash + +(/opt/github-runner/run.sh; sudo systemctl halt -i) &>/dev/null & diff --git a/templates/systemd-docker-proxy.j2 b/templates/systemd-docker-proxy.j2 new file mode 100644 index 000000000..7aa0b2604 --- /dev/null +++ b/templates/systemd-docker-proxy.j2 @@ -0,0 +1,4 @@ +[Service] 
Environment="HTTP_PROXY={{proxies['http']}}"
Environment="HTTPS_PROXY={{proxies['https']}}"
Environment="NO_PROXY={{proxies['no_proxy']}}"
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 000000000..db3bfe1a6
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,2 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
new file mode 100644
index 000000000..a74fcf8e5
--- /dev/null
+++ b/tests/integration/conftest.py
@@ -0,0 +1,32 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.

from pathlib import Path

import pytest
import yaml


@pytest.fixture
def metadata():
    """Load the charm metadata.yaml from the current directory as a dict."""
    metadata = Path("./metadata.yaml")
    data = yaml.safe_load(metadata.read_text())
    return data


@pytest.fixture
def model(ops_test):
    """Return the Juju model under test."""
    return ops_test.model


@pytest.fixture
def application(model, metadata):
    """Return the deployed application named after the charm in metadata."""
    charm_name = metadata["name"]
    app = model.applications[charm_name]
    return app


@pytest.fixture
def units(application):
    """Return the units of the application under test."""
    units = application.units
    return units
diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py
new file mode 100644
index 000000000..ad6f7b6a0
--- /dev/null
+++ b/tests/integration/test_charm.py
@@ -0,0 +1,26 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.

import logging

import pytest

log = logging.getLogger(__name__)


async def file_contents(unit, path):
    """Return the contents of a file on the given unit via `cat`."""
    cmd = "cat {}".format(path)
    action = await unit.run(cmd)
    return action.results["Stdout"]


@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test):
    """Build the charm, deploy it, and wait for the model to become idle."""
    my_charm = await ops_test.build_charm(".")
    await ops_test.model.deploy(my_charm)
    await ops_test.model.wait_for_idle()


async def test_status(units):
    """Without token/path config the unit should be blocked with a clear message."""
    assert units[0].workload_status == "blocked"
    assert units[0].workload_status_message == "Missing token or org/repo path config"
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 000000000..db3bfe1a6
--- /dev/null
+++ b/tests/unit/__init__.py
@@ -0,0 +1,2 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
new file mode 100644
index 000000000..20bcefc73
--- /dev/null
+++ b/tests/unit/conftest.py
@@ -0,0 +1,30 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.

import unittest.mock
from pathlib import Path

import pytest

from tests.unit.mock import MockGhapiClient, MockLxdClient, MockRepoPolicyComplianceClient


@pytest.fixture(autouse=True)
def mocks(monkeypatch, tmp_path):
    """Replace external dependencies (filesystem paths, LXD, GitHub API, timers) with mocks."""
    monkeypatch.setattr(
        "charm.GithubRunnerCharm.service_token_path", Path(tmp_path / "mock_service_token")
    )
    monkeypatch.setattr(
        "charm.GithubRunnerCharm.repo_check_systemd_service", Path(tmp_path / "systemd_service")
    )
    monkeypatch.setattr("charm.os", unittest.mock.MagicMock())
    monkeypatch.setattr("charm.shutil", unittest.mock.MagicMock())
    monkeypatch.setattr("charm.jinja2", unittest.mock.MagicMock())
    monkeypatch.setattr("runner.time", unittest.mock.MagicMock())
    monkeypatch.setattr("runner_manager.GhApi", MockGhapiClient)
    monkeypatch.setattr("runner_manager.jinja2", unittest.mock.MagicMock())
    monkeypatch.setattr("runner_manager.LxdClient", MockLxdClient)
    monkeypatch.setattr(
        "runner_manager.RepoPolicyComplianceClient", MockRepoPolicyComplianceClient
    )
    monkeypatch.setattr("utilities.time", unittest.mock.MagicMock())
diff --git a/tests/unit/mock.py b/tests/unit/mock.py
new file mode 100644
index 000000000..1238c69e8
--- /dev/null
+++ b/tests/unit/mock.py
@@ -0,0 +1,224 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.

"""Mock for testing."""

from __future__ import annotations

import hashlib
import logging
import secrets
from typing import Optional, Sequence, Union

from errors import LxdError, RunnerError
from github_type import RegistrationToken, RemoveToken, RunnerApplication
from lxd_type import LxdNetwork
from runner import LxdInstanceConfig

logger = logging.getLogger(__name__)

# Compressed tar file for testing.
# Python `tarfile` module works on only files.
# Hardcoding a sample tar file is simpler.
+TEST_BINARY = ( + b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\xed\xd1\xb1\t\xc30\x14\x04P\xd5\x99B\x13\x04\xc9" + b"\xb6\xacyRx\x01[\x86\x8c\x1f\x05\x12HeHaB\xe0\xbd\xe6\x8a\x7f\xc5\xc1o\xcb\xd6\xae\xed\xde" + b"\xc2\x89R7\xcf\xd33s-\xe93_J\xc8\xd3X{\xa9\x96\xa1\xf7r\x1e\x87\x1ab:s\xd4\xdb\xbe\xb5\xdb" + b"\x1ac\xcfe=\xee\x1d\xdf\xffT\xeb\xff\xbf\xfcz\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00_{\x00" + b"\xc4\x07\x85\xe8\x00(\x00\x00" +) + + +class MockLxdClient: + """Mock the behavior of the lxd client.""" + + def __init__(self): + self.instances = MockLxdInstanceManager() + self.profiles = MockLxdProfileManager() + self.networks = MockLxdNetworkManager() + + +class MockLxdInstanceManager: + """Mock the behavior of the lxd Instances.""" + + def __init__(self): + self.instances = {} + + def create(self, config: LxdInstanceConfig, wait: bool = False) -> MockLxdInstance: + self.instances[config["name"]] = MockLxdInstance(config["name"]) + return self.instances[config["name"]] + + def get(self, name: str): + return self.instances[name] + + def all(self): + return [i for i in self.instances.values() if not i.deleted] + + +class MockLxdProfileManager: + """Mock the behavior of the lxd Profiles.""" + + def __init__(self): + self.profiles = set() + + def create(self, name: str, config: dict[str, str], devices: dict[str, str]): + self.profiles.add(name) + + def exists(self, name) -> bool: + return name in self.profiles + + +class MockLxdNetworkManager: + """Mock the behavior of the lxd networks""" + + def __init__(self): + pass + + def get(self, name: str) -> LxdNetwork: + return LxdNetwork( + "lxdbr0", "", "bridge", {"ipv4.address": "10.1.1.1/24"}, True, ("default") + ) + + +class MockLxdInstance: + """Mock the behavior of a lxd Instance.""" + + def __init__(self, name: str): + self.name = name + self.status = "Stopped" + self.deleted = False + + self.files = MockLxdInstanceFileManager() + + def start(self, wait: bool = True, timeout: int = 60): + self.status = "Running" + + 
def stop(self, wait: bool = True, timeout: int = 60): + self.status = "Stopped" + # Ephemeral virtual machine should be deleted on stop. + self.deleted = True + + def delete(self, wait: bool = True): + self.deleted = True + + def execute(self, cmd: Sequence[str], cwd: Optional[str] = None) -> tuple[int, str, str]: + return 0, "", "" + + +class MockLxdInstanceFileManager: + """Mock the behavior of a lxd Instance files.""" + + def __init__(self): + self.files = {} + + def mk_dir(self, path): + pass + + def push_file(self, source: str, destination: str, mode: Optional[str] = None): + self.files[destination] = "mock_content" + + def write_file(self, filepath: str, data: Union[bytes, str], mode: Optional[str] = None): + self.files[filepath] = data + + def read_file(self, filepath: str): + return self.files.get(str(filepath), None) + + +class MockErrorResponse: + """Mock of an error response for request library.""" + + def __init__(self): + self.status_code = 200 + + def json(self): + return {"metadata": {"err": "test error"}} + + +def mock_lxd_error_func(*arg, **kargs): + raise LxdError(MockErrorResponse()) + + +def mock_runner_error_func(*arg, **kargs): + raise RunnerError("test error") + + +class MockGhapiClient: + """Mock for Ghapi client.""" + + def __init__(self, token: str): + self.token = token + self.actions = MockGhapiActions() + + +class MockGhapiActions: + """Mock for actions in Ghapi client.""" + + def __init__(self): + hash = hashlib.sha256() + hash.update(TEST_BINARY) + self.test_hash = hash.hexdigest() + self.registration_token_repo = secrets.token_hex() + self.registration_token_org = secrets.token_hex() + self.remove_token_repo = secrets.token_hex() + self.remove_token_org = secrets.token_hex() + + def _list_runner_applications(self): + runners = [] + runners.append( + RunnerApplication( + os="linux", + architecture="x64", + download_url="https://www.example.com", + filename="test_runner_binary", + sha256_checksum=self.test_hash, + ) + ) + return 
runners + + def list_runner_applications_for_repo(self, owner: str, repo: str): + return self._list_runner_applications() + + def list_runner_applications_for_org(self, org: str): + return self._list_runner_applications() + + def create_registration_token_for_repo(self, owner: str, repo: str): + return RegistrationToken( + {"token": self.registration_token_repo, "expires_at": "2020-01-22T12:13:35.123-08:00"} + ) + + def create_registration_token_for_org(self, org: str): + return RegistrationToken( + {"token": self.registration_token_org, "expires_at": "2020-01-22T12:13:35.123-08:00"} + ) + + def create_remove_token_for_repo(self, owner: str, repo: str): + return RemoveToken( + {"token": self.remove_token_repo, "expires_at": "2020-01-22T12:13:35.123-08:00"} + ) + + def create_remove_token_for_org(self, org: str): + return RemoveToken( + {"token": self.remove_token_org, "expires_at": "2020-01-22T12:13:35.123-08:00"} + ) + + def list_self_hosted_runners_for_repo(self, owner: str, repo: str): + return {"runners": []} + + def list_self_hosted_runners_for_org(self, org: str): + return {"runners": []} + + def delete_self_hosted_runner_from_repo(self, owner: str, repo: str, runner_id: str): + pass + + def delete_self_hosted_runner_from_org(self, org: str, runner_id: str): + pass + + +class MockRepoPolicyComplianceClient: + """Mock for RepoPolicyComplianceClient.""" + + def __init__(self, session=None, url=None, charm_token=None): + pass + + def get_one_time_token(self) -> str: + return "MOCK_TOKEN_" + secrets.token_hex(8) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py new file mode 100644 index 000000000..72e4f00af --- /dev/null +++ b/tests/unit/test_charm.py @@ -0,0 +1,334 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Test cases for GithubRunnerCharm.""" + +import os +import unittest +import urllib.error +from unittest.mock import MagicMock, call, patch + +from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus +from ops.testing import Harness + +from charm import GithubRunnerCharm +from errors import RunnerError, SubprocessError +from github_type import GitHubRunnerStatus +from runner_manager import RunnerInfo, RunnerManagerConfig +from runner_type import GitHubOrg, GitHubRepo, VirtualMachineResources + + +def raise_error(*args, **kargs): + raise Exception("mock error") + + +def raise_runner_error(*args, **kargs): + raise RunnerError("mock error") + + +def raise_subprocess_error(*args, **kargs): + raise SubprocessError(cmd=["mock"], return_code=1, stdout="mock stdout", stderr="mock stderr") + + +def raise_url_error(*args, **kargs): + raise urllib.error.URLError("mock error") + + +def mock_get_latest_runner_bin_url(): + mock = MagicMock() + mock.download_url = "www.example.com" + return mock + + +def mock_get_github_info(): + return [ + RunnerInfo("test runner 0", GitHubRunnerStatus.ONLINE.value), + RunnerInfo("test runner 1", GitHubRunnerStatus.ONLINE.value), + RunnerInfo("test runner 2", GitHubRunnerStatus.OFFLINE.value), + RunnerInfo("test runner 3", GitHubRunnerStatus.OFFLINE.value), + RunnerInfo("test runner 4", "unknown"), + ] + + +class TestCharm(unittest.TestCase): + """Test cases for GithubRunnerCharm.""" + + @patch.dict( + os.environ, + { + "JUJU_CHARM_HTTPS_PROXY": "mock_https_proxy", + "JUJU_CHARM_HTTP_PROXY": "mock_http_proxy", + "JUJU_CHARM_NO_PROXY": "mock_no_proxy", + }, + ) + def test_proxy_setting(self): + harness = Harness(GithubRunnerCharm) + harness.begin() + + assert harness.charm.proxies["https"] == "mock_https_proxy" + assert harness.charm.proxies["http"] == "mock_http_proxy" + assert harness.charm.proxies["no_proxy"] == "mock_no_proxy" + + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_install(self, run, wt): + 
harness = Harness(GithubRunnerCharm) + harness.begin() + harness.charm.on.install.emit() + calls = [ + call( + ["/usr/bin/snap", "install", "lxd", "--channel=latest/stable"], + capture_output=True, + shell=False, + check=False, + ), + call( + ["/snap/bin/lxd", "init", "--auto"], capture_output=True, shell=False, check=False + ), + ] + run.assert_has_calls(calls, any_order=True) + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_org_register(self, run, wt, rm): + harness = Harness(GithubRunnerCharm) + harness.update_config( + { + "path": "mockorg", + "token": "mocktoken", + "group": "mockgroup", + "reconcile-interval": 5, + } + ) + harness.begin() + harness.charm.on.config_changed.emit() + token = harness.charm.service_token + rm.assert_called_with( + "github-runner", + "0", + RunnerManagerConfig( + path=GitHubOrg(org="mockorg", group="mockgroup"), + token="mocktoken", + image="jammy", + service_token=token, + ), + proxies={}, + ) + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_repo_register(self, run, wt, rm): + harness = Harness(GithubRunnerCharm) + harness.update_config( + {"path": "mockorg/repo", "token": "mocktoken", "reconcile-interval": 5} + ) + harness.begin() + harness.charm.on.config_changed.emit() + token = harness.charm.service_token + rm.assert_called_with( + "github-runner", + "0", + RunnerManagerConfig( + path=GitHubRepo(owner="mockorg", repo="repo"), + token="mocktoken", + image="jammy", + service_token=token, + ), + proxies={}, + ) + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_update_config(self, run, wt, rm): + rm.return_value = mock_rm = MagicMock() + harness = Harness(GithubRunnerCharm) + harness.update_config({"path": "mockorg/repo", "token": "mocktoken"}) + harness.begin() + + # update to 0 virtual machines + harness.update_config({"virtual-machines": 0}) + 
harness.charm.on.reconcile_runners.emit() + token = harness.charm.service_token + rm.assert_called_with( + "github-runner", + "0", + RunnerManagerConfig( + path=GitHubRepo(owner="mockorg", repo="repo"), + token="mocktoken", + image="jammy", + service_token=token, + ), + proxies={}, + ) + mock_rm.reconcile.assert_called_with(0, VirtualMachineResources(2, "7GiB", "10GiB")), + mock_rm.reset_mock() + + # update to 10 VMs with 4 cpu and 7GiB memory + harness.update_config({"virtual-machines": 10, "vm-cpu": 4}) + harness.charm.on.reconcile_runners.emit() + token = harness.charm.service_token + rm.assert_called_with( + "github-runner", + "0", + RunnerManagerConfig( + path=GitHubRepo(owner="mockorg", repo="repo"), + token="mocktoken", + image="jammy", + service_token=token, + ), + proxies={}, + ) + mock_rm.reconcile.assert_called_with( + 10, VirtualMachineResources(cpu=4, memory="7GiB", disk="10GiB") + ) + mock_rm.reset_mock() + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_on_stop(self, run, wt, rm): + rm.return_value = mock_rm = MagicMock() + harness = Harness(GithubRunnerCharm) + harness.update_config({"path": "mockorg/repo", "token": "mocktoken"}) + harness.begin() + harness.charm.on.stop.emit() + mock_rm.flush.assert_called() + + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_get_runner_manager(self, run, wt): + harness = Harness(GithubRunnerCharm) + harness.begin() + + # Get runner manager via input. + assert harness.charm._get_runner_manager("mocktoken", "mockorg/repo") is not None + + assert harness.charm._get_runner_manager() is None + + # Get runner manager via config. + harness.update_config({"path": "mockorg/repo", "token": "mocktoken"}) + assert harness.charm._get_runner_manager() is not None + + # With invalid path. 
+ assert harness.charm._get_runner_manager("mocktoken", "mock/invalid/path") is None + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_on_install_failure(self, run, wt, rm): + """Test various error thrown during install.""" + + rm.return_value = mock_rm = MagicMock() + mock_rm.get_latest_runner_bin_url = mock_get_latest_runner_bin_url + + harness = Harness(GithubRunnerCharm) + harness.update_config({"path": "mockorg/repo", "token": "mocktoken"}) + harness.begin() + + # Base case: no error thrown. + harness.charm.on.install.emit() + assert harness.charm.unit.status == ActiveStatus() + + harness.charm._reconcile_runners = raise_runner_error + harness.charm.on.install.emit() + assert harness.charm.unit.status == MaintenanceStatus( + "Failed to start runners: mock error" + ) + + harness.charm._reconcile_runners = raise_error + harness.charm.on.install.emit() + assert harness.charm.unit.status == BlockedStatus("mock error") + + mock_rm.update_runner_bin = raise_error + harness.charm.on.install.emit() + assert harness.charm.unit.status == MaintenanceStatus( + "Failed to update runner binary: mock error" + ) + + GithubRunnerCharm._install_deps = raise_subprocess_error + harness.charm.on.install.emit() + assert harness.charm.unit.status == BlockedStatus("Failed to install dependencies") + + GithubRunnerCharm._install_deps = raise_error + harness.charm.on.install.emit() + assert harness.charm.unit.status == BlockedStatus("mock error") + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_on_update_runner_bin(self, run, wt, rm): + rm.return_value = mock_rm = MagicMock() + mock_rm.get_latest_runner_bin_url = mock_get_latest_runner_bin_url + + harness = Harness(GithubRunnerCharm) + harness.update_config({"path": "mockorg/repo", "token": "mocktoken"}) + harness.begin() + + harness.charm.on.update_runner_bin.emit() + + mock_rm.get_latest_runner_bin_url = raise_error + 
harness.charm.on.update_runner_bin.emit() + assert harness.charm.unit.status == BlockedStatus("mock error") + + mock_rm.get_latest_runner_bin_url = raise_url_error + harness.charm.on.update_runner_bin.emit() + assert harness.charm.unit.status == MaintenanceStatus( + "Failed to check for runner updates: " + ) + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_check_runners_action(self, run, wt, rm): + rm.return_value = mock_rm = MagicMock() + mock_event = MagicMock() + + mock_rm.get_github_info = mock_get_github_info + + harness = Harness(GithubRunnerCharm) + harness.update_config({"path": "mockorg/repo", "token": "mocktoken"}) + harness.begin() + + harness.charm._on_check_runners_action(mock_event) + mock_event.set_results.assert_called_with( + {"online": 2, "offline": 2, "unknown": 1, "runners": "test runner 0, test runner 1"} + ) + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_check_runners_action_with_errors(self, run, wt, rm): + mock_event = MagicMock() + + harness = Harness(GithubRunnerCharm) + harness.begin() + + # No config + harness.charm._on_check_runners_action(mock_event) + mock_event.fail.assert_called_with("Missing token or org/repo path config") + + @patch("charm.RunnerManager") + @patch("pathlib.Path.write_text") + @patch("subprocess.run") + def test_on_flush_runners_action(self, run, wt, rm): + mock_event = MagicMock() + + harness = Harness(GithubRunnerCharm) + harness.begin() + + harness.charm._on_flush_runners_action(mock_event) + mock_event.fail.assert_called_with("Missing token or org/repo path config") + mock_event.reset_mock() + + harness.update_config({"path": "mockorg/repo", "token": "mocktoken"}) + harness.charm._on_flush_runners_action(mock_event) + mock_event.set_results.assert_called() + mock_event.reset_mock() + + harness.charm._reconcile_runners = raise_error + harness.charm._on_flush_runners_action(mock_event) + 
mock_event.fail.assert_called() + mock_event.reset_mock() diff --git a/tests/unit/test_runner.py b/tests/unit/test_runner.py new file mode 100644 index 000000000..32e89811a --- /dev/null +++ b/tests/unit/test_runner.py @@ -0,0 +1,201 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Test cases of Runner class.""" + +import secrets +import unittest +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +from errors import RunnerCreateError +from runner import Runner, RunnerClients, RunnerConfig, RunnerStatus +from runner_type import GitHubOrg, GitHubRepo, VirtualMachineResources +from tests.unit.mock import ( + MockLxdClient, + MockRepoPolicyComplianceClient, + mock_lxd_error_func, + mock_runner_error_func, +) + + +@pytest.fixture(scope="module", name="vm_resources") +def vm_resources_fixture(): + return VirtualMachineResources(2, "7Gib", "10Gib") + + +@pytest.fixture(scope="function", name="token") +def token_fixture(): + return secrets.token_hex() + + +@pytest.fixture(scope="function", name="binary_path") +def binary_path_fixture(tmp_path: Path): + return tmp_path / "test_binary" + + +@pytest.fixture(scope="module", name="instance", params=["Running", "Stopped", None]) +def instance_fixture(request): + if request.param[0] is None: + return None + + attrs = {"status": request.param[0], "execute.return_value": (0, "", "")} + instance = unittest.mock.MagicMock(**attrs) + return instance + + +@pytest.fixture(scope="function", name="lxd") +def mock_lxd_client_fixture(): + return MockLxdClient() + + +@pytest.fixture( + scope="function", + name="runner", + params=[ + (GitHubOrg("test_org", "test_group"), {}), + ( + GitHubRepo("test_owner", "test_repo"), + {"no_proxy": "test_no_proxy", "http": "test_http", "https": "test_https"}, + ), + ], +) +def runner_fixture(request, lxd: MockLxdClient): + client = RunnerClients( + MagicMock(), + MagicMock(), + lxd, + MockRepoPolicyComplianceClient(), + ) + config = 
RunnerConfig("test_app", request.param[0], request.param[1], "test_runner") + status = RunnerStatus() + return Runner( + client, + config, + status, + ) + + +def test_create( + runner: Runner, + vm_resources: VirtualMachineResources, + token: str, + binary_path: Path, + lxd: MockLxdClient, +): + """ + arrange: Nothing. + act: Create a runner. + assert: An lxd instance for the runner is created. + """ + + runner.create("test_image", vm_resources, binary_path, token) + + instances = lxd.instances.all() + assert len(instances) == 1 + + if runner.config.proxies: + instance = instances[0] + env_proxy = instance.files.read_file("/opt/github-runner/.env") + systemd_docker_proxy = instance.files.read_file( + "/etc/systemd/system/docker.service.d/http-proxy.conf" + ) + # Test the file has being written to. This value does not contain the string as the + # jinja2.environment.Environment is mocked with MagicMock. + assert env_proxy is not None + assert systemd_docker_proxy is not None + + +def test_create_lxd_fail( + runner: Runner, + vm_resources: VirtualMachineResources, + token: str, + binary_path: Path, + lxd: MockLxdClient, +): + """ + arrange: Setup the create runner to fail with lxd error. + act: Create a runner. + assert: Correct exception should be thrown. Any created instance should be + cleanup. + """ + lxd.profiles.exists = mock_lxd_error_func + + with pytest.raises(RunnerCreateError): + runner.create("test_image", vm_resources, binary_path, token) + + assert len(lxd.instances.all()) == 0 + + +def test_create_runner_fail( + runner: Runner, + vm_resources: VirtualMachineResources, + token: str, + binary_path: Path, + lxd: MockLxdClient, +): + """ + arrange: Setup the create runner to fail with runner error. + act: Create a runner. + assert: Correct exception should be thrown. Any created instance should be + cleanup. 
+ """ + runner._clients.lxd.instances.create = mock_runner_error_func + + with pytest.raises(RunnerCreateError): + runner.create("test_image", vm_resources, binary_path, token) + + +def test_remove( + runner: Runner, + vm_resources: VirtualMachineResources, + token: str, + binary_path: Path, + lxd: MockLxdClient, +): + """ + arrange: Create a runner. + act: Remove the runner. + assert: The lxd instance for the runner is removed. + """ + + runner.create("test_image", vm_resources, binary_path, token) + runner.remove("test_token") + assert len(lxd.instances.all()) == 0 + + +def test_remove_failed_instance( + runner: Runner, + vm_resources: VirtualMachineResources, + token: str, + binary_path: Path, + lxd: MockLxdClient, +): + """ + arrange: Create a stopped runner that failed to remove itself. + act: Remove the runner. + assert: The lxd instance for the runner is removed. + """ + # Cases where the ephemeral instance encountered errors and the status was Stopped but not + # removed was found before. + runner.create("test_image", vm_resources, binary_path, token) + runner.instance.status = "Stopped" + runner.remove("test_token") + assert len(lxd.instances.all()) == 0 + + +def test_remove_none( + runner: Runner, + token: str, + lxd: MockLxdClient, +): + """ + arrange: Not creating a runner. + act: Remove the runner. + assert: The lxd instance for the runner is removed. + """ + + runner.remove(token) + assert len(lxd.instances.all()) == 0 diff --git a/tests/unit/test_runner_manager.py b/tests/unit/test_runner_manager.py new file mode 100644 index 000000000..31fac9d89 --- /dev/null +++ b/tests/unit/test_runner_manager.py @@ -0,0 +1,174 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Test cases of RunnerManager class.""" + +import secrets +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +from errors import RunnerBinaryError +from runner import Runner, RunnerStatus +from runner_manager import RunnerManager, RunnerManagerConfig +from runner_type import GitHubOrg, GitHubRepo, VirtualMachineResources +from tests.unit.mock import TEST_BINARY + + +@pytest.fixture(scope="function", name="token") +def token_fixture(): + return secrets.token_hex() + + +@pytest.fixture( + scope="function", + name="runner_manager", + params=[ + (GitHubOrg("test_org", "test_group"), {}), + ( + GitHubRepo("test_owner", "test_repo"), + {"no_proxy": "test_no_proxy", "http": "test_http", "https": "test_https"}, + ), + ], +) +def runner_manager_fixture(request, tmp_path, monkeypatch, token): + monkeypatch.setattr( + "runner_manager.RunnerManager.runner_bin_path", Path(tmp_path / "mock_runner_binary") + ) + runner_manager = RunnerManager( + "test app", + "0", + RunnerManagerConfig(request.param[0], token, "jammy", secrets.token_hex(16)), + proxies=request.param[1], + ) + return runner_manager + + +def test_get_latest_runner_bin_url(runner_manager: RunnerManager): + """ + arrange: Nothing. + act: Get runner bin url of existing binary. + assert: Correct mock data returned. + """ + runner_bin = runner_manager.get_latest_runner_bin_url(os_name="linux", arch_name="x64") + assert runner_bin["os"] == "linux" + assert runner_bin["architecture"] == "x64" + assert runner_bin["download_url"] == "https://www.example.com" + assert runner_bin["filename"] == "test_runner_binary" + + +def test_get_latest_runner_bin_url_missing_binary(runner_manager: RunnerManager): + """ + arrange: Nothing. + act: Get runner bin url of non-existing binary. + assert: Error related to runner bin raised. 
+ """ + with pytest.raises(RunnerBinaryError): + runner_manager.get_latest_runner_bin_url(os_name="not_exist", arch_name="not_exist") + + +def test_update_runner_bin(runner_manager: RunnerManager): + """ + arrange: Nothing. + act: Update runner binary. + assert: Runner binary in runner manager is set. + """ + + class MockRequestLibResponse: + def __init__(self, *arg, **kargs): + self.status_code = 200 + + def iter_content(self, *arg, **kargs): + return iter([TEST_BINARY]) + + runner_manager.session.get = MockRequestLibResponse + # Remove the fake binary in fixture. + runner_manager.runner_bin_path = None + runner_bin = runner_manager.get_latest_runner_bin_url(os_name="linux", arch_name="x64") + + runner_manager.update_runner_bin(runner_bin) + + +def test_reconcile_zero_count(runner_manager: RunnerManager): + """ + arrange: Nothing. + act: Reconcile with the current amount of runner. + assert: No error should be raised. + """ + # Reconcile with no change to runner count. + delta = runner_manager.reconcile(0, VirtualMachineResources(2, "7GiB", "10Gib")) + + assert delta == 0 + + +def test_reconcile_create_runner(runner_manager: RunnerManager): + """ + arrange: Nothing. + act: Reconcile to create a runner. + assert: One runner should be created. + """ + # Create a runner. + delta = runner_manager.reconcile(1, VirtualMachineResources(2, "7GiB", "10Gib")) + + assert delta == 1 + + +def test_reconcile_remove_runner(runner_manager: RunnerManager): + """ + arrange: Create online runners. + act: Reconcile to remove a runner. + assert: One runner should be removed. + """ + + def mock_get_runners(): + """Create three mock runners.""" + runners = [] + for _ in range(3): + # 0 is a mock runner id. + status = RunnerStatus(0, True, True, False) + runners.append(Runner(MagicMock(), MagicMock(), status, None)) + return runners + + # Create online runners. 
+ runner_manager._get_runners = mock_get_runners + + delta = runner_manager.reconcile(2, VirtualMachineResources(2, "7GiB", "10Gib")) + + assert delta == -1 + + +def test_reconcile(runner_manager: RunnerManager, tmp_path: Path): + """ + arrange: Setup one runner. + act: Reconcile with the current amount of runner. + assert: Still have one runner. + """ + runner_manager.reconcile(1, VirtualMachineResources(2, "7GiB", "10Gib")) + # Reconcile with no change to runner count. + runner_manager.reconcile(1, VirtualMachineResources(2, "7GiB", "10Gib")) + + assert len(runner_manager._get_runners()) == 1 + + +def test_empty_flush(runner_manager: RunnerManager): + """ + arrange: No initial runners. + act: Perform flushing with no runners. + assert: No error thrown. + """ + # Verifying the RunnerManager does not crash if flushing with no runners. + runner_manager.flush() + + +def test_flush(runner_manager: RunnerManager, tmp_path: Path): + """ + arrange: Create some runners. + act: Perform flushing. + assert: No runners. + """ + # Create a runner. + runner_manager.reconcile(2, VirtualMachineResources(2, "7GiB", "10Gib")) + + runner_manager.flush() + assert len(runner_manager._get_runners()) == 0 diff --git a/tests/unit/test_utilities.py b/tests/unit/test_utilities.py new file mode 100644 index 000000000..2818e8a48 --- /dev/null +++ b/tests/unit/test_utilities.py @@ -0,0 +1,31 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Test cases of utilities.""" + +from subprocess import CalledProcessError # nosec B404 +from unittest.mock import MagicMock + +import pytest + +from errors import SubprocessError +from utilities import execute_command + + +def test_execute_command_with_error(monkeypatch): + """ + arrange: Set up subprocess.run to return a result with error. + act: Execute a command + assert: Throw related to subprocess thrown. 
+ """ + + def raise_called_process_error(*args, **kargs): + raise CalledProcessError(returncode=1, cmd="mock cmd", stderr="mock stderr") + + mock_run = MagicMock() + mock_run.return_value = mock_result = MagicMock() + mock_result.check_returncode = raise_called_process_error + monkeypatch.setattr("utilities.subprocess.run", mock_run) + + with pytest.raises(SubprocessError): + execute_command(["mock", "cmd"]) diff --git a/tox.ini b/tox.ini new file mode 100644 index 000000000..7817af86f --- /dev/null +++ b/tox.ini @@ -0,0 +1,110 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +[tox] +skipsdist = True +skip_missing_interpreters = True +envlist = lint, unit, static, coverage-report + +[vars] +src_path = {toxinidir}/src/ +tst_path = {toxinidir}/tests/ +all_path = {[vars]src_path} {[vars]tst_path} + +[testenv] +basepython = python3.10 +setenv = + PYTHONPATH={toxinidir}:{toxinidir}/src + PYTHONBREAKPOINT=ipdb.set_trace + PY_COLOR=1 +passenv = + PYTHONPATH + CHARM_BUILD_DIR + MODEL_SETTINGS + +[testenv:fmt] +description = Apply coding style standards to code +deps = + black + isort +commands = + isort {[vars]all_path} + black {[vars]all_path} + +[testenv:lint] +description = Check code against coding style standards +deps = + -r{toxinidir}/requirements.txt + black + flake8<6.0.0 + flake8-docstrings>=1.6 + flake8-builtins>=2.0 + flake8-docstrings-complete>=1.0.3 + flake8-test-docs>=1.0 + ; There is an error with version 6.0.0 related to integers and arguments + pyproject-flake8<6.0.0 + pep8-naming + isort + codespell + toml + mypy + pylint + pytest + ops + pytest_operator + types-requests + types-PyYAML + pytest_asyncio + pydocstyle>=2.10 +commands = + pydocstyle {[vars]src_path} + codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \ + --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \ + --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg \ + --ignore-words {toxinidir}/.codespellignore + # 
pflake8 wrapper supports config from pyproject.toml + pflake8 {[vars]all_path} + isort --check-only --diff {[vars]all_path} + black --check --diff {[vars]all_path} + mypy {[vars]all_path} + pylint {[vars]src_path} + pydocstyle {[vars]src_path} + +[testenv:unit] +description = Run unit tests +deps = + pytest + coverage[toml] + -r{toxinidir}/requirements.txt +commands = + coverage run --source={[vars]src_path} \ + -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs} + coverage report + +[testenv:coverage-report] +description = Create test coverage report +deps = + pytest + coverage[toml] + -r{toxinidir}/requirements.txt +commands = + coverage report + +[testenv:static] +description = Run static analysis tests +deps = + bandit[toml] + -r{toxinidir}/requirements.txt +commands = + bandit -c {toxinidir}/pyproject.toml -r {[vars]src_path} + +[testenv:integration] +description = Run integration tests +deps = + pytest + juju + pytest-operator + pytest-asyncio + -r{toxinidir}/requirements.txt +commands = + pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs}