diff --git a/.github/workflows/e2e_test.yaml b/.github/workflows/e2e_test.yaml index 89bf7f8bf..702a8829d 100644 --- a/.github/workflows/e2e_test.yaml +++ b/.github/workflows/e2e_test.yaml @@ -37,7 +37,7 @@ jobs: uses: actions/cache@v4 id: cache-charm with: - path: github-runner_ubuntu-22.04-amd64-arm64.charm + path: github-runner_ubuntu-22.04-amd64.charm key: github-runner-charm-${{ hashFiles('**/*') }} - name: Setup LXD @@ -55,8 +55,8 @@ jobs: - name: Upload github-runner Charm uses: actions/upload-artifact@v4 with: - name: dangerous-test-only-github-runner_ubuntu-22.04-amd64-arm64.charm - path: github-runner_ubuntu-22.04-amd64-arm64.charm + name: dangerous-test-only-github-runner_ubuntu-22.04-amd64.charm + path: github-runner_ubuntu-22.04-amd64.charm run-id: name: Generate Run ID @@ -122,7 +122,7 @@ jobs: - name: Download github-runner Charm uses: actions/download-artifact@v4 with: - name: dangerous-test-only-github-runner_ubuntu-22.04-amd64-arm64.charm + name: dangerous-test-only-github-runner_ubuntu-22.04-amd64.charm - name: Enable br_netfilter run: sudo modprobe br_netfilter @@ -133,12 +133,12 @@ jobs: - name: Copy github-runner Charm run: | - cp github-runner_ubuntu-22.04-amd64-arm64.charm /home/$USER/github-runner_ubuntu-22.04-amd64-arm64.charm + cp github-runner_ubuntu-22.04-amd64.charm /home/$USER/github-runner_ubuntu-22.04-amd64.charm - name: Deploy github-runner Charm (Pull Request, Workflow Dispatch and Push) if: matrix.event.name == 'workflow_dispatch' || matrix.event.name == 'push' || matrix.event.name == 'pull_request' run: | - juju deploy /home/$USER/github-runner_ubuntu-22.04-amd64-arm64.charm \ + juju deploy /home/$USER/github-runner_ubuntu-22.04-amd64.charm \ ${{ steps.runner-name.outputs.name }} \ --base ubuntu@22.04 \ --config path=${{ secrets.E2E_TESTING_REPO }} \ @@ -200,7 +200,7 @@ jobs: git remote add testing https://github.com/${TESTING_REPO}.git git push testing ${{ steps.runner-name.outputs.name }}:main - juju deploy 
/home/$USER/github-runner_ubuntu-22.04-amd64-arm64.charm \ + juju deploy /home/$USER/github-runner_ubuntu-22.04-amd64.charm \ ${{ steps.runner-name.outputs.name }} \ --base ubuntu@22.04 \ --config path=$TESTING_REPO \ diff --git a/charmcraft.yaml b/charmcraft.yaml index 8af8fdd70..794769f73 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -20,10 +20,18 @@ bases: channel: "22.04" architectures: - amd64 - - arm64 run-on: - name: "ubuntu" channel: "22.04" architectures: - amd64 + - build-on: + - name: "ubuntu" + channel: "22.04" + architectures: + - arm64 + run-on: + - name: "ubuntu" + channel: "22.04" + architectures: - arm64 diff --git a/docs/tutorial/quick-start.md b/docs/tutorial/quick-start.md index 495d02841..105a3489c 100644 --- a/docs/tutorial/quick-start.md +++ b/docs/tutorial/quick-start.md @@ -35,6 +35,7 @@ The registration token can be requested by calling the [GitHub API](https://docs ### Deploy the GitHub runner charm The charm requires a GitHub personal access token with `repo` access, which can be created following the instructions [here](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic). +A user with `admin` access for the repository/org is required; otherwise, the repo-policy-compliance check will fail the job. 
Once the personal access token is created, the charm can be deployed with: diff --git a/tests/integration/charm_metrics_helpers.py b/tests/integration/charm_metrics_helpers.py index 14411a09e..ac4602a0b 100644 --- a/tests/integration/charm_metrics_helpers.py +++ b/tests/integration/charm_metrics_helpers.py @@ -17,7 +17,7 @@ from github_type import JobConclusion from metrics import METRICS_LOG_PATH from runner_metrics import PostJobStatus -from tests.integration.helpers import JOB_LOG_START_MSG_TEMPLATE, get_runner_name, run_in_unit +from tests.integration.helpers import get_runner_name, run_in_unit TEST_WORKFLOW_NAMES = [ "Workflow Dispatch Tests", @@ -26,7 +26,7 @@ ] -async def _wait_for_workflow_to_start(unit: Unit, workflow: Workflow): +async def wait_for_workflow_to_start(unit: Unit, workflow: Workflow): """Wait for the workflow to start. Args: @@ -41,7 +41,7 @@ async def _wait_for_workflow_to_start(unit: Unit, workflow: Workflow): logs_url = jobs[0].logs_url() logs = requests.get(logs_url).content.decode("utf-8") - if JOB_LOG_START_MSG_TEMPLATE.format(runner_name=runner_name) in logs: + if runner_name in logs: break else: sleep(30) @@ -105,7 +105,7 @@ async def get_metrics_log(unit: Unit) -> str: return stdout.strip() -async def _cancel_workflow_run(unit: Unit, workflow: Workflow): +async def cancel_workflow_run(unit: Unit, workflow: Workflow): """Cancel the workflow run. 
Args: @@ -120,7 +120,7 @@ async def _cancel_workflow_run(unit: Unit, workflow: Workflow): logs_url = jobs[0].logs_url() logs = requests.get(logs_url).content.decode("utf-8") - if JOB_LOG_START_MSG_TEMPLATE.format(runner_name=runner_name) in logs: + if runner_name in logs: run.cancel() diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 253249264..c4342448b 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -35,8 +35,6 @@ DISPATCH_FAILURE_TEST_WORKFLOW_FILENAME = "workflow_dispatch_failure_test.yaml" DISPATCH_WAIT_TEST_WORKFLOW_FILENAME = "workflow_dispatch_wait_test.yaml" -JOB_LOG_START_MSG_TEMPLATE = "Job is about to start running on the runner: {runner_name}" - async def check_runner_binary_exists(unit: Unit) -> bool: """Checks if runner binary exists in the charm. @@ -439,16 +437,23 @@ async def _assert_workflow_run_conclusion( workflow: The workflow to assert the workflow run conclusion for. start_time: The start time of the workflow. 
""" + log_found = False for run in workflow.get_runs(created=f">={start_time.isoformat()}"): latest_job: WorkflowJob = run.jobs()[0] logs = get_job_logs(job=latest_job) - if JOB_LOG_START_MSG_TEMPLATE.format(runner_name=runner_name) in logs: + if runner_name in logs: + log_found = True assert latest_job.conclusion == conclusion, ( f"Job {latest_job.name} for {runner_name} expected {conclusion}, " f"got {latest_job.conclusion}" ) + assert log_found, ( + f"No run with runner({runner_name}) log found for workflow({workflow.name}) " + f"starting from {start_time} with conclusion {conclusion}" + ) + async def _wait_for_workflow_to_complete( unit: Unit, workflow: Workflow, conclusion: str, start_time: datetime diff --git a/tests/integration/test_charm_metrics_failure.py b/tests/integration/test_charm_metrics_failure.py index 72e2aae9f..032b29b1e 100644 --- a/tests/integration/test_charm_metrics_failure.py +++ b/tests/integration/test_charm_metrics_failure.py @@ -14,12 +14,12 @@ import runner_logs from runner_metrics import PostJobStatus from tests.integration.charm_metrics_helpers import ( - _cancel_workflow_run, - _wait_for_workflow_to_start, assert_events_after_reconciliation, + cancel_workflow_run, clear_metrics_log, print_loop_device_info, wait_for_runner_to_be_marked_offline, + wait_for_workflow_to_start, ) from tests.integration.helpers import ( DISPATCH_CRASH_TEST_WORKFLOW_FILENAME, @@ -110,7 +110,7 @@ async def test_charm_issues_metrics_for_abnormal_termination( ) assert workflow.create_dispatch(forked_github_branch, {"runner": app.name}) - await _wait_for_workflow_to_start(unit, workflow) + await wait_for_workflow_to_start(unit, workflow) # Make the runner terminate abnormally by killing run.sh runner_name = await get_runner_name(unit) @@ -120,7 +120,7 @@ async def test_charm_issues_metrics_for_abnormal_termination( # Cancel workflow and wait that the runner is marked offline # to avoid errors during reconciliation. 
- await _cancel_workflow_run(unit, workflow) + await cancel_workflow_run(unit, workflow) await wait_for_runner_to_be_marked_offline(forked_github_repository, runner_name) # Set the number of virtual machines to 0 to speedup reconciliation