
Commit

Merge 70cf36f into 35b79da
yanksyoon authored Apr 9, 2024
2 parents 35b79da + 70cf36f commit 9814842
Showing 8 changed files with 183 additions and 99 deletions.
2 changes: 1 addition & 1 deletion src/charm.py
@@ -332,7 +332,7 @@ def _ensure_service_health(self) -> None:
         try:
             execute_command(["/usr/bin/systemctl", "is-active", "repo-policy-compliance"])
         except SubprocessError:
-            logger.exception("Found inactive repo-policy-compliance service")
+            logger.exception("Found inactive repo-policy-compliance service.")
             execute_command(["/usr/bin/systemctl", "restart", "repo-policy-compliance"])
             logger.info("Restart repo-policy-compliance service")
             raise
87 changes: 58 additions & 29 deletions tests/integration/charm_metrics_helpers.py
@@ -4,20 +4,26 @@
"""Utilities for charm metrics integration tests."""


import datetime
import json
import logging
from time import sleep

import requests
from github.Branch import Branch
from github.GithubException import GithubException
from github.Repository import Repository
from github.Workflow import Workflow
from github.WorkflowJob import WorkflowJob
from juju.application import Application
from juju.unit import Unit

from github_type import JobConclusion
from metrics import METRICS_LOG_PATH
from runner_metrics import PostJobStatus
from tests.integration.helpers import get_runner_name, run_in_unit
from tests.integration.helpers import get_runner_name, run_in_unit, wait_for

logger = logging.getLogger(__name__)

TEST_WORKFLOW_NAMES = [
"Workflow Dispatch Tests",
@@ -26,29 +32,46 @@
 ]
 
 
-async def wait_for_workflow_to_start(unit: Unit, workflow: Workflow):
+async def wait_for_workflow_to_start(
+    unit: Unit, workflow: Workflow, branch: Branch | None = None, started_time: float | None = None
+):
     """Wait for the workflow to start.
 
     Args:
         unit: The unit which contains the runner.
         workflow: The workflow to wait for.
+        branch: The branch where the workflow belongs to.
+        started_time: The time in seconds since epoch the job was started.
     """
     runner_name = await get_runner_name(unit)
-    for _ in range(30):
-        for run in workflow.get_runs():
+    created_at = (
+        None
+        if not started_time
+        # convert to integer since GH API takes up to seconds.
+        else f">={datetime.datetime.fromtimestamp(int(started_time)).isoformat()}"
+    )
+
+    def is_runner_log():
+        """Return whether a log for given runner exists."""
+        for run in workflow.get_runs(branch=branch, created=created_at):
             jobs = run.jobs()
-            if jobs:
-                logs_url = jobs[0].logs_url()
-                logs = requests.get(logs_url).content.decode("utf-8")
-
-                if runner_name in logs:
-                    break
-        else:
-            sleep(30)
-            continue
-        break
-    else:
-        assert False, "Timeout while waiting for the workflow to start"
+            if not jobs:
+                return False
+            try:
+                job: WorkflowJob = jobs[0]
+                logs = requests.get(job.logs_url()).content.decode("utf-8")
+            except GithubException as exc:
+                if exc.status == 410:
+                    logger.warning("Transient github error, %s", exc)
+                    return False
+            if runner_name in logs:
+                return True
+        return False
+
+    try:
+        await wait_for(is_runner_log, timeout=20 * 60, check_interval=30)
+    except TimeoutError as exc:
+        raise TimeoutError("Timeout while waiting for the workflow to start") from exc
 
 
 async def clear_metrics_log(unit: Unit) -> None:
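
The manual poll-and-sleep loop above is replaced by `wait_for`, imported from `tests.integration.helpers` but not shown in this diff. A minimal sketch of what such a helper could look like, assuming it accepts a sync or async predicate plus the `timeout` and `check_interval` keywords used above (the repository's actual implementation may differ):

import asyncio
import inspect
import time
from typing import Awaitable, Callable, Union


async def wait_for(
    func: Callable[[], Union[bool, Awaitable[bool]]],
    timeout: float = 300,
    check_interval: float = 10,
) -> None:
    """Poll func until it returns a truthy value or the timeout expires.

    Illustrative sketch only, not the helper shipped in tests/integration/helpers.py.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        result = func()
        # Support both plain and async predicates.
        if inspect.isawaitable(result):
            result = await result
        if result:
            return
        await asyncio.sleep(check_interval)
    raise TimeoutError(f"Condition not met within {timeout} seconds")
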
@@ -57,11 +80,11 @@ async def clear_metrics_log(unit: Unit) -> None:
     Args:
         unit: The unit to clear the metrics log on.
     """
-    retcode, _ = await run_in_unit(
+    retcode, _, stderr = await run_in_unit(
         unit=unit,
         command=f"if [ -f {METRICS_LOG_PATH} ]; then rm {METRICS_LOG_PATH}; fi",
     )
-    assert retcode == 0, "Failed to clear metrics log"
+    assert retcode == 0, f"Failed to clear metrics log, {stderr}"
 
 
 async def print_loop_device_info(unit: Unit, loop_device: str) -> None:
@@ -71,11 +94,11 @@ async def print_loop_device_info(unit: Unit, loop_device: str) -> None:
         unit: The unit to print the loop device info on.
         loop_device: The loop device to print the info for.
     """
-    retcode, stdout = await run_in_unit(
+    retcode, stdout, stderr = await run_in_unit(
         unit=unit,
         command="sudo losetup -lJ",
     )
-    assert retcode == 0, f"Failed to get loop devices: {stdout}"
+    assert retcode == 0, f"Failed to get loop devices: {stdout} {stderr}"
     assert stdout is not None, "Failed to get loop devices, no stdout message"
     loop_devices_info = json.loads(stdout)
     for loop_device_info in loop_devices_info["loopdevices"]:
@@ -95,33 +118,39 @@ async def get_metrics_log(unit: Unit) -> str:
     Returns:
         The metrics log.
     """
-    retcode, stdout = await run_in_unit(
+    retcode, stdout, stderr = await run_in_unit(
         unit=unit,
         command=f"if [ -f {METRICS_LOG_PATH} ]; then cat {METRICS_LOG_PATH}; else echo ''; fi",
     )
-    assert retcode == 0, f"Failed to get metrics log: {stdout}"
+    assert retcode == 0, f"Failed to get metrics log: {stdout} {stderr}"
     assert stdout is not None, "Failed to get metrics log, no stdout message"
     logging.info("Metrics log: %s", stdout)
     return stdout.strip()
 
 
-async def cancel_workflow_run(unit: Unit, workflow: Workflow):
+async def cancel_workflow_run(unit: Unit, workflow: Workflow, branch: Branch | None = None):
     """Cancel the workflow run.
 
     Args:
         unit: The unit which contains the runner.
         workflow: The workflow to cancel the workflow run for.
+        branch: The branch where the workflow belongs to.
     """
     runner_name = await get_runner_name(unit)
 
-    for run in workflow.get_runs():
+    for run in workflow.get_runs(branch=branch):
         jobs = run.jobs()
-        if jobs:
-            logs_url = jobs[0].logs_url()
-            logs = requests.get(logs_url).content.decode("utf-8")
-
-            if runner_name in logs:
-                run.cancel()
+        if not jobs:
+            continue
+        try:
+            job: WorkflowJob = jobs[0]
+            logs = requests.get(job.logs_url()).content.decode("utf-8")
+        except GithubException as exc:
+            if exc.status == 410:
+                logger.warning("Transient github error, %s", exc)
+                continue
+        if runner_name in logs:
+            run.cancel()
 
 
 async def assert_events_after_reconciliation(
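
With the added `branch` and `started_time` parameters, a test can scope the run lookup to the branch it dispatched on and to runs created at or after the dispatch time. A hypothetical usage sketch; the `unit`, `workflow`, and `branch` fixtures and the dispatch inputs are placeholders, not part of this commit:

import time

from tests.integration.charm_metrics_helpers import (
    cancel_workflow_run,
    wait_for_workflow_to_start,
)


async def test_dispatch_and_cancel(unit, workflow, branch):
    """Illustrative flow only; fixture wiring is assumed, not shown in this diff."""
    start_time = time.time()
    # Dispatch the workflow on the branch under test (inputs are placeholders).
    workflow.create_dispatch(branch, {"runner": "test-runner"})
    # Wait until a run whose job logs mention this unit's runner appears.
    await wait_for_workflow_to_start(unit, workflow, branch=branch, started_time=start_time)
    # Clean up by cancelling the run that matched the runner.
    await cancel_workflow_run(unit, workflow, branch=branch)
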
