Skip to content

Commit

Permalink
Add rerun test executions helper script (#195)
Browse files Browse the repository at this point in the history
* Add rerun test executions helper script

* Adds a space

* add reversed option to rerun script

* Add show common failures script

* Update backend/scripts/rerun_failing_test_executions.py

Co-authored-by: Nadzeya H <[email protected]>

* Update backend/scripts/rerun_failing_test_executions.py

Co-authored-by: Nadzeya H <[email protected]>

* Address review comments

* Fix formatting

* Address rest of PR comments

* Accept both cases of y

* Increase requests global timeout

---------

Co-authored-by: Nadzeya H <[email protected]>
  • Loading branch information
omar-selo and nadzyah authored Sep 11, 2024
1 parent c82eeb5 commit 78fd83c
Show file tree
Hide file tree
Showing 2 changed files with 155 additions and 0 deletions.
96 changes: 96 additions & 0 deletions backend/scripts/rerun_failing_test_executions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
# ruff: noqa: T201

import argparse
import functools
import re
from os import environ

from requests import Session

# Base URL of the Test Observer API; override via the TO_API_URL env var.
TO_API_URL = environ.get("TO_API_URL", "https://test-observer-api.canonical.com")

# Shared HTTP session. requests has no session-wide timeout option, so the
# request method is patched to apply a global 30-second timeout to every call.
requests = Session()
requests.request = functools.partial(requests.request, timeout=30)  # type: ignore


def main(artefact_id: int, test_case_regex: str, reversed: bool):
    """Request reruns for an artefact's failed, undecided test executions.

    A test execution qualifies when it has no review decision, no rerun
    already requested, and status FAILED. By default, executions with at
    least one FAILED test result whose name matches *test_case_regex* are
    selected; with *reversed* set, executions with NO matching failed result
    are selected instead. The user is asked to confirm before the rerun
    request is submitted to the API.

    NOTE: *reversed* shadows the builtin of the same name; the name is kept
    so keyword callers remain compatible.
    """
    artefact_builds = requests.get(
        f"{TO_API_URL}/v1/artefacts/{artefact_id}/builds"
    ).json()

    test_case_matcher = re.compile(test_case_regex)

    # Only undecided, failed executions that aren't already queued for rerun.
    relevant_test_executions = [
        te
        for ab in artefact_builds
        for te in ab["test_executions"]
        if te["review_decision"] == []
        and not te["is_rerun_requested"]
        and te["status"] == "FAILED"
    ]

    test_execution_ids_to_rerun = []
    for te in relevant_test_executions:
        test_results = requests.get(
            f"{TO_API_URL}/v1/test-executions/{te['id']}/test-results"
        ).json()

        # Lazy scan: we only need to know whether any match exists.
        matching_failed_tests = (
            tr
            for tr in test_results
            if tr["status"] == "FAILED" and test_case_matcher.match(tr["name"])
        )

        first_failing_test = next(matching_failed_tests, None)

        if first_failing_test and not reversed:
            test_execution_ids_to_rerun.append(te["id"])
            print(
                f"will rerun {te['environment']['name']}"
                f" for failing {first_failing_test['name']}"
            )
        elif not first_failing_test and reversed:
            test_execution_ids_to_rerun.append(te["id"])
            print(
                f"will rerun {te['environment']['name']}"
                " as no failing matches found and reversed option is set"
            )

    # Accept both "y" and "Y"; anything else aborts.
    should_rerun = (
        input(
            f"Will rerun {len(test_execution_ids_to_rerun)}"
            " test executions is that ok? (y/N) "
        ).lower()
        == "y"
    )

    if should_rerun:
        requests.post(
            f"{TO_API_URL}/v1/test-executions/reruns",
            json={"test_execution_ids": test_execution_ids_to_rerun},
        )
        print("Submitted rerun requests")
    else:
        print("Aborting the script")


# --help epilog; %(prog)s is substituted by argparse with the program name.
example_usage = """Example:
python %(prog)s 47906 '.*wireless.*'
python %(prog)s 47906 '.*suspend.*'"""

# Script entry point: parse CLI arguments and kick off the rerun flow.
if __name__ == "__main__":
    description = (
        "Reruns test executions of an artefact that"
        " failed particular test cases matched by the passed regex."
        "\nUses TO_API_URL environment if defined defaulting to production otherwise"
    )
    arg_parser = argparse.ArgumentParser(
        description=description,
        epilog=example_usage,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument("artefact_id", type=int)
    arg_parser.add_argument("test_case_regex", type=str)
    arg_parser.add_argument("--reversed", action="store_true")
    parsed = arg_parser.parse_args()

    main(parsed.artefact_id, parsed.test_case_regex, parsed.reversed)
59 changes: 59 additions & 0 deletions backend/scripts/show_common_failures.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# ruff: noqa: T201

import argparse
import functools
import sys
from collections import Counter
from os import environ
from pprint import pprint

from requests import Session

# Base URL of the Test Observer API; override via the TO_API_URL env var.
TO_API_URL = environ.get("TO_API_URL", "https://test-observer-api.canonical.com")

# Shared HTTP session. requests has no session-wide timeout option, so the
# request method is patched to apply a global 30-second timeout to every call.
requests = Session()
requests.request = functools.partial(requests.request, timeout=30)  # type: ignore


def main(artefact_id: int):
    """Print the most common failing test cases of an artefact.

    Fetches the test executions of all the artefact's builds, keeps those
    that FAILED and have no review decision yet, tallies the names of their
    FAILED test results, and pretty-prints them ordered by frequency.
    While fetching, an in-place "i/total" progress indicator is written to
    stdout.
    """
    artefact_builds = requests.get(
        f"{TO_API_URL}/v1/artefacts/{artefact_id}/builds"
    ).json()

    relevant_test_executions = [
        te
        for ab in artefact_builds
        for te in ab["test_executions"]
        if te["review_decision"] == [] and te["status"] == "FAILED"
    ]
    total = len(relevant_test_executions)

    failing_test_cases: list[str] = []
    for i, te in enumerate(relevant_test_executions, start=1):
        test_results = requests.get(
            f"{TO_API_URL}/v1/test-executions/{te['id']}/test-results"
        ).json()

        failing_test_cases.extend(
            tr["name"] for tr in test_results if tr["status"] == "FAILED"
        )

        # "\r" rewinds to the line start so the counter overwrites itself.
        sys.stdout.write("\r")
        sys.stdout.write(f"{i}/{total}")
        sys.stdout.flush()

    counter = Counter(failing_test_cases)

    print()  # move past the progress line before printing results
    pprint(counter.most_common())  # noqa: T203


# Entry point: read the artefact id from the command line and run the report.
if __name__ == "__main__":
    cli = argparse.ArgumentParser(
        description=(
            "Given an artefact id, prints the most common failing"
            " test cases under undecided test executions."
            "\nUses TO_API_URL environment if defined defaulting to production otherwise"
        ),
    )
    cli.add_argument("artefact_id", type=int)

    main(cli.parse_args().artefact_id)

0 comments on commit 78fd83c

Please sign in to comment.