diff --git a/scripts/src/chartprreview/chartprreview.py b/scripts/src/chartprreview/chartprreview.py
index fb821be4..e705edeb 100644
--- a/scripts/src/chartprreview/chartprreview.py
+++ b/scripts/src/chartprreview/chartprreview.py
@@ -6,25 +6,26 @@
 import subprocess
 import hashlib
 
-import environs
 from environs import Env
 import semver
 import semantic_version
 import requests
 import yaml
+
 try:
-    from yaml import CLoader as Loader, CDumper as Dumper
+    from yaml import CLoader as Loader
 except ImportError:
-    from yaml import Loader, Dumper
+    from yaml import Loader
 
-sys.path.append('../')
+sys.path.append("../")
 
 from report import report_info
 from report import verifier_report
 from signedchart import signedchart
 from pullrequest import prartifact
 from tools import gitutils
 
+
 def write_error_log(directory, *msg):
     os.makedirs(directory, exist_ok=True)
     with open(os.path.join(directory, "errors"), "w") as fd:
@@ -33,6 +34,7 @@ def write_error_log(directory, *msg):
         fd.write(line)
         fd.write("\n")
 
+
 def get_vendor_type(directory):
     vendor_type = os.environ.get("VENDOR_TYPE")
     if not vendor_type or vendor_type not in {"partner", "redhat", "community"}:
@@ -41,8 +43,9 @@ def get_vendor_type(directory):
         sys.exit(1)
     return vendor_type
 
+
 def get_modified_charts(directory, api_url):
-    print("[INFO] Get modified charts. %s" %directory)
+    print("[INFO] Get modified charts. %s" % directory)
     files = prartifact.get_modified_files(api_url)
     pattern = re.compile(r"charts/(\w+)/([\w-]+)/([\w-]+)/([\w\.-]+)/.*")
     for file_path in files:
@@ -55,8 +58,11 @@ def get_modified_charts(directory, api_url):
         write_error_log(directory, msg)
         sys.exit(1)
 
+
 def verify_user(directory, username, category, organization, chart):
-    print("[INFO] Verify user. %s, %s, %s, %s"% (username, category, organization, chart))
+    print(
+        "[INFO] Verify user. %s, %s, %s, %s" % (username, category, organization, chart)
+    )
     owners_path = os.path.join("charts", category, organization, chart, "OWNERS")
     if not os.path.exists(owners_path):
         msg = f"[ERROR] {owners_path} file does not exist."
@@ -65,13 +71,19 @@ def verify_user(directory, username, category, organization, chart):
 
     data = open(owners_path).read()
     out = yaml.load(data, Loader=Loader)
-    if username not in [x['githubUsername'] for x in out['users']]:
+    if username not in [x["githubUsername"] for x in out["users"]]:
        msg = f"[ERROR] {username} is not allowed to submit the chart on behalf of {organization}"
        write_error_log(directory, msg)
        sys.exit(1)
 
-def check_owners_file_against_directory_structure(directory,username, category, organization, chart):
-    print("[INFO] Check owners file against directory structure. %s, %s, %s" % (category, organization, chart))
+
+def check_owners_file_against_directory_structure(
+    directory, username, category, organization, chart
+):
+    print(
+        "[INFO] Check owners file against directory structure. %s, %s, %s"
%s, %s, %s" + % (category, organization, chart) + ) data = open(os.path.join("charts", category, organization, chart, "OWNERS")).read() out = yaml.load(data, Loader=Loader) vendor_label = out["vendor"]["label"] @@ -80,21 +92,30 @@ def check_owners_file_against_directory_structure(directory,username, category, msgs = [] if organization != vendor_label: error_exit = True - msgs.append(f"[ERROR] vendor/label in OWNERS file ({vendor_label}) doesn't match the directory structure (charts/{category}/{organization}/{chart})") + msgs.append( + f"[ERROR] vendor/label in OWNERS file ({vendor_label}) doesn't match the directory structure (charts/{category}/{organization}/{chart})" + ) if chart != chart_name: - msgs.append(f"[ERROR] chart/name in OWNERS file ({chart_name}) doesn't match the directory structure (charts/{category}/{organization}/{chart})") + msgs.append( + f"[ERROR] chart/name in OWNERS file ({chart_name}) doesn't match the directory structure (charts/{category}/{organization}/{chart})" + ) error_exit = True if error_exit: write_error_log(directory, *msgs) sys.exit(1) + def verify_signature(directory, category, organization, chart, version): print("[INFO] Verify signature. %s, %s, %s" % (organization, chart, version)) - sign = os.path.join("charts", category, organization, chart, version, "report.yaml.asc") + sign = os.path.join( + "charts", category, organization, chart, version, "report.yaml.asc" + ) if os.path.exists(sign): - data = open(os.path.join("charts", category, organization, chart, "OWNERS")).read() + data = open( + os.path.join("charts", category, organization, chart, "OWNERS") + ).read() out = yaml.load(data, Loader=Loader) - publickey = out.get('publicPgpKey') + publickey = out.get("publicPgpKey") if not publickey: return with open("public.key", "w") as fd: @@ -102,27 +123,39 @@ def verify_signature(directory, category, organization, chart, version): out = subprocess.run(["gpg", "--import", "public.key"], capture_output=True) print("[INFO]", out.stdout.decode("utf-8")) print("[WARNING]", out.stderr.decode("utf-8")) - report = os.path.join("charts", category, organization, chart, version, "report.yaml") + report = os.path.join( + "charts", category, organization, chart, version, "report.yaml" + ) out = subprocess.run(["gpg", "--verify", sign, report], capture_output=True) print("[INFO]", out.stdout.decode("utf-8")) print("[WARNING]", out.stderr.decode("utf-8")) else: print(f"[INFO] Signed report not found: {sign}.") -def match_checksum(directory,generated_report_info_path,category, organization, chart, version): + +def match_checksum( + directory, generated_report_info_path, category, organization, chart, version +): print("[INFO] Check digests match. 
%s, %s, %s" % (organization, chart, version)) - submitted_report_path = os.path.join("charts", category, organization, chart, version, "report.yaml") - submitted_digests = report_info.get_report_digests(report_path=submitted_report_path) + submitted_report_path = os.path.join( + "charts", category, organization, chart, version, "report.yaml" + ) + submitted_digests = report_info.get_report_digests( + report_path=submitted_report_path + ) submitted_digest = submitted_digests["chart"] - generated_digests = report_info.get_report_digests(report_info_path=generated_report_info_path) + generated_digests = report_info.get_report_digests( + report_info_path=generated_report_info_path + ) generated_digest = generated_digests["chart"] - if submitted_digest != generated_digest: + if submitted_digest != generated_digest: msg = f"[ERROR] Digest is not matching: {submitted_digest}, {generated_digest}" write_error_log(directory, msg) sys.exit(1) + def check_url(directory, report_path): print("[INFO] Check chart_url is a valid url. %s" % report_path) chart_url = report_info.get_report_chart_url(report_path=report_path) @@ -156,13 +189,23 @@ def check_url(directory, report_path): msgs.append(str(err)) write_error_log(directory, *msgs) - verify_package_digest(chart_url,report_path) + verify_package_digest(chart_url, report_path) + -def match_name_and_version(directory, category, organization, chart, version,generated_report_path): - print("[INFO] Check chart has same name and version as directory structure. %s, %s, %s" % (organization, chart, version)) - submitted_report_path = os.path.join("charts", category, organization, chart, version, "report.yaml") +def match_name_and_version( + directory, category, organization, chart, version, generated_report_path +): + print( + "[INFO] Check chart has same name and version as directory structure. %s, %s, %s" + % (organization, chart, version) + ) + submitted_report_path = os.path.join( + "charts", category, organization, chart, version, "report.yaml" + ) if os.path.exists(submitted_report_path): - submitted_report_chart = report_info.get_report_chart(report_path=submitted_report_path) + submitted_report_chart = report_info.get_report_chart( + report_path=submitted_report_path + ) submitted_report_chart_name = submitted_report_chart["name"] submitted_report_chart_version = submitted_report_chart["version"] @@ -177,7 +220,9 @@ def match_name_and_version(directory, category, organization, chart, version,gen sys.exit(1) if os.path.exists(generated_report_path): - report_chart = report_info.get_report_chart(report_path=generated_report_path) + report_chart = report_info.get_report_chart( + report_path=generated_report_path + ) report_chart_name = report_chart["name"] report_chart_version = report_chart["version"] @@ -206,22 +251,27 @@ def match_name_and_version(directory, category, organization, chart, version,gen write_error_log(directory, msg) sys.exit(1) + def check_report_success(directory, api_url, report_path, report_info_path, version): print("[INFO] Check report success. 
%s" % report_path) data = open(report_path).read() print("[INFO] Full report: ") print(data) quoted_data = data.replace("%", "%25").replace("\n", "%0A").replace("\r", "%0D") - gitutils.add_output("report_content",quoted_data) + gitutils.add_output("report_content", quoted_data) - chart = report_info.get_report_chart(report_path=report_path,report_info_path=report_info_path) + chart = report_info.get_report_chart( + report_path=report_path, report_info_path=report_info_path + ) report_version = chart["version"] if report_version != version: msg = f"[ERROR] Chart Version '{report_version}' doesn't match the version in the directory path: '{version}'" write_error_log(directory, msg) sys.exit(1) - report_metadata = report_info.get_report_metadata(report_path=report_path,report_info_path=report_info_path) + report_metadata = report_info.get_report_metadata( + report_path=report_path, report_info_path=report_info_path + ) profile_version = report_metadata["profileVersion"] vendor_type = get_vendor_type(directory) report_vendor_type = report_metadata["vendorType"] @@ -232,17 +282,23 @@ def check_report_success(directory, api_url, report_path, report_info_path, vers sys.exit(1) print(f"[INFO] Profile version: {profile_version}") - annotations = report_info.get_report_annotations(report_path=report_path,report_info_path=report_info_path) + annotations = report_info.get_report_annotations( + report_path=report_path, report_info_path=report_info_path + ) - required_annotations = {"charts.openshift.io/lastCertifiedTimestamp", - "charts.openshift.io/testedOpenShiftVersion", - "charts.openshift.io/supportedOpenShiftVersions", - "charts.openshift.io/digest"} + required_annotations = { + "charts.openshift.io/lastCertifiedTimestamp", + "charts.openshift.io/testedOpenShiftVersion", + "charts.openshift.io/supportedOpenShiftVersions", + "charts.openshift.io/digest", + } if profile_version == "v1.0": - required_annotations = {"charts.openshift.io/lastCertifiedTimestamp", - "charts.openshift.io/certifiedOpenShiftVersions", - "charts.openshift.io/digest"} + required_annotations = { + "charts.openshift.io/lastCertifiedTimestamp", + "charts.openshift.io/certifiedOpenShiftVersions", + "charts.openshift.io/digest", + } available_annotations = set(annotations.keys()) @@ -252,7 +308,11 @@ def check_report_success(directory, api_url, report_path, report_info_path, vers write_error_log(directory, msg) sys.exit(1) - report = report_info.get_report_results(report_path=report_path,report_info_path=report_info_path,profile_type=vendor_type) + report = report_info.get_report_results( + report_path=report_path, + report_info_path=report_info_path, + profile_type=vendor_type, + ) label_names = prartifact.get_labels(api_url) @@ -264,22 +324,22 @@ def check_report_success(directory, api_url, report_path, report_info_path, vers msgs.append("[ERROR] Chart verifier report includes failures:") msgs.append(f"- Number of checks passed: {passed}") msgs.append(f"- Number of checks failed: {failed}") - msgs.append(f"- Error message(s):") + msgs.append("- Error message(s):") for m in report["message"]: msgs.append(f" - {m}") write_error_log(directory, *msgs) if vendor_type == "redhat": - gitutils.add_output("redhat_to_community","True") + gitutils.add_output("redhat_to_community", "True") if vendor_type != "redhat" and "force-publish" not in label_names: if vendor_type == "community": # requires manual review and approval - gitutils.add_output("community_manual_review_required","True") + 
gitutils.add_output("community_manual_review_required", "True") sys.exit(1) if vendor_type == "community" and "force-publish" not in label_names: # requires manual review and approval print("[INFO] Community submission requires manual approval.") - gitutils.add_output("community_manual_review_required","True") + gitutils.add_output("community_manual_review_required", "True") sys.exit(1) if failures_in_report or vendor_type == "community": @@ -301,39 +361,68 @@ def check_report_success(directory, api_url, report_path, report_info_path, vers write_error_log(directory, msg) sys.exit(1) -def verify_package_digest(url,report): + +def verify_package_digest(url, report): print("[INFO] check package digest.") response = requests.get(url, allow_redirects=True) if response.status_code == 200: target_digest = hashlib.sha256(response.content).hexdigest() - found,report_data = verifier_report.get_report_data(report) + found, report_data = verifier_report.get_report_data(report) if found: pkg_digest = verifier_report.get_package_digest(report_data) if target_digest: if pkg_digest and pkg_digest != target_digest: # Digest was passed and computed but differ - raise Exception("Found an integrity issue. SHA256 digest passed does not match SHA256 digest computed.") + raise Exception( + "Found an integrity issue. SHA256 digest passed does not match SHA256 digest computed." + ) elif not pkg_digest: # Digest was not passed and could not be computed - raise Exception("Was unable to compute SHA256 digest, please ensure chart url points to a chart package.") + raise Exception( + "Was unable to compute SHA256 digest, please ensure chart url points to a chart package." + ) def main(): parser = argparse.ArgumentParser() - parser.add_argument("-d", "--directory", dest="directory", type=str, required=True, - help="artifact directory for archival") - parser.add_argument("-n", "--verify-user", dest="username", type=str, required=True, - help="check if the user can update the chart") - parser.add_argument("-u", "--api-url", dest="api_url", type=str, required=True, - help="API URL for the pull request") + parser.add_argument( + "-d", + "--directory", + dest="directory", + type=str, + required=True, + help="artifact directory for archival", + ) + parser.add_argument( + "-n", + "--verify-user", + dest="username", + type=str, + required=True, + help="check if the user can update the chart", + ) + parser.add_argument( + "-u", + "--api-url", + dest="api_url", + type=str, + required=True, + help="API URL for the pull request", + ) args = parser.parse_args() - category, organization, chart, version = get_modified_charts(args.directory, args.api_url) + category, organization, chart, version = get_modified_charts( + args.directory, args.api_url + ) verify_user(args.directory, args.username, category, organization, chart) - check_owners_file_against_directory_structure(args.directory, args.username, category, organization, chart) - submitted_report_path = os.path.join("charts", category, organization, chart, version, "report.yaml") + check_owners_file_against_directory_structure( + args.directory, args.username, category, organization, chart + ) + submitted_report_path = os.path.join( + "charts", category, organization, chart, version, "report.yaml" + ) if os.path.exists(submitted_report_path): report_valid, message = verifier_report.validate(submitted_report_path) @@ -348,19 +437,23 @@ def main(): pgp_key_in_owners = signedchart.get_pgp_key_from_owners(owners_file) if pgp_key_in_owners: if 
-                if not signedchart.check_pgp_public_key(pgp_key_in_owners,submitted_report_path):
-                    msg = f"PGP key in OWNERS file does not match with key digest in report."
+                if not signedchart.check_pgp_public_key(
+                    pgp_key_in_owners, submitted_report_path
+                ):
+                    msg = "PGP key in OWNERS file does not match with key digest in report."
                     print(f"[ERROR] {msg}")
                     write_error_log(args.directory, msg)
                     sys.exit(1)
                 else:
-                    print("[INFO] PGP key in OWNERS file matches with key digest in report.")
+                    print(
+                        "[INFO] PGP key in OWNERS file matches with key digest in report."
+                    )
 
     report_generated = os.environ.get("REPORT_GENERATED")
     generated_report_path = os.environ.get("GENERATED_REPORT_PATH")
-    generated_report_info_path = os.environ.get("REPORT_SUMMARY_PATH") 
+    generated_report_info_path = os.environ.get("REPORT_SUMMARY_PATH")
     env = Env()
-    web_catalog_only = env.bool("WEB_CATALOG_ONLY",False)
+    web_catalog_only = env.bool("WEB_CATALOG_ONLY", False)
 
     if os.path.exists(submitted_report_path):
         print("[INFO] Report exists: ", submitted_report_path)
@@ -368,7 +461,14 @@ def main():
         report_path = submitted_report_path
         report_info_path = ""
         if report_generated and report_generated == "True":
-            match_checksum(args.directory,generated_report_info_path, category, organization, chart, version)
+            match_checksum(
+                args.directory,
+                generated_report_info_path,
+                category,
+                organization,
+                chart,
+                version,
+            )
         elif not web_catalog_only:
             check_url(args.directory, report_path)
     else:
@@ -380,6 +480,9 @@ def main():
         print(f"[INFO]: generated report path: {generated_report_path}")
         print(f"[INFO]: generated report info: {generated_report_info_path}")
 
-
-    match_name_and_version(args.directory, category, organization, chart, version, generated_report_path)
-    check_report_success(args.directory, args.api_url, report_path, report_info_path, version)
+    match_name_and_version(
+        args.directory, category, organization, chart, version, generated_report_path
+    )
+    check_report_success(
+        args.directory, args.api_url, report_path, report_info_path, version
+    )
diff --git a/scripts/src/chartprreview/chartprreview_test.py b/scripts/src/chartprreview/chartprreview_test.py
index 1c37f027..e37de1ed 100644
--- a/scripts/src/chartprreview/chartprreview_test.py
+++ b/scripts/src/chartprreview/chartprreview_test.py
@@ -4,10 +4,12 @@
 from chartprreview.chartprreview import check_owners_file_against_directory_structure
 from chartprreview.chartprreview import write_error_log
 
+
 def test_verify_user():
     with pytest.raises(SystemExit):
         verify_user("mbaiju", "partners", "test-org1", "test-chart")
 
+
 owners_with_wrong_vendor_label = """\
 ---
 chart:
@@ -51,21 +53,33 @@ def test_verify_user():
 """
 
-
 def test_check_owners_file_against_directory_structure(tmpdir):
     original_cwd = os.getcwd()
-    p = tmpdir.mkdir("charts").mkdir("partners").mkdir("test-org").mkdir("test-chart").join("OWNERS")
+    p = (
+        tmpdir.mkdir("charts")
+        .mkdir("partners")
+        .mkdir("test-org")
+        .mkdir("test-chart")
+        .join("OWNERS")
+    )
     p.write(owners_with_wrong_vendor_label)
     os.chdir(tmpdir)
     new_cwd = os.getcwd()
     print("new_cwd", new_cwd)
     with pytest.raises(SystemExit):
-        check_owners_file_against_directory_structure("baijum", "partners", "test-org", "test-chart")
+        check_owners_file_against_directory_structure(
+            "baijum", "partners", "test-org", "test-chart"
+        )
     p.write(owners_with_wrong_chart_name)
     with pytest.raises(SystemExit):
-        check_owners_file_against_directory_structure("baijum", "partners", "test-org", "test-chart")
"test-chart") + check_owners_file_against_directory_structure( + "baijum", "partners", "test-org", "test-chart" + ) p.write(owners_with_correct_values) - check_owners_file_against_directory_structure("baijum", "partners", "test-org", "test-chart") + check_owners_file_against_directory_structure( + "baijum", "partners", "test-org", "test-chart" + ) + def test_write_error_log(tmpdir): write_error_log(tmpdir, "First message") diff --git a/scripts/src/chartrepomanager/chartrepomanager.py b/scripts/src/chartrepomanager/chartrepomanager.py index 53e2b03e..4300bb69 100644 --- a/scripts/src/chartrepomanager/chartrepomanager.py +++ b/scripts/src/chartrepomanager/chartrepomanager.py @@ -2,7 +2,6 @@ import shutil import os import sys -import json import re import subprocess import tempfile @@ -10,24 +9,24 @@ from datetime import datetime, timezone import hashlib import urllib.parse -import environs from environs import Env -import semver import requests import yaml + try: from yaml import CLoader as Loader, CDumper as Dumper except ImportError: from yaml import Loader, Dumper -sys.path.append('../') +sys.path.append("../") from report import report_info from chartrepomanager import indexannotations from signedchart import signedchart from pullrequest import prartifact from tools import gitutils + def get_modified_charts(api_url): files = prartifact.get_modified_files(api_url) pattern = re.compile(r"charts/(\w+)/([\w-]+)/([\w-]+)/([\w\.-]+)/.*") @@ -40,11 +39,14 @@ def get_modified_charts(api_url): print("No modified files found.") sys.exit(0) + def get_current_commit_sha(): cwd = os.getcwd() os.chdir("..") subprocess.run(["git", "pull", "--all", "--force"], capture_output=True) - commit = subprocess.run(["git", "rev-parse", "--verify", "HEAD"], capture_output=True) + commit = subprocess.run( + ["git", "rev-parse", "--verify", "HEAD"], capture_output=True + ) print(commit.stdout.decode("utf-8")) print(commit.stderr.decode("utf-8")) commit_hash = commit.stdout.strip() @@ -52,21 +54,28 @@ def get_current_commit_sha(): os.chdir(cwd) return commit_hash + def check_chart_source_or_tarball_exists(category, organization, chart, version): src = os.path.join("charts", category, organization, chart, version, "src") if os.path.exists(src): return True, False - tarball = os.path.join("charts", category, organization, chart, version, f"{chart}-{version}.tgz") + tarball = os.path.join( + "charts", category, organization, chart, version, f"{chart}-{version}.tgz" + ) if os.path.exists(tarball): return False, True return False, False + def check_report_exists(category, organization, chart, version): - report_path = os.path.join("charts", category, organization, chart, version, "report.yaml") + report_path = os.path.join( + "charts", category, organization, chart, version, "report.yaml" + ) return os.path.exists(report_path), report_path + def generate_report(chart_file_name): cwd = os.getcwd() report_content = urllib.parse.unquote(os.environ.get("REPORT_CONTENT")) @@ -77,8 +86,12 @@ def generate_report(chart_file_name): fd.write(report_content) return report_path + def prepare_chart_source_for_release(category, organization, chart, version): - print("[INFO] prepare chart source for release. %s, %s, %s, %s" % (category, organization, chart, version)) + print( + "[INFO] prepare chart source for release. 
%s, %s, %s, %s" + % (category, organization, chart, version) + ) path = os.path.join("charts", category, organization, chart, version, "src") out = subprocess.run(["helm", "package", path], capture_output=True) print(out.stdout.decode("utf-8")) @@ -88,12 +101,20 @@ def prepare_chart_source_for_release(category, organization, chart, version): os.remove(os.path.join(".cr-release-packages", chart_file_name)) except FileNotFoundError: pass - shutil.copy(f"{chart}-{version}.tgz" , f".cr-release-packages/{chart_file_name}") + shutil.copy(f"{chart}-{version}.tgz", f".cr-release-packages/{chart_file_name}") + -def prepare_chart_tarball_for_release(category, organization, chart, version,signed_chart): - print("[INFO] prepare chart tarball for release. %s, %s, %s, %s" % (category, organization, chart, version)) +def prepare_chart_tarball_for_release( + category, organization, chart, version, signed_chart +): + print( + "[INFO] prepare chart tarball for release. %s, %s, %s, %s" + % (category, organization, chart, version) + ) chart_file_name = f"{chart}-{version}.tgz" - path = os.path.join("charts", category, organization, chart, version, chart_file_name) + path = os.path.join( + "charts", category, organization, chart, version, chart_file_name + ) try: os.remove(os.path.join(".cr-release-packages", chart_file_name)) except FileNotFoundError: @@ -104,7 +125,9 @@ def prepare_chart_tarball_for_release(category, organization, chart, version,sig if signed_chart: print("[INFO] Signed chart - include PROV file") prov_file_name = f"{chart_file_name}.prov" - path = os.path.join("charts", category, organization, chart, version, prov_file_name) + path = os.path.join( + "charts", category, organization, chart, version, prov_file_name + ) try: os.remove(os.path.join(".cr-release-packages", prov_file_name)) except FileNotFoundError: @@ -114,57 +137,112 @@ def prepare_chart_tarball_for_release(category, organization, chart, version,sig return get_key_file(category, organization, chart, version) return "" + def get_key_file(category, organization, chart, version): owners_path = os.path.join("charts", category, organization, chart, "OWNERS") key_in_owners = signedchart.get_pgp_key_from_owners(owners_path) if key_in_owners: key_file_name = f"{chart}-{version}.tgz.key" print(f"[INFO] Signed chart - add public key file : {key_file_name}") - signedchart.create_public_key_file(key_in_owners,key_file_name) + signedchart.create_public_key_file(key_in_owners, key_file_name) return key_file_name return "" def push_chart_release(repository, organization, commit_hash): - print("[INFO]push chart release. %s, %s, %s " % (repository, organization, commit_hash)) + print( + "[INFO]push chart release. 
%s, %s, %s " + % (repository, organization, commit_hash) + ) org, repo = repository.split("/") token = os.environ.get("GITHUB_TOKEN") print("[INFO] Upload chart using the chart-releaser") - out = subprocess.run(["cr", "upload", "-c", commit_hash, "-o", org, "-r", repo, "--release-name-template", f"{organization}-"+"{{ .Name }}-{{ .Version }}", "-t", token], capture_output=True) + out = subprocess.run( + [ + "cr", + "upload", + "-c", + commit_hash, + "-o", + org, + "-r", + repo, + "--release-name-template", + f"{organization}-" + "{{ .Name }}-{{ .Version }}", + "-t", + token, + ], + capture_output=True, + ) print(out.stdout.decode("utf-8")) print(out.stderr.decode("utf-8")) + def create_worktree_for_index(branch): dr = tempfile.mkdtemp(prefix="crm-") upstream = os.environ["GITHUB_SERVER_URL"] + "/" + os.environ["GITHUB_REPOSITORY"] - out = subprocess.run(["git", "remote", "add", "upstream", upstream], capture_output=True) + out = subprocess.run( + ["git", "remote", "add", "upstream", upstream], capture_output=True + ) print(out.stdout.decode("utf-8")) err = out.stderr.decode("utf-8") if err.strip(): - print("Adding upstream remote failed:", err, "branch", branch, "upstream", upstream) - out = subprocess.run(["git", "fetch", "upstream",branch], capture_output=True) + print( + "Adding upstream remote failed:", + err, + "branch", + branch, + "upstream", + upstream, + ) + out = subprocess.run(["git", "fetch", "upstream", branch], capture_output=True) print(out.stdout.decode("utf-8")) err = out.stderr.decode("utf-8") if err.strip(): - print("Fetching upstream remote failed:", err, "branch", branch, "upstream", upstream) - out = subprocess.run(["git", "worktree", "add", "--detach", dr, f"upstream/{branch}"], capture_output=True) + print( + "Fetching upstream remote failed:", + err, + "branch", + branch, + "upstream", + upstream, + ) + out = subprocess.run( + ["git", "worktree", "add", "--detach", dr, f"upstream/{branch}"], + capture_output=True, + ) print(out.stdout.decode("utf-8")) err = out.stderr.decode("utf-8") if err.strip(): print("Creating worktree failed:", err, "branch", branch, "directory", dr) return dr -def create_index_from_chart(indexdir, repository, branch, category, organization, chart, version, chart_url): - print("[INFO] create index from chart. %s, %s, %s, %s, %s" % (category, organization, chart, version, chart_url)) + +def create_index_from_chart( + indexdir, repository, branch, category, organization, chart, version, chart_url +): + print( + "[INFO] create index from chart. %s, %s, %s, %s, %s" + % (category, organization, chart, version, chart_url) + ) path = os.path.join("charts", category, organization, chart, version) chart_file_name = f"{chart}-{version}.tgz" - out = subprocess.run(["helm", "show", "chart", os.path.join(".cr-release-packages", chart_file_name)], capture_output=True) + out = subprocess.run( + [ + "helm", + "show", + "chart", + os.path.join(".cr-release-packages", chart_file_name), + ], + capture_output=True, + ) p = out.stdout.decode("utf-8") print(p) print(out.stderr.decode("utf-8")) crt = yaml.load(p, Loader=Loader) return crt + def create_index_from_report(category, report_path): print("[INFO] create index from report. 
%s, %s" % (category, report_path)) @@ -186,7 +264,6 @@ def create_index_from_report(category, report_path): chart_entry["annotations"] = annotations - digests = report_info.get_report_digests(report_path) if "package" in digests: chart_entry["digest"] = digests["package"] @@ -209,12 +286,10 @@ def set_package_digest(chart_entry): target_digest = hashlib.sha256(response.content).hexdigest() print(f"[DEBUG]: calculated digest : {target_digest}") - pkg_digest = "" if "digest" in chart_entry: pkg_digest = chart_entry["digest"] - print(f"[DEBUG]: digest in report : {pkg_digest}" ) - + print(f"[DEBUG]: digest in report : {pkg_digest}") if target_digest: if not pkg_digest: @@ -222,26 +297,43 @@ def set_package_digest(chart_entry): chart_entry["digest"] = target_digest elif pkg_digest != target_digest: # Digest was passed and computed but differ - raise Exception("Found an integrity issue. SHA256 digest passed does not match SHA256 digest computed.") + raise Exception( + "Found an integrity issue. SHA256 digest passed does not match SHA256 digest computed." + ) elif not pkg_digest: # Digest was not passed and could not be computed - raise Exception("Was unable to compute SHA256 digest, please ensure chart url points to a chart package.") - - -def update_index_and_push(indexfile, indexdir, repository, branch, category, organization, chart, version, chart_url, chart_entry, pr_number, web_catalog_only): + raise Exception( + "Was unable to compute SHA256 digest, please ensure chart url points to a chart package." + ) + + +def update_index_and_push( + indexfile, + indexdir, + repository, + branch, + category, + organization, + chart, + version, + chart_url, + chart_entry, + pr_number, + web_catalog_only, +): token = os.environ.get("GITHUB_TOKEN") print(f"Downloading {indexfile}") - r = requests.get(f'https://raw.githubusercontent.com/{repository}/{branch}/{indexfile}') - original_etag = r.headers.get('etag') + r = requests.get( + f"https://raw.githubusercontent.com/{repository}/{branch}/{indexfile}" + ) + original_etag = r.headers.get("etag") now = datetime.now(timezone.utc).astimezone().isoformat() if r.status_code == 200: data = yaml.load(r.text, Loader=Loader) data["generated"] = now else: - data = {"apiVersion": "v1", - "generated": now, - "entries": {}} + data = {"apiVersion": "v1", "generated": now, "entries": {}} print("[INFO] Updating the chart entry with new version") crtentries = [] @@ -265,7 +357,7 @@ def update_index_and_push(indexfile, indexdir, repository, branch, category, org print("[INFO] Add and commit changes to git") out = yaml.dump(data, Dumper=Dumper) print(f"{indexfile} content:\n", out) - with open(os.path.join(indexdir,indexfile), "w") as fd: + with open(os.path.join(indexdir, indexfile), "w") as fd: fd.write(out) old_cwd = os.getcwd() os.chdir(indexdir) @@ -273,41 +365,101 @@ def update_index_and_push(indexfile, indexdir, repository, branch, category, org print("Git status:") print(out.stdout.decode("utf-8")) print(out.stderr.decode("utf-8")) - out = subprocess.run(["git", "add", os.path.join(indexdir, indexfile)], cwd=indexdir, capture_output=True) + out = subprocess.run( + ["git", "add", os.path.join(indexdir, indexfile)], + cwd=indexdir, + capture_output=True, + ) print(out.stdout.decode("utf-8")) err = out.stderr.decode("utf-8") if err.strip(): - print(f"Error adding {indexfile} to git staging area", "index directory", indexdir, "branch", branch) + print( + f"Error adding {indexfile} to git staging area", + "index directory", + indexdir, + "branch", + branch, + ) out = 
subprocess.run(["git", "status"], cwd=indexdir, capture_output=True) print("Git status:") print(out.stdout.decode("utf-8")) print(out.stderr.decode("utf-8")) - out = subprocess.run(["git", "commit", "-m", f"{organization}-{chart}-{version} {indexfile} (#{pr_number})"], cwd=indexdir, capture_output=True) + out = subprocess.run( + [ + "git", + "commit", + "-m", + f"{organization}-{chart}-{version} {indexfile} (#{pr_number})", + ], + cwd=indexdir, + capture_output=True, + ) print(out.stdout.decode("utf-8")) err = out.stderr.decode("utf-8") if err.strip(): - print(f"Error committing {indexfile}", "index directory", indexdir, "branch", branch, "error:", err) - r = requests.head(f'https://raw.githubusercontent.com/{repository}/{branch}/{indexfile}') - - etag = r.headers.get('etag') + print( + f"Error committing {indexfile}", + "index directory", + indexdir, + "branch", + branch, + "error:", + err, + ) + r = requests.head( + f"https://raw.githubusercontent.com/{repository}/{branch}/{indexfile}" + ) + + etag = r.headers.get("etag") if original_etag and etag and (original_etag != etag): - print(f"{indexfile} not updated. ETag mismatch.", "original ETag", original_etag, "new ETag", etag, "index directory", indexdir, "branch", branch) + print( + f"{indexfile} not updated. ETag mismatch.", + "original ETag", + original_etag, + "new ETag", + etag, + "index directory", + indexdir, + "branch", + branch, + ) sys.exit(1) out = subprocess.run(["git", "status"], cwd=indexdir, capture_output=True) print("Git status:") print(out.stdout.decode("utf-8")) print(out.stderr.decode("utf-8")) - out = subprocess.run(["git", "push", f"https://x-access-token:{token}@github.com/{repository}", f"HEAD:refs/heads/{branch}", "-f"], cwd=indexdir, capture_output=True) + out = subprocess.run( + [ + "git", + "push", + f"https://x-access-token:{token}@github.com/{repository}", + f"HEAD:refs/heads/{branch}", + "-f", + ], + cwd=indexdir, + capture_output=True, + ) print(out.stdout.decode("utf-8")) print(out.stderr.decode("utf-8")) if out.returncode: - print(f"{indexfile} not updated. Push failed.", "index directory", indexdir, "branch", branch) + print( + f"{indexfile} not updated. Push failed.", + "index directory", + indexdir, + "branch", + branch, + ) sys.exit(1) os.chdir(old_cwd) -def update_chart_annotation(category, organization, chart_file_name, chart, report_path): - print("[INFO] Update chart annotation. %s, %s, %s, %s" % (category, organization, chart_file_name, chart)) +def update_chart_annotation( + category, organization, chart_file_name, chart, report_path +): + print( + "[INFO] Update chart annotation. 
%s, %s, %s, %s" + % (category, organization, chart_file_name, chart) + ) dr = tempfile.mkdtemp(prefix="annotations-") annotations = indexannotations.getIndexAnnotations(report_path) @@ -322,18 +474,29 @@ def update_chart_annotation(category, organization, chart_file_name, chart, repo annotations["charts.openshift.io/providerType"] = category if "charts.openshift.io/provider" not in annotations: - data = open(os.path.join("charts", category, organization, chart, "OWNERS")).read() + data = open( + os.path.join("charts", category, organization, chart, "OWNERS") + ).read() out = yaml.load(data, Loader=Loader) vendor_name = out["vendor"]["name"] annotations["charts.openshift.io/provider"] = vendor_name - out = subprocess.run(["tar", "zxvf", os.path.join(".cr-release-packages", f"{chart_file_name}"), "-C", dr], capture_output=True) + out = subprocess.run( + [ + "tar", + "zxvf", + os.path.join(".cr-release-packages", f"{chart_file_name}"), + "-C", + dr, + ], + capture_output=True, + ) print(out.stdout.decode("utf-8")) print(out.stderr.decode("utf-8")) fd = open(os.path.join(dr, chart, "Chart.yaml")) data = yaml.load(fd, Loader=Loader) - + if "annotations" not in data: data["annotations"] = annotations else: @@ -347,7 +510,9 @@ def update_chart_annotation(category, organization, chart_file_name, chart, repo with open(os.path.join(dr, chart, "Chart.yaml"), "w") as fd: fd.write(out) - out = subprocess.run(["helm", "package", os.path.join(dr, chart)], capture_output=True) + out = subprocess.run( + ["helm", "package", os.path.join(dr, chart)], capture_output=True + ) print(out.stdout.decode("utf-8")) print(out.stderr.decode("utf-8")) @@ -361,48 +526,77 @@ def update_chart_annotation(category, organization, chart_file_name, chart, repo def main(): parser = argparse.ArgumentParser() - parser.add_argument("-b", "--index-branch", dest="branch", type=str, required=True, - help="index branch") - parser.add_argument("-r", "--repository", dest="repository", type=str, required=True, - help="Git Repository") - parser.add_argument("-u", "--api-url", dest="api_url", type=str, required=True, - help="API URL for the pull request") - parser.add_argument("-n", "--pr-number", dest="pr_number", type=str, required=True, - help="current pull request number") + parser.add_argument( + "-b", + "--index-branch", + dest="branch", + type=str, + required=True, + help="index branch", + ) + parser.add_argument( + "-r", + "--repository", + dest="repository", + type=str, + required=True, + help="Git Repository", + ) + parser.add_argument( + "-u", + "--api-url", + dest="api_url", + type=str, + required=True, + help="API URL for the pull request", + ) + parser.add_argument( + "-n", + "--pr-number", + dest="pr_number", + type=str, + required=True, + help="current pull request number", + ) args = parser.parse_args() branch = args.branch.split("/")[-1] category, organization, chart, version = get_modified_charts(args.api_url) - chart_source_exists, chart_tarball_exists = check_chart_source_or_tarball_exists(category, organization, chart, version) + chart_source_exists, chart_tarball_exists = check_chart_source_or_tarball_exists( + category, organization, chart, version + ) print("[INFO] Creating Git worktree for index branch") indexdir = create_worktree_for_index(branch) env = Env() - web_catalog_only = env.bool("WEB_CATALOG_ONLY",False) + web_catalog_only = env.bool("WEB_CATALOG_ONLY", False) - print(f'[INFO] webCatalogOnly/providerDelivery is {web_catalog_only}') + print(f"[INFO] webCatalogOnly/providerDelivery is {web_catalog_only}") 
     if web_catalog_only:
         indexfile = "unpublished-certified-charts.yaml"
     else:
         indexfile = "index.yaml"
 
-
     public_key_file = ""
     print("[INFO] Report Content : ", os.environ.get("REPORT_CONTENT"))
     if chart_source_exists or chart_tarball_exists:
         if chart_source_exists:
             prepare_chart_source_for_release(category, organization, chart, version)
         if chart_tarball_exists:
-            signed_chart = signedchart.is_chart_signed(args.api_url,"")
-            public_key_file = prepare_chart_tarball_for_release(category, organization, chart, version, signed_chart)
+            signed_chart = signedchart.is_chart_signed(args.api_url, "")
+            public_key_file = prepare_chart_tarball_for_release(
+                category, organization, chart, version, signed_chart
+            )
 
         commit_hash = get_current_commit_sha()
         print("[INFO] Publish chart release to GitHub")
         push_chart_release(args.repository, organization, commit_hash)
 
         print("[INFO] Check if report exist as part of the commit")
-        report_exists, report_path = check_report_exists(category, organization, chart, version)
+        report_exists, report_path = check_report_exists(
+            category, organization, chart, version
+        )
         chart_file_name = f"{chart}-{version}.tgz"
 
         if report_exists:
@@ -412,13 +606,26 @@ def main():
             report_path = generate_report(chart_file_name)
 
         print("[INFO] Updating chart annotation")
-        update_chart_annotation(category, organization, chart_file_name, chart, report_path)
+        update_chart_annotation(
+            category, organization, chart_file_name, chart, report_path
+        )
 
         chart_url = f"https://github.com/{args.repository}/releases/download/{organization}-{chart}-{version}/{chart_file_name}"
         print("[INFO] Helm package was released at %s" % chart_url)
         print("[INFO] Creating index from chart")
-        chart_entry = create_index_from_chart(indexdir, args.repository, branch, category, organization, chart, version, chart_url)
+        chart_entry = create_index_from_chart(
+            indexdir,
+            args.repository,
+            branch,
+            category,
+            organization,
+            chart,
+            version,
+            chart_url,
+        )
     else:
-        report_path = os.path.join("charts", category, organization, chart, version, "report.yaml")
+        report_path = os.path.join(
+            "charts", category, organization, chart, version, "report.yaml"
+        )
         print(f"[INFO] Report only PR: {report_path}")
         shutil.copy(report_path, "report.yaml")
         if signedchart.check_report_for_signed_chart(report_path):
@@ -431,13 +638,26 @@ def main():
     if not tag:
         print("[ERROR] Internal error: missing chart name with version (tag)")
         sys.exit(1)
-    gitutils.add_output("tag",tag)
+    gitutils.add_output("tag", tag)
 
     current_dir = os.getcwd()
-    gitutils.add_output("report_file",f"{current_dir}/report.yaml")
+    gitutils.add_output("report_file", f"{current_dir}/report.yaml")
     if public_key_file:
         print(f"[INFO] Add key file for release : {current_dir}/{public_key_file}")
-        gitutils.add_output("public_key_file",f"{current_dir}/{public_key_file}")
+        gitutils.add_output("public_key_file", f"{current_dir}/{public_key_file}")
 
     print("Sleeping for 10 seconds")
     time.sleep(10)
-    update_index_and_push(indexfile,indexdir, args.repository, branch, category, organization, chart, version, chart_url, chart_entry, args.pr_number, web_catalog_only)
+    update_index_and_push(
+        indexfile,
+        indexdir,
+        args.repository,
+        branch,
+        category,
+        organization,
+        chart,
+        version,
+        chart_url,
+        chart_entry,
+        args.pr_number,
+        web_catalog_only,
+    )
diff --git a/scripts/src/chartrepomanager/indexannotations.py b/scripts/src/chartrepomanager/indexannotations.py
index b12a6134..3916e0c5 100644
--- a/scripts/src/chartrepomanager/indexannotations.py
+++ b/scripts/src/chartrepomanager/indexannotations.py
@@ -2,27 +2,29 @@
 import semantic_version
 import requests
 import yaml
-import json
 
-sys.path.append('../')
+sys.path.append("../")
 
 from report import report_info
 
 kubeOpenShiftVersionMap = {}
 
-def getKubVersionMap():
+
+def getKubVersionMap():
     if not kubeOpenShiftVersionMap:
-        content = requests.get("https://github.com/redhat-certification/chart-verifier/blob/main/internal/tool/kubeOpenShiftVersionMap.yaml?raw=true")
+        content = requests.get(
+            "https://github.com/redhat-certification/chart-verifier/blob/main/internal/tool/kubeOpenShiftVersionMap.yaml?raw=true"
+        )
         version_data = yaml.safe_load(content.text)
         for kubeVersion in version_data["versions"]:
-            kubeOpenShiftVersionMap[kubeVersion["kube-version"]] = kubeVersion["ocp-version"]
+            kubeOpenShiftVersionMap[kubeVersion["kube-version"]] = kubeVersion[
+                "ocp-version"
+            ]
 
-    return kubeOpenShiftVersionMap 
+    return kubeOpenShiftVersionMap
 
 
 def getOCPVersions(kubeVersion):
-
     if kubeVersion == "":
         return "N/A"
 
@@ -31,14 +33,16 @@ def getOCPVersions(kubeVersion):
     try:
         semantic_version.NpmSpec(kubeVersion)
     except ValueError:
-        print(f"Value error with kubeVersion - NpmSpec : {kubeVersion}, see if it fixable")
+        print(
+            f"Value error with kubeVersion - NpmSpec : {kubeVersion}, see if it fixable"
+        )
         try:
             # Kubversion is bad, see if we can fix it
             separator = checkKubeVersion.find(" - ")
             if separator != -1:
                 lowVersion = checkKubeVersion[:separator].strip()
-                highVersion = checkKubeVersion[separator+3:].strip()
+                highVersion = checkKubeVersion[separator + 3 :].strip()
                 checkKubeVersion = f"{semantic_version.Version.coerce(lowVersion)} - {semantic_version.Version.coerce(highVersion)}"
             else:
                 firstDigit = -1
@@ -49,7 +53,9 @@ def getOCPVersions(kubeVersion):
             if firstDigit != -1:
                 versionInRange = checkKubeVersion[firstDigit:].strip()
                 preVersion = checkKubeVersion[:firstDigit].strip()
-                checkKubeVersion = f"{preVersion}{semantic_version.Version.coerce(versionInRange)}"
+                checkKubeVersion = (
+                    f"{preVersion}{semantic_version.Version.coerce(versionInRange)}"
+                )
 
             # see if the updates have helped
             semantic_version.NpmSpec(checkKubeVersion)
@@ -59,25 +65,34 @@ def getOCPVersions(kubeVersion):
             print(f"Unable to fix value error in kubeVersion : {kubeVersion}")
             return "N/A"
 
-
     minOCP = ""
     maxOCP = ""
     getKubVersionMap()
-    for kubeVersionKey in kubeOpenShiftVersionMap :
-        #print(f"\n Map entry : {kubeVersionKey}: {kubeOpenShiftVersionMap[kubeVersionKey]}")
-        #print(f" MinOCP : {minOCP}, maxOCP: {maxOCP}")
+    for kubeVersionKey in kubeOpenShiftVersionMap:
+        # print(f"\n Map entry : {kubeVersionKey}: {kubeOpenShiftVersionMap[kubeVersionKey]}")
+        # print(f" MinOCP : {minOCP}, maxOCP: {maxOCP}")
         coercedKubeVersionKey = semantic_version.Version.coerce(kubeVersionKey)
         if coercedKubeVersionKey in semantic_version.NpmSpec(checkKubeVersion):
-            coercedOCPVersionValue = semantic_version.Version.coerce(kubeOpenShiftVersionMap[kubeVersionKey])
-            if minOCP == "" or semantic_version.Version.coerce(minOCP) > coercedOCPVersionValue:
+            coercedOCPVersionValue = semantic_version.Version.coerce(
+                kubeOpenShiftVersionMap[kubeVersionKey]
+            )
+            if (
+                minOCP == ""
+                or semantic_version.Version.coerce(minOCP) > coercedOCPVersionValue
+            ):
                 minOCP = kubeOpenShiftVersionMap[kubeVersionKey]
-                #print(f" Found new min : {checkKubeVersion}: {minOCP}")
-            if maxOCP == "" or semantic_version.Version.coerce(maxOCP) < coercedOCPVersionValue:
+                # print(f" Found new min : {checkKubeVersion}: {minOCP}")
+            if (
+                maxOCP == ""
+                or semantic_version.Version.coerce(maxOCP) < coercedOCPVersionValue
+            ):
                 maxOCP = kubeOpenShiftVersionMap[kubeVersionKey]
-                #print(f" Found new Max : {checkKubeVersion}: {maxOCP}")
+                # print(f" Found new Max : {checkKubeVersion}: {maxOCP}")
 
     # check if minOCP is open ended
-    if minOCP != "" and semantic_version.Version("1.999.999") in semantic_version.NpmSpec(checkKubeVersion):
+    if minOCP != "" and semantic_version.Version(
+        "1.999.999"
+    ) in semantic_version.NpmSpec(checkKubeVersion):
         ocp_versions = f">={minOCP}"
     elif minOCP == "":
         ocp_versions = "N/A"
@@ -90,7 +105,6 @@
 
 
 def getIndexAnnotations(report_path):
-
     annotations = report_info.get_report_annotations(report_path)
 
     set_annotations = {}
@@ -100,9 +114,13 @@
             full_version = annotations[annotation]
             if full_version != "N/A" and semantic_version.validate(full_version):
                 ver = semantic_version.Version(full_version)
-                set_annotations["charts.openshift.io/testedOpenShiftVersion"] = f"{ver.major}.{ver.minor}"
+                set_annotations[
+                    "charts.openshift.io/testedOpenShiftVersion"
+                ] = f"{ver.major}.{ver.minor}"
             else:
-                set_annotations["charts.openshift.io/testedOpenShiftVersion"] = annotations[annotation]
+                set_annotations[
+                    "charts.openshift.io/testedOpenShiftVersion"
+                ] = annotations[annotation]
         else:
             if annotation == "charts.openshift.io/supportedOpenShiftVersions":
                 OCPSupportedSet = True
@@ -111,7 +129,7 @@
     if not OCPSupportedSet:
         chart = report_info.get_report_chart(report_path)
         OCPVersions = "N/A"
-        if "kubeVersion" in chart and chart["kubeVersion"]: 
+        if "kubeVersion" in chart and chart["kubeVersion"]:
             kubeVersion = chart["kubeVersion"]
             OCPVersions = getOCPVersions(kubeVersion)
         set_annotations["charts.openshift.io/supportedOpenShiftVersions"] = OCPVersions
diff --git a/scripts/src/checkautomerge/checkautomerge.py b/scripts/src/checkautomerge/checkautomerge.py
index 241dfec9..25f61b91 100644
--- a/scripts/src/checkautomerge/checkautomerge.py
+++ b/scripts/src/checkautomerge/checkautomerge.py
@@ -5,9 +5,13 @@
 import requests
 
+
 def ensure_pull_request_not_merged(api_url):
     # api_url https://api.github.com/repos///pulls/1
-    headers = {'Accept': 'application/vnd.github.v3+json','Authorization': f'Bearer {os.environ.get("BOT_TOKEN")}'}
+    headers = {
+        "Accept": "application/vnd.github.v3+json",
+        "Authorization": f'Bearer {os.environ.get("BOT_TOKEN")}',
+    }
     merged = False
     for i in range(20):
         r = requests.get(api_url, headers=headers)
@@ -26,9 +30,16 @@ def ensure_pull_request_not_merged(api_url):
         print("[ERROR] Pull request not merged")
         sys.exit(1)
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument("-u", "--api-url", dest="api_url", type=str, required=True,
-                        help="API URL for the pull request")
+    parser.add_argument(
+        "-u",
+        "--api-url",
+        dest="api_url",
+        type=str,
+        required=True,
+        help="API URL for the pull request",
+    )
     args = parser.parse_args()
     ensure_pull_request_not_merged(args.api_url)
diff --git a/scripts/src/checkprcontent/checkpr.py b/scripts/src/checkprcontent/checkpr.py
index f91232b5..79a9de26 100644
--- a/scripts/src/checkprcontent/checkpr.py
+++ b/scripts/src/checkprcontent/checkpr.py
@@ -7,12 +7,13 @@
 import requests
 import semver
 import yaml
+
 try:
-    from yaml import CLoader as Loader, CDumper as Dumper
+    from yaml import CLoader as Loader
 except ImportError:
-    from yaml import Loader, Dumper
+    from yaml import Loader
 
-sys.path.append('../')
+sys.path.append("../")
 
 from owners import owners_file
 from report import verifier_report
 from pullrequest import prartifact
@@ -21,90 +22,103 @@
 ALLOW_CI_CHANGES = "allow/ci-changes"
 TYPE_MATCH_EXPRESSION = "(partners|redhat|community)"
 
-def check_web_catalog_only(report_in_pr, num_files_in_pr, report_file_match):
+
+def check_web_catalog_only(report_in_pr, num_files_in_pr, report_file_match):
     print(f"[INFO] report in PR {report_in_pr}")
     print(f"[INFO] num files in PR {num_files_in_pr}")
-    
+
     category, organization, chart, version = report_file_match.groups()
-    print(f"read owners file : {category}/{organization}/{chart}" )
-    found_owners,owner_data = owners_file.get_owner_data(category, organization, chart)
+    print(f"read owners file : {category}/{organization}/{chart}")
+    found_owners, owner_data = owners_file.get_owner_data(category, organization, chart)
 
     if found_owners:
         owner_web_catalog_only = owners_file.get_web_catalog_only(owner_data)
-        print(f"[INFO] webCatalogOnly/providerDelivery from OWNERS : {owner_web_catalog_only}")
+        print(
+            f"[INFO] webCatalogOnly/providerDelivery from OWNERS : {owner_web_catalog_only}"
+        )
     else:
         msg = "[ERROR] OWNERS file was not found."
         print(msg)
-        gitutils.add_output("owners-error-message",msg)
+        gitutils.add_output("owners-error-message", msg)
         sys.exit(1)
 
     if report_in_pr:
-        report_file_path = os.path.join("pr-branch","charts", category, organization, chart, version, "report.yaml")
-        print(f"read report file : {report_file_path}" )
-        found_report,report_data = verifier_report.get_report_data(report_file_path)
+        report_file_path = os.path.join(
+            "pr-branch", "charts", category, organization, chart, version, "report.yaml"
+        )
+        print(f"read report file : {report_file_path}")
+        found_report, report_data = verifier_report.get_report_data(report_file_path)
 
         if found_report:
             report_web_catalog_only = verifier_report.get_web_catalog_only(report_data)
-            print(f"[INFO] webCatalogOnly/providerDelivery from report : {report_web_catalog_only}")
+            print(
+                f"[INFO] webCatalogOnly/providerDelivery from report : {report_web_catalog_only}"
+            )
        else:
            msg = f"[ERROR] Failed tp open report: {report_file_path}."
            print(msg)
-            gitutils.add_output("pr-content-error-message",msg)
+            gitutils.add_output("pr-content-error-message", msg)
            sys.exit(1)
 
     web_catalog_only = False
     if report_in_pr and num_files_in_pr > 1:
         if report_web_catalog_only or owner_web_catalog_only:
-            msg = f"[ERROR] The web catalog distribution method requires the pull request to be report only."
+            msg = "[ERROR] The web catalog distribution method requires the pull request to be report only."
             print(msg)
-            gitutils.add_output("pr-content-error-message",msg)
+            gitutils.add_output("pr-content-error-message", msg)
             sys.exit(1)
     elif report_in_pr:
         if report_web_catalog_only and owner_web_catalog_only:
             if verifier_report.get_package_digest(report_data):
                 web_catalog_only = True
             else:
-                msg = f"[ERROR] The web catalog distribution method requires a package digest in the report."
+                msg = "[ERROR] The web catalog distribution method requires a package digest in the report."
                 print(msg)
-                gitutils.add_output("pr-content-error-message",msg)
+                gitutils.add_output("pr-content-error-message", msg)
                 sys.exit(1)
         elif report_web_catalog_only:
-            msg = f"[ERROR] Report indicates web catalog only but the distribution method set for the chart is not web catalog only."
+            msg = "[ERROR] Report indicates web catalog only but the distribution method set for the chart is not web catalog only."
             print(msg)
-            gitutils.add_output("pr-content-error-message",msg)
+            gitutils.add_output("pr-content-error-message", msg)
             sys.exit(1)
         elif owner_web_catalog_only:
-            msg = f"[ERROR] The web catalog distribution method is set for the chart but is not set in the report."
+            msg = "[ERROR] The web catalog distribution method is set for the chart but is not set in the report."
             print(msg)
-            gitutils.add_output("pr-content-error-message",msg)
+            gitutils.add_output("pr-content-error-message", msg)
             sys.exit(1)
 
     if web_catalog_only:
-        print(f"[INFO] webCatalogOnly/providerDelivery is a go")
-        gitutils.add_output("webCatalogOnly","True")
+        print("[INFO] webCatalogOnly/providerDelivery is a go")
+        gitutils.add_output("webCatalogOnly", "True")
     else:
-        gitutils.add_output("webCatalogOnly","False")
-        print(f"[INFO] webCatalogOnly/providerDelivery is a no-go")
+        gitutils.add_output("webCatalogOnly", "False")
+        print(f"[INFO] webCatalogOnly/providerDelivery is a no-go")
+
 
 def get_file_match_compiled_patterns():
-    """Return a tuple of patterns, where the first can be used to match any file in a chart PR 
+    """Return a tuple of patterns, where the first can be used to match any file in a chart PR
     and the second can be used to match a valid report file within a chart PR. The patterns
     match based on the relative path of a file to the base repository
-    
+
     Both patterns capture chart type, chart vendor, chart name and chart version from the file path..
-    
+
     Examples of valid file paths are:
-    
+
     charts/partners/hashicorp/vault/0.20.0/
     charts/partners/hashicorp/vault/0.20.0//report.yaml
     """
-    pattern = re.compile(r"charts/"+TYPE_MATCH_EXPRESSION+"/([\w-]+)/([\w-]+)/([\w\.-]+)/.*")
-    reportpattern = re.compile(r"charts/"+TYPE_MATCH_EXPRESSION+"/([\w-]+)/([\w-]+)/([\w\.-]+)/report.yaml")
-    tarballpattern = re.compile(r"charts/(partners|redhat|community)/([\w-]+)/([\w-]+)/([\w\.-]+)/(.*\.tgz$)")
-    return pattern,reportpattern,tarballpattern
+    pattern = re.compile(
+        r"charts/" + TYPE_MATCH_EXPRESSION + "/([\w-]+)/([\w-]+)/([\w\.-]+)/.*"
+    )
+    reportpattern = re.compile(
+        r"charts/" + TYPE_MATCH_EXPRESSION + "/([\w-]+)/([\w-]+)/([\w\.-]+)/report.yaml"
+    )
+    tarballpattern = re.compile(
+        r"charts/(partners|redhat|community)/([\w-]+)/([\w-]+)/([\w\.-]+)/(.*\.tgz$)"
+    )
+    return pattern, reportpattern, tarballpattern
 
 
 def ensure_only_chart_is_modified(api_url, repository, branch):
@@ -114,7 +128,7 @@ def ensure_only_chart_is_modified(api_url, repository, branch):
         return
 
     files = prartifact.get_modified_files(api_url)
-    pattern,reportpattern,tarballpattern = get_file_match_compiled_patterns()
+    pattern, reportpattern, tarballpattern = get_file_match_compiled_patterns()
     matches_found = 0
     report_found = False
     none_chart_files = {}
@@ -128,18 +142,18 @@ def ensure_only_chart_is_modified(api_url, repository, branch):
             matches_found += 1
             if reportpattern.match(file_path):
                 print(f"[INFO] Report found: {file_path}")
-                gitutils.add_output("report-exists","true")
+                gitutils.add_output("report-exists", "true")
                 report_found = True
         else:
             tar_match = tarballpattern.match(file_path)
             if tar_match:
                 print(f"[INFO] tarball found: {file_path}")
-                _,_,chart_name,chart_version,tar_name = tar_match.groups()
+                _, _, chart_name, chart_version, tar_name = tar_match.groups()
                 expected_tar_name = f"{chart_name}-{chart_version}.tgz"
                 if tar_name != expected_tar_name:
                     msg = f"[ERROR] the tgz file is named incorrectly. Expected: {expected_tar_name}"
Expected: {expected_tar_name}" print(msg) - gitutils.add_output("pr-content-error-message",msg) + gitutils.add_output("pr-content-error-message", msg) exit(1) if matches_found == 1: @@ -147,102 +161,136 @@ def ensure_only_chart_is_modified(api_url, repository, branch): elif pattern_match.groups() != match.groups(): msg = "[ERROR] A PR must contain only one chart. Current PR includes files for multiple charts." print(msg) - gitutils.add_output("pr-content-error-message",msg) + gitutils.add_output("pr-content-error-message", msg) exit(1) - + if none_chart_files: - if len(files) > 1 or "OWNERS" not in none_chart_files: #OWNERS not present or preset but not the only file + if ( + len(files) > 1 or "OWNERS" not in none_chart_files + ): # OWNERS not present or preset but not the only file example_file = list(none_chart_files.values())[0] msg = f"[ERROR] PR includes one or more files not related to charts, e.g., {example_file}" print(msg) - gitutils.add_output("pr-content-error-message",msg) + gitutils.add_output("pr-content-error-message", msg) if "OWNERS" in none_chart_files: file_path = none_chart_files["OWNERS"] path_parts = file_path.split("/") - category = path_parts[1] # Second after charts + category = path_parts[1] # Second after charts if category == "partners": msg = "[ERROR] OWNERS file should never be set directly by partners. See certification docs." print(msg) - gitutils.add_output("owners-error-message",msg) - elif matches_found>0: # There is a mix of chart and non-chart files including OWNERS + gitutils.add_output("owners-error-message", msg) + elif ( + matches_found > 0 + ): # There is a mix of chart and non-chart files including OWNERS msg = "[ERROR] Send OWNERS file by itself in a separate PR." print(msg) - gitutils.add_output("owners-error-message",msg) - elif len(files) == 1: # OWNERS file is the only file in PR + gitutils.add_output("owners-error-message", msg) + elif len(files) == 1: # OWNERS file is the only file in PR msg = "[INFO] OWNERS file changes require manual review by maintainers." 
print(msg) - gitutils.add_output("owners-error-message",msg) - + gitutils.add_output("owners-error-message", msg) + sys.exit(1) check_web_catalog_only(report_found, matches_found, pattern_match) - if matches_found>0: + if matches_found > 0: category, organization, chart, version = pattern_match.groups() - gitutils.add_output("category",f"{'partner' if category == 'partners' else category}") - gitutils.add_output("organization",organization) + gitutils.add_output( + "category", f"{'partner' if category == 'partners' else category}" + ) + gitutils.add_output("organization", organization) if not semver.VersionInfo.isvalid(version): - msg = f"[ERROR] Helm chart version is not a valid semantic version: {version}" + msg = ( + f"[ERROR] Helm chart version is not a valid semantic version: {version}" + ) print(msg) - gitutils.add_output("pr-content-error-message",msg) + gitutils.add_output("pr-content-error-message", msg) sys.exit(1) print("Downloading index.yaml", category, organization, chart, version) - r = requests.get(f'https://raw.githubusercontent.com/{repository}/{branch}/index.yaml') + r = requests.get( + f"https://raw.githubusercontent.com/{repository}/{branch}/index.yaml" + ) if r.status_code == 200: data = yaml.load(r.text, Loader=Loader) else: - data = {"apiVersion": "v1", - "entries": {}} + data = {"apiVersion": "v1", "entries": {}} entry_name = f"{organization}-{chart}" d = data["entries"].get(entry_name, []) - gitutils.add_output("chart-entry-name",entry_name) + gitutils.add_output("chart-entry-name", entry_name) for v in d: if v["version"] == version: msg = f"[ERROR] Helm chart release already exists in the index.yaml: {version}" print(msg) - gitutils.add_output("pr-content-error-message",msg) + gitutils.add_output("pr-content-error-message", msg) sys.exit(1) tag_name = f"{organization}-{chart}-{version}" - gitutils.add_output("chart-name-with-version",tag_name) + gitutils.add_output("chart-name-with-version", tag_name) tag_api = f"https://api.github.com/repos/{repository}/git/ref/tags/{tag_name}" - headers = {'Accept': 'application/vnd.github.v3+json','Authorization': f'Bearer {os.environ.get("BOT_TOKEN")}'} + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f'Bearer {os.environ.get("BOT_TOKEN")}', + } print(f"[INFO] checking tag: {tag_api}") r = requests.head(tag_api, headers=headers) if r.status_code == 200: msg = f"[ERROR] Helm chart release already exists in the GitHub Release/Tag: {tag_name}" print(msg) - gitutils.add_output("pr-content-error-message",msg) + gitutils.add_output("pr-content-error-message", msg) sys.exit(1) try: if prartifact.xRateLimit in r.headers: - print(f'[DEBUG] {prartifact.xRateLimit} : {r.headers[prartifact.xRateLimit]}') + print( + f"[DEBUG] {prartifact.xRateLimit} : {r.headers[prartifact.xRateLimit]}" + ) if prartifact.xRateRemain in r.headers: - print(f'[DEBUG] {prartifact.xRateRemain} : {r.headers[prartifact.xRateRemain]}') + print( + f"[DEBUG] {prartifact.xRateRemain} : {r.headers[prartifact.xRateRemain]}" + ) response_content = r.json() if "message" in response_content: - print(f'[ERROR] getting index file content: {response_content["message"]}') + print( + f'[ERROR] getting index file content: {response_content["message"]}' + ) sys.exit(1) except json.decoder.JSONDecodeError: pass - - def main(): parser = argparse.ArgumentParser() - parser.add_argument("-b", "--index-branch", dest="branch", type=str, required=True, - help="index branch") - parser.add_argument("-r", "--repository", dest="repository", type=str, 
required=True, - help="Git Repository") - parser.add_argument("-u", "--api-url", dest="api_url", type=str, required=True, - help="API URL for the pull request") + parser.add_argument( + "-b", + "--index-branch", + dest="branch", + type=str, + required=True, + help="index branch", + ) + parser.add_argument( + "-r", + "--repository", + dest="repository", + type=str, + required=True, + help="Git Repository", + ) + parser.add_argument( + "-u", + "--api-url", + dest="api_url", + type=str, + required=True, + help="API URL for the pull request", + ) args = parser.parse_args() branch = args.branch.split("/")[-1] ensure_only_chart_is_modified(args.api_url, args.repository, branch) diff --git a/scripts/src/indexfile/index.py b/scripts/src/indexfile/index.py index 271620cc..85e7ffa9 100644 --- a/scripts/src/indexfile/index.py +++ b/scripts/src/indexfile/index.py @@ -1,15 +1,15 @@ - import json import requests import yaml import semantic_version import sys -sys.path.append('../') +sys.path.append("../") from chartrepomanager import indexannotations INDEX_FILE = "https://charts.openshift.io/index.yaml" + def _make_http_request(url, body=None, params={}, headers={}, verbose=False): response = requests.get(url, params=params, headers=headers, json=body) if verbose: @@ -19,24 +19,29 @@ def _make_http_request(url, body=None, params={}, headers={}, verbose=False): print(response.text) return response.text + def _load_index_yaml(): yaml_text = _make_http_request(INDEX_FILE) dct = yaml.safe_load(yaml_text) return dct + def get_chart_info(tar_name): index_dct = _load_index_yaml() for entry, charts in index_dct["entries"].items(): if tar_name.startswith(entry): for chart in charts: index_tar_name = f"{entry}-{chart['version']}" - if tar_name == index_tar_name : + if tar_name == index_tar_name: print(f"[INFO] match found: {tar_name}") - providerType = chart["annotations"]["charts.openshift.io/providerType"] + providerType = chart["annotations"][ + "charts.openshift.io/providerType" + ] provider = chart["annotations"]["charts.openshift.io/provider"] return providerType, provider, chart["name"], chart["version"] print(f"[INFO] match not found: {tar_name}") - return "","","","" + return "", "", "", "" + def get_charts_info(): chart_info_list = [] @@ -45,48 +50,53 @@ def get_charts_info(): for entry, charts in index_dct["entries"].items(): for chart in charts: chart_info = {} - chart_info["name"] = chart['name'] + chart_info["name"] = chart["name"] chart_info["version"] = chart["version"] - chart_info["providerType"] = chart["annotations"]["charts.openshift.io/providerType"] - chart_info["provider"] = entry.removesuffix(f'-{chart["name"]}') - #print(f'[INFO] found chart : {chart_info["provider"]} {chart["name"]} {chart["version"]} ') - if 'charts.openshift.io/supportedOpenShiftVersions' in chart["annotations"]: - chart_info["supportedOCP"] = chart["annotations"]["charts.openshift.io/supportedOpenShiftVersions"] + chart_info["providerType"] = chart["annotations"][ + "charts.openshift.io/providerType" + ] + chart_info["provider"] = entry.removesuffix(f'-{chart["name"]}') + # print(f'[INFO] found chart : {chart_info["provider"]} {chart["name"]} {chart["version"]} ') + if "charts.openshift.io/supportedOpenShiftVersions" in chart["annotations"]: + chart_info["supportedOCP"] = chart["annotations"][ + "charts.openshift.io/supportedOpenShiftVersions" + ] else: chart_info["supportedOCP"] = "" if "kubeVersion" in chart: chart_info["kubeVersion"] = chart["kubeVersion"] else: - chart_info["kubeVersion"] ="" + 
chart_info["kubeVersion"] = "" chart_info_list.append(chart_info) return chart_info_list + def get_latest_charts(): chart_list = get_charts_info() print(f"{len(chart_list)} charts found in Index file") - chart_in_process = {"name" : ""} + chart_in_process = {"name": ""} chart_latest_version = "" latest_charts = [] - for index,chart in enumerate(chart_list): + for index, chart in enumerate(chart_list): chart_name = chart["name"] - #print(f'[INFO] look for latest chart : {chart_name} {chart["version"]}') + # print(f'[INFO] look for latest chart : {chart_name} {chart["version"]}') if chart_name == chart_in_process["name"]: new_version = semantic_version.Version.coerce(chart["version"]) - #print(f' [INFO] compare chart versions : {new_version}({chart["version"]}) : {chart_latest_version}') + # print(f' [INFO] compare chart versions : {new_version}({chart["version"]}) : {chart_latest_version}') if new_version > chart_latest_version: - #print(f' [INFO] a new latest chart version : {new_version}') + # print(f' [INFO] a new latest chart version : {new_version}') chart_latest_version = new_version chart_in_process = chart else: if chart_in_process["name"] != "": - #print(f' [INFO] chart completed : {chart_in_process["name"]} {chart_in_process["version"]}') + # print(f' [INFO] chart completed : {chart_in_process["name"]} {chart_in_process["version"]}') latest_charts.append(chart_in_process) - #print(f'[INFO] new chart found : {chart_name} {chart["version"]}') + # print(f'[INFO] new chart found : {chart_name} {chart["version"]}') chart_in_process = chart chart_version = chart["version"] if chart_version.startswith("v"): @@ -95,8 +105,8 @@ def get_latest_charts(): else: chart_in_process = chart - if index+1 == len(chart_list): - #print(f' [INFO] last chart completed : {chart_in_process["name"]} {chart_in_process["version"]}') + if index + 1 == len(chart_list): + # print(f' [INFO] last chart completed : {chart_in_process["name"]} {chart_in_process["version"]}') latest_charts.append(chart_in_process) return latest_charts @@ -110,25 +120,29 @@ def get_latest_charts(): for chart in chart_list: print(f'[INFO] found latest chart : {chart["name"]} {chart["version"]}') - OCP_VERSION = semantic_version.Version.coerce("4.11") for chart in chart_list: - if "supportedOCP" in chart and chart["supportedOCP"] != "N/A" and chart["supportedOCP"] != "": + if ( + "supportedOCP" in chart + and chart["supportedOCP"] != "N/A" + and chart["supportedOCP"] != "" + ): if OCP_VERSION in semantic_version.NpmSpec(chart["supportedOCP"]): - print(f'PASS: Chart supported OCP version {chart["supportedOCP"]} includes: {OCP_VERSION}') + print( + f'PASS: Chart supported OCP version {chart["supportedOCP"]} includes: {OCP_VERSION}' + ) else: - print(f' ERROR: Chart supported OCP version {chart["supportedOCP"]} does not include {OCP_VERSION}') + print( + f' ERROR: Chart supported OCP version {chart["supportedOCP"]} does not include {OCP_VERSION}' + ) elif "kubeVersion" in chart and chart["kubeVersion"] != "": supportedOCPVersion = indexannotations.getOCPVersions(chart["kubeVersion"]) if OCP_VERSION in semantic_version.NpmSpec(supportedOCPVersion): - print(f'PASS: Chart kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) includes OCP version: {OCP_VERSION}') + print( + f'PASS: Chart kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) includes OCP version: {OCP_VERSION}' + ) else: - print(f' ERROR: Chart kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include {OCP_VERSION}') - - - - - - - + 
print( + f' ERROR: Chart kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include {OCP_VERSION}' + ) diff --git a/scripts/src/metrics/metrics.py b/scripts/src/metrics/metrics.py index 116cdd6c..c2a289d8 100644 --- a/scripts/src/metrics/metrics.py +++ b/scripts/src/metrics/metrics.py @@ -1,7 +1,5 @@ - import argparse import itertools -import json import requests import sys import analytics @@ -9,28 +7,41 @@ import re from github import Github -sys.path.append('../') +sys.path.append("../") from indexfile import index from pullrequest import prepare_pr_comment as pr_comment from collections import OrderedDict file_pattern = re.compile(r"charts/([\w-]+)/([\w-]+)/([\w\.-]+)/([\w\.-]+)/.*") -chart_downloads_event="Chart Downloads v1.0" -ignore_users=["zonggen","mmulholla","dperaza4dustbit","openshift-helm-charts-bot","baijum","tisutisu","rhrivero","Kartikey-star"] -pr_submission="PR Submission v1.0" -pr_merged="PR Merged v1.0" -pr_outcome="PR Outcome v1.0" -charts="charts" +chart_downloads_event = "Chart Downloads v1.0" +ignore_users = [ + "zonggen", + "mmulholla", + "dperaza4dustbit", + "openshift-helm-charts-bot", + "baijum", + "tisutisu", + "rhrivero", + "Kartikey-star", +] +pr_submission = "PR Submission v1.0" +pr_merged = "PR Merged v1.0" +pr_outcome = "PR Outcome v1.0" +charts = "charts" xRateLimit = "X-RateLimit-Limit" xRateRemain = "X-RateLimit-Remaining" + def parse_response(response): result = [] for obj in response: - if 'name' in obj and 'assets' in obj: - for asset in obj['assets']: + if "name" in obj and "assets" in obj: + for asset in obj["assets"]: if asset["name"].endswith(".tgz"): - release = { 'name' : obj['name'], "asset" : { asset.get('name') : asset.get('download_count', 0)}} + release = { + "name": obj["name"], + "asset": {asset.get("name"): asset.get("download_count", 0)}, + } result.append(release) return result @@ -38,19 +49,26 @@ def parse_response(response): def get_release_metrics(): result = [] for i in itertools.count(start=1): - request_headers = {'Accept': 'application/vnd.github.v3+json','Authorization': f'Bearer {os.environ.get("BOT_TOKEN")}'} + request_headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f'Bearer {os.environ.get("BOT_TOKEN")}', + } response = requests.get( - f'https://api.github.com/repos/openshift-helm-charts/charts/releases?per_page=100&page={i}',headers=request_headers) + f"https://api.github.com/repos/openshift-helm-charts/charts/releases?per_page=100&page={i}", + headers=request_headers, + ) if not 200 <= response.status_code < 300: - print(f"[ERROR] unexpected response getting release data : {response.status_code} : {response.reason}") + print( + f"[ERROR] unexpected response getting release data : {response.status_code} : {response.reason}" + ) sys.exit(1) - + response_json = response.json() if xRateLimit in response.headers: - print(f'[DEBUG] {xRateLimit} : {response.headers[xRateLimit]}') + print(f"[DEBUG] {xRateLimit} : {response.headers[xRateLimit]}") if xRateRemain in response.headers: - print(f'[DEBUG] {xRateRemain} : {response.headers[xRateRemain]}') + print(f"[DEBUG] {xRateRemain} : {response.headers[xRateRemain]}") if "message" in response_json: print(f'[ERROR] getting pr files: {response_json["message"]}') @@ -61,53 +79,77 @@ def get_release_metrics(): result.extend(response_json) return parse_response(result) + def send_release_metrics(write_key, downloads, prefix): - metrics={} - chart_downloads=[] - chart_downloads_latest=[] + metrics = {} + chart_downloads = [] + 
chart_downloads_latest = [] for release in downloads: - _,provider,chart,_ = index.get_chart_info(release.get('name')) - if len(provider)>0: + _, provider, chart, _ = index.get_chart_info(release.get("name")) + if len(provider) > 0: if provider not in metrics: metrics[provider] = {} if chart not in metrics[provider]: metrics[provider][chart] = {} - for key in release.get('asset'): - metrics[provider][chart][key] = release.get('asset')[key] - + for key in release.get("asset"): + metrics[provider][chart][key] = release.get("asset")[key] for provider in metrics: for chart in metrics[provider]: - ordered_download_perChart = OrderedDict(sorted(metrics[provider][chart].items(),key = lambda i: i[1],reverse=True)) - for key,value in ordered_download_perChart.items(): - chart_downloads_latest.append({"downloads":value,"name":key,"provider":provider}) + ordered_download_perChart = OrderedDict( + sorted( + metrics[provider][chart].items(), key=lambda i: i[1], reverse=True + ) + ) + for key, value in ordered_download_perChart.items(): + chart_downloads_latest.append( + {"downloads": value, "name": key, "provider": provider} + ) break - for key,value in metrics[provider][chart].items(): - chart_downloads.append({"downloads":value,"name":key,"provider":provider}) - chart_downloads.sort(key = lambda k : k['downloads'],reverse=True) - chart_downloads_latest.sort(key = lambda k : k['downloads'],reverse=True) + for key, value in metrics[provider][chart].items(): + chart_downloads.append( + {"downloads": value, "name": key, "provider": provider} + ) + chart_downloads.sort(key=lambda k: k["downloads"], reverse=True) + chart_downloads_latest.sort(key=lambda k: k["downloads"], reverse=True) for x in range(len(chart_downloads)): - send_download_metric(write_key,chart_downloads[x]["provider"],chart_downloads[x]["downloads"],chart_downloads[x]["name"],x+1,prefix) + send_download_metric( + write_key, + chart_downloads[x]["provider"], + chart_downloads[x]["downloads"], + chart_downloads[x]["name"], + x + 1, + prefix, + ) for x in range(5): - send_top_five_metric(write_key,chart_downloads_latest[x]["provider"],chart_downloads_latest[x]["downloads"],chart_downloads_latest[x]["name"],x+1,prefix) + send_top_five_metric( + write_key, + chart_downloads_latest[x]["provider"], + chart_downloads_latest[x]["downloads"], + chart_downloads_latest[x]["name"], + x + 1, + prefix, + ) + -def send_download_metric(write_key,partner,downloads,artifact_name,rank,prefix): +def send_download_metric(write_key, partner, downloads, artifact_name, rank, prefix): id = f"{prefix}-{partner}-{artifact_name}" - properties = {"downloads":downloads,"rank":rank,"name":artifact_name } + properties = {"downloads": downloads, "rank": rank, "name": artifact_name} + + send_metric(write_key, id, chart_downloads_event, properties) - send_metric(write_key,id,chart_downloads_event,properties) -def send_top_five_metric(write_key,partner,downloads,artifact_name,rank,prefix): +def send_top_five_metric(write_key, partner, downloads, artifact_name, rank, prefix): id = f"{prefix}-top5" - properties = {"downloads":downloads,"rank":rank,"name":artifact_name } + properties = {"downloads": downloads, "rank": rank, "name": artifact_name} - send_metric(write_key,id,chart_downloads_event,properties) + send_metric(write_key, id, chart_downloads_event, properties) -def send_pull_request_metrics(write_key,g): +def send_pull_request_metrics(write_key, g): chart_submissions = 0 partners = [] partner_charts = [] @@ -116,9 +158,9 @@ def send_pull_request_metrics(write_key,g): 
charts_in_progress = 0 abandoned = [] repo = g.get_repo("openshift-helm-charts/charts") - pull_requests = repo.get_pulls(state='all') + pull_requests = repo.get_pulls(state="all") for pr in pull_requests: - pr_content,type,provider,chart,version = check_and_get_pr_content(pr,repo) + pr_content, type, provider, chart, version = check_and_get_pr_content(pr, repo) if pr_content != "not-chart": chart_submissions += 1 if pr.closed_at and not pr.merged_at: @@ -133,15 +175,24 @@ def send_pull_request_metrics(write_key,g): if chart not in partner_charts: partner_charts.append(chart) else: - charts_in_progress +=1 + charts_in_progress += 1 - check_rate_limit(g,False) + check_rate_limit(g, False) print(f"[INFO] abandoned PRS: {abandoned}") - send_summary_metric(write_key,chart_submissions,charts_merged,charts_abandoned,charts_in_progress,len(partners),len(partner_charts)) + send_summary_metric( + write_key, + chart_submissions, + charts_merged, + charts_abandoned, + charts_in_progress, + len(partners), + len(partner_charts), + ) + def get_pr_files(pr): - files=pr.get_files() + files = pr.get_files() pr_chart_submission_files = [] for file in files: pr_chart_submission_files.append(file.filename) @@ -149,7 +200,6 @@ def get_pr_files(pr): def process_report_fails(message_file): - fails = "0" num_error_messages = 0 error_messages = [] @@ -177,19 +227,19 @@ def process_report_fails(message_file): if "Error message(s)" in message_line: num_error_messages = 1 elif num_error_messages <= int(fails): - print(f"[INFO] add error message: {message_line.strip()}" ) + print(f"[INFO] add error message: {message_line.strip()}") error_messages.append(message_line.strip()) - num_error_messages +=1 + num_error_messages += 1 elif not check_failures and len(message_line) > 0: non_check_failures = True - print(f"[INFO] non-check message: {message_line.strip()}" ) + print(f"[INFO] non-check message: {message_line.strip()}") error_messages.append(message_line.strip()) if check_failures: for error_message in error_messages: - if ("Missing required annotations" in error_message - or - "Empty metadata in chart" in error_messages + if ( + "Missing required annotations" in error_message + or "Empty metadata in chart" in error_messages ): checks_failed.append("required-annotations-present") elif "Chart test files do not exist" in error_message: @@ -200,16 +250,16 @@ def process_report_fails(message_file): checks_failed.append("contains-values") elif "Values schema file does not exist" in error_message: checks_failed.append("contains-values-schema") - elif ("Kubernetes version is not specified" in error_message - or - "Error converting kubeVersion to an OCP range" in error_message + elif ( + "Kubernetes version is not specified" in error_message + or "Error converting kubeVersion to an OCP range" in error_message ): checks_failed.append("has-kubeversion") elif "Helm lint has failed" in error_message: checks_failed.append("helm_lint") - elif ( "Failed to certify images" in error_message - or - "Image is not Red Hat certified" in error_message + elif ( + "Failed to certify images" in error_message + or "Image is not Red Hat certified" in error_message ): if "images-are-certified" not in checks_failed: checks_failed.append("images-are-certified") @@ -224,31 +274,32 @@ def process_report_fails(message_file): else: checks_failed.append("chart-testing") elif non_check_failures: - fails="1" + fails = "1" checks_failed.append("other-non-check-failure") - return int(fails),checks_failed + return int(fails), checks_failed -def 
process_comments(repo,pr): +def process_comments(repo, pr): issue = repo.get_issue(number=pr.number) comments = issue.get_comments() num_builds = 0 for comment in comments: - report_result = parse_message(comment.body,pr.number) + report_result = parse_message(comment.body, pr.number) if report_result != "not-found": num_builds += 1 return num_builds -def process_comment_file(message_file,pr_number): - with open(message_file, 'r') as file: +def process_comment_file(message_file, pr_number): + with open(message_file, "r") as file: message = file.read() - return parse_message(message,pr_number) + return parse_message(message, pr_number) -def parse_message(message,pr_number): + +def parse_message(message, pr_number): report_result = "not-found" if pr_comment.get_comment_header(pr_number) in message: if pr_comment.get_verifier_errors_comment() in message: @@ -260,21 +311,22 @@ def parse_message(message,pr_number): elif pr_comment.get_community_review_message() in message: report_result = "community_review" - print(f"[INFO] report_result : {report_result}") return report_result -def get_pr_content(pr): +def get_pr_content(pr): pr_content = "not-chart" pr_chart_submission_files = get_pr_files(pr) if len(pr_chart_submission_files) > 0: match = file_pattern.match(pr_chart_submission_files[0]) if match: - type,org,chart,version = match.groups() + type, org, chart, version = match.groups() if type == "partners": type = "partner" - print(f"[INFO] Found PR {pr.number}:{pr.user.login}: type: {type},org: {org},chart: {chart},version: {version}, #files: {len(pr_chart_submission_files)}, file match: {pr_chart_submission_files[0]}") + print( + f"[INFO] Found PR {pr.number}:{pr.user.login}: type: {type},org: {org},chart: {chart},version: {version}, #files: {len(pr_chart_submission_files)}, file match: {pr_chart_submission_files[0]}" + ) tgz_found = False report_found = False src_found = False @@ -299,41 +351,58 @@ def get_pr_content(pr): elif src_found: pr_content = "src only" - return pr_content,type,org,chart,version + return pr_content, type, org, chart, version + + return pr_content, "", "", "", "" - return pr_content,"","","","" -def check_and_get_pr_content(pr,repo): +def check_and_get_pr_content(pr, repo): repo_name = repo.full_name - if (pr.user.login in ignore_users and pr.user.login not in repo_name) or pr.draft or pr.base.ref != "main": - print(f"[INFO] Ignore pr, user: {pr.user.login}, draft: {pr.draft}, target_branch: {pr.base.ref}") - return "not-chart","","","","" + if ( + (pr.user.login in ignore_users and pr.user.login not in repo_name) + or pr.draft + or pr.base.ref != "main" + ): + print( + f"[INFO] Ignore pr, user: {pr.user.login}, draft: {pr.draft}, target_branch: {pr.base.ref}" + ) + return "not-chart", "", "", "", "" return get_pr_content(pr) -def process_pr(write_key,repo,message_file,pr_number,action,prefix,pr_directory): +def process_pr(write_key, repo, message_file, pr_number, action, prefix, pr_directory): pr = repo.get_pull(int(pr_number)) - pr_content,type,provider,chart,version = check_and_get_pr_content(pr,repo) + pr_content, type, provider, chart, version = check_and_get_pr_content(pr, repo) if pr_content != "not-chart": if action == "opened": - send_submission_metric(write_key,type,provider,chart,pr_number,pr_content,prefix,pr_directory) - - pr_result = process_comment_file(message_file,pr_number) - num_fails=0 + send_submission_metric( + write_key, + type, + provider, + chart, + pr_number, + pr_content, + prefix, + pr_directory, + ) + + pr_result = 
process_comment_file(message_file, pr_number) + num_fails = 0 if pr_result == "report-failure": - num_fails,checks_failed = process_report_fails(message_file) + num_fails, checks_failed = process_report_fails(message_file) for check in checks_failed: - send_check_metric(write_key,type,provider,chart,pr_number,check) + send_check_metric(write_key, type, provider, chart, pr_number, check) elif pr_result == "content-failure": num_fails = 1 - send_outcome_metric(write_key,type,provider,chart,pr_number,pr_result,num_fails,prefix) + send_outcome_metric( + write_key, type, provider, chart, pr_number, pr_result, num_fails, prefix + ) - ## if pr is merged we can collect summary stats + # if pr is merged we can collect summary stats if pr.merged_at: - - builds = process_comments(repo,pr) + builds = process_comments(repo, pr) print(f"[INFO] PR build cycles : {builds}") builds_out = str(builds) if builds > 5: @@ -341,81 +410,152 @@ def process_pr(write_key,repo,message_file,pr_number,action,prefix,pr_directory) elapsed_time = pr.merged_at - pr.created_at # round up to an hour to avoid 0 time - elapsed_hours = elapsed_time.total_seconds()//3600 + elapsed_hours = elapsed_time.total_seconds() // 3600 duration = "0-1 hours" if 24 > elapsed_hours > 1: duration = "1-24 hours" elif 168 > elapsed_hours > 24: duration = "1-7 days" elif elapsed_hours > 168: - duration= "> 7 days" - - send_merge_metric(write_key,type,provider,chart,duration,pr_number,builds_out,pr_content,prefix,pr_directory) - - -def send_summary_metric(write_key,num_submissions,num_merged,num_abandoned,num_in_progress,num_partners,num_charts): - properties = { "submissions": num_submissions, "merged": num_merged, "abandoned" : num_abandoned, "in_progress" : num_in_progress, - "partners": num_partners, "partner_charts" : num_charts} + duration = "> 7 days" + + send_merge_metric( + write_key, + type, + provider, + chart, + duration, + pr_number, + builds_out, + pr_content, + prefix, + pr_directory, + ) + + +def send_summary_metric( + write_key, + num_submissions, + num_merged, + num_abandoned, + num_in_progress, + num_partners, + num_charts, +): + properties = { + "submissions": num_submissions, + "merged": num_merged, + "abandoned": num_abandoned, + "in_progress": num_in_progress, + "partners": num_partners, + "partner_charts": num_charts, + } id = "helm-metric-summary" - send_metric(write_key,id,"PR Summary",properties) + send_metric(write_key, id, "PR Summary", properties) -def send_outcome_metric(write_key,type,provider,chart,pr_number,outcome,num_fails,prefix): - properties = { "type": type, "provider": provider, "chart" : chart, "pr" : pr_number, "outcome" : outcome, "failures" : num_fails} +def send_outcome_metric( + write_key, type, provider, chart, pr_number, outcome, num_fails, prefix +): + properties = { + "type": type, + "provider": provider, + "chart": chart, + "pr": pr_number, + "outcome": outcome, + "failures": num_fails, + } id = f"{prefix}-{type}-{provider}" - send_metric(write_key,id,pr_outcome,properties) - + send_metric(write_key, id, pr_outcome, properties) -def send_check_metric(write_key,type,partner,chart,pr_number,check): - properties = { "type": type, "provider": partner, "chart" : chart, "pr" : pr_number, "check" : check } +def send_check_metric(write_key, type, partner, chart, pr_number, check): + properties = { + "type": type, + "provider": partner, + "chart": chart, + "pr": pr_number, + "check": check, + } id = f"helm-metric-{partner}" - send_metric(write_key,id,"PR Report Fails",properties) - -def 
send_merge_metric(write_key,type,partner,chart,duration,pr_number,num_builds,pr_content,prefix,pr_directory): - update=getChartUpdate(type,partner,chart,pr_directory) + send_metric(write_key, id, "PR Report Fails", properties) + + +def send_merge_metric( + write_key, + type, + partner, + chart, + duration, + pr_number, + num_builds, + pr_content, + prefix, + pr_directory, +): + update = getChartUpdate(type, partner, chart, pr_directory) id = f"{prefix}-{type}-{partner}" - properties = { "type" : type, "provider": partner, "chart" : chart, "pr" : pr_number, "builds" :num_builds, "duration" : duration, "content" : pr_content,"update": update} - - send_metric(write_key,id,pr_merged,properties) - -def send_submission_metric(write_key,type,partner,chart,pr_number,pr_content,prefix,pr_directory): - - update=getChartUpdate(type,partner,chart,pr_directory) + properties = { + "type": type, + "provider": partner, + "chart": chart, + "pr": pr_number, + "builds": num_builds, + "duration": duration, + "content": pr_content, + "update": update, + } + + send_metric(write_key, id, pr_merged, properties) + + +def send_submission_metric( + write_key, type, partner, chart, pr_number, pr_content, prefix, pr_directory +): + update = getChartUpdate(type, partner, chart, pr_directory) id = f"{prefix}-{type}-{partner}" - properties = { "type" : type, "provider": partner, "chart" : chart, "pr" : pr_number, "pr content": pr_content,"update": update} + properties = { + "type": type, + "provider": partner, + "chart": chart, + "pr": pr_number, + "pr content": pr_content, + "update": update, + } - send_metric(write_key,id,pr_submission,properties) + send_metric(write_key, id, pr_submission, properties) -def on_error(error,items): + +def on_error(error, items): print("An error occurred creating metrics:", error) - print("error with items:",items) + print("error with items:", items) sys.exit(1) -def send_metric(write_key,id,event,properties): +def send_metric(write_key, id, event, properties): analytics.write_key = write_key analytics.on_error = on_error - print(f'[INFO] Add track: id: {id}, event:{event}, properties:{properties}') + print(f"[INFO] Add track: id: {id}, event:{event}, properties:{properties}") analytics.track(id, event, properties) -def check_rate_limit(g,force): +def check_rate_limit(g, force): rate_limit = g.get_rate_limit() if force or rate_limit.core.remaining < 10: print(f"[INFO] rate limit info: {rate_limit.core}") -def getChartUpdate(type,partner,chart,cwd): - if type=="partner": - directory_type="partners" + +def getChartUpdate(type, partner, chart, cwd): + if type == "partner": + directory_type = "partners" else: - directory_type=type - directoryPath=os.path.join(cwd, charts,directory_type, partner,chart) + directory_type = type + directoryPath = os.path.join(cwd, charts, directory_type, partner, chart) # Checking if the directory contains only the OWNERS file print(os.listdir(directoryPath)) if len(os.listdir(directoryPath)) == 1: @@ -426,22 +566,70 @@ def getChartUpdate(type,partner,chart,cwd): def main(): parser = argparse.ArgumentParser() - parser.add_argument("-k", "--write-key", dest="write_key", type=str, required=True, - help="segment write key") - parser.add_argument("-t", "--metric-type", dest="type", type=str, required=True, - help="metric type, releases or pull_request") - parser.add_argument("-m", "--message-file", dest="message_file", type=str, required=False, - help="message for metric") - parser.add_argument("-n", "--pr-number", dest="pr_number", type=str, required=False, - 
help="number of teh pr") - parser.add_argument("-a", "--pr-action", dest="pr_action", type=str, required=False, - help="The event action of the pr") - parser.add_argument("-r", "--repository", dest="repository", type=str, required=False, - help="The repository of the pr") - parser.add_argument("-p", "--prefix", dest="prefix", type=str, required=False, - help="The prefix of the id in segment") - parser.add_argument("-d", "--pr_dir", dest="pr_dir", type=str, required=False, - help="Directory of pull request code.") + parser.add_argument( + "-k", + "--write-key", + dest="write_key", + type=str, + required=True, + help="segment write key", + ) + parser.add_argument( + "-t", + "--metric-type", + dest="type", + type=str, + required=True, + help="metric type, releases or pull_request", + ) + parser.add_argument( + "-m", + "--message-file", + dest="message_file", + type=str, + required=False, + help="message for metric", + ) + parser.add_argument( + "-n", + "--pr-number", + dest="pr_number", + type=str, + required=False, + help="number of teh pr", + ) + parser.add_argument( + "-a", + "--pr-action", + dest="pr_action", + type=str, + required=False, + help="The event action of the pr", + ) + parser.add_argument( + "-r", + "--repository", + dest="repository", + type=str, + required=False, + help="The repository of the pr", + ) + parser.add_argument( + "-p", + "--prefix", + dest="prefix", + type=str, + required=False, + help="The prefix of the id in segment", + ) + parser.add_argument( + "-d", + "--pr_dir", + dest="pr_dir", + type=str, + required=False, + help="Directory of pull request code.", + ) args = parser.parse_args() print("Input arguments:") @@ -462,13 +650,22 @@ def main(): if args.type == "pull_request": repo_current = g.get_repo(args.repository) - process_pr(args.write_key,repo_current,args.message_file,args.pr_number,args.pr_action,args.prefix,args.pr_dir) + process_pr( + args.write_key, + repo_current, + args.message_file, + args.pr_number, + args.pr_action, + args.prefix, + args.pr_dir, + ) else: - check_rate_limit(g,True) - send_release_metrics(args.write_key,get_release_metrics(),args.prefix) - check_rate_limit(g,True) - send_pull_request_metrics(args.write_key,g) - check_rate_limit(g,True) + check_rate_limit(g, True) + send_release_metrics(args.write_key, get_release_metrics(), args.prefix) + check_rate_limit(g, True) + send_pull_request_metrics(args.write_key, g) + check_rate_limit(g, True) + -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/scripts/src/metrics/pushowners.py b/scripts/src/metrics/pushowners.py index 99c2df53..3612ac97 100644 --- a/scripts/src/metrics/pushowners.py +++ b/scripts/src/metrics/pushowners.py @@ -1,81 +1,169 @@ import argparse import sys import analytics -sys.path.append('../') + +sys.path.append("../") from owners import owners_file + def getVendorType(changed_file): - path_as_list=changed_file.split("/") - for i in (range(len(path_as_list) - 1)): - if path_as_list[i]=='charts': - vendor_type=path_as_list[i+1] + path_as_list = changed_file.split("/") + for i in range(len(path_as_list) - 1): + if path_as_list[i] == "charts": + vendor_type = path_as_list[i + 1] return vendor_type + def getFileContent(changed_file): - status,owner_data=owners_file.get_owner_data_from_file(changed_file) - if status==True: - users_included=owners_file.get_users_included(owner_data) - web_catalog_only=owners_file.get_web_catalog_only(owner_data) + status, owner_data = owners_file.get_owner_data_from_file(changed_file) + if status is True: + 
users_included = owners_file.get_users_included(owner_data) + web_catalog_only = owners_file.get_web_catalog_only(owner_data) if not web_catalog_only: - web_catalog_only_string="No" + web_catalog_only_string = "No" else: - web_catalog_only_string="Yes" - vendor_name=owners_file.get_vendor(owner_data) - chart_name=owners_file.get_chart(owner_data) - vendor_type=getVendorType(changed_file) - return users_included,web_catalog_only_string,vendor_name,chart_name,vendor_type + web_catalog_only_string = "Yes" + vendor_name = owners_file.get_vendor(owner_data) + chart_name = owners_file.get_chart(owner_data) + vendor_type = getVendorType(changed_file) + return ( + users_included, + web_catalog_only_string, + vendor_name, + chart_name, + vendor_type, + ) else: print("Exception loading OWNERS file") - return "","","","","" - -def process_pr(added_file,modified_file): - if modified_file!='': - action="update" - update="existing-vendor" - users_included,web_catalog_only,vendor_name,chart_name,vendor_type=getFileContent(modified_file) - return users_included,web_catalog_only,vendor_name,chart_name,vendor_type,action,update - elif added_file!='': - action="create" - update="new-vendor" - users_included,web_catalog_only,vendor_name,chart_name,vendor_type=getFileContent(added_file) - return users_included,web_catalog_only,vendor_name,chart_name,vendor_type,action,update - - -def send_owner_metric(write_key,prefix,users_included,web_catalog_only,partner,chart_name,type,action,update): - if chart_name!="" and partner!="": + return "", "", "", "", "" + + +def process_pr(added_file, modified_file): + if modified_file != "": + action = "update" + update = "existing-vendor" + ( + users_included, + web_catalog_only, + vendor_name, + chart_name, + vendor_type, + ) = getFileContent(modified_file) + return ( + users_included, + web_catalog_only, + vendor_name, + chart_name, + vendor_type, + action, + update, + ) + elif added_file != "": + action = "create" + update = "new-vendor" + ( + users_included, + web_catalog_only, + vendor_name, + chart_name, + vendor_type, + ) = getFileContent(added_file) + return ( + users_included, + web_catalog_only, + vendor_name, + chart_name, + vendor_type, + action, + update, + ) + + +def send_owner_metric( + write_key, + prefix, + users_included, + web_catalog_only, + partner, + chart_name, + type, + action, + update, +): + if chart_name != "" and partner != "": id = f"{prefix}-{type}-{chart_name}" - properties = { "type" : type, "vendor": partner, "chart" : chart_name, "users_included" : users_included, "provider_delivery" :web_catalog_only, "action" : action, "update" : update} - send_metric(write_key,id,"owners v1.0",properties) - -def on_error(error,items): + properties = { + "type": type, + "vendor": partner, + "chart": chart_name, + "users_included": users_included, + "provider_delivery": web_catalog_only, + "action": action, + "update": update, + } + send_metric(write_key, id, "owners v1.0", properties) + + +def on_error(error, items): print("An error occurred creating metrics:", error) - print("error with items:",items) + print("error with items:", items) sys.exit(1) -def send_metric(write_key,id,event,properties): +def send_metric(write_key, id, event, properties): analytics.write_key = write_key analytics.on_error = on_error - print(f'[INFO] Add track: id: {id}, event:{event}, properties:{properties}') + print(f"[INFO] Add track: id: {id}, event:{event}, properties:{properties}") analytics.track(id, event, properties) + def main(): parser = argparse.ArgumentParser() - 
parser.add_argument("-k", "--write-key", dest="write_key", type=str, required=True, - help="segment write key") - parser.add_argument("-t", "--metric-type", dest="type", type=str, required=True, - help="metric type, releases or pull_request") - parser.add_argument("-n", "--added", dest="added", nargs="*", required=False, - help="files added") - parser.add_argument("-a", "--modified", dest="modified", nargs="*", required=False, - help="files modified") - parser.add_argument("-r", "--repository", dest="repository", type=str, required=False, - help="The repository of the pr") - parser.add_argument("-p", "--prefix", dest="prefix", type=str, required=False, - help="The prefix of the id in segment") - + parser.add_argument( + "-k", + "--write-key", + dest="write_key", + type=str, + required=True, + help="segment write key", + ) + parser.add_argument( + "-t", + "--metric-type", + dest="type", + type=str, + required=True, + help="metric type, releases or pull_request", + ) + parser.add_argument( + "-n", "--added", dest="added", nargs="*", required=False, help="files added" + ) + parser.add_argument( + "-a", + "--modified", + dest="modified", + nargs="*", + required=False, + help="files modified", + ) + parser.add_argument( + "-r", + "--repository", + dest="repository", + type=str, + required=False, + help="The repository of the pr", + ) + parser.add_argument( + "-p", + "--prefix", + dest="prefix", + type=str, + required=False, + help="The prefix of the id in segment", + ) args = parser.parse_args() print("Input arguments:") @@ -90,8 +178,27 @@ def main(): print("Error: Segment write key not set") sys.exit(1) - users_included,web_catalog_only,vendor_name,chart_name,vendor_type,action,update = process_pr(args.added[0],args.modified[0]) - send_owner_metric(args.write_key,args.prefix,users_included,web_catalog_only,vendor_name,chart_name,vendor_type,action,update) - -if __name__ == '__main__': - main() \ No newline at end of file + ( + users_included, + web_catalog_only, + vendor_name, + chart_name, + vendor_type, + action, + update, + ) = process_pr(args.added[0], args.modified[0]) + send_owner_metric( + args.write_key, + args.prefix, + users_included, + web_catalog_only, + vendor_name, + chart_name, + vendor_type, + action, + update, + ) + + +if __name__ == "__main__": + main() diff --git a/scripts/src/owners/checkuser.py b/scripts/src/owners/checkuser.py index 5f02f0f6..05b615b3 100644 --- a/scripts/src/owners/checkuser.py +++ b/scripts/src/owners/checkuser.py @@ -12,16 +12,16 @@ import re import argparse -import requests import os import sys import yaml + try: - from yaml import CLoader as Loader, CDumper as Dumper + from yaml import CLoader as Loader except ImportError: - from yaml import Loader, Dumper + from yaml import Loader -sys.path.append('../') +sys.path.append("../") from pullrequest import prartifact @@ -44,6 +44,7 @@ def verify_user(username): print(f"[ERROR] {username} not auhtorized") return False + def check_for_restricted_file(api_url): files = prartifact.get_modified_files(api_url) pattern_owners = re.compile(OWNERS_FILE) @@ -51,28 +52,44 @@ def check_for_restricted_file(api_url): pattern_thisfile = re.compile(THIS_FILE) for filename in files: - if pattern_versionfile.match(filename) or pattern_owners.match(filename) or pattern_thisfile.match(filename): + if ( + pattern_versionfile.match(filename) + or pattern_owners.match(filename) + or pattern_thisfile.match(filename) + ): print(f"[INFO] restricted file found: {filename}") return True - + return False def main(): parser = 
argparse.ArgumentParser() - parser.add_argument("-a", "--api-url", dest="api_url", type=str, required=True, - help="API URL for the pull request") - parser.add_argument("-u", "--user", dest="username", type=str, required=True, - help="user to be checked for authority to modify release files in a PR") + parser.add_argument( + "-a", + "--api-url", + dest="api_url", + type=str, + required=True, + help="API URL for the pull request", + ) + parser.add_argument( + "-u", + "--user", + dest="username", + type=str, + required=True, + help="user to be checked for authority to modify release files in a PR", + ) args = parser.parse_args() if check_for_restricted_file(args.api_url): if verify_user(args.username): print(f"[INFO] {args.username} is authorized to modify all files in the PR") else: - print(f"[INFO] {args.username} is not authorized to modify all files in the PR") + print( + f"[INFO] {args.username} is not authorized to modify all files in the PR" + ) sys.exit(1) else: - print(f"[INFO] no restricted files found in the PR") - - + print("[INFO] no restricted files found in the PR") diff --git a/scripts/src/owners/owners_file.py b/scripts/src/owners/owners_file.py index dbe081fa..addcdcbf 100644 --- a/scripts/src/owners/owners_file.py +++ b/scripts/src/owners/owners_file.py @@ -1,63 +1,70 @@ import os import yaml + try: - from yaml import CLoader as Loader, CDumper as Dumper + from yaml import CLoader as Loader except ImportError: - from yaml import Loader, Dumper + from yaml import Loader def get_owner_data(category, organization, chart): - path=os.path.join("charts", category, organization, chart, "OWNERS") - status,owner_content=get_owner_data_from_file(path) - return status,owner_content + path = os.path.join("charts", category, organization, chart, "OWNERS") + status, owner_content = get_owner_data_from_file(path) + return status, owner_content + def get_owner_data_from_file(owner_path): try: with open(owner_path) as owner_data: - owner_content = yaml.load(owner_data,Loader=Loader) - return True,owner_content + owner_content = yaml.load(owner_data, Loader=Loader) + return True, owner_content except Exception as err: print(f"Exception loading OWNERS file: {err}") - return False,"" + return False, "" + def get_vendor(owner_data): - vendor="" + vendor = "" try: - vendor = owner_data['vendor']['name'] + vendor = owner_data["vendor"]["name"] except Exception: pass return vendor + def get_chart(owner_data): - chart="" + chart = "" try: - chart = owner_data['chart']['name'] + chart = owner_data["chart"]["name"] except Exception: pass return chart + def get_web_catalog_only(owner_data): web_catalog_only = False try: - if 'webCatalogOnly' in owner_data: - web_catalog_only = owner_data['webCatalogOnly'] - elif 'providerDelivery' in owner_data: - web_catalog_only = owner_data['providerDelivery'] + if "webCatalogOnly" in owner_data: + web_catalog_only = owner_data["webCatalogOnly"] + elif "providerDelivery" in owner_data: + web_catalog_only = owner_data["providerDelivery"] except Exception: pass return web_catalog_only + def get_users_included(owner_data): - users_included="No" + users_included = "No" try: - users = owner_data['users'] - if len(users)!=0: + users = owner_data["users"] + if len(users) != 0: return "Yes" except Exception: pass return users_included + def get_pgp_public_key(owner_data): pgp_public_key = "null" try: @@ -65,5 +72,3 @@ def get_pgp_public_key(owner_data): except Exception: pass return pgp_public_key - - diff --git a/scripts/src/pullrequest/prartifact.py 
b/scripts/src/pullrequest/prartifact.py index 14849dca..8f994bf8 100644 --- a/scripts/src/pullrequest/prartifact.py +++ b/scripts/src/pullrequest/prartifact.py @@ -1,4 +1,3 @@ -import re import os import sys import argparse @@ -7,7 +6,7 @@ import requests -sys.path.append('../') +sys.path.append("../") from checkprcontent import checkpr from tools import gitutils @@ -16,10 +15,11 @@ xRateLimit = "X-RateLimit-Limit" xRateRemain = "X-RateLimit-Remaining" + # TODO(baijum): Move this code under chartsubmission.chart module def get_modified_charts(api_url): files = get_modified_files(api_url) - pattern,_,_ = checkpr.get_file_match_compiled_patterns() + pattern, _, _ = checkpr.get_file_match_compiled_patterns() for file in files: match = pattern.match(file) if match: @@ -28,26 +28,29 @@ def get_modified_charts(api_url): return "", "", "", "" + def get_modified_files(api_url): if not pr_files: page_number = 1 - max_page_size,page_size = 100,100 - headers = {'Accept': 'application/vnd.github.v3+json','Authorization': f'Bearer {os.environ.get("BOT_TOKEN")}'} - files_api_url = f'{api_url}/files' + max_page_size, page_size = 100, 100 + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f'Bearer {os.environ.get("BOT_TOKEN")}', + } + files_api_url = f"{api_url}/files" while page_size == max_page_size: - - files_api_query = f'{files_api_url}?per_page={page_size}&page={page_number}' + files_api_query = f"{files_api_url}?per_page={page_size}&page={page_number}" print(f"[INFO] Query files : {files_api_query}") - r = requests.get(files_api_query,headers=headers) + r = requests.get(files_api_query, headers=headers) files = r.json() page_size = len(files) page_number += 1 if xRateLimit in r.headers: - print(f'[DEBUG] {xRateLimit} : {r.headers[xRateLimit]}') + print(f"[DEBUG] {xRateLimit} : {r.headers[xRateLimit]}") if xRateRemain in r.headers: - print(f'[DEBUG] {xRateRemain} : {r.headers[xRateRemain]}') + print(f"[DEBUG] {xRateRemain} : {r.headers[xRateRemain]}") if "message" in files: print(f'[ERROR] getting pr files: {files["message"]}') @@ -59,16 +62,20 @@ def get_modified_files(api_url): return pr_files + def get_labels(api_url): if not pr_labels: - headers = {'Accept': 'application/vnd.github.v3+json','Authorization': f'Bearer {os.environ.get("BOT_TOKEN")}'} + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f'Bearer {os.environ.get("BOT_TOKEN")}', + } r = requests.get(api_url, headers=headers) pr_data = r.json() if xRateLimit in r.headers: - print(f'[DEBUG] {xRateLimit} : {r.headers[xRateLimit]}') + print(f"[DEBUG] {xRateLimit} : {r.headers[xRateLimit]}") if xRateRemain in r.headers: - print(f'[DEBUG] {xRateRemain} : {r.headers[xRateRemain]}') + print(f"[DEBUG] {xRateRemain} : {r.headers[xRateRemain]}") if "message" in pr_data: print(f'[ERROR] getting pr files: {pr_data["message"]}') @@ -79,6 +86,7 @@ def get_labels(api_url): return pr_labels + def save_metadata(directory, vendor_label, chart, number): with open(os.path.join(directory, "vendor"), "w") as fd: print(f"add {directory}/vendor as {vendor_label}") @@ -96,25 +104,47 @@ def save_metadata(directory, vendor_label, chart, number): else: pathlib.Path(os.path.join(directory, "report.yaml")).touch() + def main(): parser = argparse.ArgumentParser() - parser.add_argument("-d", "--directory", dest="directory", type=str, required=False, - help="artifact directory for archival") - parser.add_argument("-n", "--pr-number", dest="number", type=str, required=False, - help="current pull request number") - 
parser.add_argument("-u", "--api-url", dest="api_url", type=str, required=True,
-                        help="API URL for the pull request")
-    parser.add_argument("-f","--get-files", dest="get_files", default=False,action='store_true' )
+    parser.add_argument(
+        "-d",
+        "--directory",
+        dest="directory",
+        type=str,
+        required=False,
+        help="artifact directory for archival",
+    )
+    parser.add_argument(
+        "-n",
+        "--pr-number",
+        dest="number",
+        type=str,
+        required=False,
+        help="current pull request number",
+    )
+    parser.add_argument(
+        "-u",
+        "--api-url",
+        dest="api_url",
+        type=str,
+        required=True,
+        help="API URL for the pull request",
+    )
+    parser.add_argument(
+        "-f", "--get-files", dest="get_files", default=False, action="store_true"
+    )
 
     args = parser.parse_args()
 
     if args.get_files:
         pr_files = get_modified_files(args.api_url)
         print(f"[INFO] files in pr: {pr_files}")
-        gitutils.add_output("pr_files",pr_files)
+        gitutils.add_output("pr_files", pr_files)
     else:
         os.makedirs(args.directory, exist_ok=True)
         category, organization, chart, version = get_modified_charts(args.api_url)
         save_metadata(args.directory, organization, chart, args.number)
 
+
 if __name__ == "__main__":
     main()
diff --git a/scripts/src/pullrequest/prepare_pr_comment.py b/scripts/src/pullrequest/prepare_pr_comment.py
index 7aa486bb..6d690dde 100644
--- a/scripts/src/pullrequest/prepare_pr_comment.py
+++ b/scripts/src/pullrequest/prepare_pr_comment.py
@@ -2,36 +2,48 @@
 import sys
 from tools import gitutils
 
+
 def get_success_coment():
-    return "Congratulations! Your chart has been certified and will be published shortly."
+    return (
+        "Congratulations! Your chart has been certified and will be published shortly."
+    )
 
+
 def get_content_failure_message():
     return "One or more errors were found with the pull request:"
 
+
 def get_community_review_message():
     return "Community charts require maintainer review and approval, a review will be conducted shortly."
 
+
 def get_failure_comment():
-    return "There were one or more errors while building and verifying your pull request."
+    return (
+        "There were one or more errors while building and verifying your pull request."
+    )
 
+
 def get_verifier_errors_comment():
     return "[ERROR] The submitted chart has failed certification. Reason(s):"
 
+
 def get_verifier_errors_trailer():
     return "Please create a valid report with the [chart-verifier](https://github.com/redhat-certification/chart-verifier) \
and ensure all mandatory checks pass."
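
# NOTE (editor): the message builders above (and the rest of this patch) hand
# their results to gitutils.add_output, whose definition is outside this diff.
# As a point of reference only, a minimal GitHub-Actions-style implementation
# could look like the sketch below; it is an assumption about tools/gitutils,
# not its actual code, and it assumes single-line values.
#
# import os
#
#
# def add_output(name, value):
#     # On current runners, $GITHUB_OUTPUT names a file that collects step outputs.
#     output_file = os.environ.get("GITHUB_OUTPUT")
#     if output_file:
#         with open(output_file, "a") as fd:
#             fd.write(f"{name}={value}\n")
#     else:
#         # Older runners used the now-deprecated workflow command instead.
#         print(f"::set-output name={name}::{value}")
#
#
# # Usage mirrors the calls throughout this patch, e.g.:
# add_output("pr_passed", "true")
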
+ def get_look_at_job_output_comment(): - return f"""To see the console output with the error messages, click the "Details" \ + return """To see the console output with the error messages, click the "Details" \ link next to "CI / Chart Certification" job status towards the end of this page.""" + def prepare_failure_comment(): msg = f"""\ {get_failure_comment()} {get_look_at_job_output_comment()}""" if os.path.exists("./pr/errors"): errors = open("./pr/errors").read() - msg += f""" + msg += f""" {get_verifier_errors_comment()} {errors} @@ -39,31 +51,34 @@ def prepare_failure_comment(): {get_verifier_errors_trailer()} """ - gitutils.add_output("error-message",errors) + gitutils.add_output("error-message", errors) else: - gitutils.add_output("error-message",get_failure_comment()) + gitutils.add_output("error-message", get_failure_comment()) return msg + def prepare_success_comment(): msg = f"{get_success_coment()}.\n\n" return msg + def prepare_pr_content_failure_comment(): msg = f"{get_content_failure_message()} \n" pr_content_error_msg = os.environ.get("PR_CONTENT_ERROR_MESSAGE", "") owners_error_msg = os.environ.get("OWNERS_ERROR_MESSAGE", "") if pr_content_error_msg: - gitutils.add_output("error-message",pr_content_error_msg) + gitutils.add_output("error-message", pr_content_error_msg) msg += f"{pr_content_error_msg}\n\n" if owners_error_msg: - gitutils.add_output("error-message",owners_error_msg) + gitutils.add_output("error-message", owners_error_msg) msg += f"{owners_error_msg}\n\n" return msg + def prepare_run_verifier_failure_comment(): verifier_error_msg = os.environ.get("VERIFIER_ERROR_MESSAGE", "") - gitutils.add_output("error-message",verifier_error_msg) - msg = f""" + gitutils.add_output("error-message", verifier_error_msg) + msg = f""" {verifier_error_msg} {get_look_at_job_output_comment()} @@ -79,24 +94,31 @@ def prepare_community_comment(): msg += f"{errors}\n\n" return msg + def prepare_generic_fail_comment(): msg = "" if os.path.exists("./pr/errors"): errors = open("./pr/errors").read() msg += "One or more errors were found while building and verifying your pull request:\n\n" msg += f"{errors}\n\n" - else: msg += "An unspecified error has occured while building and verifying your pull request.\n\n" + else: + msg += "An unspecified error has occured while building and verifying your pull request.\n\n" return msg + def prepare_oc_install_fail_comment(): msg = "Unfortunately the certification process failed to install OpenShift and could not complete.\n\n" msg += "This problem will be addressed by maintainers and no further action is required from the submitter at this time.\n\n" return msg + def get_comment_header(issue_number): - msg = f"Thank you for submitting PR #{issue_number} for Helm Chart Certification!\n\n" + msg = ( + f"Thank you for submitting PR #{issue_number} for Helm Chart Certification!\n\n" + ) return msg + def get_comment_footer(vendor_label, chart_name): msg = "\n---\n\n" msg += "For information on the certification process see:\n" @@ -105,6 +127,7 @@ def get_comment_footer(vendor_label, chart_name): msg += f'/metadata {{"vendor_label": "{vendor_label}", "chart_name": "{chart_name}"}}\n\n' return msg + def main(): pr_content_result = sys.argv[1] run_verifier_result = sys.argv[2] @@ -115,37 +138,43 @@ def main(): msg = get_comment_header(issue_number) oc_install_result = os.environ.get("OC_INSTALL_RESULT", False) - # Handle success explicitly - if pr_content_result == "success" and run_verifier_result == "success" and verify_result == "success" and 
oc_install_result == "success": + # Handle success explicitly + if ( + pr_content_result == "success" + and run_verifier_result == "success" + and verify_result == "success" + and oc_install_result == "success" + ): msg += prepare_success_comment() - gitutils.add_output("pr_passed","true") - else: # Handle various failure scenarios. + gitutils.add_output("pr_passed", "true") + else: # Handle various failure scenarios. if pr_content_result == "failure": msg += prepare_pr_content_failure_comment() - gitutils.add_output("pr_passed","false") + gitutils.add_output("pr_passed", "false") elif run_verifier_result == "failure": msg += prepare_run_verifier_failure_comment() - gitutils.add_output("pr_passed","false") + gitutils.add_output("pr_passed", "false") elif verify_result == "failure": - community_manual_review = os.environ.get("COMMUNITY_MANUAL_REVIEW",False) + community_manual_review = os.environ.get("COMMUNITY_MANUAL_REVIEW", False) if community_manual_review: msg += prepare_community_comment() - gitutils.add_output("pr_passed","true") + gitutils.add_output("pr_passed", "true") else: msg += prepare_failure_comment() - gitutils.add_output("pr_passed","false") + gitutils.add_output("pr_passed", "false") elif oc_install_result == "failure": msg += prepare_oc_install_fail_comment() - gitutils.add_output("pr_passed","false") + gitutils.add_output("pr_passed", "false") else: msg += prepare_generic_fail_comment() - gitutils.add_output("pr_passed","false") + gitutils.add_output("pr_passed", "false") msg += get_comment_footer(vendor_label, chart_name) with open("./pr/comment", "w") as fd: fd.write(msg) - gitutils.add_output("message-file",fd.name) + gitutils.add_output("message-file", fd.name) + if __name__ == "__main__": main() diff --git a/scripts/src/release/release_info.py b/scripts/src/release/release_info.py index 35e59bb3..073f01f3 100644 --- a/scripts/src/release/release_info.py +++ b/scripts/src/release/release_info.py @@ -8,12 +8,12 @@ import os -RELEASE_INFO_FILE="release/release_info.json" +RELEASE_INFO_FILE = "release/release_info.json" RELEASE_INFOS = {} -def _get_release_info(directory): +def _get_release_info(directory): global RELEASE_INFOS if not directory: @@ -21,25 +21,26 @@ def _get_release_info(directory): root_dir = os.path.dirname(f"{os.getcwd()}/{directory}") - if not root_dir in RELEASE_INFOS: - + if root_dir not in RELEASE_INFOS: print(f"Open release_info file: {root_dir}/{RELEASE_INFO_FILE}") - with open(f"{root_dir}/{RELEASE_INFO_FILE}",'r') as json_file: + with open(f"{root_dir}/{RELEASE_INFO_FILE}", "r") as json_file: RELEASE_INFOS[root_dir] = json.load(json_file) return RELEASE_INFOS[root_dir] + def get_version(directory): info = _get_release_info(directory) return info["version"] + def get_info(directory): info = _get_release_info(directory) return info["info"] -def get_replaces(from_repo,to_repo,directory): +def get_replaces(from_repo, to_repo, directory): print(f"get replaces for {from_repo} to {to_repo} ") info = _get_release_info(directory) if from_repo in info: @@ -49,7 +50,8 @@ def get_replaces(from_repo,to_repo,directory): print("no replaces found") return [] -def get_merges(from_repo,to_repo,directory): + +def get_merges(from_repo, to_repo, directory): print(f"get merges for {from_repo} to {to_repo}") info = _get_release_info(directory) if from_repo in info: @@ -60,7 +62,7 @@ def get_merges(from_repo,to_repo,directory): return [] -def get_ignores(from_repo,to_repo,directory): +def get_ignores(from_repo, to_repo, directory): print(f"get ignores for {from_repo} 
to {to_repo}") info = _get_release_info(directory) if from_repo in info: @@ -72,29 +74,40 @@ def get_ignores(from_repo,to_repo,directory): def main(): - print(f"[INFO] Version : {get_version('.')}") - #from development to charts - print(f"[INFO] Dev to charts repo merges : {get_merges('development','charts','.')}") + # from development to charts + print( + f"[INFO] Dev to charts repo merges : {get_merges('development','charts','.')}" + ) - print(f"[INFO] Dev to charts repo replace : {get_replaces('development','charts','.')}") + print( + f"[INFO] Dev to charts repo replace : {get_replaces('development','charts','.')}" + ) - print(f"[INFO] Dev to charts repo ignore : {get_ignores('development','charts','.')}") + print( + f"[INFO] Dev to charts repo ignore : {get_ignores('development','charts','.')}" + ) - #from development to stage + # from development to stage print(f"[INFO] Dev to stage repo merges : {get_merges('development','stage','.')}") - print(f"[INFO] Dev to stage repo replace : {get_replaces('development','stage','.')}") + print( + f"[INFO] Dev to stage repo replace : {get_replaces('development','stage','.')}" + ) print(f"[INFO] Dev to stage repo ignore : {get_ignores('development','stage','.')}") - #From charts to development + # From charts to development print(f"[INFO] Chart to dev repo merges : {get_merges('charts','development','.')}") - print(f"[INFO] Chart to dev repo replace : {get_replaces('charts','development','.')}") + print( + f"[INFO] Chart to dev repo replace : {get_replaces('charts','development','.')}" + ) - print(f"[INFO] Chart to dev repo ignore : {get_ignores('charts','development','.')}") + print( + f"[INFO] Chart to dev repo ignore : {get_ignores('charts','development','.')}" + ) if __name__ == "__main__": diff --git a/scripts/src/release/releasechecker.py b/scripts/src/release/releasechecker.py index 170e11a1..8cf557eb 100644 --- a/scripts/src/release/releasechecker.py +++ b/scripts/src/release/releasechecker.py @@ -27,13 +27,12 @@ import os import argparse import json -import requests import semver import sys from release import release_info from release import releaser -sys.path.append('../') +sys.path.append("../") from owners import checkuser from tools import gitutils from pullrequest import prartifact @@ -50,8 +49,8 @@ ERROR_IF_MATCH_NOT_FOUND = False ERROR_IF_MATCH_FOUND = True -def check_file_in_pr(api_url,pattern,error_value): +def check_file_in_pr(api_url, pattern, error_value): print("[INFO] check if PR for matching files") files = prartifact.get_modified_files(api_url) @@ -69,42 +68,52 @@ def check_file_in_pr(api_url,pattern,error_value): def check_if_only_charts_are_included(api_url): print("[INFO] check if only chart files are included") - chart_pattern = re.compile(r"charts/"+TYPE_MATCH_EXPRESSION+"/([\w-]+)/([\w-]+)/.*") + chart_pattern = re.compile( + r"charts/" + TYPE_MATCH_EXPRESSION + "/([\w-]+)/([\w-]+)/.*" + ) return check_file_in_pr(api_url, chart_pattern, ERROR_IF_MATCH_NOT_FOUND) + def check_if_no_charts_are_included(api_url): print("[INFO] check if no chart files are included") - chart_pattern = re.compile(r"charts/"+TYPE_MATCH_EXPRESSION+"/([\w-]+)/([\w-]+)/.*") + chart_pattern = re.compile( + r"charts/" + TYPE_MATCH_EXPRESSION + "/([\w-]+)/([\w-]+)/.*" + ) return check_file_in_pr(api_url, chart_pattern, ERROR_IF_MATCH_FOUND) + def check_if_only_version_file_is_modified(api_url): print("[INFO] check if only version file is modified") pattern_versionfile = re.compile(r"release/release_info.json") return check_file_in_pr(api_url, 
diff --git a/scripts/src/release/releasechecker.py b/scripts/src/release/releasechecker.py
index 170e11a1..8cf557eb 100644
--- a/scripts/src/release/releasechecker.py
+++ b/scripts/src/release/releasechecker.py
@@ -27,13 +27,12 @@
 import os
 import argparse
 import json
-import requests
 import semver
 import sys
 from release import release_info
 from release import releaser

-sys.path.append('../')
+sys.path.append("../")
 from owners import checkuser
 from tools import gitutils
 from pullrequest import prartifact
@@ -50,8 +49,8 @@
 ERROR_IF_MATCH_NOT_FOUND = False
 ERROR_IF_MATCH_FOUND = True

-def check_file_in_pr(api_url,pattern,error_value):
+def check_file_in_pr(api_url, pattern, error_value):
     print("[INFO] check if PR for matching files")
     files = prartifact.get_modified_files(api_url)
@@ -69,42 +68,52 @@
 def check_if_only_charts_are_included(api_url):
     print("[INFO] check if only chart files are included")
-    chart_pattern = re.compile(r"charts/"+TYPE_MATCH_EXPRESSION+"/([\w-]+)/([\w-]+)/.*")
+    chart_pattern = re.compile(
+        r"charts/" + TYPE_MATCH_EXPRESSION + r"/([\w-]+)/([\w-]+)/.*"
+    )
     return check_file_in_pr(api_url, chart_pattern, ERROR_IF_MATCH_NOT_FOUND)

+
 def check_if_no_charts_are_included(api_url):
     print("[INFO] check if no chart files are included")
-    chart_pattern = re.compile(r"charts/"+TYPE_MATCH_EXPRESSION+"/([\w-]+)/([\w-]+)/.*")
+    chart_pattern = re.compile(
+        r"charts/" + TYPE_MATCH_EXPRESSION + r"/([\w-]+)/([\w-]+)/.*"
+    )
     return check_file_in_pr(api_url, chart_pattern, ERROR_IF_MATCH_FOUND)

+
 def check_if_only_version_file_is_modified(api_url):
     print("[INFO] check if only version file is modified")
     pattern_versionfile = re.compile(r"release/release_info.json")
     return check_file_in_pr(api_url, pattern_versionfile, ERROR_IF_MATCH_NOT_FOUND)

-def check_if_dev_release_branch(sender,pr_branch,pr_body,api_url,pr_head_repo):
+def check_if_dev_release_branch(sender, pr_branch, pr_body, api_url, pr_head_repo):
     print("[INFO] check if PR is release branch on dev")
-    if sender!=os.environ.get("BOT_NAME") and sender!=DEFAULT_BOT_NAME:
+    if sender != os.environ.get("BOT_NAME") and sender != DEFAULT_BOT_NAME:
         print(f"Sender indicates PR is not part of a release: {sender}")
         return False

     if not checkuser.verify_user(sender):
         print(f"Sender is not authorized to create a release PR : {sender}")
         return False
-
+
     if not pr_branch.startswith(releaser.DEV_PR_BRANCH_NAME_PREFIX):
         print(f"PR branch indicates PR is not part of a release: {pr_branch}")
         return False

     version = pr_branch.removeprefix(releaser.DEV_PR_BRANCH_NAME_PREFIX)
     if not semver.VersionInfo.isvalid(version):
-        print(f"Release part ({version}) of branch name {pr_branch} is not a valid semantic version.")
+        print(
+            f"Release part ({version}) of branch name {pr_branch} is not a valid semantic version."
+        )
         return False

     if not pr_head_repo.endswith(DEV_PR_HEAD_REPO):
-        print(f"PR does not have the expected origin. Got: {pr_head_repo}, expected: {DEV_PR_HEAD_REPO}")
+        print(
+            f"PR does not have the expected origin. Got: {pr_head_repo}, expected: {DEV_PR_HEAD_REPO}"
+        )
         return False

     if not pr_body.startswith(releaser.DEV_PR_BRANCH_BODY_PREFIX):
@@ -113,11 +122,11 @@ def check_if_dev_release_branch(sender,pr_branch,pr_body,api_url,pr_head_repo):
     return check_if_only_charts_are_included(api_url)

-def check_if_charts_release_branch(sender,pr_branch,pr_body,api_url,pr_head_repo):
+def check_if_charts_release_branch(sender, pr_branch, pr_body, api_url, pr_head_repo):
     print("[INFO] check if PR is release branch on charts")

-    if sender!=os.environ.get("BOT_NAME") and sender!=DEFAULT_BOT_NAME:
+    if sender != os.environ.get("BOT_NAME") and sender != DEFAULT_BOT_NAME:
         print(f"Sender indicates PR is not part of a release: {sender}")
         return False
@@ -131,11 +140,17 @@ def check_if_charts_release_branch(sender,pr_branch,pr_body,api_url,pr_head_repo
     version = pr_branch.removeprefix(releaser.CHARTS_PR_BRANCH_NAME_PREFIX)
     if not semver.VersionInfo.isvalid(version):
-        print(f"Release part ({version}) of branch name {pr_branch} is not a valid semantic version.")
+        print(
+            f"Release part ({version}) of branch name {pr_branch} is not a valid semantic version."
+        )
         return False

-    if not pr_head_repo.endswith(CHARTS_PR_HEAD_REPO) and not pr_head_repo.endswith(STAGE_PR_HEAD_REPO):
-        print(f"PR does not have the expected origin. Got: {pr_head_repo}, expected: {CHARTS_PR_HEAD_REPO}")
+    if not pr_head_repo.endswith(CHARTS_PR_HEAD_REPO) and not pr_head_repo.endswith(
+        STAGE_PR_HEAD_REPO
+    ):
+        print(
+            f"PR does not have the expected origin. Got: {pr_head_repo}, expected: {CHARTS_PR_HEAD_REPO}"
+        )
         return False

     if not pr_body.startswith(releaser.CHARTS_PR_BRANCH_BODY_PREFIX):
@@ -152,7 +167,8 @@ def make_release_body(version, release_info):
         body += f"- {info}"
" print(f"[INFO] Release body: {body}") - gitutils.add_output("PR_release_body",body) + gitutils.add_output("PR_release_body", body) + def get_version_info(): data = {} @@ -160,22 +176,60 @@ def get_version_info(): data = json.load(json_file) return data + def main(): parser = argparse.ArgumentParser() - parser.add_argument("-a", "--api-url", dest="api_url", type=str, required=False, - help="API URL for the pull request") - parser.add_argument("-v", "--version", dest="version", type=str, required=False, - help="Version to compare") - parser.add_argument("-s", "--sender", dest="sender", type=str, required=False, - help="sender of the PR") - parser.add_argument("-b", "--pr_branch", dest="pr_branch", type=str, required=False, - help="PR branch name") - parser.add_argument("-t", "--pr_body", dest="pr_body", type=str, required=False, - help="PR title") - parser.add_argument("-r", "--pr_base_repo", dest="pr_base_repo", type=str, required=False, - help="PR target repo") - parser.add_argument("-z", "--pr_head_repo", dest="pr_head_repo", type=str, required=False, - help="PR source repo") + parser.add_argument( + "-a", + "--api-url", + dest="api_url", + type=str, + required=False, + help="API URL for the pull request", + ) + parser.add_argument( + "-v", + "--version", + dest="version", + type=str, + required=False, + help="Version to compare", + ) + parser.add_argument( + "-s", + "--sender", + dest="sender", + type=str, + required=False, + help="sender of the PR", + ) + parser.add_argument( + "-b", + "--pr_branch", + dest="pr_branch", + type=str, + required=False, + help="PR branch name", + ) + parser.add_argument( + "-t", "--pr_body", dest="pr_body", type=str, required=False, help="PR title" + ) + parser.add_argument( + "-r", + "--pr_base_repo", + dest="pr_base_repo", + type=str, + required=False, + help="PR target repo", + ) + parser.add_argument( + "-z", + "--pr_head_repo", + dest="pr_head_repo", + type=str, + required=False, + help="PR source repo", + ) args = parser.parse_args() @@ -190,49 +244,70 @@ def main(): if args.pr_branch: if args.pr_base_repo.endswith(DEV_PR_BASE_REPO): - if check_if_dev_release_branch(args.sender,args.pr_branch,args.pr_body,args.api_url,args.pr_head_repo): - print('[INFO] Dev release pull request found') - gitutils.add_output("dev_release_branch","true") - version = args.pr_branch.removeprefix(releaser.DEV_PR_BRANCH_NAME_PREFIX) - gitutils.add_output("PR_version",version) - gitutils.add_output("PR_release_body",args.pr_body) - elif args.pr_base_repo.endswith(CHARTS_PR_BASE_REPO) or args.pr_base_repo.endswith(STAGE_PR_BASE_REPO): - if check_if_charts_release_branch(args.sender,args.pr_branch,args.pr_body,args.api_url,args.pr_head_repo): - print('[INFO] Workflow release pull request found') - gitutils.add_output("charts_release_branch","true") + if check_if_dev_release_branch( + args.sender, + args.pr_branch, + args.pr_body, + args.api_url, + args.pr_head_repo, + ): + print("[INFO] Dev release pull request found") + gitutils.add_output("dev_release_branch", "true") + version = args.pr_branch.removeprefix( + releaser.DEV_PR_BRANCH_NAME_PREFIX + ) + gitutils.add_output("PR_version", version) + gitutils.add_output("PR_release_body", args.pr_body) + elif args.pr_base_repo.endswith( + CHARTS_PR_BASE_REPO + ) or args.pr_base_repo.endswith(STAGE_PR_BASE_REPO): + if check_if_charts_release_branch( + args.sender, + args.pr_branch, + args.pr_body, + args.api_url, + args.pr_head_repo, + ): + print("[INFO] Workflow release pull request found") + 
gitutils.add_output("charts_release_branch", "true") elif args.api_url: - ## should be on PR branch + # should be on PR branch if args.pr_base_repo.endswith(DEV_PR_BASE_REPO): version_only = check_if_only_version_file_is_modified(args.api_url) user_authorized = checkuser.verify_user(args.sender) if version_only and user_authorized: organization = args.pr_base_repo.removesuffix(DEV_PR_BASE_REPO) - gitutils.add_output("charts_repo",f"{organization}{CHARTS_PR_BASE_REPO}") - gitutils.add_output("stage_repo",f"{organization}{STAGE_PR_BASE_REPO}") + gitutils.add_output( + "charts_repo", f"{organization}{CHARTS_PR_BASE_REPO}" + ) + gitutils.add_output("stage_repo", f"{organization}{STAGE_PR_BASE_REPO}") version = release_info.get_version("./") version_info = release_info.get_info("./") - print(f'[INFO] Release found in PR files : {version}.') - gitutils.add_output("PR_version",version) - gitutils.add_output("PR_release_info",version_info) - gitutils.add_output("PR_includes_release_only","true") - make_release_body(version,version_info) + print(f"[INFO] Release found in PR files : {version}.") + gitutils.add_output("PR_version", version) + gitutils.add_output("PR_release_info", version_info) + gitutils.add_output("PR_includes_release_only", "true") + make_release_body(version, version_info) elif version_only and not user_authorized: - print(f'[ERROR] sender not authorized : {args.sender}.') - gitutils.add_output("sender_not_authorized","true") + print(f"[ERROR] sender not authorized : {args.sender}.") + gitutils.add_output("sender_not_authorized", "true") else: - print('[INFO] Not a release PR') + print("[INFO] Not a release PR") else: - print(f'[INFO] Not a release PR, target is not : {DEV_PR_BASE_REPO}.') + print(f"[INFO] Not a release PR, target is not : {DEV_PR_BASE_REPO}.") else: version = release_info.get_version("./") if args.version: # should be on main branch - if semver.compare(args.version,version) > 0 : - print(f'[INFO] Release {args.version} found in PR files is newer than: {version}.') - gitutils.add_output("release_updated","true") + if semver.compare(args.version, version) > 0: + print( + f"[INFO] Release {args.version} found in PR files is newer than: {version}." + ) + gitutils.add_output("release_updated", "true") else: - print(f'[ERROR] Release found in PR files is not new : {args.version}.') + print( + f"[ERROR] Release found in PR files is not new : {args.version}." 
+                )
         else:
-            print(f'[ERROR] no valid parameter set to release checker.')
-
+            print("[ERROR] no valid parameter set to release checker.")
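The branch-name gate in check_if_dev_release_branch and check_if_charts_release_branch boils down to stripping a fixed prefix and validating the remainder with the semver package. The same check in isolation (the branch value is illustrative):

import semver

branch = "Auto-Release-1.2.3"  # DEV_PR_BRANCH_NAME_PREFIX + version
version = branch.removeprefix("Auto-Release-")
print(semver.VersionInfo.isvalid(version))  # True
print(semver.VersionInfo.isvalid("1.2"))    # False: not a complete semantic version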
diff --git a/scripts/src/release/releaser.py b/scripts/src/release/releaser.py
index 3090ac54..0f5926b2 100644
--- a/scripts/src/release/releaser.py
+++ b/scripts/src/release/releaser.py
@@ -19,53 +19,51 @@
 """

-import yaml
 import os
 import argparse
 import sys
 import shutil
 from release import release_info

-sys.path.append('../')
+sys.path.append("../")
 from tools import gitutils

-VERSION_CHECK_YAML_FILE=".github/workflows/version_check.yml"
-BUILD_YAML_FILE=".github/workflows/build.yml"
-DEV_PR_BRANCH_BODY_PREFIX="Charts workflow version"
-DEV_PR_BRANCH_NAME_PREFIX="Auto-Release-"
-CHARTS_PR_BRANCH_BODY_PREFIX="Workflow and script updates from development repository"
-CHARTS_PR_BRANCH_NAME_PREFIX="Release-"
-STAGE_PR_BRANCH_BODY_PREFIX="Workflow and script updates from development repository"
-STAGE_PR_BRANCH_NAME_PREFIX="Release-"
+VERSION_CHECK_YAML_FILE = ".github/workflows/version_check.yml"
+BUILD_YAML_FILE = ".github/workflows/build.yml"
+DEV_PR_BRANCH_BODY_PREFIX = "Charts workflow version"
+DEV_PR_BRANCH_NAME_PREFIX = "Auto-Release-"
+CHARTS_PR_BRANCH_BODY_PREFIX = "Workflow and script updates from development repository"
+CHARTS_PR_BRANCH_NAME_PREFIX = "Release-"
+STAGE_PR_BRANCH_BODY_PREFIX = "Workflow and script updates from development repository"
+STAGE_PR_BRANCH_NAME_PREFIX = "Release-"

 SCHEDULE_INSERT = [
-    '    # Daily trigger to check updates',
-    '    schedule:',
-    '        - cron: "0 0 * * *"'
+    "    # Daily trigger to check updates",
+    "    schedule:",
+    '        - cron: "0 0 * * *"',
 ]

-def update_workflow():
-
-    lines=[]
-    with open(VERSION_CHECK_YAML_FILE,'r') as schedule_file:
+def update_workflow():
+    lines = []
+    with open(VERSION_CHECK_YAML_FILE, "r") as schedule_file:
         lines = schedule_file.readlines()

     for line in lines:
         if line.strip() == "on:":
-            insert_location = lines.index(line)+1
+            insert_location = lines.index(line) + 1
             if SCHEDULE_INSERT[0] not in lines[insert_location].rstrip():
                 print("[INFO] add cron job to schedule.yaml")
-                lines.insert(insert_location,f"{SCHEDULE_INSERT[0]}\n")
-                lines.insert(insert_location+1,f"{SCHEDULE_INSERT[1]}\n")
-                lines.insert(insert_location+2,f"{SCHEDULE_INSERT[2]}\n")
+                lines.insert(insert_location, f"{SCHEDULE_INSERT[0]}\n")
+                lines.insert(insert_location + 1, f"{SCHEDULE_INSERT[1]}\n")
+                lines.insert(insert_location + 2, f"{SCHEDULE_INSERT[2]}\n")
                 break

-    with open(VERSION_CHECK_YAML_FILE,'w') as schedule_file:
+    with open(VERSION_CHECK_YAML_FILE, "w") as schedule_file:
         schedule_file.write("".join(lines))

-def make_required_changes(release_info_dir,origin,destination):
+def make_required_changes(release_info_dir, origin, destination):
     print(f"Make required changes from {origin} to {destination}")

     if "charts" in origin and "dev" in destination:
@@ -80,21 +78,23 @@
     else:
         sys.exit("Wrong arguments while calling make_required_changes")

-    replaces = release_info.get_replaces(from_repository,to_repository,release_info_dir)
+    replaces = release_info.get_replaces(
+        from_repository, to_repository, release_info_dir
+    )

     for replace in replaces:
-        replace_this=f"{destination}/{replace}"
+        replace_this = f"{destination}/{replace}"
         with_this = f"{origin}/{replace}"
         if os.path.isdir(with_this) or os.path.isdir(replace_this):
             print(f"Replace directory {replace_this} with {with_this}")
             if os.path.isdir(replace_this):
                 shutil.rmtree(replace_this)
-            shutil.copytree(with_this,replace_this)
+            shutil.copytree(with_this, replace_this)
         else:
             print(f"Replace file {replace_this} with {with_this}")
-            shutil.copy2(with_this,replace_this)
+            shutil.copy2(with_this, replace_this)

-    merges = release_info.get_merges(from_repository,to_repository,release_info_dir)
+    merges = release_info.get_merges(from_repository, to_repository, release_info_dir)

     for merge in merges:
         merge_this = f"{origin}/{merge}"
@@ -102,13 +102,12 @@
         if os.path.isdir(merge_this) or os.path.isdir(into_this):
             print(f"Merge directory {merge_this} with {into_this}")
-            shutil.copytree(merge_this,into_this,dirs_exist_ok=True)
+            shutil.copytree(merge_this, into_this, dirs_exist_ok=True)
         else:
             print(f"Merge file {merge_this} with {into_this}")
-            shutil.copy2(merge_this,into_this)
-
+            shutil.copy2(merge_this, into_this)

-    ignores = release_info.get_ignores(from_repository,to_repository,release_info_dir)
+    ignores = release_info.get_ignores(from_repository, to_repository, release_info_dir)
     for ignore in ignores:
         ignore_this = f"{destination}/{ignore}"
         if os.path.isdir(ignore_this):
@@ -120,25 +119,72 @@
 def main():
-
     parser = argparse.ArgumentParser()
-    parser.add_argument("-v", "--version", dest="version", type=str, required=True,
-                        help="Version to compare")
-    parser.add_argument("-d", "--development_dir", dest="dev_dir", type=str, required=True,
-                        help="Directory of development code with latest release info.")
-    parser.add_argument("-c", "--charts_dir", dest="charts_dir", type=str, required=True,
-                        help="Directory of charts code.")
-    parser.add_argument("-s", "--stage_dir", dest="stage_dir", type=str, required=True,
-                        help="Directory of stage code.")
-    parser.add_argument("-p", "--pr_dir", dest="pr_dir", type=str, required=True,
-                        help="Directory of pull request code.")
-    parser.add_argument("-b", "--dev_pr_body", dest="dev_pr_body", type=str, required=True,
-                        help="Body to use for the dev PR")
-    parser.add_argument("-t", "--target_branch", dest="target_branch", type=str, required=True,
-                        help="Target branch of the Pull Request" )
-    parser.add_argument("-r", "--target_repository", dest="target_repository", type=str, required=True,
-                        help="Repository which is the target of the pull request" )
+    parser.add_argument(
+        "-v",
+        "--version",
+        dest="version",
+        type=str,
+        required=True,
+        help="Version to compare",
+    )
+    parser.add_argument(
+        "-d",
+        "--development_dir",
+        dest="dev_dir",
+        type=str,
+        required=True,
+        help="Directory of development code with latest release info.",
+    )
+    parser.add_argument(
+        "-c",
+        "--charts_dir",
+        dest="charts_dir",
+        type=str,
+        required=True,
+        help="Directory of charts code.",
+    )
+    parser.add_argument(
+        "-s",
+        "--stage_dir",
+        dest="stage_dir",
+        type=str,
+        required=True,
+        help="Directory of stage code.",
+    )
+    parser.add_argument(
+        "-p",
+        "--pr_dir",
+        dest="pr_dir",
+        type=str,
+        required=True,
+        help="Directory of pull request code.",
+    )
+    parser.add_argument(
+        "-b",
+        "--dev_pr_body",
+        dest="dev_pr_body",
+        type=str,
+        required=True,
+        help="Body to use for the dev PR",
+    )
+    parser.add_argument(
+        "-t",
+        "--target_branch",
+        dest="target_branch",
+        type=str,
+        required=True,
+        help="Target branch of the Pull Request",
+    )
+    parser.add_argument(
+        "-r",
+        "--target_repository",
+        dest="target_repository",
+        type=str,
+        required=True,
+        help="Repository which is the target of the pull request",
+    )

     args = parser.parse_args()
@@ -152,73 +198,87 @@
     print(f"[INFO] arg target_branch : {args.target_branch}")
     print(f"[INFO] arg target_repository : {args.target_repository}")

-
     start_directory = os.getcwd()
     print(f"working directory: {start_directory}")

-    print(f"make changes to charts from development")
-    make_required_changes(args.pr_dir,args.dev_dir,args.charts_dir)
+    print("make changes to charts from development")
+    make_required_changes(args.pr_dir, args.dev_dir, args.charts_dir)

-    print(f"edit files in charts")
+    print("edit files in charts")
     os.chdir(args.charts_dir)
     update_workflow()

     organization = args.target_repository.split("/")[0]
-    charts_repository=f"{organization}{gitutils.CHARTS_REPO}"
-    print(f"create charts pull request, repository: {charts_repository}, branch: {args.target_branch} ")
+    charts_repository = f"{organization}{gitutils.CHARTS_REPO}"
+    print(
+        f"create charts pull request, repository: {charts_repository}, branch: {args.target_branch} "
+    )
     branch_name = f"{CHARTS_PR_BRANCH_NAME_PREFIX}{args.version}"
-    message = f'{CHARTS_PR_BRANCH_BODY_PREFIX} {branch_name}'
-    outcome = gitutils.create_pr(branch_name,[],charts_repository,message,args.target_branch)
+    message = f"{CHARTS_PR_BRANCH_BODY_PREFIX} {branch_name}"
+    outcome = gitutils.create_pr(
+        branch_name, [], charts_repository, message, args.target_branch
+    )
     if outcome == gitutils.PR_CREATED:
-        gitutils.add_output("charts_pr_created","true")
+        gitutils.add_output("charts_pr_created", "true")
     elif outcome == gitutils.PR_NOT_NEEDED:
-        gitutils.add_output("charts_pr_not_needed","true")
+        gitutils.add_output("charts_pr_not_needed", "true")
     else:
         print("[ERROR] error creating charts PR")
-        gitutils.add_output("charts_pr_error","true")
+        gitutils.add_output("charts_pr_error", "true")
         os.chdir(start_directory)
         return

     os.chdir(start_directory)

-    print(f"make changes to development from charts")
-    make_required_changes(args.pr_dir,args.charts_dir,args.dev_dir)
+    print("make changes to development from charts")
+    make_required_changes(args.pr_dir, args.charts_dir, args.dev_dir)

     os.chdir(args.dev_dir)
-    print(f"create development pull request")
+    print("create development pull request")
     branch_name = f"{DEV_PR_BRANCH_NAME_PREFIX}{args.version}"
-    outcome = gitutils.create_pr(branch_name,[release_info.RELEASE_INFO_FILE],args.target_repository,args.dev_pr_body,args.target_branch)
+    outcome = gitutils.create_pr(
+        branch_name,
+        [release_info.RELEASE_INFO_FILE],
+        args.target_repository,
+        args.dev_pr_body,
+        args.target_branch,
+    )
     if outcome == gitutils.PR_CREATED:
         print("Dev PR successfully created.")
-        gitutils.add_output("dev_pr_created","true")
+        gitutils.add_output("dev_pr_created", "true")
     elif outcome == gitutils.PR_NOT_NEEDED:
         print("Dev PR not needed.")
-        gitutils.add_output("dev_pr_not_needed","true")
+        gitutils.add_output("dev_pr_not_needed", "true")
     else:
         print("[ERROR] error creating development PR.")
-        gitutils.add_output("dev_pr_error","true")
+        gitutils.add_output("dev_pr_error", "true")

     os.chdir(start_directory)

-    print(f"make changes to stage from development")
-    make_required_changes(args.pr_dir,args.dev_dir,args.stage_dir)
+    print("make changes to stage from development")
+    make_required_changes(args.pr_dir, args.dev_dir, args.stage_dir)

     os.chdir(args.stage_dir)
-    stage_repository=f"{organization}{gitutils.STAGE_REPO}"
-    print(f"create stage pull request, repository: {stage_repository}, branch: {args.target_branch} ")
+    stage_repository = f"{organization}{gitutils.STAGE_REPO}"
+    print(
+        f"create stage pull request, repository: {stage_repository}, branch: {args.target_branch} "
+    )
     branch_name = f"{STAGE_PR_BRANCH_NAME_PREFIX}{args.version}"
-    message = f'{STAGE_PR_BRANCH_BODY_PREFIX} {branch_name}'
-    outcome = gitutils.create_pr(branch_name,[],stage_repository,message,args.target_branch)
+    message = f"{STAGE_PR_BRANCH_BODY_PREFIX} {branch_name}"
+    outcome = gitutils.create_pr(
+        branch_name, [], stage_repository, message, args.target_branch
+    )
     if outcome == gitutils.PR_CREATED:
-        gitutils.add_output("stage_pr_created","true")
+        gitutils.add_output("stage_pr_created", "true")
     elif outcome == gitutils.PR_NOT_NEEDED:
-        gitutils.add_output("stage_pr_not_needed","true")
+        gitutils.add_output("stage_pr_not_needed", "true")
     else:
         print("[ERROR] error creating stage PR")
-        gitutils.add_output("stage_pr_error","true")
+        gitutils.add_output("stage_pr_error", "true")
         os.chdir(start_directory)
         return

     os.chdir(start_directory)

+
 if __name__ == "__main__":
     main()
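update_workflow splices a daily cron trigger directly under the on: key of version_check.yml. The same logic run against an in-memory workflow file shows the resulting YAML (the input lines and indentation here are illustrative):

lines = ["name: Version Check\n", "on:\n", "  pull_request: {}\n"]
schedule_insert = [
    "    # Daily trigger to check updates",
    "    schedule:",
    '        - cron: "0 0 * * *"',
]
for line in lines:
    if line.strip() == "on:":
        at = lines.index(line) + 1
        if schedule_insert[0] not in lines[at].rstrip():
            for offset, text in enumerate(schedule_insert):
                lines.insert(at + offset, f"{text}\n")
        break
print("".join(lines))  # the schedule block now sits directly under "on:"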
%s, %s, %s" % (organization, chart, version)) + src = os.path.join( + os.getcwd(), "charts", category, organization, chart, version, "src" + ) + report_path = os.path.join( + os.getcwd(), "charts", category, organization, chart, version, "report.yaml" + ) + tar = os.path.join( + os.getcwd(), + "charts", + category, + organization, + chart, + version, + f"{chart}-{version}.tgz", + ) print(f"[INF0] report path exists = {os.path.exists(report_path)} : {report_path}") print(f"[INF0] src path exists = {os.path.exists(src)} : {src}") @@ -27,41 +39,57 @@ def generate_verify_options(directory,category, organization, chart, version): if os.path.exists(src) and not os.path.exists(tar): print("[INFO] chart src included") - return flags,src,True, cluster_needed + return flags, src, True, cluster_needed elif os.path.exists(tar) and not os.path.exists(src): print("[INFO] tarball included") if not os.path.exists(report_path): - owners_file = os.path.join(os.getcwd(),"charts", category, organization, chart, "OWNERS") - signed_flags = signedchart.get_verifier_flags(tar,owners_file,directory) + owners_file = os.path.join( + os.getcwd(), "charts", category, organization, chart, "OWNERS" + ) + signed_flags = signedchart.get_verifier_flags(tar, owners_file, directory) if signed_flags: print(f"[INFO] include flags for signed chart: {signed_flags}") flags = f"{flags} {signed_flags}" - return flags,tar,True, cluster_needed + return flags, tar, True, cluster_needed elif os.path.exists(tar) and os.path.exists(src): msg = "[ERROR] Both chart source directory and tarball should not exist" chartprreview.write_error_log(directory, msg) sys.exit(1) else: print("[INFO] report only") - return "","",False,False - + return "", "", False, False def main(): parser = argparse.ArgumentParser() - parser.add_argument("-u", "--api-url", dest="api_url", type=str, required=True, - help="API URL for the pull request") - parser.add_argument("-d", "--directory", dest="directory", type=str, required=True, - help="artifact directory for archival") + parser.add_argument( + "-u", + "--api-url", + dest="api_url", + type=str, + required=True, + help="API URL for the pull request", + ) + parser.add_argument( + "-d", + "--directory", + dest="directory", + type=str, + required=True, + help="artifact directory for archival", + ) args = parser.parse_args() - category, organization, chart, version = chartprreview.get_modified_charts(args.directory, args.api_url) + category, organization, chart, version = chartprreview.get_modified_charts( + args.directory, args.api_url + ) - flags,chart_uri,report_needed,cluster_needed = generate_verify_options(args.directory,category, organization, chart, version) - gitutils.add_output("report_needed",report_needed) - gitutils.add_output("cluster_needed",cluster_needed) + flags, chart_uri, report_needed, cluster_needed = generate_verify_options( + args.directory, category, organization, chart, version + ) + gitutils.add_output("report_needed", report_needed) + gitutils.add_output("cluster_needed", cluster_needed) if report_needed: - gitutils.add_output("verify_args",flags) - gitutils.add_output("verify_uri",chart_uri) - + gitutils.add_output("verify_args", flags) + gitutils.add_output("verify_uri", chart_uri) diff --git a/scripts/src/report/report_info.py b/scripts/src/report/report_info.py index 96ede706..304446cf 100644 --- a/scripts/src/report/report_info.py +++ b/scripts/src/report/report_info.py @@ -1,4 +1,3 @@ - import os import sys import docker @@ -11,6 +10,7 @@ REPORT_METADATA = "metadata" SHA_ERROR = 
"Digest in report did not match report content" + def write_error_log(*msg): directory = os.environ.get("WORKFLOW_WORKING_DIRECTORY") if directory: @@ -24,38 +24,70 @@ def write_error_log(*msg): print(line) -def _get_report_info(report_path, report_info_path,info_type, profile_type, profile_version): - +def _get_report_info( + report_path, report_info_path, info_type, profile_type, profile_version +): if report_info_path and len(report_info_path) > 0: print(f"[INFO] Using existing report info: {report_info_path}") report_out = json.load(open(report_info_path)) else: - command = f"report" + command = "report" set_values = "" if profile_type: set_values = "profile.vendortype=%s" % profile_type if profile_version: if set_values: - set_values = "%s,profile.version=%s" % (set_values,profile_version) + set_values = "%s,profile.version=%s" % (set_values, profile_version) else: set_values = "profile.version=%s" % profile_version if os.environ.get("VERIFIER_IMAGE"): print(f"[INFO] Generate report info using docker : {report_path}") - docker_command = f"{command} {info_type} /charts/{os.path.basename(report_path)}" + docker_command = ( + f"{command} {info_type} /charts/{os.path.basename(report_path)}" + ) if set_values: docker_command = "%s --set %s" % (docker_command, set_values) client = docker.from_env() report_directory = os.path.dirname(os.path.abspath(report_path)) - print(f'Call docker using image: {os.environ.get("VERIFIER_IMAGE")}, docker command: {docker_command}, report directory: {report_directory}') - output = client.containers.run(os.environ.get("VERIFIER_IMAGE"),docker_command,stdin_open=True,tty=True,stdout=True,volumes={report_directory: {'bind': '/charts/', 'mode': 'rw'}}) + print( + f'Call docker using image: {os.environ.get("VERIFIER_IMAGE")}, docker command: {docker_command}, report directory: {report_directory}' + ) + output = client.containers.run( + os.environ.get("VERIFIER_IMAGE"), + docker_command, + stdin_open=True, + tty=True, + stdout=True, + volumes={report_directory: {"bind": "/charts/", "mode": "rw"}}, + ) else: - print(f"[INFO] Generate report info using chart-verifier on path : {os.path.abspath(report_path)}") + print( + f"[INFO] Generate report info using chart-verifier on path : {os.path.abspath(report_path)}" + ) if set_values: - out = subprocess.run(["chart-verifier",command,info_type,"--set",set_values,os.path.abspath(report_path)],capture_output=True) + out = subprocess.run( + [ + "chart-verifier", + command, + info_type, + "--set", + set_values, + os.path.abspath(report_path), + ], + capture_output=True, + ) else: - out = subprocess.run(["chart-verifier",command,info_type,os.path.abspath(report_path)],capture_output=True) + out = subprocess.run( + [ + "chart-verifier", + command, + info_type, + os.path.abspath(report_path), + ], + capture_output=True, + ) output = out.stdout.decode("utf-8") if SHA_ERROR in output: @@ -72,7 +104,7 @@ def _get_report_info(report_path, report_info_path,info_type, profile_type, prof write_error_log(*msgs) sys.exit(1) - if not info_type in report_out: + if info_type not in report_out: msg = f"Error extracting {info_type} from the report:", report_out.strip() write_error_log(msg) sys.exit(1) @@ -87,45 +119,55 @@ def _get_report_info(report_path, report_info_path,info_type, profile_type, prof return report_out[info_type] -def get_report_annotations(report_path=None,report_info_path=None): - annotations = _get_report_info(report_path,report_info_path,REPORT_ANNOTATIONS,"","") +def get_report_annotations(report_path=None, 
+    annotations = _get_report_info(
+        report_path, report_info_path, REPORT_ANNOTATIONS, "", ""
+    )
     print("[INFO] report annotations : %s" % annotations)
     return annotations


-def get_report_results(report_path=None, profile_type=None, profile_version=None,report_info_path=None):
-    results = _get_report_info(report_path,report_info_path,REPORT_RESULTS,profile_type,profile_version)
+def get_report_results(
+    report_path=None, profile_type=None, profile_version=None, report_info_path=None
+):
+    results = _get_report_info(
+        report_path, report_info_path, REPORT_RESULTS, profile_type, profile_version
+    )
     print("[INFO] report results : %s" % results)
     results["failed"] = int(results["failed"])
     results["passed"] = int(results["passed"])
     return results

-
-def get_report_digests(report_path=None,report_info_path=None):
-    digests = _get_report_info(report_path,report_info_path,REPORT_DIGESTS,"","")
+
+
+def get_report_digests(report_path=None, report_info_path=None):
+    digests = _get_report_info(report_path, report_info_path, REPORT_DIGESTS, "", "")
     print("[INFO] report digests : %s" % digests)
     return digests


-def get_report_metadata(report_path=None,report_info_path=None):
-    metadata = _get_report_info(report_path,report_info_path,REPORT_METADATA,"","")
+
+def get_report_metadata(report_path=None, report_info_path=None):
+    metadata = _get_report_info(report_path, report_info_path, REPORT_METADATA, "", "")
     print("[INFO] report metadata : %s" % metadata)
     return metadata


-def get_report_chart_url(report_path=None,report_info_path=None):
-    metadata = _get_report_info(report_path,report_info_path,REPORT_METADATA,"","")
-    print("[INFO] report chart-uri : %s" % metadata["chart-uri"])
-    return metadata["chart-uri"]
-def get_report_chart(report_path=None,report_info_path=None):
-    metadata = _get_report_info(report_path,report_info_path,REPORT_METADATA,"","")
-    print("[INFO] report chart : %s" % metadata["chart"])
-    return metadata["chart"]
+def get_report_chart_url(report_path=None, report_info_path=None):
+    metadata = _get_report_info(report_path, report_info_path, REPORT_METADATA, "", "")
+    print("[INFO] report chart-uri : %s" % metadata["chart-uri"])
+    return metadata["chart-uri"]
+
+
+def get_report_chart(report_path=None, report_info_path=None):
+    metadata = _get_report_info(report_path, report_info_path, REPORT_METADATA, "", "")
+    print("[INFO] report chart : %s" % metadata["chart"])
+    return metadata["chart"]


-def main(): 
+def main():
     print("\n\n\n\nDocker image results:\n")
     os.environ["VERIFIER_IMAGE"] = "quay.io/redhat-certification/chart-verifier:main"
-    get_report_results("./report.yaml","","")
-    get_report_results("./report.yaml","community","v1.1")
+    get_report_results("./report.yaml", "", "")
+    get_report_results("./report.yaml", "community", "v1.1")
     get_report_digests("./report.yaml")
     get_report_metadata("./report.yaml")
     get_report_annotations("./report.yaml")
@@ -134,8 +176,8 @@
     print("\n\n\n\nverifier command results:\n")
     os.environ["VERIFIER_IMAGE"] = ""
-    get_report_results("./report.yaml","","")
-    get_report_results("./report.yaml","community","v1.1")
+    get_report_results("./report.yaml", "", "")
+    get_report_results("./report.yaml", "community", "v1.1")
     get_report_digests("./report.yaml")
     get_report_metadata("./report.yaml")
     get_report_annotations("./report.yaml")
@@ -151,7 +193,5 @@
     get_report_chart(report_info_path="./report_info.json")


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
-
-
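When VERIFIER_IMAGE is unset, _get_report_info shells out to a locally installed chart-verifier binary. The equivalent standalone call, with the profile values taken from the main() smoke test above (the "partner" vendortype is illustrative):

import subprocess

out = subprocess.run(
    [
        "chart-verifier",
        "report",
        "results",
        "--set",
        "profile.vendortype=partner,profile.version=v1.1",
        "report.yaml",
    ],
    capture_output=True,
)
print(out.stdout.decode("utf-8"))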
diff --git a/scripts/src/report/verifier_report.py b/scripts/src/report/verifier_report.py
index c029cb81..24da56e2 100644
--- a/scripts/src/report/verifier_report.py
+++ b/scripts/src/report/verifier_report.py
@@ -27,12 +27,13 @@
 import semantic_version
 import yaml

+
 try:
     from yaml import CLoader as Loader
 except ImportError:
     from yaml import Loader

-sys.path.append('../')
+sys.path.append("../")
 from chartrepomanager import indexannotations
 from report import report_info
@@ -42,16 +43,18 @@
 SUPPORTED_VERSIONS_ANNOTATION = "charts.openshift.io/supportedOpenShiftVersions"
 KUBE_VERSION_ATTRIBUTE = "kubeVersion"

+
 def get_report_data(report_path):
     try:
         with open(report_path) as report_data:
-            report_content = yaml.load(report_data,Loader=Loader)
-            return True,report_content
+            report_content = yaml.load(report_data, Loader=Loader)
+            return True, report_content
     except Exception as err:
         print(f"Exception 2 loading file: {err}")
-        return False,""
+        return False, ""

-def get_result(report_data,check_name):
+
+def get_result(report_data, check_name):
     outcome = False
     reason = "Not Found"
     for result in report_data["results"]:
@@ -60,16 +63,20 @@
             if result["outcome"] == "PASS":
                 outcome = True
             break
-    return outcome,reason
+    return outcome, reason
+

 def get_chart_testing_result(report_data):
-    return get_result(report_data,"/chart-testing")
+    return get_result(report_data, "/chart-testing")
+

 def get_has_kubeversion_result(report_data):
-    return get_result(report_data,"/has-kubeversion")
+    return get_result(report_data, "/has-kubeversion")
+

 def get_signature_is_valid_result(report_data):
-    return get_result(report_data,"/signature-is-valid")
+    return get_result(report_data, "/signature-is-valid")
+

 def get_profile_version(report_data):
     profile_version = "1.1"
@@ -79,18 +86,24 @@
         pass
     return profile_version

+
 def get_web_catalog_only(report_data):
     web_catalog_only = False
     try:
         if "webCatalogOnly" in report_data["metadata"]["tool"]:
             web_catalog_only = report_data["metadata"]["tool"]["webCatalogOnly"]
         if "providerControlledDelivery" in report_data["metadata"]["tool"]:
-            web_catalog_only = report_data["metadata"]["tool"]["providerControlledDelivery"]
+            web_catalog_only = report_data["metadata"]["tool"][
+                "providerControlledDelivery"
+            ]
     except Exception as err:
-        print(f"Exception getting webCatalogOnly/providerControlledDelivery {err=}, {type(err)=}")
+        print(
+            f"Exception getting webCatalogOnly/providerControlledDelivery {err=}, {type(err)=}"
+        )
         pass
     return web_catalog_only

+
 def get_package_digest(report_data):
     package_digest = None
     try:
@@ -102,6 +115,7 @@
         pass
     return package_digest

+
 def get_public_key_digest(report_data):
     public_key_digest = None
     try:
@@ -117,21 +131,21 @@

 def report_is_valid(report_data):
     outcome = True

-    if not "kind" in report_data or report_data["kind"] != "verify-report":
-        print('[ERROR] kind attribute invalid or missing from report')
+    if "kind" not in report_data or report_data["kind"] != "verify-report":
+        print("[ERROR] kind attribute invalid or missing from report")
         return False

-    if not "results" in report_data:
+    if "results" not in report_data:
         print("No results section in report")
         outcome = False
-    if not "metadata" in report_data:
+    if "metadata" not in report_data:
         print("No metadata section in report")
         outcome = False
     else:
-        if not "tool" in report_data["metadata"]:
+        if "tool" not in report_data["metadata"]:
             print("No tool metadata section in report")
             outcome = False
-        if not "chart" in report_data["metadata"]:
+        if "chart" not in report_data["metadata"]:
             print("No tool chart section in report")
             outcome = False
@@ -139,19 +153,17 @@

 def validate(report_path):
-
-    is_valid_yaml,report_data = get_report_data(report_path)
+    is_valid_yaml, report_data = get_report_data(report_path)

     if not is_valid_yaml:
-        return False,f"Report is not valid yaml: {report_path}"
+        return False, f"Report is not valid yaml: {report_path}"

     if not report_is_valid(report_data):
-        return False,f"Report is incomplete and cannot be processed: {report_path}"
+        return False, f"Report is incomplete and cannot be processed: {report_path}"

-    ## No value in checking if chart testing failed
-    chart_testing_outcome,_ = get_chart_testing_result(report_data)
+    # No value in checking if chart testing failed
+    chart_testing_outcome, _ = get_chart_testing_result(report_data)

     if chart_testing_outcome:
-
         profile_version_string = get_profile_version(report_data)

         try:
@@ -162,8 +174,7 @@
         except Exception:
             message = f"Invalid profile version in report : {profile_version_string}"
             print(message)
-            return False,message
-
+            return False, message

         annotations = report_info.get_report_annotations(report_path)

@@ -172,57 +183,89 @@
         else:
             tested_version_annotation = TESTED_VERSION_ANNOTATION

-        if tested_version_annotation in annotations: 
+        if tested_version_annotation in annotations:
             tested_version_string = annotations[tested_version_annotation]
         else:
-            return False,f"No annotation provided for {tested_version_annotation}"
+            return False, f"No annotation provided for {tested_version_annotation}"

         try:
             tested_version = semantic_version.Version.coerce(tested_version_string)
             if tested_version not in MIN_SUPPORTED_OPENSHIFT_VERSION:
-                return False,f"{tested_version_annotation} {tested_version_string} is not a supported OpenShift version."
+                return (
+                    False,
+                    f"{tested_version_annotation} {tested_version_string} is not a supported OpenShift version.",
+                )
         except ValueError:
-            return False,f"{tested_version_annotation} {tested_version_string} is not a valid semantic version."
+            return (
+                False,
+                f"{tested_version_annotation} {tested_version_string} is not a valid semantic version.",
+            )

-        has_kubeversion_outcome,_ = get_chart_testing_result(report_data)
+        has_kubeversion_outcome, _ = get_chart_testing_result(report_data)
         if has_kubeversion_outcome:
-
             chart = report_info.get_report_chart(report_path)
             if KUBE_VERSION_ATTRIBUTE in chart:
-                kube_supported_ocp_versions_string = indexannotations.getOCPVersions(chart[KUBE_VERSION_ATTRIBUTE])
+                kube_supported_ocp_versions_string = indexannotations.getOCPVersions(
+                    chart[KUBE_VERSION_ATTRIBUTE]
+                )
                 try:
-                    kube_supported_versions = semantic_version.NpmSpec(kube_supported_ocp_versions_string)
+                    kube_supported_versions = semantic_version.NpmSpec(
+                        kube_supported_ocp_versions_string
+                    )
                 except ValueError:
                     if v1_0_profile:
-                        return True,""
+                        return True, ""
                     else:
-                        return False,f'Kube Version {chart[KUBE_VERSION_ATTRIBUTE]} translates to an invalid OCP version range {kube_supported_ocp_versions_string}'
+                        return (
+                            False,
+                            f"Kube Version {chart[KUBE_VERSION_ATTRIBUTE]} translates to an invalid OCP version range {kube_supported_ocp_versions_string}",
+                        )
             else:
                 if v1_0_profile:
-                    return True,""
+                    return True, ""
                 else:
-                    return False,f'{KUBE_VERSION_ATTRIBUTE} missing from chart!'
+ return False, f"{KUBE_VERSION_ATTRIBUTE} missing from chart!" if tested_version not in kube_supported_versions: - return False,f"Tested OpenShift version {str(tested_version)} not within specified kube-versions : {kube_supported_ocp_versions_string}" + return ( + False, + f"Tested OpenShift version {str(tested_version)} not within specified kube-versions : {kube_supported_ocp_versions_string}", + ) if not v1_0_profile: - if SUPPORTED_VERSIONS_ANNOTATION in annotations: - supported_versions_string = annotations[SUPPORTED_VERSIONS_ANNOTATION] + supported_versions_string = annotations[ + SUPPORTED_VERSIONS_ANNOTATION + ] try: - supported_versions = semantic_version.NpmSpec(supported_versions_string) + supported_versions = semantic_version.NpmSpec( + supported_versions_string + ) except ValueError: - return False,f"{SUPPORTED_VERSIONS_ANNOTATION}: {supported_versions_string} is not a valid semantic version." + return ( + False, + f"{SUPPORTED_VERSIONS_ANNOTATION}: {supported_versions_string} is not a valid semantic version.", + ) else: - return False,f"Missing annotation in report: {SUPPORTED_VERSIONS_ANNOTATION}" + return ( + False, + f"Missing annotation in report: {SUPPORTED_VERSIONS_ANNOTATION}", + ) if tested_version not in supported_versions: - return False,f"Tested OpenShift version {str(tested_version)} not within supported versions : {supported_versions_string}" - - if supported_versions_string and supported_versions_string != str(kube_supported_versions): - return False,f'Kube Version {chart[KUBE_VERSION_ATTRIBUTE]} -> {str(kube_supported_versions)} does not match supportedOpenShiftVersions: {supported_versions_string}' + return ( + False, + f"Tested OpenShift version {str(tested_version)} not within supported versions : {supported_versions_string}", + ) + + if supported_versions_string and supported_versions_string != str( + kube_supported_versions + ): + return ( + False, + f"Kube Version {chart[KUBE_VERSION_ATTRIBUTE]} -> {str(kube_supported_versions)} does not match supportedOpenShiftVersions: {supported_versions_string}", + ) else: print("[INFO] Chart testing failed so skip report checking") - return True,"" + return True, "" diff --git a/scripts/src/saforcertadmin/push_secrets.py b/scripts/src/saforcertadmin/push_secrets.py index 2e1a1d26..9d1ecf4c 100644 --- a/scripts/src/saforcertadmin/push_secrets.py +++ b/scripts/src/saforcertadmin/push_secrets.py @@ -1,4 +1,4 @@ -''' +""" This script will help to list, create or update secrets of a repository Prerequsites: @@ -18,7 +18,7 @@ 2. 
diff --git a/scripts/src/saforcertadmin/push_secrets.py b/scripts/src/saforcertadmin/push_secrets.py
index 2e1a1d26..9d1ecf4c 100644
--- a/scripts/src/saforcertadmin/push_secrets.py
+++ b/scripts/src/saforcertadmin/push_secrets.py
@@ -1,4 +1,4 @@
-'''
+"""
 This script will help to list, create or update secrets of a repository

 Prerequisites:
@@ -18,7 +18,7 @@
 2. To create or update the CLUSTER_TOKEN of openshift-helm-charts/sandbox repository
    python push_secrets.py -r openshift-helm-charts/sandbox -s CLUSTER_TOKEN -v <value>
-'''
+"""
 from base64 import b64encode
 from nacl import encoding, public
 import logging
@@ -28,14 +28,18 @@
 import requests
 import argparse

-sys.path.append('../')
+sys.path.append("../")
 from pullrequest import prartifact

 token = os.environ.get("BOT_TOKEN")
-headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}'}
+headers = {
+    "Accept": "application/vnd.github.v3+json",
+    "Authorization": f"token {token}",
+}

 logging.basicConfig(level=logging.INFO)

+
 def encrypt(public_key: str, secret_value: str) -> str:
     """Encrypt a Unicode string using the public key."""
     public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
@@ -43,45 +47,67 @@
     encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
     return b64encode(encrypted).decode("utf-8")

+
 def get_repo_public_key(repo):
     """Get the public key id and key of a github repository"""
-    response = requests.get(f'https://api.github.com/repos/{repo}/actions/secrets/public-key', headers=headers)
+    response = requests.get(
+        f"https://api.github.com/repos/{repo}/actions/secrets/public-key",
+        headers=headers,
+    )
     if response.status_code != 200:
-        logging.error(f"unexpected response getting repo public key : {response.status_code} : {response.reason}")
+        logging.error(
+            f"unexpected response getting repo public key : {response.status_code} : {response.reason}"
+        )
         sys.exit(1)
     response_json = response.json()

-    if prartifact.xRateLimit in r.headers:
-        print(f'[DEBUG] {prartifact.xRateLimit} : {r.headers[prartifact.xRateLimit]}')
-    if prartifact.xRateRemain in r.headers:
-        print(f'[DEBUG] {prartifact.xRateRemain} : {r.headers[prartifact.xRateRemain]}')
+    if prartifact.xRateLimit in response.headers:
+        print(
+            f"[DEBUG] {prartifact.xRateLimit} : {response.headers[prartifact.xRateLimit]}"
+        )
+    if prartifact.xRateRemain in response.headers:
+        print(
+            f"[DEBUG] {prartifact.xRateRemain} : {response.headers[prartifact.xRateRemain]}"
+        )

     if "message" in response_json:
         print(f'[ERROR] getting public key: {response_json["message"]}')
         sys.exit(1)

-    return response_json['key_id'], response_json['key']
+    return response_json["key_id"], response_json["key"]

+
 def get_repo_secrets(repo):
     """Get the list of secret names of a github repository"""
     secret_names = []
-    response = requests.get(f'https://api.github.com/repos/{repo}/actions/secrets', headers=headers)
+    response = requests.get(
+        f"https://api.github.com/repos/{repo}/actions/secrets", headers=headers
+    )
     if response.status_code != 200:
-        logging.error(f"[ERROR] unexpected response getting repo secrets : {response.status_code} : {response.reason}")
+        logging.error(
+            f"[ERROR] unexpected response getting repo secrets : {response.status_code} : {response.reason}"
+        )
         sys.exit(1)
     response_json = response.json()

     if "message" in response_json:
         print(f'[ERROR] getting repo secrets: {response_json["message"]}')
         sys.exit(1)

-    for i in range(response_json['total_count']):
-        secret_names.append(response_json['secrets'][i]['name'])
+    for i in range(response_json["total_count"]):
+        secret_names.append(response_json["secrets"][i]["name"])

     return secret_names

+
 def create_or_update_repo_secrets(repo, secret_name, key_id, encrypted_value):
     """Create or update a github repository secret"""
-    response = requests.put(f'https://api.github.com/repos/{repo}/actions/secrets/{secret_name}', json={'key_id': key_id, 'encrypted_value': encrypted_value}, headers=headers)
+    response = requests.put(
+        f"https://api.github.com/repos/{repo}/actions/secrets/{secret_name}",
+        json={"key_id": key_id, "encrypted_value": encrypted_value},
+        headers=headers,
+    )
     if response.status_code != 201 and response.status_code != 204:
-        logging.error(f"unexpected response during put request : {response.status_code} : {response.reason}")
+        logging.error(
+            f"unexpected response during put request : {response.status_code} : {response.reason}"
+        )
         sys.exit(1)
     try:
         response_json = response.json()
@@ -91,33 +117,55 @@
     except json.decoder.JSONDecodeError:
         pass

+    logging.info(f"Secret {secret_name} create or update successful")

-    logging.info(f'Secret {secret_name} create or update successful')

 def main():
-    parser = argparse.ArgumentParser(description='Script to list, create or update secrets of a repository')
-    parser.add_argument("-r", "--repo", dest="repo", type=str, required=True,
-                        help="Github repo name in {org}/{repo_name} format")
-    parser.add_argument("-l", "--list", dest="list", action='store_true', required=False,
-                        help="List the secret names")
-    parser.add_argument("-s", "--secret", dest="secret", type=str, required=False,
-                        help="Secret name")
-    parser.add_argument("-v", "--value", dest="value", type=str, required=False,
-                        help="Secret value to set")
+    parser = argparse.ArgumentParser(
+        description="Script to list, create or update secrets of a repository"
+    )
+    parser.add_argument(
+        "-r",
+        "--repo",
+        dest="repo",
+        type=str,
+        required=True,
+        help="Github repo name in {org}/{repo_name} format",
+    )
+    parser.add_argument(
+        "-l",
+        "--list",
+        dest="list",
+        action="store_true",
+        required=False,
+        help="List the secret names",
+    )
+    parser.add_argument(
+        "-s", "--secret", dest="secret", type=str, required=False, help="Secret name"
+    )
+    parser.add_argument(
+        "-v",
+        "--value",
+        dest="value",
+        type=str,
+        required=False,
+        help="Secret value to set",
+    )

     args = parser.parse_args()
     if args.list:
         secrets = get_repo_secrets(args.repo)
-        logging.info(f'Github Secret Names: {secrets}')
+        logging.info(f"Github Secret Names: {secrets}")
     elif args.secret and args.value:
         secret_name = args.secret
         secret_value = args.value
-        logging.info(f'Setting SECRET: {secret_name}')
+        logging.info(f"Setting SECRET: {secret_name}")
         key_id, public_key = get_repo_public_key(args.repo)
         encrypted_value = encrypt(public_key, secret_value)
         create_or_update_repo_secrets(args.repo, secret_name, key_id, encrypted_value)
     else:
-        logging.error('Wrong argument combination')
+        logging.error("Wrong argument combination")
+

-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
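encrypt() above does exactly what the GitHub secrets API requires: seal the value with the repository public key using libsodium's anonymous SealedBox. A self-contained round trip with a throwaway key pair standing in for the repository key:

from base64 import b64encode
from nacl import encoding, public

private_key = public.PrivateKey.generate()
public_key_b64 = private_key.public_key.encode(encoding.Base64Encoder()).decode("utf-8")

# What encrypt(public_key_b64, "hunter2") produces:
box = public.SealedBox(public.PublicKey(public_key_b64.encode("utf-8"), encoding.Base64Encoder()))
sealed = box.encrypt("hunter2".encode("utf-8"))
print(b64encode(sealed).decode("utf-8"))

# GitHub holds the private half; locally the round trip confirms it works:
print(public.SealedBox(private_key).decrypt(sealed))  # b'hunter2'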
diff --git a/scripts/src/saforcharttesting/saforcharttesting.py b/scripts/src/saforcharttesting/saforcharttesting.py
index 57249952..ba2ef7b2 100644
--- a/scripts/src/saforcharttesting/saforcharttesting.py
+++ b/scripts/src/saforcharttesting/saforcharttesting.py
@@ -115,6 +115,7 @@
 namespace: ${name}
 """

+
 def apply_config(tmpl, **values):
     with tempfile.TemporaryDirectory(prefix="sa-for-chart-testing-") as tmpdir:
         content = Template(tmpl).substitute(values)
@@ -130,6 +131,7 @@
     return stdout, stderr

+
 def delete_config(tmpl, **values):
     with tempfile.TemporaryDirectory(prefix="sa-for-chart-testing-") as tmpdir:
         content = Template(tmpl).substitute(values)
@@ -145,6 +147,7 @@
     return stdout, stderr

+
 def create_namespace(namespace):
     print("creating Namespace:", namespace)
     stdout, stderr = apply_config(namespace_template, name=namespace)
@@ -152,6 +155,7 @@
     if stderr.strip():
         print("[ERROR] creating Namespace:", stderr)

+
 def create_serviceaccount(namespace):
     print("creating ServiceAccount:", namespace)
     stdout, stderr = apply_config(serviceaccount_template, name=namespace)
@@ -159,6 +163,7 @@
     if stderr.strip():
         print("[ERROR] creating ServiceAccount:", stderr)

+
 def create_role(namespace):
     print("creating Role:", namespace)
     stdout, stderr = apply_config(role_template, name=namespace)
@@ -166,6 +171,7 @@
     if stderr.strip():
         print("[ERROR] creating Role:", stderr)

+
 def create_rolebinding(namespace):
     print("creating RoleBinding:", namespace)
     stdout, stderr = apply_config(rolebinding_template, name=namespace)
@@ -173,6 +179,7 @@
     if stderr.strip():
         print("[ERROR] creating RoleBinding:", stderr)

+
 def create_clusterrole(namespace):
     print("creating ClusterRole:", namespace)
     stdout, stderr = apply_config(clusterrole_template, name=namespace)
@@ -180,6 +187,7 @@
     if stderr.strip():
         print("[ERROR] creating ClusterRole:", stderr)

+
 def create_clusterrolebinding(namespace):
     print("creating ClusterRoleBinding:", namespace)
     stdout, stderr = apply_config(clusterrolebinding_template, name=namespace)
@@ -187,6 +195,7 @@
     if stderr.strip():
         print("[ERROR] creating ClusterRoleBinding:", stderr)

+
 def delete_namespace(namespace):
     print("deleting Namespace:", namespace)
     stdout, stderr = delete_config(namespace_template, name=namespace)
@@ -195,6 +204,7 @@
         print("[ERROR] deleting Namespace:", namespace, stderr)
         sys.exit(1)

+
 def delete_clusterrole(name):
     print("deleting ClusterRole:", name)
     stdout, stderr = delete_config(clusterrole_template, name=name)
@@ -203,6 +213,7 @@
         print("[ERROR] deleting ClusterRole:", name, stderr)
         sys.exit(1)

+
 def delete_clusterrolebinding(name):
     print("deleting ClusterRoleBinding:", name)
     stdout, stderr = delete_config(clusterrolebinding_template, name=name)
@@ -211,11 +222,15 @@
         print("[ERROR] deleting ClusterRoleBinding:", name, stderr)
         sys.exit(1)

+
 def write_sa_token(namespace, token):
     secret_found = False
     secrets = []
     for i in range(7):
-        out = subprocess.run(["oc", "get", "serviceaccount", namespace, "-n", namespace, "-o", "json"], capture_output=True)
+        out = subprocess.run(
+            ["oc", "get", "serviceaccount", namespace, "-n", namespace, "-o", "json"],
+            capture_output=True,
+        )
         stdout = out.stdout.decode("utf-8")
         if out.returncode != 0:
             stderr = out.stderr.decode("utf-8")
@@ -229,15 +244,18 @@
             secret_found = True
             break
         else:
-            pattern = r'Tokens:\s+([A-Za-z0-9-]+)'
-            dout = subprocess.run(["oc", "describe", "serviceaccount", namespace, "-n", namespace], capture_output=True)
+            pattern = r"Tokens:\s+([A-Za-z0-9-]+)"
+            dout = subprocess.run(
+                ["oc", "describe", "serviceaccount", namespace, "-n", namespace],
+                capture_output=True,
+            )
             dstdout = dout.stdout.decode("utf-8")
             match = re.search(pattern, dstdout)
             if match:
-                token_name = match.group(1) 
+                token_name = match.group(1)
             else:
-                print("[ERROR] Token not found, Exiting") 
-                sys.exit(1)
+                print("[ERROR] Token not found, Exiting")
+                sys.exit(1)
             secrets.append({"name": token_name})
             secret_found = True
             break
@@ -248,7 +266,10 @@
         sys.exit(1)

     for secret in secrets:
-        out = subprocess.run(["oc", "get", "secret", secret["name"], "-n", namespace, "-o", "json"], capture_output=True)
+        out = subprocess.run(
+            ["oc", "get", "secret", secret["name"], "-n", namespace, "-o", "json"],
+            capture_output=True,
+        )
         stdout = out.stdout.decode("utf-8")
         if out.returncode != 0:
             stderr = out.stderr.decode("utf-8")
@@ -262,10 +283,13 @@
         with open(token, "w") as fd:
             fd.write(base64.b64decode(content).decode("utf-8"))

+
 def switch_project_context(namespace, token, api_server):
     tkn = open(token).read()
     for i in range(7):
-        out = subprocess.run(["oc", "login", "--token", tkn, "--server", api_server], capture_output=True)
+        out = subprocess.run(
+            ["oc", "login", "--token", tkn, "--server", api_server], capture_output=True
+        )
         stdout = out.stdout.decode("utf-8")
         print(stdout)
         out = subprocess.run(["oc", "project", namespace], capture_output=True)
@@ -280,19 +304,41 @@
         time.sleep(10)

     # This exit will happen if there is an infra failure
-    print("""[ERROR] There is an error creating the namespace and service account. It happens due to some infrastructure failure. It is not directly related to the changes in the pull request. You can wait for some time and try to re-run the job. To re-run the job change the PR into a draft and remove the draft state.""")
+    print(
+        """[ERROR] There is an error creating the namespace and service account. It happens due to some infrastructure failure. It is not directly related to the changes in the pull request. You can wait for some time and try to re-run the job. To re-run the job change the PR into a draft and remove the draft state."""
To re-run the job change the PR into a draft and remove the draft state.""" + ) sys.exit(1) + def main(): parser = argparse.ArgumentParser() - parser.add_argument("-c", "--create", dest="create", type=str, required=False, - help="create service account and namespace for chart testing") - parser.add_argument("-t", "--token", dest="token", type=str, required=False, - help="service account token for chart testing") - parser.add_argument("-d", "--delete", dest="delete", type=str, required=False, - help="delete service account and namespace used for chart testing") - parser.add_argument("-s", "--server", dest="server", type=str, required=False, - help="API server URL") + parser.add_argument( + "-c", + "--create", + dest="create", + type=str, + required=False, + help="create service account and namespace for chart testing", + ) + parser.add_argument( + "-t", + "--token", + dest="token", + type=str, + required=False, + help="service account token for chart testing", + ) + parser.add_argument( + "-d", + "--delete", + dest="delete", + type=str, + required=False, + help="delete service account and namespace used for chart testing", + ) + parser.add_argument( + "-s", "--server", dest="server", type=str, required=False, help="API server URL" + ) args = parser.parse_args() if args.create: diff --git a/scripts/src/signedchart/signedchart.py b/scripts/src/signedchart/signedchart.py index b0fa5f62..11958ff3 100644 --- a/scripts/src/signedchart/signedchart.py +++ b/scripts/src/signedchart/signedchart.py @@ -1,4 +1,3 @@ -import requests import sys import subprocess import base64 @@ -6,19 +5,14 @@ import os import re -try: - from yaml import CLoader as Loader, CDumper as Dumper -except ImportError: - from yaml import Loader, Dumper - -sys.path.append('../') +sys.path.append("../") from report import verifier_report from owners import owners_file from pullrequest import prartifact -def check_and_prepare_signed_chart(api_url,report_path,owner_path,key_file_path): - signed_chart = is_chart_signed(api_url,report_path) +def check_and_prepare_signed_chart(api_url, report_path, owner_path, key_file_path): + signed_chart = is_chart_signed(api_url, report_path) key_in_owners = False keys_match = False if signed_chart: @@ -26,30 +20,32 @@ def check_and_prepare_signed_chart(api_url,report_path,owner_path,key_file_path) if owners_pgp_key: key_in_owners = True if report_path: - keys_match = check_pgp_public_key(owners_pgp_key,report_path) + keys_match = check_pgp_public_key(owners_pgp_key, report_path) elif key_file_path: - create_public_key_file(owners_pgp_key,key_file_path) + create_public_key_file(owners_pgp_key, key_file_path) + + return signed_chart, key_in_owners, keys_match - return signed_chart,key_in_owners,keys_match -def get_verifier_flags(tar_file,owners_file,temp_dir): +def get_verifier_flags(tar_file, owners_file, temp_dir): prov_file = f"{tar_file}.prov" if os.path.exists(prov_file): gpg_key = get_pgp_key_from_owners(owners_file) if gpg_key: - key_file = os.path.join(temp_dir,"pgp",f"{tar_file}.key") - create_public_key_file(gpg_key,key_file) + key_file = os.path.join(temp_dir, "pgp", f"{tar_file}.key") + create_public_key_file(gpg_key, key_file) return f"--pgp-public-key {key_file}" return "" -def is_chart_signed(api_url,report_path): - +def is_chart_signed(api_url, report_path): if api_url: files = prartifact.get_modified_files(api_url) tgz_pattern = re.compile(r"charts/(\w+)/([\w-]+)/([\w-]+)/([\w\.-]+)/.*.tgz") tgz_found = False - prov_pattern = 
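Taken together, these flags form the script's small CLI: --create provisions the namespace and service account, --token says where to write the captured token, --delete tears everything down, and --server points oc at the cluster API. A hedged usage sketch (the script filename and server URL are assumptions; the flags are the ones defined above):

# Provision a namespace/service account for chart testing and save its token:
#   python3 saforcharttesting.py --create mytest-ns --token /tmp/sa-token --server https://api.example.com:6443
#
# Tear the namespace back down afterwards:
#   python3 saforcharttesting.py --delete mytest-ns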
diff --git a/scripts/src/signedchart/signedchart.py b/scripts/src/signedchart/signedchart.py
index b0fa5f62..11958ff3 100644
--- a/scripts/src/signedchart/signedchart.py
+++ b/scripts/src/signedchart/signedchart.py
@@ -1,4 +1,3 @@
-import requests
 import sys
 import subprocess
 import base64
@@ -6,19 +5,14 @@
 import os
 import re
 
-try:
-    from yaml import CLoader as Loader, CDumper as Dumper
-except ImportError:
-    from yaml import Loader, Dumper
-
-sys.path.append('../')
+sys.path.append("../")
 
 from report import verifier_report
 from owners import owners_file
 from pullrequest import prartifact
 
 
-def check_and_prepare_signed_chart(api_url,report_path,owner_path,key_file_path):
-    signed_chart = is_chart_signed(api_url,report_path)
+def check_and_prepare_signed_chart(api_url, report_path, owner_path, key_file_path):
+    signed_chart = is_chart_signed(api_url, report_path)
     key_in_owners = False
     keys_match = False
     if signed_chart:
@@ -26,30 +20,32 @@ def check_and_prepare_signed_chart(api_url,report_path,owner_path,key_file_path)
         if owners_pgp_key:
             key_in_owners = True
             if report_path:
-                keys_match = check_pgp_public_key(owners_pgp_key,report_path)
+                keys_match = check_pgp_public_key(owners_pgp_key, report_path)
             elif key_file_path:
-                create_public_key_file(owners_pgp_key,key_file_path)
+                create_public_key_file(owners_pgp_key, key_file_path)
+
+    return signed_chart, key_in_owners, keys_match
 
-    return signed_chart,key_in_owners,keys_match
 
-def get_verifier_flags(tar_file,owners_file,temp_dir):
+def get_verifier_flags(tar_file, owners_file, temp_dir):
     prov_file = f"{tar_file}.prov"
     if os.path.exists(prov_file):
         gpg_key = get_pgp_key_from_owners(owners_file)
         if gpg_key:
-            key_file = os.path.join(temp_dir,"pgp",f"{tar_file}.key")
-            create_public_key_file(gpg_key,key_file)
+            key_file = os.path.join(temp_dir, "pgp", f"{tar_file}.key")
+            create_public_key_file(gpg_key, key_file)
             return f"--pgp-public-key {key_file}"
     return ""
 
 
-def is_chart_signed(api_url,report_path):
-
+def is_chart_signed(api_url, report_path):
     if api_url:
         files = prartifact.get_modified_files(api_url)
         tgz_pattern = re.compile(r"charts/(\w+)/([\w-]+)/([\w-]+)/([\w\.-]+)/.*.tgz")
         tgz_found = False
-        prov_pattern = re.compile(r"charts/(\w+)/([\w-]+)/([\w-]+)/([\w\.-]+)/.*.tgz.prov")
+        prov_pattern = re.compile(
+            r"charts/(\w+)/([\w-]+)/([\w-]+)/([\w\.-]+)/.*.tgz.prov"
+        )
         prov_found = False
 
         for file_path in files:
@@ -65,11 +61,13 @@ def is_chart_signed(api_url,report_path):
 
     return False
 
-def key_in_owners_match_report(owner_path,report_path):
+
+def key_in_owners_match_report(owner_path, report_path):
     owner_key = get_pgp_key_from_owners(owner_path)
     if not owner_key:
         return True
-    return check_pgp_public_key(owner_key,report_path)
+    return check_pgp_public_key(owner_key, report_path)
+
 
 def get_pgp_key_from_owners(owner_path):
     found, owner_data = owners_file.get_owner_data_from_file(owner_path)
@@ -83,24 +81,24 @@ def get_pgp_key_from_owners(owner_path):
 
 
 def check_report_for_signed_chart(report_path):
-
-    found,report_data = verifier_report.get_report_data(report_path)
+    found, report_data = verifier_report.get_report_data(report_path)
     if found:
-        outcome,reason = verifier_report.get_signature_is_valid_result(report_data)
+        outcome, reason = verifier_report.get_signature_is_valid_result(report_data)
         if "Chart is signed" in reason:
             return True
 
     return False
 
 
-def check_pgp_public_key(owner_pgp_key,report_path):
-
-    ## return True if one of:
-    #  - report not found
-    #  - report is not for a signed chart
-    #  - digests match
-    found,report_data = verifier_report.get_report_data(report_path)
+def check_pgp_public_key(owner_pgp_key, report_path):
+    # return True if one of:
+    # - report not found
+    # - report is not for a signed chart
+    # - digests match
+    found, report_data = verifier_report.get_report_data(report_path)
     if found:
-        pgp_public_key_digest_owners = subprocess.getoutput(f'echo {owner_pgp_key} | sha256sum').split(" ")[0]
+        pgp_public_key_digest_owners = subprocess.getoutput(
+            f"echo {owner_pgp_key} | sha256sum"
+        ).split(" ")[0]
         print(f"[INFO] digest of PGP key from OWNERS :{pgp_public_key_digest_owners}:")
         pgp_public_digest_report = verifier_report.get_public_key_digest(report_data)
         print(f"[INFO] PGP key digest in report :{pgp_public_digest_report}:")
@@ -111,18 +109,16 @@ def check_pgp_public_key(owner_pgp_key,report_path):
     return True
 
 
-def create_public_key_file(pgp_public_key_from_owners,key_file_path):
-
+def create_public_key_file(pgp_public_key_from_owners, key_file_path):
     key_content = base64.b64decode(pgp_public_key_from_owners)
     key_file = open(key_file_path, "w")
-    key_file.write(key_content.decode('utf-8'))
+    key_file.write(key_content.decode("utf-8"))
     key_file.close()
 
 
 def main():
-
-    if not is_chart_signed("","./partner-report.yaml"):
+    if not is_chart_signed("", "./partner-report.yaml"):
         print("ERROR chart is signed")
     else:
         print("PASS chart is signed")
@@ -132,23 +128,25 @@ def main():
     else:
         print("PASS report is signed")
 
-    encoded_key_in_owners = get_pgp_key_from_owners("./OWNERS")
-    if not check_pgp_public_key(encoded_key_in_owners,"./partner-report.yaml"):
+    encoded_key_in_owners = get_pgp_key_from_owners("./OWNERS")
+    if not check_pgp_public_key(encoded_key_in_owners, "./partner-report.yaml"):
         print("ERROR key digests do not match")
     else:
         print("PASS key digests match")
 
-    signed,key_in_owners,keys_match = check_and_prepare_signed_chart("","./partner-report.yaml","./OWNERS","./pgp.key")
+    signed, key_in_owners, keys_match = check_and_prepare_signed_chart(
+        "", "./partner-report.yaml", "./OWNERS", "./pgp.key"
+    )
     if signed and key_in_owners and keys_match:
         print("PASS all is good")
     else:
-        print(f"ERROR, all true expected: signed = {signed}, key_in_owners = {key_in_owners}. keys_match = {keys_match}")
+        print(
+            f"ERROR, all true expected: signed = {signed}, key_in_owners = {key_in_owners}. keys_match = {keys_match}"
+        )
 
-    create_public_key_file(encoded_key_in_owners,"./pgp.key")
+    create_public_key_file(encoded_key_in_owners, "./pgp.key")
     if os.path.exists("./pgp.key"):
-        if not filecmp.cmp("./psql-service-0.1.11.tgz.key","./pgp.key"):
+        if not filecmp.cmp("./psql-service-0.1.11.tgz.key", "./pgp.key"):
             print("ERROR public key files file do not match")
         else:
             print("PASS public key files do match")
@@ -157,10 +155,5 @@ def main():
             print("ERROR pgp key file was not created")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
-
-
-
-
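check_pgp_public_key compares a digest computed by shelling out to `echo <key> | sha256sum` against the digest recorded in the verifier report. One subtlety worth noting: echo appends a trailing newline, so a pure-Python equivalent has to hash the key text plus "\n" to reproduce the same digest. A rough hashlib-based sketch, under that assumption:

import hashlib

def owners_key_digest(owner_pgp_key: str) -> str:
    # `echo <key> | sha256sum` hashes the key plus the newline echo appends,
    # so the newline must be included to match the report's digest.
    return hashlib.sha256((owner_pgp_key + "\n").encode("utf-8")).hexdigest()

# Compare against the digest stored in the verifier report, e.g.:
# verifier_report.get_public_key_digest(report_data) == owners_key_digest(key)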
diff --git a/scripts/src/tools/gitutils.py b/scripts/src/tools/gitutils.py
index ea24fd47..52debd77 100644
--- a/scripts/src/tools/gitutils.py
+++ b/scripts/src/tools/gitutils.py
@@ -17,9 +17,8 @@ import json
 import requests
 from git import Repo
-from git.exc import GitCommandError
 
-GITHUB_BASE_URL = 'https://api.github.com'
+GITHUB_BASE_URL = "https://api.github.com"
 CHARTS_REPO = "/charts"
 DEVELOPMENT_REPO = "/development"
 STAGE_REPO = "/stage"
@@ -29,7 +28,8 @@
 PR_FAILED = "PR_FAILED"
 
 # GitHub actions bot email for git email
-GITHUB_ACTIONS_BOT_EMAIL = '41898282+github-actions[bot]@users.noreply.github.com'
+GITHUB_ACTIONS_BOT_EMAIL = "41898282+github-actions[bot]@users.noreply.github.com"
+
 
 def set_git_username_email(repo, username, email):
     """
@@ -43,8 +43,7 @@ def set_git_username_email(repo, username, email):
 
 
 def github_api_post(endpoint, headers, json):
-    r = requests.post(f'{GITHUB_BASE_URL}/{endpoint}',
-                      headers=headers, json=json)
+    r = requests.post(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers, json=json)
 
     try:
         response_json = r.json()
@@ -55,11 +54,11 @@ def github_api_post(endpoint, headers, json):
     except json.JSONDecodeError:
         pass
 
     return r
 
+
 def github_api_get(endpoint, headers):
-    r = requests.get(f'{GITHUB_BASE_URL}/{endpoint}', headers=headers)
+    r = requests.get(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers)
     response_json = r.json()
     if "message" in response_json:
         print(f'[ERROR] get request: {response_json["message"]}')
@@ -67,16 +66,21 @@ def github_api_get(endpoint, headers):
 
     return r
 
+
 def github_api(method, endpoint, bot_token, json={}):
-    headers = {'Accept': 'application/vnd.github.v3+json',
-               'Authorization': f'Bearer {bot_token}'}
-    if method == 'get':
-        return github_api_get(endpoint,headers)
-    elif method == 'post':
-        return github_api_post(endpoint,headers,json)
+    headers = {
+        "Accept": "application/vnd.github.v3+json",
+        "Authorization": f"Bearer {bot_token}",
+    }
+    if method == "get":
+        return github_api_get(endpoint, headers)
+    elif method == "post":
+        return github_api_post(endpoint, headers, json)
     else:
         raise ValueError(
-            f"Github API method {method} not implemented in helper function")
+            f"Github API method {method} not implemented in helper function"
+        )
+
 
 def get_bot_name_and_token():
     bot_name = os.environ.get("BOT_NAME")
@@ -92,39 +96,45 @@ def get_bot_name_and_token():
     return bot_name, bot_token
 
 
-def create_pr(branch_name,skip_files,repository,message,target_branch):
-
+def create_pr(branch_name, skip_files, repository, message, target_branch):
     repo = Repo(os.getcwd())
 
     bot_name, bot_token = get_bot_name_and_token()
-    set_git_username_email(repo,bot_name,GITHUB_ACTIONS_BOT_EMAIL)
+    set_git_username_email(repo, bot_name, GITHUB_ACTIONS_BOT_EMAIL)
 
     repo.create_head(branch_name)
     print(f"checkout branch {branch_name}")
     repo.git.checkout(branch_name)
 
-    if add_changes(repo,skip_files):
-
+    if add_changes(repo, skip_files):
         print(f"commit changes with message: {branch_name}")
         repo.index.commit(branch_name)
 
         print(f"push the branch {branch_name} to {repository}")
-        repo.git.push(f'https://x-access-token:{bot_token}@github.com/{repository}',
-                      f'HEAD:refs/heads/{branch_name}','-f')
+        repo.git.push(
+            f"https://x-access-token:{bot_token}@github.com/{repository}",
+            f"HEAD:refs/heads/{branch_name}",
+            "-f",
+        )
 
         print(f"make the pull request to {target_branch}")
 
-        data = {'head': branch_name, 'base': f'{target_branch}',
-                'title': branch_name, 'body': f'{message}'}
+        data = {
+            "head": branch_name,
+            "base": f"{target_branch}",
+            "title": branch_name,
+            "body": f"{message}",
+        }
 
-        r = github_api(
-            'post', f'repos/{repository}/pulls',bot_token,json=data)
+        r = github_api("post", f"repos/{repository}/pulls", bot_token, json=data)
 
         j = json.loads(r.text)
-        if 'number' in j:
+        if "number" in j:
             print(f"pull request info: {j['number']}")
             return PR_CREATED
         else:
-            print(f"Unexpected response from PR. status code: {r.status_code}, text: {j}")
+            print(
+                f"Unexpected response from PR. status code: {r.status_code}, text: {j}"
+            )
             return PR_FAILED
 
     else:
@@ -132,19 +142,17 @@ def create_pr(branch_name,skip_files,repository,message,target_branch):
 
         return PR_NOT_NEEDED
 
-
-def add_changes(repo,skip_files):
-
+def add_changes(repo, skip_files):
     if len(skip_files) == 0:
-        changed = [ item.a_path for item in repo.index.diff(None) ]
+        changed = [item.a_path for item in repo.index.diff(None)]
         for change in changed:
             print(f"Changed file: {change}")
         for add in repo.untracked_files:
             print(f"Added file: {add}")
-        print(f"Add all changes")
+        print("Add all changes")
         repo.git.add(all=True)
     else:
-        changed = [ item.a_path for item in repo.index.diff(None) ]
+        changed = [item.a_path for item in repo.index.diff(None)]
         for change in changed:
             if change in skip_files:
                 print(f"Skip changed file: {change}")
@@ -161,6 +169,7 @@ def add_changes(repo,skip_files):
 
     return len(repo.index.diff("HEAD")) > 0
 
-def add_output(name,value):
-    with open(os.environ['GITHUB_OUTPUT'],'a') as fh:
-        print(f'{name}={value}',file=fh)
+
+def add_output(name, value):
+    with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
+        print(f"{name}={value}", file=fh)
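add_output is the seam between these scripts and the GitHub Actions workflow around them: the runner parses the file named by GITHUB_OUTPUT for name=value pairs and exposes them to later steps as steps.<id>.outputs.<name>. A minimal sketch of both halves (the step id "check" is illustrative):

import os

# Python side, the same pattern add_output uses: append one pair per line.
with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
    print("run-tests=true", file=fh)

# Workflow side (YAML), assuming the producing step has `id: check`:
#   if: steps.check.outputs.run-tests == 'true'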
re.compile(r"tests/functional/step_defs/.*_test_.*"), + re.compile(r"tests/functional/behave_features/.*.feature"), + ] + skip_build_files = [ + re.compile(r"release/release_info.json"), + re.compile(r"README.md"), + re.compile(r"docs/([\w-]+)\.md"), + ] workflow_found = False others_found = False @@ -38,10 +48,10 @@ def check_if_ci_only_is_modified(api_url): return False if others_found and not workflow_found: - gitutils.add_output("do-not-build","true") + gitutils.add_output("do-not-build", "true") elif tests_included: - print(f"[INFO] set full_tests_in_pr to true") - gitutils.add_output("full_tests_in_pr","true") + print("[INFO] set full_tests_in_pr to true") + gitutils.add_output("full_tests_in_pr", "true") return workflow_found @@ -58,33 +68,51 @@ def verify_user(username): print(f"[INFO] {username} authorized") return True else: - print(f"[ERROR] {username} cannot run tests") + print(f"[ERROR] {username} cannot run tests") return False def main(): parser = argparse.ArgumentParser() - parser.add_argument("-u", "--api-url", dest="api_url", type=str, required=False, - help="API URL for the pull request") - parser.add_argument("-n", "--verify-user", dest="username", type=str, required=True, - help="check if the user can run tests") + parser.add_argument( + "-u", + "--api-url", + dest="api_url", + type=str, + required=False, + help="API URL for the pull request", + ) + parser.add_argument( + "-n", + "--verify-user", + dest="username", + type=str, + required=True, + help="check if the user can run tests", + ) args = parser.parse_args() if not args.api_url: if verify_user(args.username): - print(f"[INFO] User authorized for manual invocation - run tests.") - gitutils.add_output("run-tests","true") + print("[INFO] User authorized for manual invocation - run tests.") + gitutils.add_output("run-tests", "true") else: - print(f"[INFO] User not authorized for manual invocation - do not run tests.") - gitutils.add_output("workflow-only-but-not-authorized","true") + print( + "[INFO] User not authorized for manual invocation - do not run tests." + ) + gitutils.add_output("workflow-only-but-not-authorized", "true") elif check_if_ci_only_is_modified(args.api_url): if verify_user(args.username): - print(f"[INFO] PR is workflow changes only and user is authorized - run tests.") - gitutils.add_output("run-tests","true") + print( + "[INFO] PR is workflow changes only and user is authorized - run tests." + ) + gitutils.add_output("run-tests", "true") else: - print(f"[INFO] PR is workflow changes only but user is not authorized - do not run tests.") - gitutils.add_output("workflow-only-but-not-authorized","true") + print( + "[INFO] PR is workflow changes only but user is not authorized - do not run tests." 
+ ) + gitutils.add_output("workflow-only-but-not-authorized", "true") else: - print(f"[INFO] Non workflow changes were found - do not run tests") + print("[INFO] Non workflow changes were found - do not run tests") if __name__ == "__main__": diff --git a/tests/functional/behave_features/common/utils/chart.py b/tests/functional/behave_features/common/utils/chart.py index 3aeec916..8f1e859a 100644 --- a/tests/functional/behave_features/common/utils/chart.py +++ b/tests/functional/behave_features/common/utils/chart.py @@ -9,6 +9,7 @@ from enum import Enum from dataclasses import dataclass + class Chart_Type(Enum): SRC = 1 TAR = 2 @@ -16,6 +17,7 @@ class Chart_Type(Enum): SRC_AND_REPORT = 4 TAR_AND_REPORT = 5 + class Release_Type(Enum): CHART_ONLY = 1 REPORT_ONLY = 2 @@ -24,21 +26,25 @@ class Release_Type(Enum): CHART_PROV_AND_REPORT = 5 CHART_REPORT_PROV_AND_KEY = 6 + @dataclass class Chart: - chart_file_path : str = '' - report_file_path : str = '' - chart_name: str = '' - chart_version: str = '' - chart_directory: str = '' + chart_file_path: str = "" + report_file_path: str = "" + chart_name: str = "" + chart_version: str = "" + chart_directory: str = "" chart_type: Chart_Type = None def update_chart_directory(self, secrets): base_branch_without_uuid = "-".join(secrets.base_branch.split("-")[:-1]) vendor_without_suffix = secrets.vendor.split("-")[0] - secrets.base_branch = f'{base_branch_without_uuid}-{secrets.vendor_type}-{vendor_without_suffix}-{self.chart_name}-{self.chart_version}' - secrets.pr_branch = f'{secrets.base_branch}-pr-branch' - self.chart_directory = f'charts/{secrets.vendor_type}/{secrets.vendor}/{self.chart_name}' + secrets.base_branch = f"{base_branch_without_uuid}-{secrets.vendor_type}-{vendor_without_suffix}-{self.chart_name}-{self.chart_version}" + secrets.pr_branch = f"{secrets.base_branch}-pr-branch" + self.chart_directory = ( + f"charts/{secrets.vendor_type}/{secrets.vendor}/{self.chart_name}" + ) + def get_name_and_version_from_report(path): """ @@ -49,22 +55,22 @@ def get_name_and_version_from_report(path): str: chart name str: chart version """ - if path.endswith('yaml'): - with open(path, 'r') as fd: + if path.endswith("yaml"): + with open(path, "r") as fd: try: report = yaml.safe_load(fd) except yaml.YAMLError as err: raise AssertionError(f"error parsing '{path}': {err}") - elif path.endswith('json'): - with open(path, 'r') as fd: + elif path.endswith("json"): + with open(path, "r") as fd: try: report = json.load(fd) except Exception as err: raise AssertionError(f"error parsing '{path}': {err}") else: raise AssertionError("Unknown report type") - chart = report['metadata']['chart'] - return chart['name'], chart['version'] + chart = report["metadata"]["chart"] + return chart["name"], chart["version"] def get_name_and_version_from_chart_tar(path): @@ -78,13 +84,13 @@ def get_name_and_version_from_chart_tar(path): """ tar = tarfile.open(path) for member in tar.getmembers(): - if member.name.split('/')[-1] == 'Chart.yaml': + if member.name.split("/")[-1] == "Chart.yaml": chart = tar.extractfile(member) if chart is not None: content = chart.read() try: chart_yaml = yaml.safe_load(content) - return chart_yaml['name'], chart_yaml['version'] + return chart_yaml["name"], chart_yaml["version"] except yaml.YAMLError as err: raise AssertionError(f"error parsing '{path}': {err}") else: @@ -100,13 +106,14 @@ def get_name_and_version_from_chart_src(path): str: chart name str: chart version """ - chart_path = os.path.join(path, 'Chart.yaml') - with open(chart_path, 'r') as fd: 
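The triage in check_if_ci_only_is_modified reduces to bucketing each modified path with regexes and then choosing between the do-not-build, full_tests_in_pr, and plain workflow outcomes. A condensed sketch of the same classification with illustrative paths:

import re

workflow_files = [re.compile(r".github/workflows/.*"), re.compile(r"scripts/.*"), re.compile(r"tests/.*")]
skip_build_files = [re.compile(r"release/release_info.json"), re.compile(r"README.md"), re.compile(r"docs/([\w-]+)\.md")]

files = ["scripts/src/tools/gitutils.py", "README.md"]  # hypothetical PR contents
workflow_found = any(any(p.match(f) for p in workflow_files) for f in files)
others_found = any(any(p.match(f) for p in skip_build_files) for f in files)
# Only doc/release-info matches -> "do-not-build"; any scripts/tests/workflow
# match keeps workflow_found True, as in check_if_ci_only_is_modified above.
print(workflow_found, others_found)  # -> True True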
diff --git a/tests/functional/behave_features/common/utils/chart.py b/tests/functional/behave_features/common/utils/chart.py
index 3aeec916..8f1e859a 100644
--- a/tests/functional/behave_features/common/utils/chart.py
+++ b/tests/functional/behave_features/common/utils/chart.py
@@ -9,6 +9,7 @@
 from enum import Enum
 from dataclasses import dataclass
 
+
 class Chart_Type(Enum):
     SRC = 1
     TAR = 2
@@ -16,6 +17,7 @@ class Chart_Type(Enum):
     SRC_AND_REPORT = 4
     TAR_AND_REPORT = 5
 
+
 class Release_Type(Enum):
     CHART_ONLY = 1
     REPORT_ONLY = 2
@@ -24,21 +26,25 @@ class Release_Type(Enum):
     CHART_PROV_AND_REPORT = 5
     CHART_REPORT_PROV_AND_KEY = 6
 
+
 @dataclass
 class Chart:
-    chart_file_path : str = ''
-    report_file_path : str = ''
-    chart_name: str = ''
-    chart_version: str = ''
-    chart_directory: str = ''
+    chart_file_path: str = ""
+    report_file_path: str = ""
+    chart_name: str = ""
+    chart_version: str = ""
+    chart_directory: str = ""
     chart_type: Chart_Type = None
 
     def update_chart_directory(self, secrets):
         base_branch_without_uuid = "-".join(secrets.base_branch.split("-")[:-1])
         vendor_without_suffix = secrets.vendor.split("-")[0]
-        secrets.base_branch = f'{base_branch_without_uuid}-{secrets.vendor_type}-{vendor_without_suffix}-{self.chart_name}-{self.chart_version}'
-        secrets.pr_branch = f'{secrets.base_branch}-pr-branch'
-        self.chart_directory = f'charts/{secrets.vendor_type}/{secrets.vendor}/{self.chart_name}'
+        secrets.base_branch = f"{base_branch_without_uuid}-{secrets.vendor_type}-{vendor_without_suffix}-{self.chart_name}-{self.chart_version}"
+        secrets.pr_branch = f"{secrets.base_branch}-pr-branch"
+        self.chart_directory = (
+            f"charts/{secrets.vendor_type}/{secrets.vendor}/{self.chart_name}"
+        )
+
 
 def get_name_and_version_from_report(path):
     """
@@ -49,22 +55,22 @@ def get_name_and_version_from_report(path):
         str: chart name
         str: chart version
     """
-    if path.endswith('yaml'):
-        with open(path, 'r') as fd:
+    if path.endswith("yaml"):
+        with open(path, "r") as fd:
             try:
                 report = yaml.safe_load(fd)
             except yaml.YAMLError as err:
                 raise AssertionError(f"error parsing '{path}': {err}")
-    elif path.endswith('json'):
-        with open(path, 'r') as fd:
+    elif path.endswith("json"):
+        with open(path, "r") as fd:
             try:
                 report = json.load(fd)
             except Exception as err:
                 raise AssertionError(f"error parsing '{path}': {err}")
     else:
         raise AssertionError("Unknown report type")
-    chart = report['metadata']['chart']
-    return chart['name'], chart['version']
+    chart = report["metadata"]["chart"]
+    return chart["name"], chart["version"]
 
 
 def get_name_and_version_from_chart_tar(path):
@@ -78,13 +84,13 @@ def get_name_and_version_from_chart_tar(path):
     """
     tar = tarfile.open(path)
     for member in tar.getmembers():
-        if member.name.split('/')[-1] == 'Chart.yaml':
+        if member.name.split("/")[-1] == "Chart.yaml":
             chart = tar.extractfile(member)
             if chart is not None:
                 content = chart.read()
                 try:
                     chart_yaml = yaml.safe_load(content)
-                    return chart_yaml['name'], chart_yaml['version']
+                    return chart_yaml["name"], chart_yaml["version"]
                 except yaml.YAMLError as err:
                     raise AssertionError(f"error parsing '{path}': {err}")
     else:
@@ -100,13 +106,14 @@ def get_name_and_version_from_chart_src(path):
         str: chart name
         str: chart version
     """
-    chart_path = os.path.join(path, 'Chart.yaml')
-    with open(chart_path, 'r') as fd:
+    chart_path = os.path.join(path, "Chart.yaml")
+    with open(chart_path, "r") as fd:
         try:
             chart_yaml = yaml.safe_load(fd)
         except yaml.YAMLError as err:
             raise AssertionError(f"error parsing '{path}': {err}")
-        return chart_yaml['name'], chart_yaml['version']
+        return chart_yaml["name"], chart_yaml["version"]
+
 
 def extract_chart_tgz(src, dst, chart_name, logger):
     """Extracts the chart tgz file into the target location under 'charts/' for PR submission tests
@@ -117,13 +124,14 @@ def extract_chart_tgz(src, dst, chart_name, logger):
     """
     try:
         logger.info(f"Remove existing local '{dst}/src'")
-        shutil.rmtree(f'{dst}/src')
+        shutil.rmtree(f"{dst}/src")
     except FileNotFoundError:
         logger.info(f"'{dst}/src' does not exist")
     finally:
-        with tarfile.open(src, 'r') as fd:
+        with tarfile.open(src, "r") as fd:
             fd.extractall(dst)
-        os.rename(f'{dst}/{chart_name}', f'{dst}/src')
+        os.rename(f"{dst}/{chart_name}", f"{dst}/src")
+
 
 def get_all_charts(charts_path: str, vendor_types: str) -> list:
     # TODO: Support `community` as vendor_type.
@@ -139,37 +147,44 @@ def get_all_charts(charts_path: str, vendor_types: str) -> list:
     """
     ret = []
     # Pre-process vendor types
-    vendor_types = vendor_types.replace('partner', 'partners')
-    vendor_types = [vt.strip() for vt in vendor_types.split(',')]
-    vendor_types = list(
-        {'partners', 'redhat', 'all'}.intersection(set(vendor_types)))
-    vendor_types = ['partners',
-                    'redhat'] if 'all' in vendor_types else vendor_types
+    vendor_types = vendor_types.replace("partner", "partners")
+    vendor_types = [vt.strip() for vt in vendor_types.split(",")]
+    vendor_types = list({"partners", "redhat", "all"}.intersection(set(vendor_types)))
+    vendor_types = ["partners", "redhat"] if "all" in vendor_types else vendor_types
     # Iterate through `charts/` to find chart submission with src or tgz
     for vt in vendor_types:
-        charts_path_vt = f'{charts_path}/{vt}'
-        vendor_names = [name for name in os.listdir(
-            charts_path_vt) if os.path.isdir(f'{charts_path_vt}/{name}')]
+        charts_path_vt = f"{charts_path}/{vt}"
+        vendor_names = [
+            name
+            for name in os.listdir(charts_path_vt)
+            if os.path.isdir(f"{charts_path_vt}/{name}")
+        ]
         for vn in vendor_names:
-            charts_path_vt_vn = f'{charts_path_vt}/{vn}'
-            chart_names = [name for name in os.listdir(
-                charts_path_vt_vn) if os.path.isdir(f'{charts_path_vt_vn}/{name}')]
+            charts_path_vt_vn = f"{charts_path_vt}/{vn}"
+            chart_names = [
+                name
+                for name in os.listdir(charts_path_vt_vn)
+                if os.path.isdir(f"{charts_path_vt_vn}/{name}")
+            ]
             for cn in chart_names:
-                charts_path_vt_vn_cn = f'{charts_path_vt_vn}/{cn}'
-                file_names = [name for name in os.listdir(
-                    charts_path_vt_vn_cn)]
-                if 'OWNERS' not in file_names:
+                charts_path_vt_vn_cn = f"{charts_path_vt_vn}/{cn}"
+                file_names = [name for name in os.listdir(charts_path_vt_vn_cn)]
+                if "OWNERS" not in file_names:
                     continue
-                chart_versions = [name for name in os.listdir(
-                    charts_path_vt_vn_cn) if os.path.isdir(f'{charts_path_vt_vn_cn}/{name}')]
+                chart_versions = [
+                    name
+                    for name in os.listdir(charts_path_vt_vn_cn)
+                    if os.path.isdir(f"{charts_path_vt_vn_cn}/{name}")
+                ]
                 # Only interest in latest chart version
                 if len(chart_versions) == 0:
                     continue
                 cv = max(chart_versions)
-                charts_path_vt_vn_cn_cv = f'{charts_path_vt_vn_cn}/{cv}'
-                file_names = [name for name in os.listdir(
-                    charts_path_vt_vn_cn_cv)]
-                if 'report.yaml' not in file_names and (f'{cn}-{cv}.tgz' in file_names or 'src' in file_names):
+                charts_path_vt_vn_cn_cv = f"{charts_path_vt_vn_cn}/{cv}"
+                file_names = [name for name in os.listdir(charts_path_vt_vn_cn_cv)]
+                if "report.yaml" not in file_names and (
+                    f"{cn}-{cv}.tgz" in file_names or "src" in file_names
+                ):
                     ret.append((vt, vn, cn, cv))
     return ret
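get_name_and_version_from_chart_tar has to scan the archive because the Chart.yaml member sits under the chart's top-level directory, whose name is not known in advance. A small standalone version of the same lookup (the tarball name is illustrative):

import tarfile
import yaml

with tarfile.open("psql-service-0.1.11.tgz") as tar:  # illustrative chart archive
    for member in tar.getmembers():
        # Match any <chart-name>/Chart.yaml member, wherever it nests.
        if member.name.split("/")[-1] == "Chart.yaml":
            chart_yaml = yaml.safe_load(tar.extractfile(member).read())
            print(chart_yaml["name"], chart_yaml["version"])
            break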
diff --git a/tests/functional/behave_features/common/utils/chart_certification.py b/tests/functional/behave_features/common/utils/chart_certification.py
index f56b67e2..74937cb7 100644
--- a/tests/functional/behave_features/common/utils/chart_certification.py
+++ b/tests/functional/behave_features/common/utils/chart_certification.py
@@ -26,6 +26,7 @@
 from common.utils.chart import *
 from common.utils.env import *
 
+
 @dataclass
 class ChartCertificationE2ETest:
     owners_file_content: str = """\
@@ -47,6 +48,7 @@ class ChartCertificationE2ETest:
     temp_dir: TemporaryDirectory = None
     temp_repo: git.Repo = None
     github_actions: str = os.environ.get("GITHUB_ACTIONS")
+
     def set_git_username_email(self, repo, username, email):
         """
         Parameters:
@@ -57,110 +59,155 @@ class ChartCertificationE2ETest:
         repo.config_writer().set_value("user", "name", username).release()
         repo.config_writer().set_value("user", "email", email).release()
 
-    def remove_chart(self, chart_directory, chart_version, remote_repo, base_branch, bot_token):
+    def remove_chart(
+        self, chart_directory, chart_version, remote_repo, base_branch, bot_token
+    ):
         # Remove chart files from base branch
         logging.info(
-            f"Remove {chart_directory}/{chart_version} from {remote_repo}:{base_branch}")
+            f"Remove {chart_directory}/{chart_version} from {remote_repo}:{base_branch}"
+        )
         try:
-            self.temp_repo.git.rm('-rf', '--cached', f'{chart_directory}/{chart_version}')
-            self.temp_repo.git.commit(
-                '-m', f'Remove {chart_directory}/{chart_version}')
-            self.temp_repo.git.push(f'https://x-access-token:{bot_token}@github.com/{remote_repo}',
-                                    f'HEAD:refs/heads/{base_branch}')
+            self.temp_repo.git.rm(
+                "-rf", "--cached", f"{chart_directory}/{chart_version}"
+            )
+            self.temp_repo.git.commit("-m", f"Remove {chart_directory}/{chart_version}")
+            self.temp_repo.git.push(
+                f"https://x-access-token:{bot_token}@github.com/{remote_repo}",
+                f"HEAD:refs/heads/{base_branch}",
+            )
         except git.exc.GitCommandError:
             logging.info(
-                f"{chart_directory}/{chart_version} not exist on {remote_repo}:{base_branch}")
+                f"{chart_directory}/{chart_version} not exist on {remote_repo}:{base_branch}"
+            )
 
     def remove_owners_file(self, chart_directory, remote_repo, base_branch, bot_token):
         # Remove the OWNERS file from base branch
         logging.info(
-            f"Remove {chart_directory}/OWNERS from {remote_repo}:{base_branch}")
+            f"Remove {chart_directory}/OWNERS from {remote_repo}:{base_branch}"
+        )
         try:
-            self.temp_repo.git.rm('-rf', '--cached', f'{chart_directory}/OWNERS')
-            self.temp_repo.git.commit(
-                '-m', f'Remove {chart_directory}/OWNERS')
-            self.temp_repo.git.push(f'https://x-access-token:{bot_token}@github.com/{remote_repo}',
-                                    f'HEAD:refs/heads/{base_branch}')
+            self.temp_repo.git.rm("-rf", "--cached", f"{chart_directory}/OWNERS")
+            self.temp_repo.git.commit("-m", f"Remove {chart_directory}/OWNERS")
+            self.temp_repo.git.push(
+                f"https://x-access-token:{bot_token}@github.com/{remote_repo}",
+                f"HEAD:refs/heads/{base_branch}",
+            )
         except git.exc.GitCommandError:
             logging.info(
-                f"{chart_directory}/OWNERS not exist on {remote_repo}:{base_branch}")
+                f"{chart_directory}/OWNERS not exist on {remote_repo}:{base_branch}"
+            )
 
     def create_test_gh_pages_branch(self, remote_repo, base_branch, bot_token):
         # Get SHA from 'dev-gh-pages' branch
         logging.info(
-            f"Create '{remote_repo}:{base_branch}-gh-pages' from '{remote_repo}:dev-gh-pages'")
+            f"Create '{remote_repo}:{base_branch}-gh-pages' from '{remote_repo}:dev-gh-pages'"
+        )
         r = github_api(
-            'get', f'repos/{remote_repo}/git/ref/heads/dev-gh-pages', bot_token)
+            "get", f"repos/{remote_repo}/git/ref/heads/dev-gh-pages", bot_token
+        )
         j = json.loads(r.text)
-        sha = j['object']['sha']
+        sha = j["object"]["sha"]
 
         # Create a new gh-pages branch for testing
-        data = {'ref': f'refs/heads/{base_branch}-gh-pages', 'sha': sha}
-        r = github_api(
-            'post', f'repos/{remote_repo}/git/refs', bot_token, json=data)
+        data = {"ref": f"refs/heads/{base_branch}-gh-pages", "sha": sha}
+        r = github_api("post", f"repos/{remote_repo}/git/refs", bot_token, json=data)
 
-        logging.info(f'gh-pages branch created: {base_branch}-gh-pages')
+        logging.info(f"gh-pages branch created: {base_branch}-gh-pages")
 
     def setup_git_context(self, repo: git.Repo):
-        self.set_git_username_email(repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL)
-        if os.environ.get('WORKFLOW_DEVELOPMENT'):
+        self.set_git_username_email(
+            repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL
+        )
+        if os.environ.get("WORKFLOW_DEVELOPMENT"):
             logging.info("Wokflow development enabled")
             repo.git.add(A=True)
-            repo.git.commit('-m', 'Checkpoint')
+            repo.git.commit("-m", "Checkpoint")
 
     def send_pull_request(self, remote_repo, base_branch, pr_branch, bot_token):
-        pr_body = os.environ.get('PR_BODY')
-        data = {'head': pr_branch, 'base': base_branch,
-                'title': base_branch, 'body': pr_body}
+        pr_body = os.environ.get("PR_BODY")
+        data = {
+            "head": pr_branch,
+            "base": base_branch,
+            "title": base_branch,
+            "body": pr_body,
+        }
 
         logging.debug(f"PR_BODY Content: {pr_body}")
-        logging.info(
-            f"Create PR from '{remote_repo}:{pr_branch}'")
-        r = github_api(
-            'post', f'repos/{remote_repo}/pulls', bot_token, json=data)
+        logging.info(f"Create PR from '{remote_repo}:{pr_branch}'")
+        r = github_api("post", f"repos/{remote_repo}/pulls", bot_token, json=data)
 
         j = json.loads(r.text)
-        if not 'number' in j:
+        if "number" not in j:
             raise AssertionError(f"error sending pull request, response was: {r.text}")
-        return j['number']
-
-    def create_and_push_owners_file(self, chart_directory, base_branch, vendor_name, vendor_type, chart_name, provider_delivery=False, public_key_file=None):
+        return j["number"]
+
+    def create_and_push_owners_file(
+        self,
+        chart_directory,
+        base_branch,
+        vendor_name,
+        vendor_type,
+        chart_name,
+        provider_delivery=False,
+        public_key_file=None,
+    ):
         with SetDirectory(Path(self.temp_dir.name)):
             # Create the OWNERS file from the string template
-            if public_key_file != None:
-                with open(public_key_file, 'r') as f:
+            if public_key_file is not None:
+                with open(public_key_file, "r") as f:
                     content = f.read()
-                encoded_content = content.encode('utf-8')
-                public_key_value = base64.b64encode(encoded_content).decode('utf-8')
+                encoded_content = content.encode("utf-8")
+                public_key_value = base64.b64encode(encoded_content).decode("utf-8")
             else:
-                public_key_value = 'null'
-            values = {'bot_name': self.secrets.bot_name, 'public_key': public_key_value,
-                      'vendor': vendor_name, 'chart_name': chart_name,
-                      "provider_delivery" : provider_delivery}
+                public_key_value = "null"
+            values = {
+                "bot_name": self.secrets.bot_name,
+                "public_key": public_key_value,
+                "vendor": vendor_name,
+                "chart_name": chart_name,
+                "provider_delivery": provider_delivery,
+            }
             content = Template(self.secrets.owners_file_content).substitute(values)
             logging.debug(f"OWNERS File Content: {content}")
-            with open(f'{chart_directory}/OWNERS', 'w') as fd:
+            with open(f"{chart_directory}/OWNERS", "w") as fd:
                 fd.write(content)
 
             # Push OWNERS file to the test_repo
             logging.info(
-                f"Push OWNERS file to '{self.secrets.test_repo}:{base_branch}'")
-            self.temp_repo.git.add(f'{chart_directory}/OWNERS')
+                f"Push OWNERS file to '{self.secrets.test_repo}:{base_branch}'"
+            )
+            self.temp_repo.git.add(f"{chart_directory}/OWNERS")
             self.temp_repo.git.commit(
-                '-m', f"Add {vendor_type} {vendor_name} {chart_name} OWNERS file")
-            self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}',
-                                    f'HEAD:refs/heads/{base_branch}', '-f')
-
-    def check_index_yaml(self,base_branch, vendor, chart_name, chart_version, index_file="index.yaml", check_provider_type=False, failure_type='error'):
+                "-m", f"Add {vendor_type} {vendor_name} {chart_name} OWNERS file"
+            )
+            self.temp_repo.git.push(
+                f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}",
+                f"HEAD:refs/heads/{base_branch}",
+                "-f",
+            )
+
+    def check_index_yaml(
+        self,
+        base_branch,
+        vendor,
+        chart_name,
+        chart_version,
+        index_file="index.yaml",
+        check_provider_type=False,
+        failure_type="error",
+    ):
         old_branch = self.repo.active_branch.name
-        self.repo.git.fetch(f'https://github.com/{self.secrets.test_repo}.git',
-                            '{0}:{0}'.format(f'{base_branch}-gh-pages'), '-f')
+        self.repo.git.fetch(
+            f"https://github.com/{self.secrets.test_repo}.git",
+            "{0}:{0}".format(f"{base_branch}-gh-pages"),
+            "-f",
+        )
 
-        self.repo.git.checkout(f'{base_branch}-gh-pages')
+        self.repo.git.checkout(f"{base_branch}-gh-pages")
 
-        with open(index_file, 'r') as fd:
+        with open(index_file, "r") as fd:
             try:
                 index = yaml.safe_load(fd)
             except yaml.YAMLError as err:
-                if failure_type == 'error':
+                if failure_type == "error":
                     raise AssertionError(f"error parsing index.yaml: {err}")
                 else:
                     logging.warning(f"error parsing index.yaml: {err}")
@@ -168,37 +215,57 @@ def check_index_yaml(self,base_branch, vendor, chart_name, chart_version, index_
 
         if index:
             entry = f"{vendor}-{chart_name}"
-            if "entries" not in index or entry not in index['entries']:
-                if failure_type == 'error':
-                    raise AssertionError(f"{entry} not added in entries to {index_file} & Found index.yaml entries: {index['entries']}")
+            if "entries" not in index or entry not in index["entries"]:
+                if failure_type == "error":
+                    raise AssertionError(
+                        f"{entry} not added in entries to {index_file} & Found index.yaml entries: {index['entries']}"
+                    )
                 else:
                     logging.warning(f"{chart_version} not added to {index_file}")
-                    logging.warning(f"Index.yaml entry content: {index['entries'][entry]}")
+                    logging.warning(
+                        f"Index.yaml entry content: {index['entries'][entry]}"
+                    )
                     return False
 
-            version_list = [release['version'] for release in index['entries'][entry]]
+            version_list = [release["version"] for release in index["entries"][entry]]
             if chart_version not in version_list:
-                raise AssertionError(f"{chart_version} not added to {index_file} & Found index.yaml entry content: {index['entries'][entry]}")
-
-            #This check is applicable for charts submitted in redhat path when one of the chart-verifier check fails
-            #Check whether providerType annotations is community in index.yaml when vendor_type is redhat
-            if check_provider_type and self.secrets.vendor_type == 'redhat':
-                provider_type_in_index_yaml = index['entries'][entry][0]['annotations']['charts.openshift.io/providerType']
-                if provider_type_in_index_yaml != 'community':
-                    if failure_type == 'error':
-                        raise AssertionError(f"{provider_type_in_index_yaml} is not correct as providerType in index.yaml")
+                raise AssertionError(
+                    f"{chart_version} not added to {index_file} & Found index.yaml entry content: {index['entries'][entry]}"
+                )
+
+            # This check is applicable for charts submitted in redhat path when one of the chart-verifier check fails
+            # Check whether providerType annotations is community in index.yaml when vendor_type is redhat
+            if check_provider_type and self.secrets.vendor_type == "redhat":
+                provider_type_in_index_yaml = index["entries"][entry][0]["annotations"][
+                    "charts.openshift.io/providerType"
+                ]
+                if provider_type_in_index_yaml != "community":
+                    if failure_type == "error":
+                        raise AssertionError(
+                            f"{provider_type_in_index_yaml} is not correct as providerType in index.yaml"
+                        )
                     else:
-                        logging.warning(f"{provider_type_in_index_yaml} is not correct as providerType in index.yaml")
+                        logging.warning(
+                            f"{provider_type_in_index_yaml} is not correct as providerType in index.yaml"
+                        )
 
             logging.info("Index updated correctly, cleaning up local branch")
             self.repo.git.checkout(old_branch)
-            self.repo.git.branch('-D', f'{base_branch}-gh-pages')
+            self.repo.git.branch("-D", f"{base_branch}-gh-pages")
             return True
         else:
             return False
 
-    def check_release_result(self, vendor, chart_name, chart_version, chart_tgz, failure_type='error', release_type=Release_Type.CHART_ONLY):
-        expected_tag = f'{vendor}-{chart_name}-{chart_version}'
+    def check_release_result(
+        self,
+        vendor,
+        chart_name,
+        chart_version,
+        chart_tgz,
+        failure_type="error",
+        release_type=Release_Type.CHART_ONLY,
+    ):
+        expected_tag = f"{vendor}-{chart_name}-{chart_version}"
         try:
             release = get_release_by_tag(self.secrets, expected_tag)
             logging.info(f"Released '{expected_tag}' successfully")
@@ -207,27 +274,27 @@ def check_release_result(self, vendor, chart_name, chart_version, chart_tgz, fai
             if release_type == Release_Type.CHART_ONLY:
                 required_assets.append(chart_tgz)
             elif release_type == Release_Type.REPORT_ONLY:
-                required_assets.append('report.yaml')
+                required_assets.append("report.yaml")
             elif release_type == Release_Type.CHART_AND_REPORT:
-                required_assets.extend([chart_tgz, 'report.yaml'])
+                required_assets.extend([chart_tgz, "report.yaml"])
             elif release_type == Release_Type.REPORT_AND_KEY:
-                key_file = chart_name + '-' + chart_version + '.tgz' + '.key'
-                required_assets.extend(['report.yaml', key_file])
+                key_file = chart_name + "-" + chart_version + ".tgz" + ".key"
+                required_assets.extend(["report.yaml", key_file])
             elif release_type == Release_Type.CHART_PROV_AND_REPORT:
-                prov_file = chart_tgz + '.prov'
-                required_assets.extend([chart_tgz, 'report.yaml', prov_file])
+                prov_file = chart_tgz + ".prov"
+                required_assets.extend([chart_tgz, "report.yaml", prov_file])
             elif release_type == Release_Type.CHART_REPORT_PROV_AND_KEY:
-                key_file = chart_tgz + '.key'
-                prov_file = chart_tgz + '.prov'
-                required_assets.extend([chart_tgz, 'report.yaml', prov_file, key_file])
+                key_file = chart_tgz + ".key"
+                prov_file = chart_tgz + ".prov"
+                required_assets.extend([chart_tgz, "report.yaml", prov_file, key_file])
             else:
-                sys.exit('Trying to check wrong release type')
+                sys.exit("Trying to check wrong release type")
             logging.info(f"Check '{required_assets}' is in release assets")
-            release_id = release['id']
+            release_id = release["id"]
             check_release_assets(self.secrets, release_id, required_assets)
             return True
         except Exception as e:
-            if failure_type == 'error':
+            if failure_type == "error":
                 raise AssertionError(e)
             else:
                 logging.warning(e)
@@ -235,40 +302,58 @@ def check_release_result(self, vendor, chart_name, chart_version, chart_tgz, fai
         finally:
             logging.info(f"Delete release '{expected_tag}'")
             github_api(
-                'delete', f'repos/{self.secrets.test_repo}/releases/{release_id}', self.secrets.bot_token)
+                "delete",
+                f"repos/{self.secrets.test_repo}/releases/{release_id}",
+                self.secrets.bot_token,
+            )
 
             logging.info(f"Delete release tag '{expected_tag}'")
             github_api(
-                'delete', f'repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}', self.secrets.bot_token)
+                "delete",
+                f"repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}",
+                self.secrets.bot_token,
+            )
 
     # expect_result: a string representation of expected result, e.g. 'success'
-    def check_workflow_conclusion(self, pr_number, expect_result: str, failure_type='error'):
+    def check_workflow_conclusion(
+        self, pr_number, expect_result: str, failure_type="error"
+    ):
         try:
             # Check workflow conclusion
             run_id = get_run_id(self.secrets, pr_number)
             conclusion = get_run_result(self.secrets, run_id)
 
             if conclusion == expect_result:
-                logging.info(f"PR{pr_number} Workflow run was '{expect_result}' which is expected")
+                logging.info(
+                    f"PR{pr_number} Workflow run was '{expect_result}' which is expected"
+                )
             else:
-                if failure_type == 'warning':
-                    logging.warning(f"PR{pr_number if pr_number else self.secrets.pr_number} Workflow run was '{conclusion}' which is unexpected, run id: {run_id}")
+                if failure_type == "warning":
+                    logging.warning(
+                        f"PR{pr_number if pr_number else self.secrets.pr_number} Workflow run was '{conclusion}' which is unexpected, run id: {run_id}"
+                    )
                 else:
                     raise AssertionError(
-                        f"PR{pr_number if pr_number else self.secrets.pr_number} Workflow run was '{conclusion}' which is unexpected, run id: {run_id}")
-
+                        f"PR{pr_number if pr_number else self.secrets.pr_number} Workflow run was '{conclusion}' which is unexpected, run id: {run_id}"
+                    )
+
             return run_id, conclusion
         except Exception as e:
-            if failure_type == 'error':
+            if failure_type == "error":
                 raise AssertionError(e)
             else:
                 logging.warning(e)
                 return None, None
 
     # expect_merged: boolean representing whether the PR should be merged
-    def check_pull_request_result(self, pr_number, expect_merged: bool, failure_type='error'):
+    def check_pull_request_result(
+        self, pr_number, expect_merged: bool, failure_type="error"
+    ):
         # Check if PR merged
         r = github_api(
-            'get', f'repos/{self.secrets.test_repo}/pulls/{pr_number}/merge', self.secrets.bot_token)
+            "get",
+            f"repos/{self.secrets.test_repo}/pulls/{pr_number}/merge",
+            self.secrets.bot_token,
+        )
         logging.info(f"PR{pr_number} result status_code : {r.status_code}")
         if r.status_code == 204 and expect_merged:
             logging.info(f"PR{pr_number} merged sucessfully as expected")
@@ -277,22 +362,32 @@ def check_pull_request_result(self, pr_number, expect_merged: bool, failure_type
             logging.info(f"PR{pr_number} not merged, which is expected")
             return True
         elif r.status_code == 204 and not expect_merged:
-            if failure_type == 'error':
-                raise AssertionError(f"PR{pr_number} Expecting not merged but PR was merged")
+            if failure_type == "error":
+                raise AssertionError(
+                    f"PR{pr_number} Expecting not merged but PR was merged"
+                )
             else:
                 logging.warning(f"PR{pr_number} Expecting not merged but PR was merged")
             return False
         elif r.status_code == 404 and expect_merged:
-            if failure_type == 'error':
-                raise AssertionError(f"PR{pr_number} Expecting PR merged but PR was not merged")
+            if failure_type == "error":
+                raise AssertionError(
+                    f"PR{pr_number} Expecting PR merged but PR was not merged"
+                )
             else:
-                logging.warning(f"PR{pr_number} Expecting PR merged but PR was not merged")
+                logging.warning(
+                    f"PR{pr_number} Expecting PR merged but PR was not merged"
+                )
             return False
         else:
-            if failure_type == 'error':
-                raise AssertionError(f"PR{pr_number} Got unexpected status code from PR: {r.status_code}")
+            if failure_type == "error":
+                raise AssertionError(
+                    f"PR{pr_number} Got unexpected status code from PR: {r.status_code}"
+                )
            else:
-                logging.warning(f"PR{pr_number} Got unexpected status code from PR: {r.status_code}")
+                logging.warning(
+                    f"PR{pr_number} Got unexpected status code from PR: {r.status_code}"
+                )
             return False
 
@@ -301,47 +396,62 @@
     def cleanup_release(self, expected_tag):
         """
         Releases might be left behind if check_index_yam() ran before check_release_result() and fails the test.
         """
         r = github_api(
-            'get', f'repos/{self.secrets.test_repo}/releases', self.secrets.bot_token)
+            "get", f"repos/{self.secrets.test_repo}/releases", self.secrets.bot_token
+        )
         releases = json.loads(r.text)
         logging.debug(f"List of releases: {releases}")
 
         for release in releases:
-            if release['tag_name'] == expected_tag:
-                release_id = release['id']
+            if release["tag_name"] == expected_tag:
+                release_id = release["id"]
                 logging.info(f"Delete release '{expected_tag}'")
                 github_api(
-                    'delete', f'repos/{self.secrets.test_repo}/releases/{release_id}', self.secrets.bot_token)
+                    "delete",
+                    f"repos/{self.secrets.test_repo}/releases/{release_id}",
+                    self.secrets.bot_token,
+                )
 
                 logging.info(f"Delete release tag '{expected_tag}'")
                 github_api(
-                    'delete', f'repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}', self.secrets.bot_token)
-
+                    "delete",
+                    f"repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}",
+                    self.secrets.bot_token,
+                )
+
     def check_pull_request_labels(self, pr_number):
         r = github_api(
-            'get', f'repos/{self.secrets.test_repo}/issues/{pr_number}/labels', self.secrets.bot_token)
+            "get",
+            f"repos/{self.secrets.test_repo}/issues/{pr_number}/labels",
+            self.secrets.bot_token,
+        )
         labels = json.loads(r.text)
         authorized_request = False
         content_ok = False
         for label in labels:
             logging.info(f"PR{pr_number} found label {label['name']}")
-            if label['name'] == "authorized-request":
+            if label["name"] == "authorized-request":
                 authorized_request = True
-            if label['name'] == "content-ok":
+            if label["name"] == "content-ok":
                 content_ok = True
-
+
         if authorized_request and content_ok:
-            logging.info(f"PR{pr_number} authorized request and content-ok labels were found as expected")
+            logging.info(
+                f"PR{pr_number} authorized request and content-ok labels were found as expected"
+            )
             return True
         else:
-            raise AssertionError(f"PR{pr_number} authorized request and/or content-ok labels were not found as expected")
+            raise AssertionError(
+                f"PR{pr_number} authorized request and/or content-ok labels were not found as expected"
+            )
+
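Both cleanup_release and the finally block in check_release_result hit two endpoints per release, because deleting a GitHub release does not remove its git tag: the tag ref has to be deleted separately. A hedged sketch of that pair of calls (repo, token, release id, and tag name are placeholders):

import requests

repo = "example-org/example-repo"  # placeholder test repo
headers = {
    "Accept": "application/vnd.github.v3+json",
    "Authorization": "Bearer <bot-token>",  # placeholder token
}

# Delete the release object, then its tag ref, mirroring cleanup_release above.
requests.delete(f"https://api.github.com/repos/{repo}/releases/12345", headers=headers)
requests.delete(f"https://api.github.com/repos/{repo}/git/refs/tags/vendor-chart-0.1.0", headers=headers)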
 
 @dataclass
 class ChartCertificationE2ETestSingle(ChartCertificationE2ETest):
-    test_name: str = '' # Meaningful test name for this test, displayed in PR title
+    test_name: str = ""  # Meaningful test name for this test, displayed in PR title
     test_charts: list[Chart] = field(default_factory=list)
-    #test_report: str = ''
-    #chart_directory: str = ''
-    uuid: str = ''
-    head_sha: str = ''
+    # test_report: str = ''
+    # chart_directory: str = ''
+    uuid: str = ""
+    head_sha: str = ""
     secrets: E2ETestSecretOneShot = E2ETestSecretOneShot()
 
     def __post_init__(self) -> None:
@@ -353,38 +463,45 @@ def __post_init__(self) -> None:
         bot_name, bot_token = get_bot_name_and_token()
         test_repo = TEST_REPO
 
-        #Storing current branch to checkout after scenario execution
-        if os.environ.get('LOCAL_RUN'):
+        # Storing current branch to checkout after scenario execution
+        if os.environ.get("LOCAL_RUN"):
             self.secrets.active_branch = self.repo.active_branch.name
             logging.debug(f"Active branch name : {self.secrets.active_branch}")
 
         # Create a new branch locally from detached HEAD
-        self.head_sha = self.repo.git.rev_parse('--short', 'HEAD')
-        unique_branch = f'{self.head_sha}-{self.uuid}'
+        self.head_sha = self.repo.git.rev_parse("--short", "HEAD")
+        unique_branch = f"{self.head_sha}-{self.uuid}"
         logging.debug(f"Unique branch name : {unique_branch}")
         local_branches = [h.name for h in self.repo.heads]
         logging.debug(f"Local branch names : {local_branches}")
         if unique_branch not in local_branches:
-            self.repo.git.checkout('-b', f'{unique_branch}')
+            self.repo.git.checkout("-b", f"{unique_branch}")
 
         current_branch = self.repo.active_branch.name
         logging.debug(f"Current active branch name : {current_branch}")
-
-        r = github_api(
-            'get', f'repos/{test_repo}/branches', bot_token)
+
+        r = github_api("get", f"repos/{test_repo}/branches", bot_token)
         branches = json.loads(r.text)
-        branch_names = [branch['name'] for branch in branches]
+        branch_names = [branch["name"] for branch in branches]
         logging.debug(f"Remote test repo branch names : {branch_names}")
         if current_branch not in branch_names:
             logging.info(
-                f"{test_repo}:{current_branch} does not exists, creating with local branch")
-            self.repo.git.push(f'https://x-access-token:{bot_token}@github.com/{test_repo}',
-                               f'HEAD:refs/heads/{current_branch}', '-f')
-
-        pretty_test_name = self.test_name.strip().lower().replace(' ', '-')
-        base_branch = f'{self.uuid}-{pretty_test_name}-{current_branch}' if pretty_test_name else f'{self.uuid}-test-{current_branch}'
+                f"{test_repo}:{current_branch} does not exist, creating with local branch"
+            )
+            self.repo.git.push(
+                f"https://x-access-token:{bot_token}@github.com/{test_repo}",
+                f"HEAD:refs/heads/{current_branch}",
+                "-f",
+            )
+
+        pretty_test_name = self.test_name.strip().lower().replace(" ", "-")
+        base_branch = (
+            f"{self.uuid}-{pretty_test_name}-{current_branch}"
+            if pretty_test_name
+            else f"{self.uuid}-test-{current_branch}"
+        )
         logging.debug(f"Base branch name : {base_branch}")
-        pr_branch = base_branch + '-pr-branch'
+        pr_branch = base_branch + "-pr-branch"
 
         self.secrets.test_repo = test_repo
         self.secrets.bot_name = bot_name
@@ -395,86 +512,142 @@ def __post_init__(self) -> None:
         self.secrets.index_file = "index.yaml"
         self.secrets.provider_delivery = False
 
-
-    def cleanup (self):
+    def cleanup(self):
         # Cleanup releases and release tags
         self.cleanup_release()
         # Teardown step to cleanup branches
         if self.temp_dir is not None:
             self.temp_dir.cleanup()
-        self.repo.git.worktree('prune')
+        self.repo.git.worktree("prune")
 
-        current_branch = f'{self.head_sha}-{self.uuid}'
+        current_branch = f"{self.head_sha}-{self.uuid}"
         logging.info(f"Delete remote '{current_branch}' branch")
         github_api(
-            'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{current_branch}', self.secrets.bot_token)
+            "delete",
+            f"repos/{self.secrets.test_repo}/git/refs/heads/{current_branch}",
+            self.secrets.bot_token,
+        )
 
         logging.info(f"Delete '{self.secrets.test_repo}:{self.secrets.base_branch}'")
         github_api(
-            'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}', self.secrets.bot_token)
+            "delete",
+            f"repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}",
+            self.secrets.bot_token,
+        )
 
-        logging.info(f"Delete '{self.secrets.test_repo}:{self.secrets.base_branch}-gh-pages'")
+        logging.info(
+            f"Delete '{self.secrets.test_repo}:{self.secrets.base_branch}-gh-pages'"
+        )
         github_api(
-            'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}-gh-pages', self.secrets.bot_token)
+            "delete",
+            f"repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}-gh-pages",
+            self.secrets.bot_token,
+        )
 
         logging.info(f"Delete '{self.secrets.test_repo}:{self.secrets.pr_branch}'")
         github_api(
-            'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.pr_branch}', self.secrets.bot_token)
+            "delete",
+            f"repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.pr_branch}",
+            self.secrets.bot_token,
+        )
 
         logging.info(f"Delete local '{self.secrets.base_branch}'")
         try:
-            self.repo.git.branch('-D', self.secrets.base_branch)
+            self.repo.git.branch("-D", self.secrets.base_branch)
         except git.exc.GitCommandError:
             logging.info(f"Local '{self.secrets.base_branch}' does not exist")
 
         logging.info(f"Delete local '{current_branch}'")
         try:
-            if os.environ.get('LOCAL_RUN'):
-                self.repo.git.checkout(f'{self.secrets.active_branch}')
-            self.repo.git.branch('-D', current_branch)
+            if os.environ.get("LOCAL_RUN"):
+                self.repo.git.checkout(f"{self.secrets.active_branch}")
+            self.repo.git.branch("-D", current_branch)
         except git.exc.GitCommandError:
             logging.info(f"Local '{current_branch}' does not exist")
-
+
     def update_bot_name(self, bot_name):
         logging.debug(f"Updating bot name: {bot_name}")
         self.secrets.bot_name = bot_name
-
+
     def update_bad_version(self, bad_version):
         logging.debug(f"Updating bad version: {bad_version}")
         self.secrets.bad_version = bad_version
-
+
     def update_provided_delivery(self, value):
         if value == "true":
-            self.secrets.provider_delivery=True
+            self.secrets.provider_delivery = True
        else:
-            self.secrets.provider_delivery=False
+            self.secrets.provider_delivery = False
 
-    def update_test_charts(self, test_charts, new_chart_version=''):
+    def update_test_charts(self, test_charts, new_chart_version=""):
         logging.debug(f"Updating test charts: {test_charts}")
         for chart in test_charts:
             if chart[0] == Chart_Type.SRC or chart[0] == Chart_Type.TAR:
-                if new_chart_version == '':
-                    chart_name, chart_version = get_name_and_version_from_chart_tar(chart[1])
-                    test_chart = Chart(chart_name=chart_name, chart_version=chart_version, chart_type=chart[0], chart_file_path=chart[1])
+                if new_chart_version == "":
+                    chart_name, chart_version = get_name_and_version_from_chart_tar(
+                        chart[1]
+                    )
+                    test_chart = Chart(
+                        chart_name=chart_name,
+                        chart_version=chart_version,
+                        chart_type=chart[0],
+                        chart_file_path=chart[1],
+                    )
                 else:
                     chart_name, _ = get_name_and_version_from_chart_tar(chart[1])
-                    test_chart = Chart(chart_name=chart_name, chart_version=new_chart_version, chart_type=chart[0], chart_file_path=chart[1])
+                    test_chart = Chart(
+                        chart_name=chart_name,
+                        chart_version=new_chart_version,
+                        chart_type=chart[0],
+                        chart_file_path=chart[1],
+                    )
             elif chart[0] == Chart_Type.REPORT:
-                if new_chart_version == '':
-                    chart_name, chart_version = get_name_and_version_from_report(chart[1])
-                    test_chart = Chart(chart_name=chart_name, chart_version=chart_version, chart_type=chart[0], report_file_path=chart[1])
+                if new_chart_version == "":
+                    chart_name, chart_version = get_name_and_version_from_report(
+                        chart[1]
+                    )
+                    test_chart = Chart(
+                        chart_name=chart_name,
+                        chart_version=chart_version,
+                        chart_type=chart[0],
+                        report_file_path=chart[1],
+                    )
                 else:
                     chart_name, _ = get_name_and_version_from_report(chart[1])
-                    test_chart = Chart(chart_name=chart_name, chart_version=new_chart_version, chart_type=chart[0], report_file_path=chart[1])
-            elif chart[0] == Chart_Type.SRC_AND_REPORT or chart[0] == Chart_Type.TAR_AND_REPORT:
-                if new_chart_version == '':
-                    chart_name, chart_version = get_name_and_version_from_report(chart[2])
-                    test_chart = Chart(chart_name=chart_name, chart_version=chart_version, chart_type=chart[0], chart_file_path=chart[1], report_file_path=chart[2])
+                    test_chart = Chart(
+                        chart_name=chart_name,
+                        chart_version=new_chart_version,
+                        chart_type=chart[0],
+                        report_file_path=chart[1],
+                    )
+            elif (
+                chart[0] == Chart_Type.SRC_AND_REPORT
+                or chart[0] == Chart_Type.TAR_AND_REPORT
+            ):
+                if new_chart_version == "":
+                    chart_name, chart_version = get_name_and_version_from_report(
+                        chart[2]
+                    )
+                    test_chart = Chart(
+                        chart_name=chart_name,
+                        chart_version=chart_version,
+                        chart_type=chart[0],
+                        chart_file_path=chart[1],
+                        report_file_path=chart[2],
+                    )
                 else:
                     chart_name, _ = get_name_and_version_from_report(chart[2])
-                    test_chart = Chart(chart_name=chart_name, chart_version=new_chart_version, chart_type=chart[0], chart_file_path=chart[1], report_file_path=chart[2])
+                    test_chart = Chart(
+                        chart_name=chart_name,
+                        chart_version=new_chart_version,
+                        chart_type=chart[0],
+                        chart_file_path=chart[1],
+                        report_file_path=chart[2],
+                    )
             else:
-                raise AssertionError(f"Chart_Type: {chart[0]} is not correct or yet to be handled")
+                raise AssertionError(
+                    f"Chart_Type: {chart[0]} is not correct or yet to be handled"
+                )
 
             test_chart.update_chart_directory(self.secrets)
             self.test_charts.append(test_chart)
@@ -501,138 +674,214 @@ def setup_git_context(self):
         super().setup_git_context(self.repo)
 
     def setup_gh_pages_branch(self):
-        self.create_test_gh_pages_branch(self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token)
+        self.create_test_gh_pages_branch(
+            self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token
+        )
 
     def setup_temp_dir(self):
-        self.temp_dir = TemporaryDirectory(prefix='tci-')
+        self.temp_dir = TemporaryDirectory(prefix="tci-")
         with SetDirectory(Path(self.temp_dir.name)):
             # Make PR's from a temporary directory
-            logging.info(f'Worktree directory: {self.temp_dir.name}')
-            self.repo.git.worktree('add', '--detach', self.temp_dir.name, f'HEAD')
+            logging.info(f"Worktree directory: {self.temp_dir.name}")
+            self.repo.git.worktree("add", "--detach", self.temp_dir.name, "HEAD")
 
             self.temp_repo = git.Repo(self.temp_dir.name)
-            self.set_git_username_email(self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL)
-            self.temp_repo.git.checkout('-b', self.secrets.base_branch)
+            self.set_git_username_email(
+                self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL
+            )
+            self.temp_repo.git.checkout("-b", self.secrets.base_branch)
             for chart in self.test_charts:
-                pathlib.Path(
-                    f'{chart.chart_directory}/{chart.chart_version}').mkdir(parents=True, exist_ok=True)
-
-                self.remove_chart(chart.chart_directory, chart.chart_version, self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token)
-                self.remove_owners_file(chart.chart_directory, self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token)
+                pathlib.Path(f"{chart.chart_directory}/{chart.chart_version}").mkdir(
+                    parents=True, exist_ok=True
+                )
+
+                self.remove_chart(
+                    chart.chart_directory,
+                    chart.chart_version,
+                    self.secrets.test_repo,
+                    self.secrets.base_branch,
+                    self.secrets.bot_token,
+                )
+                self.remove_owners_file(
+                    chart.chart_directory,
+                    self.secrets.test_repo,
+                    self.secrets.base_branch,
+                    self.secrets.bot_token,
+                )
 
     def update_chart_version_in_chart_yaml(self, new_version):
         with SetDirectory(Path(self.temp_dir.name)):
             for chart in self.test_charts:
-                path = f'{chart.chart_directory}/{chart.chart_version}/src/Chart.yaml'
-                with open(path, 'r') as fd:
+                path = f"{chart.chart_directory}/{chart.chart_version}/src/Chart.yaml"
+                with open(path, "r") as fd:
                     try:
                         chart = yaml.safe_load(fd)
                     except yaml.YAMLError as err:
                         raise AssertionError(f"error parsing '{path}': {err}")
-                current_version = chart['version']
+                current_version = chart["version"]
 
                 if current_version != new_version:
-                    chart['version'] = new_version
+                    chart["version"] = new_version
                     try:
-                        with open(path, 'w') as fd:
+                        with open(path, "w") as fd:
                             fd.write(yaml.dump(chart))
                     except Exception as e:
                         raise AssertionError("Failed to update version in yaml file")
-
+
     def remove_readme_file(self):
         with SetDirectory(Path(self.temp_dir.name)):
             for chart in self.test_charts:
-                path = f'{chart.chart_directory}/{chart.chart_version}/src/README.md'
+                path = f"{chart.chart_directory}/{chart.chart_version}/src/README.md"
                 try:
                     os.remove(path)
                 except Exception as e:
                     raise AssertionError(f"Failed to remove readme file : {e}")
 
     def process_owners_file(self, public_key_file=None):
-        super().create_and_push_owners_file(self.test_charts[0].chart_directory, self.secrets.base_branch, self.secrets.vendor, self.secrets.vendor_type, self.test_charts[0].chart_name, self.secrets.provider_delivery, public_key_file)
+        super().create_and_push_owners_file(
+            self.test_charts[0].chart_directory,
+            self.secrets.base_branch,
+            self.secrets.vendor,
+            self.secrets.vendor_type,
+            self.test_charts[0].chart_name,
+            self.secrets.provider_delivery,
+            public_key_file,
+        )
 
     def process_charts(self, include_prov_file=False):
         with SetDirectory(Path(self.temp_dir.name)):
             for chart in self.test_charts:
-                if chart.chart_type == Chart_Type.TAR or chart.chart_type == Chart_Type.TAR_AND_REPORT:
+                if (
+                    chart.chart_type == Chart_Type.TAR
+                    or chart.chart_type == Chart_Type.TAR_AND_REPORT
+                ):
                     # Copy the chart tar into temporary directory for PR submission
-                    chart_tar = chart.chart_file_path.split('/')[-1]
-                    shutil.copyfile(f'{self.old_cwd}/{chart.chart_file_path}',
-                                    f'{chart.chart_directory}/{chart.chart_version}/{chart_tar}')
-                    if include_prov_file == True:
-                        prov_file_dir = '/'.join(chart.chart_file_path.split('/')[:-1])
-                        prov_file_name = chart_tar + '.prov'
-                        logging.debug(f'PROV FILE DIR: {prov_file_dir}')
-                        logging.debug(f'PROV FILE NAME: {prov_file_name}')
-                        shutil.copyfile(f'{self.old_cwd}/{prov_file_dir}/{prov_file_name}',
-                                        f'{chart.chart_directory}/{chart.chart_version}/{prov_file_name}')
-                elif chart.chart_type == Chart_Type.SRC or chart.chart_type == Chart_Type.SRC_AND_REPORT:
+                    chart_tar = chart.chart_file_path.split("/")[-1]
+                    shutil.copyfile(
+                        f"{self.old_cwd}/{chart.chart_file_path}",
+                        f"{chart.chart_directory}/{chart.chart_version}/{chart_tar}",
+                    )
+                    if include_prov_file is True:
+                        prov_file_dir = "/".join(chart.chart_file_path.split("/")[:-1])
+                        prov_file_name = chart_tar + ".prov"
+                        logging.debug(f"PROV FILE DIR: {prov_file_dir}")
+                        logging.debug(f"PROV FILE NAME: {prov_file_name}")
+                        shutil.copyfile(
+                            f"{self.old_cwd}/{prov_file_dir}/{prov_file_name}",
+                            f"{chart.chart_directory}/{chart.chart_version}/{prov_file_name}",
+                        )
+                elif (
+                    chart.chart_type == Chart_Type.SRC
+                    or chart.chart_type == Chart_Type.SRC_AND_REPORT
+                ):
                     # Unzip files into temporary directory for PR submission
                     logging.debug(f"CHART SRC FILE PATH: {chart.chart_file_path}")
-                    extract_chart_tgz(chart.chart_file_path, f'{chart.chart_directory}/{chart.chart_version}', chart.chart_name, logging)
+                    extract_chart_tgz(
+                        chart.chart_file_path,
+                        f"{chart.chart_directory}/{chart.chart_version}",
+                        chart.chart_name,
+                        logging,
+                    )
                 elif chart.chart_type == Chart_Type.REPORT:
                     logging.debug("Skip adding chart since chart_type is report")
                 else:
-                    raise AssertionError(f"Yet To be implemented for chart_type {chart.chart_type}")
+                    raise AssertionError(
+                        f"Yet To be implemented for chart_type {chart.chart_type}"
+                    )
 
     def process_report(self):
-
         with SetDirectory(Path(self.temp_dir.name)):
             # Copy report to temporary location and push to test_repo:pr_branch
             logging.info(
-                f"Push report to '{self.secrets.test_repo}:{self.secrets.pr_branch}'")
+                f"Push report to '{self.secrets.test_repo}:{self.secrets.pr_branch}'"
+            )
             for chart in self.test_charts:
-                if chart.chart_type == Chart_Type.REPORT or chart.chart_type == Chart_Type.SRC_AND_REPORT or chart.chart_type == Chart_Type.TAR_AND_REPORT:
-                    if chart.report_file_path.endswith('json'):
+                if (
+                    chart.chart_type == Chart_Type.REPORT
+                    or chart.chart_type == Chart_Type.SRC_AND_REPORT
+                    or chart.chart_type == Chart_Type.TAR_AND_REPORT
+                ):
+                    if chart.report_file_path.endswith("json"):
                         logging.debug("Report type is json")
-                        report_path = f'{chart.chart_directory}/{chart.chart_version}/' + chart.report_file_path.split('/')[-1]
-                        shutil.copyfile(f'{chart.report_file_path}', f'{report_path}')
-                    elif chart.report_file_path.endswith('yaml'):
+                        report_path = (
+                            f"{chart.chart_directory}/{chart.chart_version}/"
+                            + chart.report_file_path.split("/")[-1]
+                        )
+                        shutil.copyfile(f"{chart.report_file_path}", f"{report_path}")
+                    elif chart.report_file_path.endswith("yaml"):
                         logging.debug("Report type is yaml")
-                        report_path = f'{chart.chart_directory}/{chart.chart_version}/' + chart.report_file_path.split('/')[-1]
-                        shutil.copyfile(f'{chart.report_file_path}', f'{report_path}')
+                        report_path = (
+                            f"{chart.chart_directory}/{chart.chart_version}/"
+                            + chart.report_file_path.split("/")[-1]
+                        )
+                        shutil.copyfile(f"{chart.report_file_path}", f"{report_path}")
                     else:
                         raise AssertionError("Unknown report type")
 
             self.temp_repo.git.add(report_path)
             self.temp_repo.git.commit(
-                '-m', f"Add {self.secrets.vendor} {self.test_charts} report")
-            self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}',
-                                    f'HEAD:refs/heads/{self.secrets.pr_branch}', '-f')
+                "-m", f"Add {self.secrets.vendor} {self.test_charts} report"
+            )
+            self.temp_repo.git.push(
+                f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}",
+                f"HEAD:refs/heads/{self.secrets.pr_branch}",
+                "-f",
+            )
 
     def add_non_chart_related_file(self):
         with SetDirectory(Path(self.temp_dir.name)):
             for chart in self.test_charts:
-                path = f'{chart.chart_directory}/Notes.txt'
-                with open(path, 'w') as fd:
+                path = f"{chart.chart_directory}/Notes.txt"
+                with open(path, "w") as fd:
                     fd.write("This is a test file")
 
     def push_charts(self, add_non_chart_file=False):
         # Push chart to test_repo:pr_branch
         for chart in self.test_charts:
-            if chart.chart_type == Chart_Type.TAR or chart.chart_type == Chart_Type.TAR_AND_REPORT:
-                chart_tar = chart.chart_file_path.split('/')[-1]
-                self.temp_repo.git.add(f'{chart.chart_directory}/{chart.chart_version}/')
-            elif chart.chart_type == Chart_Type.SRC or chart.chart_type == Chart_Type.SRC_AND_REPORT:
+            if (
+                chart.chart_type == Chart_Type.TAR
+                or chart.chart_type == Chart_Type.TAR_AND_REPORT
+            ):
+                chart_tar = chart.chart_file_path.split("/")[-1]
+                self.temp_repo.git.add(
+                    f"{chart.chart_directory}/{chart.chart_version}/"
+                )
+            elif (
+                chart.chart_type == Chart_Type.SRC
+                or chart.chart_type == Chart_Type.SRC_AND_REPORT
+            ):
                 if add_non_chart_file:
-                    self.temp_repo.git.add(f'{chart.chart_directory}/')
+                    self.temp_repo.git.add(f"{chart.chart_directory}/")
                 else:
-                    self.temp_repo.git.add(f'{chart.chart_directory}/{chart.chart_version}/src')
+                    self.temp_repo.git.add(
+                        f"{chart.chart_directory}/{chart.chart_version}/src"
+                    )
             elif chart.chart_type == Chart_Type.REPORT:
                 logging.debug("Skip adding chart since chart_type is report")
             else:
-                raise AssertionError(f"YTD: chart_type {chart.chart_type} is yet to be supported")
-
+                raise AssertionError(
+                    f"YTD: chart_type {chart.chart_type} is yet to be supported"
+                )
+
         self.temp_repo.git.commit(
-            '-m', f"Adding {self.secrets.vendor} {self.test_charts} charts")
+            "-m", f"Adding {self.secrets.vendor} {self.test_charts} charts"
+        )
 
-        self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}',
-                                f'HEAD:refs/heads/{self.secrets.pr_branch}', '-f')
+        self.temp_repo.git.push(
+            f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}",
+            f"HEAD:refs/heads/{self.secrets.pr_branch}",
+            "-f",
+        )
 
     def send_pull_request(self):
-        self.secrets.pr_number = super().send_pull_request(self.secrets.test_repo, self.secrets.base_branch, self.secrets.pr_branch, self.secrets.bot_token)
+        self.secrets.pr_number = super().send_pull_request(
+            self.secrets.test_repo,
+            self.secrets.base_branch,
+            self.secrets.pr_branch,
+            self.secrets.bot_token,
+        )
         logging.info(f"[INFO] PR number: {self.secrets.pr_number}")
 
     # expect_result: a string representation of expected result, e.g.
'success' @@ -649,34 +898,55 @@ def check_pull_request_labels(self): def check_pull_request_comments(self, expect_message: str): r = github_api( - 'get', f'repos/{self.secrets.test_repo}/issues/{self.secrets.pr_number}/comments', self.secrets.bot_token) - logging.debug(f'STATUS_CODE: {r.status_code}') + "get", + f"repos/{self.secrets.test_repo}/issues/{self.secrets.pr_number}/comments", + self.secrets.bot_token, + ) + logging.debug(f"STATUS_CODE: {r.status_code}") response = json.loads(r.text) logging.debug(f"CHECK PULL_REQUEST COMMENT RESPONSE: {response}") if len(response) == 0: raise AssertionError(f"No comment found in the PR {self.secrets.pr_number}") - complete_comment = response[0]['body'] + complete_comment = response[0]["body"] if expect_message in complete_comment: logging.info("Found the expected comment in the PR") else: - raise AssertionError(f"Was expecting '{expect_message}' in the comment {complete_comment}") + raise AssertionError( + f"Was expecting '{expect_message}' in the comment {complete_comment}" + ) def check_index_yaml(self, check_provider_type=False): for chart in self.test_charts: - super().check_index_yaml(self.secrets.base_branch, self.secrets.vendor, chart.chart_name, chart.chart_version, self.secrets.index_file, check_provider_type) + super().check_index_yaml( + self.secrets.base_branch, + self.secrets.vendor, + chart.chart_name, + chart.chart_version, + self.secrets.index_file, + check_provider_type, + ) def check_release_result(self, release_type): for chart in self.test_charts: - chart_tgz = chart.chart_file_path.split('/')[-1] - super().check_release_result(self.secrets.vendor, chart.chart_name, chart.chart_version, chart_tgz, release_type=release_type) + chart_tgz = chart.chart_file_path.split("/")[-1] + super().check_release_result( + self.secrets.vendor, + chart.chart_name, + chart.chart_version, + chart_tgz, + release_type=release_type, + ) def cleanup_release(self): for chart in self.test_charts: - expected_tag = f'{self.secrets.vendor}-{chart.chart_name}-{chart.chart_version}' + expected_tag = ( + f"{self.secrets.vendor}-{chart.chart_name}-{chart.chart_version}" + ) super().cleanup_release(expected_tag) + @dataclass class ChartCertificationE2ETestMultiple(ChartCertificationE2ETest): secrets: E2ETestSecretRecursive = E2ETestSecretRecursive() @@ -693,15 +963,18 @@ def __post_init__(self) -> None: pr_branches = [] pr_base_branch = self.repo.active_branch.name - r = github_api( - 'get', f'repos/{test_repo}/branches', bot_token) + r = github_api("get", f"repos/{test_repo}/branches", bot_token) branches = json.loads(r.text) - branch_names = [branch['name'] for branch in branches] + branch_names = [branch["name"] for branch in branches] if pr_base_branch not in branch_names: logging.info( - f"{test_repo}:{pr_base_branch} does not exists, creating with local branch") - self.repo.git.push(f'https://x-access-token:{bot_token}@github.com/{test_repo}', - f'HEAD:refs/heads/{pr_base_branch}', '-f') + f"{test_repo}:{pr_base_branch} does not exist, creating with local branch" + ) + self.repo.git.push( + f"https://x-access-token:{bot_token}@github.com/{test_repo}", + f"HEAD:refs/heads/{pr_base_branch}", + "-f", + ) self.secrets = E2ETestSecretRecursive() self.secrets.software_name = software_name @@ -718,204 +991,338 @@ def __post_init__(self) -> None: self.secrets.owners_file_content = self.owners_file_content self.secrets.release_tags = list() - def cleanup (self): + def cleanup(self): # Teardown step to cleanup releases and branches for release_tag in
self.secrets.release_tags: self.cleanup_release(release_tag) - self.repo.git.worktree('prune') + self.repo.git.worktree("prune") for base_branch in self.secrets.base_branches: logging.info(f"Delete '{self.secrets.test_repo}:{base_branch}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}", + self.secrets.bot_token, + ) logging.info(f"Delete '{self.secrets.test_repo}:{base_branch}-gh-pages'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}-gh-pages', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}-gh-pages", + self.secrets.bot_token, + ) logging.info(f"Delete local '{base_branch}'") try: - self.repo.git.branch('-D', base_branch) + self.repo.git.branch("-D", base_branch) except git.exc.GitCommandError: logging.info(f"Local '{base_branch}' does not exist") for pr_branch in self.secrets.pr_branches: logging.info(f"Delete '{self.secrets.test_repo}:{pr_branch}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{pr_branch}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{pr_branch}", + self.secrets.bot_token, + ) try: logging.info("Delete local 'tmp' branch") - self.temp_repo.git.branch('-D', 'tmp') + self.temp_repo.git.branch("-D", "tmp") except git.exc.GitCommandError: - logging.info(f"Local 'tmp' branch does not exist") + logging.info("Local 'tmp' branch does not exist") def setup_temp_dir(self): - self.temp_dir = TemporaryDirectory(prefix='tci-') + self.temp_dir = TemporaryDirectory(prefix="tci-") with SetDirectory(Path(self.temp_dir.name)): # Make PR's from a temporary directory - logging.info(f'Worktree directory: {self.temp_dir.name}') - self.repo.git.worktree('add', '--detach', self.temp_dir.name, f'HEAD') + logging.info(f"Worktree directory: {self.temp_dir.name}") + self.repo.git.worktree("add", "--detach", self.temp_dir.name, "HEAD") self.temp_repo = git.Repo(self.temp_dir.name) # Run submission flow test with charts in PROD_REPO:PROD_BRANCH - self.set_git_username_email(self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL) - self.temp_repo.git.checkout(PROD_BRANCH, 'charts') - self.temp_repo.git.restore('--staged', 'charts') + self.set_git_username_email( + self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL + ) + self.temp_repo.git.checkout(PROD_BRANCH, "charts") + self.temp_repo.git.restore("--staged", "charts") self.secrets.submitted_charts = get_all_charts( - 'charts', self.secrets.vendor_type) + "charts", self.secrets.vendor_type + ) logging.info( - f"Found charts for {self.secrets.vendor_type}: {self.secrets.submitted_charts}") - self.temp_repo.git.checkout('-b', 'tmp') + f"Found charts for {self.secrets.vendor_type}: {self.secrets.submitted_charts}" + ) + self.temp_repo.git.checkout("-b", "tmp") def get_owner_ids(self, chart_directory, owners_table): - - with open(f'{chart_directory}/OWNERS', 'r') as fd: + with open(f"{chart_directory}/OWNERS", "r") as fd: try: owners = yaml.safe_load(fd) # Pick owner ids for notification owners_table[chart_directory] = [ - owner.get('githubUsername', '') for owner in owners['users']] + owner.get("githubUsername", "") for owner in owners["users"] + ] except yaml.YAMLError as err: - logging.warning( - f"Error parsing OWNERS of {chart_directory}: {err}") - - def push_chart(self, chart_directory, chart_name, chart_version, vendor_name, 
vendor_type, pr_branch): + logging.warning(f"Error parsing OWNERS of {chart_directory}: {err}") + + def push_chart( + self, + chart_directory, + chart_name, + chart_version, + vendor_name, + vendor_type, + pr_branch, + ): # Push chart files to test_repo:pr_branch - self.temp_repo.git.add(f'{chart_directory}/{chart_version}') + self.temp_repo.git.add(f"{chart_directory}/{chart_version}") self.temp_repo.git.commit( - '-m', f"Add {vendor_type} {vendor_name} {chart_name} {chart_version} chart files") - self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}', - f'HEAD:refs/heads/{pr_branch}', '-f') - - def report_failure(self,chart,chart_owners,failure_type,pr_html_url=None,run_html_url=None): - - os.environ['GITHUB_REPO'] = PROD_REPO.split('/')[1] - os.environ['GITHUB_AUTH_TOKEN'] = self.secrets.bot_token + "-m", + f"Add {vendor_type} {vendor_name} {chart_name} {chart_version} chart files", + ) + self.temp_repo.git.push( + f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}", + f"HEAD:refs/heads/{pr_branch}", + "-f", + ) + + def report_failure( + self, chart, chart_owners, failure_type, pr_html_url=None, run_html_url=None + ): + os.environ["GITHUB_REPO"] = PROD_REPO.split("/")[1] + os.environ["GITHUB_AUTH_TOKEN"] = self.secrets.bot_token if not self.secrets.dry_run: - os.environ['GITHUB_REPO'] = PROD_REPO.split('/')[1] - os.environ['GITHUB_AUTH_TOKEN'] = self.secrets.bot_token - os.environ['GITHUB_ORGANIZATION'] = PROD_REPO.split('/')[0] - logging.info(f"Send notification to '{self.secrets.notify_id}' about verification result of '{chart}'") - create_verification_issue(chart, chart_owners, failure_type,self.secrets.notify_id, pr_html_url, run_html_url, self.secrets.software_name, - self.secrets.software_version, self.secrets.bot_token, self.secrets.dry_run) + os.environ["GITHUB_REPO"] = PROD_REPO.split("/")[1] + os.environ["GITHUB_AUTH_TOKEN"] = self.secrets.bot_token + os.environ["GITHUB_ORGANIZATION"] = PROD_REPO.split("/")[0] + logging.info( + f"Send notification to '{self.secrets.notify_id}' about verification result of '{chart}'" + ) + create_verification_issue( + chart, + chart_owners, + failure_type, + self.secrets.notify_id, + pr_html_url, + run_html_url, + self.secrets.software_name, + self.secrets.software_version, + self.secrets.bot_token, + self.secrets.dry_run, + ) else: - os.environ['GITHUB_ORGANIZATION'] = PROD_REPO.split('/')[0] - os.environ['GITHUB_REPO'] = "sandbox" - os.environ['GITHUB_AUTH_TOKEN'] = self.secrets.bot_token - logging.info(f"Send notification to '{self.secrets.notify_id}' about dry run verification result of '{chart}'") - create_verification_issue(chart, chart_owners, failure_type,self.secrets.notify_id, pr_html_url, run_html_url, self.secrets.software_name, - self.secrets.software_version, self.secrets.bot_token, self.secrets.dry_run) - logging.info(f"Dry Run - send sandbox notification to '{chart_owners}' about verification result of '{chart}'") - - - def check_single_chart_result(self, vendor_type, vendor_name, chart_name, chart_version, pr_number, owners_table): - base_branch = f'{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}' + os.environ["GITHUB_ORGANIZATION"] = PROD_REPO.split("/")[0] + os.environ["GITHUB_REPO"] = "sandbox" + os.environ["GITHUB_AUTH_TOKEN"] = self.secrets.bot_token + logging.info( + f"Send notification to '{self.secrets.notify_id}' about dry run 
verification result of '{chart}'" + ) + create_verification_issue( + chart, + chart_owners, + failure_type, + self.secrets.notify_id, + pr_html_url, + run_html_url, + self.secrets.software_name, + self.secrets.software_version, + self.secrets.bot_token, + self.secrets.dry_run, + ) + logging.info( + f"Dry Run - send sandbox notification to '{chart_owners}' about verification result of '{chart}'" + ) + + def check_single_chart_result( + self, + vendor_type, + vendor_name, + chart_name, + chart_version, + pr_number, + owners_table, + ): + base_branch = f"{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}" # Check workflow conclusion - chart = f'{vendor_name} {chart_name} {chart_version}' - run_id, conclusion = super().check_workflow_conclusion(pr_number, 'success', failure_type='warning') + chart = f"{vendor_name} {chart_name} {chart_version}" + run_id, conclusion = super().check_workflow_conclusion( + pr_number, "success", failure_type="warning" + ) if conclusion and run_id: - if conclusion != 'success': + if conclusion != "success": # Send notification to owner through GitHub issues r = github_api( - 'get', f'repos/{self.secrets.test_repo}/actions/runs/{run_id}', self.secrets.bot_token) + "get", + f"repos/{self.secrets.test_repo}/actions/runs/{run_id}", + self.secrets.bot_token, + ) run = r.json() - run_html_url = run['html_url'] + run_html_url = run["html_url"] - pr = get_pr(self.secrets,pr_number) + pr = get_pr(self.secrets, pr_number) pr_html_url = pr["html_url"] - chart_directory = f'charts/{vendor_type}/{vendor_name}/{chart_name}' + chart_directory = f"charts/{vendor_type}/{vendor_name}/{chart_name}" chart_owners = owners_table[chart_directory] - self.report_failure(chart,chart_owners,CHECKS_FAILED,pr_html_url,run_html_url) + self.report_failure( + chart, chart_owners, CHECKS_FAILED, pr_html_url, run_html_url + ) - logging.warning(f"PR{pr_number} workflow failed: {vendor_name}, {chart_name}, {chart_version}") + logging.warning( + f"PR{pr_number} workflow failed: {vendor_name}, {chart_name}, {chart_version}" + ) return else: - logging.info(f"PR{pr_number} workflow passed: {vendor_name}, {chart_name}, {chart_version}") + logging.info( + f"PR{pr_number} workflow passed: {vendor_name}, {chart_name}, {chart_version}" + ) else: - logging.warning(f"PR{pr_number} workflow did not complete: {vendor_name}, {chart_name}, {chart_version}") + logging.warning( + f"PR{pr_number} workflow did not complete: {vendor_name}, {chart_name}, {chart_version}" + ) return - # Check PRs are merged - if not super().check_pull_request_result(pr_number, True, failure_type='warning'): - logging.warning(f"PR{pr_number} pull request was not merged: {vendor_name}, {chart_name}, {chart_version}") + if not super().check_pull_request_result( + pr_number, True, failure_type="warning" + ): + logging.warning( + f"PR{pr_number} pull request was not merged: {vendor_name}, {chart_name}, {chart_version}" + ) return - logging.info(f"PR{pr_number} pull request was merged: {vendor_name}, {chart_name}, {chart_version}") + logging.info( + f"PR{pr_number} pull request was merged: {vendor_name}, {chart_name}, {chart_version}" + ) # Check index.yaml is updated - if not super().check_index_yaml(base_branch, vendor_name, chart_name, chart_version, check_provider_type=False, failure_type='warning'): - logging.warning(f"PR{pr_number} - Chart was not found in Index file: {vendor_name}, {chart_name}, {chart_version}") - 
logging.info(f"PR{pr_number} - Chart was found in Index file: {vendor_name}, {chart_name}, {chart_version}") + if not super().check_index_yaml( + base_branch, + vendor_name, + chart_name, + chart_version, + check_provider_type=False, + failure_type="warning", + ): + logging.warning( + f"PR{pr_number} - Chart was not found in Index file: {vendor_name}, {chart_name}, {chart_version}" + ) + logging.info( + f"PR{pr_number} - Chart was found in Index file: {vendor_name}, {chart_name}, {chart_version}" + ) # Check release is published - chart_tgz = f'{chart_name}-{chart_version}.tgz' - if not super().check_release_result(vendor_name, chart_name, chart_version, chart_tgz, failure_type='warning'): - logging.warning(f"PR{pr_number} - Release was not created: {vendor_name}, {chart_name}, {chart_version}") - logging.info(f"PR{pr_number} - Release was created: {vendor_name}, {chart_name}, {chart_version}") - - def process_single_chart(self, vendor_type, vendor_name, chart_name, chart_version, pr_number_list, owners_table): + chart_tgz = f"{chart_name}-{chart_version}.tgz" + if not super().check_release_result( + vendor_name, chart_name, chart_version, chart_tgz, failure_type="warning" + ): + logging.warning( + f"PR{pr_number} - Release was not created: {vendor_name}, {chart_name}, {chart_version}" + ) + logging.info( + f"PR{pr_number} - Release was created: {vendor_name}, {chart_name}, {chart_version}" + ) + + def process_single_chart( + self, + vendor_type, + vendor_name, + chart_name, + chart_version, + pr_number_list, + owners_table, + ): # Get SHA from 'pr_base_branch' branch - logging.info(f"Process chart: {vendor_type}/{vendor_name}/{chart_name}/{chart_version}") + logging.info( + f"Process chart: {vendor_type}/{vendor_name}/{chart_name}/{chart_version}" + ) r = github_api( - 'get', f'repos/{self.secrets.test_repo}/git/ref/heads/{self.secrets.pr_base_branch}', self.secrets.bot_token) + "get", + f"repos/{self.secrets.test_repo}/git/ref/heads/{self.secrets.pr_base_branch}", + self.secrets.bot_token, + ) j = json.loads(r.text) - pr_base_branch_sha = j['object']['sha'] + pr_base_branch_sha = j["object"]["sha"] - chart_directory = f'charts/{vendor_type}/{vendor_name}/{chart_name}' - base_branch = f'{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}' - base_branch = base_branch.replace(":","-") - pr_branch = f'{base_branch}-pr-branch' + chart_directory = f"charts/{vendor_type}/{vendor_name}/{chart_name}" + base_branch = f"{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}" + base_branch = base_branch.replace(":", "-") + pr_branch = f"{base_branch}-pr-branch" self.secrets.base_branches.append(base_branch) self.secrets.pr_branches.append(pr_branch) - self.temp_repo.git.checkout('tmp') - self.temp_repo.git.checkout('-b', base_branch) + self.temp_repo.git.checkout("tmp") + self.temp_repo.git.checkout("-b", base_branch) # Create test gh-pages branch for checking index.yaml - self.create_test_gh_pages_branch(self.secrets.test_repo, base_branch, self.secrets.bot_token) + self.create_test_gh_pages_branch( + self.secrets.test_repo, base_branch, self.secrets.bot_token + ) # Create a new base branch for testing current chart - logging.info( - f"Create {self.secrets.test_repo}:{base_branch} for testing") + logging.info(f"Create {self.secrets.test_repo}:{base_branch} for testing") r = github_api( - 'get', 
f'repos/{self.secrets.test_repo}/branches', self.secrets.bot_token) + "get", f"repos/{self.secrets.test_repo}/branches", self.secrets.bot_token + ) branches = json.loads(r.text) - branch_names = [branch['name'] for branch in branches] + branch_names = [branch["name"] for branch in branches] if base_branch in branch_names: - logging.warning( - f"{self.secrets.test_repo}:{base_branch} already exists") + logging.warning(f"{self.secrets.test_repo}:{base_branch} already exists") return - data = {'ref': f'refs/heads/{base_branch}', - 'sha': pr_base_branch_sha} + data = {"ref": f"refs/heads/{base_branch}", "sha": pr_base_branch_sha} r = github_api( - 'post', f'repos/{self.secrets.test_repo}/git/refs', self.secrets.bot_token, json=data) + "post", + f"repos/{self.secrets.test_repo}/git/refs", + self.secrets.bot_token, + json=data, + ) # Remove chart and owners file from git - self.remove_chart(chart_directory, chart_version, self.secrets.test_repo, base_branch, self.secrets.bot_token) - self.remove_owners_file(chart_directory, self.secrets.test_repo, base_branch, self.secrets.bot_token) + self.remove_chart( + chart_directory, + chart_version, + self.secrets.test_repo, + base_branch, + self.secrets.bot_token, + ) + self.remove_owners_file( + chart_directory, self.secrets.test_repo, base_branch, self.secrets.bot_token + ) # Get owners id for notifications self.get_owner_ids(chart_directory, owners_table) # Create and push test owners file - super().create_and_push_owners_file(chart_directory, base_branch, vendor_name, vendor_type, chart_name) + super().create_and_push_owners_file( + chart_directory, base_branch, vendor_name, vendor_type, chart_name + ) # Push test chart to pr_branch - self.push_chart(chart_directory, chart_name, chart_version, vendor_name, vendor_type, pr_branch) + self.push_chart( + chart_directory, + chart_name, + chart_version, + vendor_name, + vendor_type, + pr_branch, + ) # Create PR from pr_branch to base_branch logging.info("sleep for 5 seconds to avoid secondary api limit") time.sleep(5) - pr_number = super().send_pull_request(self.secrets.test_repo, base_branch, pr_branch, self.secrets.bot_token) - pr_number_list.append((vendor_type, vendor_name, chart_name, chart_version, pr_number)) - logging.info(f"PR{pr_number} created in {self.secrets.test_repo} into {base_branch} from {pr_branch}") + pr_number = super().send_pull_request( + self.secrets.test_repo, base_branch, pr_branch, self.secrets.bot_token + ) + pr_number_list.append( + (vendor_type, vendor_name, chart_name, chart_version, pr_number) + ) + logging.info( + f"PR{pr_number} created in {self.secrets.test_repo} into {base_branch} from {pr_branch}" + ) # Record expected release tags - self.secrets.release_tags.append(f'{vendor_name}-{chart_name}-{chart_version}') + self.secrets.release_tags.append(f"{vendor_name}-{chart_name}-{chart_version}") def process_all_charts(self): self.setup_git_context(self.repo) @@ -926,35 +1333,69 @@ def process_all_charts(self): skip_charts = list() - logging.info(f"Running tests for : {self.secrets.software_name} {self.secrets.software_version} :") + logging.info( + f"Running tests for : {self.secrets.software_name} {self.secrets.software_version} :" + ) # First look for charts in index.yaml to see if kubeVersion is good: if self.secrets.software_name == "OpenShift": logging.info("check index file for invalid kubeVersions") failed_charts = check_index_entries(self.secrets.software_version) if failed_charts: for chart in failed_charts: - providerDir = 
chart["providerType"].replace("partner","partners") - chart_directory = f'charts/{providerDir}/{chart["provider"]}/{chart["name"]}' - self.get_owner_ids(chart_directory,owners_table) + providerDir = chart["providerType"].replace("partner", "partners") + chart_directory = ( + f'charts/{providerDir}/{chart["provider"]}/{chart["name"]}' + ) + self.get_owner_ids(chart_directory, owners_table) chart_owners = owners_table[chart_directory] chart_id = f'{chart["provider"]} {chart["name"]} {chart["version"]}' - self.report_failure(chart_id,chart_owners,chart["message"],"","") + self.report_failure( + chart_id, chart_owners, chart["message"], "", "" + ) skip_charts.append(f'{chart["name"]}-{chart["version"]}') - # Process test charts and send PRs from temporary directory with SetDirectory(Path(self.temp_dir.name)): - for vendor_type, vendor_name, chart_name, chart_version in self.secrets.submitted_charts: - if f'{chart_name}-{chart_version}' in skip_charts: - logging.info(f"Skip already failed chart: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}") + for ( + vendor_type, + vendor_name, + chart_name, + chart_version, + ) in self.secrets.submitted_charts: + if f"{chart_name}-{chart_version}" in skip_charts: + logging.info( + f"Skip already failed chart: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}" + ) else: - logging.info(f"Process chart: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}") - self.process_single_chart(vendor_type, vendor_name, chart_name, chart_version, pr_number_list, owners_table) + logging.info( + f"Process chart: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}" + ) + self.process_single_chart( + vendor_type, + vendor_name, + chart_name, + chart_version, + pr_number_list, + owners_table, + ) logging.info("sleep for 5 seconds to avoid secondary api limit") time.sleep(5) - for vendor_type, vendor_name, chart_name, chart_version, pr_number in pr_number_list: - logging.info(f"PR{pr_number} Check result: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}") - self.check_single_chart_result(vendor_type, vendor_name, chart_name, chart_version, pr_number, owners_table) - - + for ( + vendor_type, + vendor_name, + chart_name, + chart_version, + pr_number, + ) in pr_number_list: + logging.info( + f"PR{pr_number} Check result: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}" + ) + self.check_single_chart_result( + vendor_type, + vendor_name, + chart_name, + chart_version, + pr_number, + owners_table, + ) diff --git a/tests/functional/behave_features/common/utils/env.py b/tests/functional/behave_features/common/utils/env.py index d92f1a42..494e9a4c 100644 --- a/tests/functional/behave_features/common/utils/env.py +++ b/tests/functional/behave_features/common/utils/env.py @@ -5,6 +5,7 @@ from common.utils.setttings import * + def get_bot_name_and_token(): bot_name = os.environ.get("BOT_NAME") logging.debug(f"Enviroment variable value BOT_NAME: {bot_name}") @@ -20,33 +21,36 @@ def get_bot_name_and_token(): raise Exception("BOT_NAME set but BOT_TOKEN not specified") return bot_name, bot_token + def get_dry_run(): # Accepts 'true' or 'false', depending on whether we want to notify # Don't notify on dry runs, default to True - dry_run = False if os.environ.get("DRY_RUN") == 'false' else True + dry_run = False if os.environ.get("DRY_RUN") == "false" else True # Don't notify if not triggerd on PROD_REPO and PROD_BRANCH if not dry_run: - triggered_branch = os.environ.get("GITHUB_REF").split('/')[-1] + triggered_branch = 
os.environ.get("GITHUB_REF").split("/")[-1] triggered_repo = os.environ.get("GITHUB_REPOSITORY") if triggered_repo != PROD_REPO or triggered_branch != PROD_BRANCH: dry_run = True return dry_run + def get_notify_id(): # Accepts comma separated Github IDs or empty strings to override people to tag in notifications notify_id = os.environ.get("NOTIFY_ID") if notify_id: - notify_id = [vt.strip() for vt in notify_id.split(',')] + notify_id = [vt.strip() for vt in notify_id.split(",")] else: - notify_id = ["dperaza","mmulholla"] + notify_id = ["dperaza", "mmulholla"] return notify_id + def get_software_name_version(): software_name = os.environ.get("SOFTWARE_NAME") if not software_name: raise Exception("SOFTWARE_NAME environment variable not defined") - software_version = os.environ.get("SOFTWARE_VERSION").strip('\"') + software_version = os.environ.get("SOFTWARE_VERSION").strip('"') if not software_version: raise Exception("SOFTWARE_VERSION environment variable not defined") elif software_version.startswith("sha256"): @@ -54,10 +58,10 @@ def get_software_name_version(): return software_name, software_version + def get_vendor_type(): vendor_type = os.environ.get("VENDOR_TYPE") if not vendor_type: - logging.info( - f"VENDOR_TYPE environment variable not defined, default to `all`") - vendor_type = 'all' - return vendor_type \ No newline at end of file + logging.info("VENDOR_TYPE environment variable not defined, default to `all`") + vendor_type = "all" + return vendor_type diff --git a/tests/functional/behave_features/common/utils/github.py b/tests/functional/behave_features/common/utils/github.py index ffcc044e..52dcbbe1 100644 --- a/tests/functional/behave_features/common/utils/github.py +++ b/tests/functional/behave_features/common/utils/github.py @@ -8,40 +8,46 @@ from common.utils.setttings import * + @retry(stop_max_delay=30_000, wait_fixed=1000) def get_run_id(secrets, pr_number=None): pr = get_pr(secrets, pr_number) - r = github_api( - 'get', f'repos/{secrets.test_repo}/actions/runs', secrets.bot_token) + r = github_api("get", f"repos/{secrets.test_repo}/actions/runs", secrets.bot_token) runs = json.loads(r.text) - for run in runs['workflow_runs']: - if run['head_sha'] == pr['head']['sha'] and run['name'] == CERTIFICATION_CI_NAME: - return run['id'] + for run in runs["workflow_runs"]: + if ( + run["head_sha"] == pr["head"]["sha"] + and run["name"] == CERTIFICATION_CI_NAME + ): + return run["id"] else: raise Exception("Workflow for the submitted PR did not run.") -@retry(stop_max_delay=60_000*40, wait_fixed=2000) +@retry(stop_max_delay=60_000 * 40, wait_fixed=2000) def get_run_result(secrets, run_id): r = github_api( - 'get', f'repos/{secrets.test_repo}/actions/runs/{run_id}', secrets.bot_token) + "get", f"repos/{secrets.test_repo}/actions/runs/{run_id}", secrets.bot_token + ) run = json.loads(r.text) - if run['conclusion'] is None: + if run["conclusion"] is None: raise Exception(f"Workflow {run_id} is still running, PR: {secrets.pr_number} ") - return run['conclusion'] + return run["conclusion"] @retry(stop_max_delay=10_000, wait_fixed=1000) def check_release_assets(secrets, release_id, required_assets): r = github_api( - 'get', f'repos/{secrets.test_repo}/releases/{release_id}/assets', secrets.bot_token) + "get", + f"repos/{secrets.test_repo}/releases/{release_id}/assets", + secrets.bot_token, + ) asset_list = json.loads(r.text) - asset_names = [asset['name'] for asset in
asset_list] + logging.debug(f"FOUND RELEASE ASSETS: {asset_names}") missing_assets = list() for asset in required_assets: if asset not in asset_names: @@ -52,11 +58,10 @@ def check_release_assets(secrets, release_id, required_assets): @retry(stop_max_delay=15_000, wait_fixed=1000) def get_release_by_tag(secrets, release_tag): - r = github_api( - 'get', f'repos/{secrets.test_repo}/releases', secrets.bot_token) + r = github_api("get", f"repos/{secrets.test_repo}/releases", secrets.bot_token) releases = json.loads(r.text) for release in releases: - if release['tag_name'] == release_tag: + if release["tag_name"] == release_tag: return release raise Exception("Release not published") @@ -64,46 +69,51 @@ def get_release_by_tag(secrets, release_tag): def get_pr(secrets, pr_number=None): pr_number = secrets.pr_number if pr_number is None else pr_number r = github_api( - 'post', f'repos/{secrets.test_repo}/pulls/{pr_number}', secrets.bot_token) + "post", f"repos/{secrets.test_repo}/pulls/{pr_number}", secrets.bot_token + ) pr = json.loads(r.text) return pr def github_api_get(endpoint, bot_token, headers={}): if not headers: - headers = {'Accept': 'application/vnd.github.v3+json', - 'Authorization': f'Bearer {bot_token}'} - r = requests.get(f'{GITHUB_BASE_URL}/{endpoint}', headers=headers) + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f"Bearer {bot_token}", + } + r = requests.get(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers) return r def github_api_delete(endpoint, bot_token, headers={}): if not headers: - headers = {'Accept': 'application/vnd.github.v3+json', - 'Authorization': f'Bearer {bot_token}'} - r = requests.delete(f'{GITHUB_BASE_URL}/{endpoint}', headers=headers) + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f"Bearer {bot_token}", + } + r = requests.delete(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers) return r def github_api_post(endpoint, bot_token, headers={}, json={}): if not headers: - headers = {'Accept': 'application/vnd.github.v3+json', - 'Authorization': f'Bearer {bot_token}'} - r = requests.post(f'{GITHUB_BASE_URL}/{endpoint}', - headers=headers, json=json) + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f"Bearer {bot_token}", + } + r = requests.post(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers, json=json) return r def github_api(method, endpoint, bot_token, headers={}, data={}, json={}): - if method == 'get': + if method == "get": return github_api_get(endpoint, bot_token, headers=headers) - elif method == 'post': + elif method == "post": return github_api_post(endpoint, bot_token, headers=headers, json=json) - elif method == 'delete': + elif method == "delete": return github_api_delete(endpoint, bot_token, headers=headers) else: - raise ValueError( - "Github API method not implemented in helper function") + raise ValueError("Github API method not implemented in helper function") diff --git a/tests/functional/behave_features/common/utils/index.py b/tests/functional/behave_features/common/utils/index.py index a6e6d97e..0213a951 100644 --- a/tests/functional/behave_features/common/utils/index.py +++ b/tests/functional/behave_features/common/utils/index.py @@ -1,43 +1,49 @@ - import logging import semantic_version import sys -sys.path.append('../../../../../scripts/src') +sys.path.append("../../../../../scripts/src") from chartrepomanager import indexannotations from indexfile import index - def check_index_entries(ocpVersion): - all_chart_list = index.get_latest_charts() 
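# A minimal, runnable sketch of the range check check_index_entries performs,
# assuming only the `semantic_version` package; the version numbers below are
# invented for illustration and are not taken from this patch.
import semantic_version

ocp = semantic_version.Version.coerce("4.14")  # OCP release under test (coerced to 4.14.0)
declared = semantic_version.NpmSpec(">=4.8.0 <5.0.0")  # shaped like a chart's supportedOCP range
# True if the declared range covers this OCP version; a False result is what
# lands a chart in failed_chart_list.
print(ocp in declared)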
failed_chart_list = [] OCP_VERSION = semantic_version.Version.coerce(ocpVersion) for chart in all_chart_list: - if "supportedOCP" in chart and chart["supportedOCP"] != "N/A" and chart["supportedOCP"] != "": + if ( + "supportedOCP" in chart + and chart["supportedOCP"] != "N/A" + and chart["supportedOCP"] != "" + ): if OCP_VERSION in semantic_version.NpmSpec(chart["supportedOCP"]): - logging.info(f'PASS: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} includes: {OCP_VERSION}') + logging.info( + f'PASS: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} includes: {OCP_VERSION}' + ) else: - chart["message"] = f'chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} does not include latest OCP version {OCP_VERSION}' - logging.info(f' ERROR: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} does not include {OCP_VERSION}') + chart[ + "message" + ] = f'chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} does not include latest OCP version {OCP_VERSION}' + logging.info( + f' ERROR: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} does not include {OCP_VERSION}' + ) failed_chart_list.append(chart) elif "kubeVersion" in chart and chart["kubeVersion"] != "": supportedOCPVersion = indexannotations.getOCPVersions(chart["kubeVersion"]) if OCP_VERSION in semantic_version.NpmSpec(supportedOCPVersion): - logging.info(f'PASS: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) includes OCP version: {OCP_VERSION}') + logging.info( + f'PASS: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) includes OCP version: {OCP_VERSION}' + ) else: - chart["message"] = f'chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include latest OCP version {OCP_VERSION}' - logging.info(f' ERROR: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include {OCP_VERSION}') + chart[ + "message" + ] = f'chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include latest OCP version {OCP_VERSION}' + logging.info( + f' ERROR: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include {OCP_VERSION}' + ) failed_chart_list.append(chart) return failed_chart_list - - - - - - - diff --git a/tests/functional/behave_features/common/utils/notifier.py b/tests/functional/behave_features/common/utils/notifier.py index fc1b433d..edddb65b 100755 --- a/tests/functional/behave_features/common/utils/notifier.py +++ b/tests/functional/behave_features/common/utils/notifier.py @@ -13,13 +13,15 @@ CHECKS_FAILED = "checks failed" + def _set_endpoint_key(key, env_var): if key not in endpoint_data: if env_var in os.environ: endpoint_data[key] = os.environ[env_var] else: raise Exception( - f"Environment variables {env_var} is required to connect to github") + f"Environment variables {env_var} is required to connect to github" + ) def _set_endpoint(): @@ -29,17 +31,23 @@ def _set_endpoint(): def _make_gihub_request(method, uri, body=None, params={}, headers={}, verbose=False): - headers.update({"Authorization": f'Bearer {endpoint_data["access_token"]}', - "Accept": "application/vnd.github.v3+json"}) 
+ headers.update( + { + "Authorization": f'Bearer {endpoint_data["access_token"]}', + "Accept": "application/vnd.github.v3+json", + } + ) url = f'{GITHUB_BASE_URL}/repos/{endpoint_data["organization"]}/{endpoint_data["repo"]}/{uri}' print(f"API url: {url}") - method_map = {"get": requests.get, - "post": requests.post, - "put": requests.put, - "delete": requests.delete, - "patch": requests.patch} + method_map = { + "get": requests.get, + "post": requests.post, + "put": requests.put, + "delete": requests.delete, + "patch": requests.patch, + } request_method = method_map[method] response = request_method(url, params=params, headers=headers, json=body) if verbose: @@ -56,14 +64,17 @@ def _make_gihub_request(method, uri, body=None, params={}, headers={}, verbose=F print(json.dumps(resp_json, indent=4, sort_keys=True)) return resp_json + # Call this method directly if you are not creating a verification issue nor a version change issue. def create_an_issue(title, description, assignees=[], labels=[]): uri = "issues" method = "post" - body = {"title": title, - "body": description, - "assignees": assignees, - "labels": labels} + body = { + "title": title, + "body": description, + "assignees": assignees, + "labels": labels, + } _make_gihub_request(method, uri, body=body, verbose=False) @@ -78,7 +89,18 @@ def _verify_endpoint(access_token): endpoint_data["access_token"] = access_token -def create_verification_issue(chart, chart_owners, failure_type, notify_developers, pr_url, report_url, software_name, software_version, access_token=None, dry_run=False): +def create_verification_issue( + chart, + chart_owners, + failure_type, + notify_developers, + pr_url, + report_url, + software_name, + software_version, + access_token=None, + dry_run=False, +): """Create and issue with chart-verifier findings after a version change trigger. chart_name -- Name of the chart that was verified. Include version for more verbose information\n @@ -92,7 +114,6 @@ def create_verification_issue(chart, chart_owners, failure_type, notify_develope dry-run -- Set if the test run is a dry-run. """ - title = f"Chart {chart}" if dry_run: title = f"Dry Run: Chart {chart}" @@ -100,22 +121,26 @@ def create_verification_issue(chart, chart_owners, failure_type, notify_develope if failure_type == CHECKS_FAILED: title = f"{title} has failures with {software_name} version {software_version}" report_result = "some chart checks have failed. Please review the failures and, if required, consider submitting a new chart version with the appropriate additions/corrections." - body = (f"FYI @{' @'.join(notify_developers)}, in PR {pr_url} we triggered the chart certification workflow against chart {chart} because the workflow " - f"now supports {software_name} version {software_version}. We have found that {report_result}. Check details in the report: " - f"{report_url}, Chart owners are: {chart_owners}") + body = ( + f"FYI @{' @'.join(notify_developers)}, in PR {pr_url} we triggered the chart certification workflow against chart {chart} because the workflow " + f"now supports {software_name} version {software_version}. We have found that {report_result}. Check details in the report: " + f"{report_url}, Chart owners are: {chart_owners}" + ) else: title = f"{title} does not support {software_name} version {software_version}" - body = (f"FYI @{' @'.join(notify_developers)}, we checked the OCP versions supported by {chart} because the workflow " - f"now supports {software_name} version {software_version}. We have found that {failure_type}. 
Chart owners are: {chart_owners}") + body = ( + f"FYI @{' @'.join(notify_developers)}, we checked the OCP versions supported by {chart} because the workflow " + f"now supports {software_name} version {software_version}. We have found that {failure_type}. Chart owners are: {chart_owners}" + ) _set_endpoint() _verify_endpoint(access_token) create_an_issue(title, body) - - -def create_version_change_issue(chart_name, chart_owners, software_name, software_version, access_token=None): +def create_version_change_issue( + chart_name, chart_owners, software_name, software_version, access_token=None +): """Create and issue with new version of software dependencies supported by certitifcation program. chart_name -- Name of the chart afected. Include version for more verbose information @@ -127,8 +152,10 @@ def create_version_change_issue(chart_name, chart_owners, software_name, softwar title = f"Action needed for {chart_name} after a certification dependency change" - body = (f"FYI @{' @'.join(chart_owners)}, {software_name} {software_version} is now supported by the certification program. " - "Consider submiting a new chart version.") + body = ( + f"FYI @{' @'.join(chart_owners)}, {software_name} {software_version} is now supported by the certification program. " + "Consider submiting a new chart version." + ) _set_endpoint() _verify_endpoint(access_token) @@ -165,8 +192,15 @@ def create_version_change_issue(chart_name, chart_owners, software_name, softwar print("Did the chart verification pass (yes/no)?: ") pass_answer = sys.stdin.readline().strip() pass_verification = pass_answer == "yes" - create_verification_issue(chart_name, chart_owners, report_url, - software_name, software_version, pass_verification=pass_verification) + create_verification_issue( + chart_name, + chart_owners, + report_url, + software_name, + software_version, + pass_verification=pass_verification, + ) else: create_version_change_issue( - chart_name, chart_owners, software_name, software_version) + chart_name, chart_owners, software_name, software_version + ) diff --git a/tests/functional/behave_features/common/utils/secret.py b/tests/functional/behave_features/common/utils/secret.py index a509da40..b75e1e4c 100644 --- a/tests/functional/behave_features/common/utils/secret.py +++ b/tests/functional/behave_features/common/utils/secret.py @@ -3,34 +3,37 @@ from dataclasses import dataclass + @dataclass class E2ETestSecret: # common secrets between one-shot and recursive tests - test_repo: str = '' - bot_name: str = '' - bot_token: str = '' + test_repo: str = "" + bot_name: str = "" + bot_token: str = "" pr_number: int = -1 - vendor_type: str = '' - owners_file_content: str = '' + vendor_type: str = "" + owners_file_content: str = "" + @dataclass class E2ETestSecretOneShot(E2ETestSecret): # one-shot testing - active_branch: str = '' - base_branch: str = '' - pr_branch: str = '' + active_branch: str = "" + base_branch: str = "" + pr_branch: str = "" pr_number: int = -1 - vendor: str = '' - bad_version: str = '' + vendor: str = "" + bad_version: str = "" provider_delivery: bool = False index_file: str = "index.yaml" + @dataclass class E2ETestSecretRecursive(E2ETestSecret): # recursive testing - software_name: str = '' - software_version: str = '' - pr_base_branch: str = '' + software_name: str = "" + software_version: str = "" + pr_base_branch: str = "" base_branches: list = None pr_branches: list = None dry_run: bool = True diff --git a/tests/functional/behave_features/common/utils/set_directory.py 
b/tests/functional/behave_features/common/utils/set_directory.py index 921c295f..becb220e 100644 --- a/tests/functional/behave_features/common/utils/set_directory.py +++ b/tests/functional/behave_features/common/utils/set_directory.py @@ -8,16 +8,19 @@ from dataclasses import dataclass from pathlib import Path + @dataclass class SetDirectory(object): """ Args: path (Path): The path to the cwd """ + path: Path origin: Path = Path().absolute() def __enter__(self): os.chdir(self.path) + def __exit__(self, exc_type, exc_value, traceback): os.chdir(self.origin) diff --git a/tests/functional/behave_features/common/utils/setttings.py b/tests/functional/behave_features/common/utils/setttings.py index 5f2439f1..45caafc8 100644 --- a/tests/functional/behave_features/common/utils/setttings.py +++ b/tests/functional/behave_features/common/utils/setttings.py @@ -1,14 +1,14 @@ # -*- coding: utf-8 -*- """Settings and global variables for e2e tests""" -GITHUB_BASE_URL = 'https://api.github.com' +GITHUB_BASE_URL = "https://api.github.com" # The sandbox repository where we run all our tests on -TEST_REPO = 'openshift-helm-charts/sandbox' +TEST_REPO = "openshift-helm-charts/sandbox" # The prod repository where we create notification issues -PROD_REPO = 'openshift-helm-charts/charts' +PROD_REPO = "openshift-helm-charts/charts" # The prod branch where we store all chart files -PROD_BRANCH = 'main' +PROD_BRANCH = "main" # This is used to find chart certification workflow run id -CERTIFICATION_CI_NAME = 'CI' +CERTIFICATION_CI_NAME = "CI" # GitHub actions bot email for git email -GITHUB_ACTIONS_BOT_EMAIL = '41898282+github-actions[bot]@users.noreply.github.com' +GITHUB_ACTIONS_BOT_EMAIL = "41898282+github-actions[bot]@users.noreply.github.com" diff --git a/tests/functional/behave_features/environment.py b/tests/functional/behave_features/environment.py index f34a5d2e..d05a09ef 100644 --- a/tests/functional/behave_features/environment.py +++ b/tests/functional/behave_features/environment.py @@ -2,22 +2,25 @@ from common.utils.chart_certification import ChartCertificationE2ETestSingle from common.utils.chart_certification import ChartCertificationE2ETestMultiple + @fixture def workflow_test(context): context.workflow_test = ChartCertificationE2ETestSingle(test_name=context.test_name) yield context.workflow_test context.workflow_test.cleanup() + @fixture def submitted_chart_test(context): context.chart_test = ChartCertificationE2ETestMultiple() yield context.chart_test - context.chart_test.cleanup() + context.chart_test.cleanup() + def before_scenario(context, scenario): - if 'version-change' in scenario.tags: + if "version-change" in scenario.tags: print("[INFO] Using submitted charts fixture") use_fixture(submitted_chart_test, context) else: - context.test_name = scenario.name.split('@')[0][:-4].split(']')[1] + context.test_name = scenario.name.split("@")[0][:-4].split("]")[1] use_fixture(workflow_test, context) diff --git a/tests/functional/behave_features/steps/implementation.py b/tests/functional/behave_features/steps/implementation.py index e261a227..23fd9c81 100644 --- a/tests/functional/behave_features/steps/implementation.py +++ b/tests/functional/behave_features/steps/implementation.py @@ -1,12 +1,14 @@ from behave import given, when, then -from common.utils.chart import Chart, Chart_Type, Release_Type +from common.utils.chart import Chart_Type, Release_Type + ############### Common step definitions ############### -@given(u'the vendor "{vendor}" has a valid identity as "{vendor_type}"') +@given('the 
vendor "{vendor}" has a valid identity as "{vendor_type}"') def vendor_has_valid_identity(context, vendor, vendor_type): context.workflow_test.set_vendor(vendor, vendor_type) -@given(u'an error-free chart source is used in "{chart_path}"') + +@given('an error-free chart source is used in "{chart_path}"') def chart_source_is_used(context, chart_path): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC, chart_path)]) context.workflow_test.setup_git_context() @@ -16,7 +18,8 @@ def chart_source_is_used(context, chart_path): context.workflow_test.process_charts() context.workflow_test.push_charts() -@given(u'chart source is used in "{chart_path}"') + +@given('chart source is used in "{chart_path}"') def user_has_used_chart_src(context, chart_path): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC, chart_path)]) context.workflow_test.setup_git_context() @@ -25,7 +28,8 @@ def user_has_used_chart_src(context, chart_path): context.workflow_test.process_owners_file() context.workflow_test.process_charts() -@given(u'an error-free chart tarball is used in "{chart_path}"') + +@given('an error-free chart tarball is used in "{chart_path}"') def user_has_created_error_free_chart_tarball(context, chart_path): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR, chart_path)]) context.workflow_test.setup_git_context() @@ -35,7 +39,10 @@ def user_has_created_error_free_chart_tarball(context, chart_path): context.workflow_test.process_charts() context.workflow_test.push_charts() -@given(u'a signed chart tarball is used in "{chart_path}" and public key in "{public_key_file}"') + +@given( + 'a signed chart tarball is used in "{chart_path}" and public key in "{public_key_file}"' +) def user_has_created_signed_chart_tarball(context, chart_path, public_key_file): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR, chart_path)]) context.workflow_test.setup_git_context() @@ -45,7 +52,8 @@ def user_has_created_signed_chart_tarball(context, chart_path, public_key_file): context.workflow_test.process_charts(include_prov_file=True) context.workflow_test.push_charts() -@given(u'signed chart tar used in "{chart_path}"') + +@given('signed chart tar used in "{chart_path}"') def user_has_created_signed_chart_tarball(context, chart_path): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR, chart_path)]) context.workflow_test.setup_git_context() @@ -55,9 +63,16 @@ def user_has_created_signed_chart_tarball(context, chart_path): context.workflow_test.process_charts(include_prov_file=True) context.workflow_test.push_charts() -@given(u'an error-free chart tarball used in "{chart_path}" and report in "{report_path}"') -def user_has_created_error_free_chart_tarball_and_report(context, chart_path, report_path): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR_AND_REPORT, chart_path, report_path)]) + +@given( + 'an error-free chart tarball used in "{chart_path}" and report in "{report_path}"' +) +def user_has_created_error_free_chart_tarball_and_report( + context, chart_path, report_path +): + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.TAR_AND_REPORT, chart_path, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() @@ -67,9 +82,16 @@ def user_has_created_error_free_chart_tarball_and_report(context, chart_path, re context.workflow_test.process_report() context.workflow_test.push_charts() -@given(u'a signed chart tar is used in "{chart_path}", 
report in "{report_path}" and public key in "{public_key_file}"') -def user_has_created_error_free_chart_tarball_and_report(context, chart_path, report_path, public_key_file): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR_AND_REPORT, chart_path, report_path)]) + +@given( + 'a signed chart tar is used in "{chart_path}", report in "{report_path}" and public key in "{public_key_file}"' +) +def user_has_created_error_free_chart_tarball_and_report( + context, chart_path, report_path, public_key_file +): + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.TAR_AND_REPORT, chart_path, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() @@ -79,7 +101,10 @@ def user_has_created_error_free_chart_tarball_and_report(context, chart_path, re context.workflow_test.process_report() context.workflow_test.push_charts() -@given(u'unsigned chart tarball is used in "{chart_path}" and public key used "{public_key_file}" in owners') + +@given( + 'unsigned chart tarball is used in "{chart_path}" and public key used "{public_key_file}" in owners' +) def user_has_created_error_free_chart_tarball(context, chart_path, public_key_file): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR, chart_path)]) @@ -90,9 +115,12 @@ def user_has_created_error_free_chart_tarball(context, chart_path, public_key_fi context.workflow_test.process_charts() context.workflow_test.push_charts() -@given(u'a chart tarball is used in "{chart_path}" and report in "{report_path}"') + +@given('a chart tarball is used in "{chart_path}" and report in "{report_path}"') def user_has_created_a_chart_tarball_and_report(context, chart_path, report_path): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR_AND_REPORT, chart_path, report_path)]) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.TAR_AND_REPORT, chart_path, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() @@ -100,9 +128,14 @@ def user_has_created_a_chart_tarball_and_report(context, chart_path, report_path context.workflow_test.process_owners_file() context.workflow_test.process_charts() -@given(u'an error-free chart source used in "{chart_path}" and report in "{report_path}"') + +@given( + 'an error-free chart source used in "{chart_path}" and report in "{report_path}"' +) def user_has_created_error_free_chart_src_and_report(context, chart_path, report_path): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC_AND_REPORT, chart_path, report_path)]) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.SRC_AND_REPORT, chart_path, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() @@ -112,37 +145,56 @@ def user_has_created_error_free_chart_src_and_report(context, chart_path, report context.workflow_test.process_report() context.workflow_test.push_charts() -@given(u'report is used in "{report_path}"') -@given(u'an error-free report is used in "{report_path}"') + +@given('report is used in "{report_path}"') +@given('an error-free report is used in "{report_path}"') def user_has_created_error_free_report(context, report_path): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.REPORT, report_path)]) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.REPORT, report_path)] + ) context.workflow_test.setup_git_context() 
context.workflow_test.setup_gh_pages_branch() context.workflow_test.setup_temp_dir() context.workflow_test.process_owners_file() context.workflow_test.process_report() -@given(u'signed chart report used in "{report_path}" and public key in "{public_key_file}"') + +@given( + 'signed chart report used in "{report_path}" and public key in "{public_key_file}"' +) def user_has_created_error_free_report(context, report_path, public_key_file): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.REPORT, report_path)]) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.REPORT, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() context.workflow_test.setup_temp_dir() context.workflow_test.process_owners_file(public_key_file=public_key_file) context.workflow_test.process_report() -@given(u'user wants to send two reports as in "{report_path_1}" and "{report_path_2}"') + +@given('user wants to send two reports as in "{report_path_1}" and "{report_path_2}"') def user_has_created_error_free_report(context, report_path_1, report_path_2): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.REPORT, report_path_1), (Chart_Type.REPORT, report_path_2)]) + context.workflow_test.update_test_charts( + test_charts=[ + (Chart_Type.REPORT, report_path_1), + (Chart_Type.REPORT, report_path_2), + ] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() context.workflow_test.setup_temp_dir() context.workflow_test.process_owners_file() context.workflow_test.process_report() -@given(u'user wants to send two chart sources as in "{chart_path_1}" and "{chart_path_2}"') + +@given( + 'user wants to send two chart sources as in "{chart_path_1}" and "{chart_path_2}"' +) def user_wants_to_send_two_chart_sources(context, chart_path_1, chart_path_2): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC, chart_path_1), (Chart_Type.SRC, chart_path_2)]) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.SRC, chart_path_1), (Chart_Type.SRC, chart_path_2)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() context.workflow_test.setup_temp_dir() @@ -150,9 +202,12 @@ def user_wants_to_send_two_chart_sources(context, chart_path_1, chart_path_2): context.workflow_test.process_charts() context.workflow_test.push_charts() -@given(u'user wants to send two chart tars as in "{chart_path_1}" and "{chart_path_2}"') + +@given('user wants to send two chart tars as in "{chart_path_1}" and "{chart_path_2}"') def user_wants_to_send_two_chart_tars(context, chart_path_1, chart_path_2): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR, chart_path_1), (Chart_Type.TAR, chart_path_2)]) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.TAR, chart_path_1), (Chart_Type.TAR, chart_path_2)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() context.workflow_test.setup_temp_dir() @@ -160,9 +215,16 @@ def user_wants_to_send_two_chart_tars(context, chart_path_1, chart_path_2): context.workflow_test.process_charts() context.workflow_test.push_charts() -@given(u'user wants to send two charts one with source "{chart_path}" and other with report "{report_path}"') -def user_wants_to_send_multiple_chart_one_with_src_and_other_with_report(context, chart_path, report_path): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC, chart_path), 
(Chart_Type.REPORT, report_path)]) + +@given( + 'user wants to send two charts one with source "{chart_path}" and other with report "{report_path}"' +) +def user_wants_to_send_multiple_chart_one_with_src_and_other_with_report( + context, chart_path, report_path +): + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.SRC, chart_path), (Chart_Type.REPORT, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() @@ -172,9 +234,16 @@ def user_wants_to_send_multiple_chart_one_with_src_and_other_with_report(context context.workflow_test.process_report() context.workflow_test.push_charts() -@given(u'user wants to send two charts one with tar "{chart_path}" and other with report "{report_path}"') -def user_wants_to_send_multiple_chart_one_with_tar_and_other_with_report(context, chart_path, report_path): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.TAR, chart_path), (Chart_Type.REPORT, report_path)]) + +@given( + 'user wants to send two charts one with tar "{chart_path}" and other with report "{report_path}"' +) +def user_wants_to_send_multiple_chart_one_with_tar_and_other_with_report( + context, chart_path, report_path +): + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.TAR, chart_path), (Chart_Type.REPORT, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() @@ -184,137 +253,204 @@ def user_wants_to_send_multiple_chart_one_with_tar_and_other_with_report(context context.workflow_test.process_report() context.workflow_test.push_charts() -@given(u'a "{report_path}" is provided') + +@given('a "{report_path}" is provided') def user_generated_a_report(context, report_path): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.REPORT, report_path)]) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.REPORT, report_path)] + ) context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() context.workflow_test.setup_temp_dir() context.workflow_test.process_owners_file() -@when(u'the user sends a pull request with the report') -@when(u'the user sends a pull request with the chart') -@when(u'the user sends a pull request with the chart and report') + +@when("the user sends a pull request with the report") +@when("the user sends a pull request with the chart") +@when("the user sends a pull request with the chart and report") def user_sends_a_pull_request(context): context.workflow_test.send_pull_request() -@when(u'the user pushed the chart and created pull request') + +@when("the user pushed the chart and created pull request") def user_pushed_the_chart_and_created_pull_request(context): context.workflow_test.push_charts() context.workflow_test.send_pull_request() -@then(u'the user sees the pull request is merged') + +@then("the user sees the pull request is merged") def pull_request_is_merged(context): - context.workflow_test.check_workflow_conclusion(expect_result='success') + context.workflow_test.check_workflow_conclusion(expect_result="success") context.workflow_test.check_pull_request_result(expect_merged=True) context.workflow_test.check_pull_request_labels() -@then(u'the index.yaml file is updated with an entry for the submitted chart') + +@then("the index.yaml file is updated with an entry for the submitted chart") def index_yaml_updated_with_submitted_chart(context): context.workflow_test.check_index_yaml() -@then(u'a release is published with report only') + +@then("a 
release is published with report only") def release_is_published(context): context.workflow_test.check_release_result(release_type=Release_Type.REPORT_ONLY) -@then(u'a release is published with corresponding report and chart tarball') + +@then("a release is published with corresponding report and chart tarball") def release_is_published(context): - context.workflow_test.check_release_result(release_type=Release_Type.CHART_AND_REPORT) + context.workflow_test.check_release_result( + release_type=Release_Type.CHART_AND_REPORT + ) -@then(u'a release is published with corresponding report, tarball, prov and key') + +@then("a release is published with corresponding report, tarball, prov and key") def release_is_published_for_signed_chart(context): - context.workflow_test.check_release_result(release_type=Release_Type.CHART_REPORT_PROV_AND_KEY) + context.workflow_test.check_release_result( + release_type=Release_Type.CHART_REPORT_PROV_AND_KEY + ) + -@then(u'a release is published with corresponding report and key') +@then("a release is published with corresponding report and key") def release_is_published_for_signed_chart(context): context.workflow_test.check_release_result(release_type=Release_Type.REPORT_AND_KEY) -@then(u'a release is published with corresponding report, chart tar and prov file') + +@then("a release is published with corresponding report, chart tar and prov file") def release_is_published_for_signed_chart_and_report(context): - context.workflow_test.check_release_result(release_type=Release_Type.CHART_PROV_AND_REPORT) + context.workflow_test.check_release_result( + release_type=Release_Type.CHART_PROV_AND_REPORT + ) -@then(u'the pull request is not merged') + +@then("the pull request is not merged") def pull_request_is_not_merged(context): - context.workflow_test.check_workflow_conclusion(expect_result='failure') + context.workflow_test.check_workflow_conclusion(expect_result="failure") context.workflow_test.check_pull_request_result(expect_merged=False) -@then(u'user gets the "{message}" in the pull request comment') + +@then('user gets the "{message}" in the pull request comment') def user_gets_a_message(context, message): context.workflow_test.check_pull_request_comments(expect_message=message) + ########## Unique step definitions ################# -@given(u'README file is missing in the chart') + +@given("README file is missing in the chart") def readme_file_is_missing(context): context.workflow_test.remove_readme_file() -@then(u'the index.yaml file is updated with an entry for the submitted chart with correct providerType') + +@then( + "the index.yaml file is updated with an entry for the submitted chart with correct providerType" +) def index_yaml_is_updated_with_new_entry_with_correct_provider_type(context): context.workflow_test.check_index_yaml(check_provider_type=True) -@given(u'the report contains an "{invalid_url}"') + +@given('the report contains an "{invalid_url}"') def invalid_url_in_the_report(context, invalid_url): context.workflow_test.process_report(update_url=True, url=invalid_url) -@given(u'user adds a non chart related file') + +@given("user adds a non chart related file") def user_adds_a_non_chart_related_file(context): context.workflow_test.add_non_chart_related_file() -@when(u'the user sends a pull request with both chart and non related file') + +@when("the user sends a pull request with both chart and non related file") def user_sends_pull_request_with_chart_and_non_related_file(context): context.workflow_test.push_charts(add_non_chart_file=True) 
context.workflow_test.send_pull_request() -@given(u'provider delivery control is set to "{provider_control_owners}" in the OWNERS file') + +@given( + 'provider delivery control is set to "{provider_control_owners}" in the OWNERS file' +) def provider_delivery_control_set_in_owners(context, provider_control_owners): context.workflow_test.update_provided_delivery(provider_control_owners) -@given(u'provider delivery control is set to "{provider_control_report}" in the report') + +@given('provider delivery control is set to "{provider_control_report}" in the report') def provider_delivery_control_set_in_report(context, provider_control_report): if provider_control_report == "true": - context.workflow_test.process_report(update_provider_delivery=True, provider_delivery=True) + context.workflow_test.process_report( + update_provider_delivery=True, provider_delivery=True + ) else: - context.workflow_test.process_report(update_provider_delivery=True, provider_delivery=False) + context.workflow_test.process_report( + update_provider_delivery=True, provider_delivery=False + ) + -@given(u'provider delivery controls is set to "{provider_control_report}" and a package digest is "{package_digest_set}" in the report') -def provider_delivery_control_and_package_digest_set_in_report(context, provider_control_report, package_digest_set=True): +@given( + 'provider delivery controls is set to "{provider_control_report}" and a package digest is "{package_digest_set}" in the report' +) +def provider_delivery_control_and_package_digest_set_in_report( + context, provider_control_report, package_digest_set=True +): if package_digest_set == "true": no_package_digest = False else: no_package_digest = True if provider_control_report == "true": - context.workflow_test.process_report(update_provider_delivery=True, provider_delivery=True, unset_package_digest=no_package_digest) + context.workflow_test.process_report( + update_provider_delivery=True, + provider_delivery=True, + unset_package_digest=no_package_digest, + ) else: - context.workflow_test.process_report(update_provider_delivery=True, provider_delivery=False, unset_package_digest=no_package_digest) + context.workflow_test.process_report( + update_provider_delivery=True, + provider_delivery=False, + unset_package_digest=no_package_digest, + ) -@then(u'the "{index_file}" is updated with an entry for the submitted chart') + +@then('the "{index_file}" is updated with an entry for the submitted chart') def index_file_is_updated(context, index_file): context.workflow_test.secrets.index_file = index_file context.workflow_test.check_index_yaml(True) -@given(u'the report includes "{tested}" and "{supported}" OpenshiftVersion values and chart "{kubeversion}" value') + +@given( + 'the report includes "{tested}" and "{supported}" OpenshiftVersion values and chart "{kubeversion}" value' +) def report_includes_specified_versions(context, tested, supported, kubeversion): - context.workflow_test.process_report(update_versions=True, supported_versions=supported, tested_version=tested, kube_version=kubeversion) + context.workflow_test.process_report( + update_versions=True, + supported_versions=supported, + tested_version=tested, + kube_version=kubeversion, + ) + -@given(u'the report has a "{check}" missing') +@given('the report has a "{check}" missing') def report_has_a_check_missing(context, check): context.workflow_test.process_report(missing_check=check) -@given(u'A "{user}" wants to submit a chart in "{chart_path}"') + +@given('A "{user}" wants to submit a chart in 
"{chart_path}"') def user_wants_to_submit_a_chart(context, user, chart_path): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC, chart_path)]) context.workflow_test.update_bot_name(user) -@given(u'An authorized user wants to submit a chart in "{chart_path}"') + +@given('An authorized user wants to submit a chart in "{chart_path}"') def authorized_user_wants_to_submit_a_chart(context, chart_path): context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC, chart_path)]) -@given(u'a chart source used in "{chart_path}" and directory structure contains "{bad_version}"') + +@given( + 'a chart source used in "{chart_path}" and directory structure contains "{bad_version}"' +) def a_user_wants_to_submit_a_chart_with_bad_semver(context, chart_path, bad_version): - context.workflow_test.update_test_charts(test_charts=[(Chart_Type.SRC, chart_path)], new_chart_version=bad_version) + context.workflow_test.update_test_charts( + test_charts=[(Chart_Type.SRC, chart_path)], new_chart_version=bad_version + ) + -@given(u'the user creates a branch to add a new chart version') +@given("the user creates a branch to add a new chart version") def the_user_creates_a_branch_to_add_a_new_chart_version(context): context.workflow_test.setup_git_context() context.workflow_test.setup_gh_pages_branch() @@ -322,48 +458,61 @@ def the_user_creates_a_branch_to_add_a_new_chart_version(context): context.workflow_test.process_owners_file() context.workflow_test.process_charts() if context.workflow_test.secrets.bad_version: - context.workflow_test.update_chart_version_in_chart_yaml(context.workflow_test.secrets.bad_version) + context.workflow_test.update_chart_version_in_chart_yaml( + context.workflow_test.secrets.bad_version + ) context.workflow_test.push_charts() -@given(u'Chart.yaml specifies a "{bad_version}"') + +@given('Chart.yaml specifies a "{bad_version}"') def chart_yaml_specifies_bad_version(context, bad_version): - if bad_version != '': + if bad_version != "": context.workflow_test.update_bad_version(bad_version) -@given(u'the report contains "{error}"') + +@given('the report contains "{error}"') def sha_value_does_not_match(context, error): - if error == 'sha_mismatch': + if error == "sha_mismatch": context.workflow_test.process_report(update_chart_sha=True) else: raise AssertionError(f"This {error} handling is not implemented yet") -@when(u'the user sends a pull request with the chart tar and report') + +@when("the user sends a pull request with the chart tar and report") def user_sends_pull_request_with_chart_tarball_and_report(context): context.workflow_test.push_charts() context.workflow_test.send_pull_request() + ######## Test Submitted Charts Step definitions ########## -@given(u'there is a github workflow for testing existing charts') +@given("there is a github workflow for testing existing charts") def theres_github_workflow_for_testing_charts(context): print("[INFO] Running step: there is a github workflow for testing existing charts") -@when(u'a new Openshift or chart-verifier version is specified') + +@when("a new Openshift or chart-verifier version is specified") def new_openshift_or_verifier_version_is_specified(context): print("[INFO] Running step: a new Openshift or chart-verifier version is specified") -@when(u'the vendor type is specified, e.g. partner, and/or redhat') + +@when("the vendor type is specified, e.g. partner, and/or redhat") def vendor_type_is_specified(context): - print("[INFO] Running step: the vendor type is specified, e.g. 
partner, and/or redhat")
+    print(
+        "[INFO] Running step: the vendor type is specified, e.g. partner, and/or redhat"
+    )
 
-@when(u'workflow for testing existing charts is triggered')
+
+@when("workflow for testing existing charts is triggered")
 def workflow_is_triggered(context):
     print("[INFO] Running step: workflow for testing existing charts is triggered")
 
-@then(u'submission tests are run for existing charts')
+
+@then("submission tests are run for existing charts")
 def submission_tests_run_for_submitted_charts(context):
     print("[INFO] Running step: submission tests are run for existing charts")
     context.chart_test.process_all_charts()
 
-@then(u'all results are reported back to the caller')
+
+@then("all results are reported back to the caller")
 def all_results_report_back_to_caller(context):
-    print("[INFO] Running step: all results are reported back to the caller")
\ No newline at end of file
+    print("[INFO] Running step: all results are reported back to the caller")
diff --git a/tests/functional/step_defs/HC-16_test_dash_in_version.py b/tests/functional/step_defs/HC-16_test_dash_in_version.py
index 4eaee44a..2cf574f2 100644
--- a/tests/functional/step_defs/HC-16_test_dash_in_version.py
+++ b/tests/functional/step_defs/HC-16_test_dash_in_version.py
@@ -9,14 +9,18 @@
 from functional.utils.chart_certification import ChartCertificationE2ETestSingle
 
+
 @pytest.fixture
 def workflow_test():
-    test_name = 'Test Chart Report Only'
+    test_name = "Test Chart Report Only"
     workflow_test = ChartCertificationE2ETestSingle(test_name=test_name)
     yield workflow_test
     workflow_test.cleanup()
 
-@scenario('../features/HC-16_dash_in_version.feature', "[HC-16-001] A partner or redhat associate submits report only with dash in chart version")
+
+@scenario(
+    "../features/HC-16_dash_in_version.feature",
+    "[HC-16-001] A partner or redhat associate submits report only with dash in chart version",
+)
 def test_partner_or_redhat_user_submits_report_dash_in_version():
     """A community user submits an error-free report"""
 
diff --git a/tests/functional/step_defs/conftest.py b/tests/functional/step_defs/conftest.py
index 83de3898..0a82a31c 100644
--- a/tests/functional/step_defs/conftest.py
+++ b/tests/functional/step_defs/conftest.py
@@ -1,10 +1,6 @@
 import pytest
-from pytest_bdd import (
-    given,
-    then,
-    when,
-    parsers
-)
+from pytest_bdd import given, then, when, parsers
+
 
 ########### GIVEN ####################
 @given(parsers.parse("A <user> wants to submit a chart in <chart_path>"))
@@ -13,30 +9,43 @@ def user_wants_to_submit_a_chart(workflow_test, user, chart_path):
     workflow_test.update_test_chart(chart_path)
     workflow_test.secrets.bot_name = user
 
+
 @given(parsers.parse("An authorized user wants to submit a chart in <chart_path>"))
 def authorized_user_wants_to_submit_a_chart(workflow_test, chart_path):
     """A <user> wants to submit a chart in <chart_path>."""
     workflow_test.update_test_chart(chart_path)
 
+
 @given(parsers.parse("<vendor> of <vendor_type> wants to submit <chart> of <version>"))
-def vendor_of_vendor_type_wants_to_submit_chart_of_version(workflow_test, vendor, vendor_type, chart, version):
+def vendor_of_vendor_type_wants_to_submit_chart_of_version(
+    workflow_test, vendor, vendor_type, chart, version
+):
     """<vendor> of <vendor_type> wants to submit <chart> of <version>"""
     workflow_test.set_vendor(vendor, vendor_type)
     workflow_test.chart_name, workflow_test.chart_version = chart, version
 
+
 @given(parsers.parse("Chart.yaml specifies a <bad_version>"))
 def chart_yaml_specifies_bad_version(workflow_test, bad_version):
-    """ Chart.yaml specifies a <bad_version> """
-    if bad_version != '':
+    """Chart.yaml specifies a <bad_version>"""
+    if bad_version != "":
         workflow_test.secrets.bad_version = bad_version
 
+
 @given(parsers.parse("the vendor <vendor> has a valid identity as <vendor_type>"))
 def user_has_valid_identity(workflow_test, vendor, vendor_type):
     """the vendor <vendor> has a valid identity as <vendor_type>."""
     workflow_test.set_vendor(vendor, vendor_type)
 
-@given(parsers.parse("an error-free chart source is used in <chart_path> and report in <report_path>"))
-def user_has_created_error_free_chart_src_and_report(workflow_test, chart_path, report_path):
+
+@given(
+    parsers.parse(
+        "an error-free chart source is used in <chart_path> and report in <report_path>"
+    )
+)
+def user_has_created_error_free_chart_src_and_report(
+    workflow_test, chart_path, report_path
+):
     """an error-free chart source is used in <chart_path> and report in <report_path>."""
     workflow_test.update_test_chart(chart_path)
     workflow_test.update_test_report(report_path)
@@ -49,6 +58,7 @@ def user_has_created_error_free_chart_src_and_report(workflow_test, chart_path,
     workflow_test.process_report()
     workflow_test.push_chart(is_tarball=False)
 
+
 @given(parsers.parse("an error-free chart source is used in <chart_path>"))
 def user_has_created_error_free_chart_src(workflow_test, chart_path):
     """an error-free chart source is used in <chart_path>."""
@@ -60,8 +70,15 @@
     workflow_test.process_chart(is_tarball=False)
     workflow_test.push_chart(is_tarball=False)
 
-@given(parsers.parse("an error-free chart tarball is used in <chart_path> and report in <report_path>"))
-def user_has_created_error_free_chart_tarball_and_report(workflow_test, chart_path, report_path):
+
+@given(
+    parsers.parse(
+        "an error-free chart tarball is used in <chart_path> and report in <report_path>"
+    )
+)
+def user_has_created_error_free_chart_tarball_and_report(
+    workflow_test, chart_path, report_path
+):
     """an error-free chart tarball is used in <chart_path> and report in <report_path>."""
     workflow_test.update_test_chart(chart_path)
     workflow_test.update_test_report(report_path)
@@ -74,6 +91,7 @@ def user_has_created_error_free_chart_tarball_and_report(workflow_test, chart_pa
     workflow_test.process_report()
     workflow_test.push_chart(is_tarball=True)
 
+
 @given(parsers.parse("an error-free chart tarball is used in <chart_path>"))
 def user_has_created_error_free_chart_tarball(workflow_test, chart_path):
     """an error-free chart tarball is used in <chart_path>."""
@@ -85,6 +103,7 @@
     workflow_test.process_chart(is_tarball=True)
     workflow_test.push_chart(is_tarball=True)
 
+
 @given(parsers.parse("report is used in <report_path>"))
 @given(parsers.parse("an error-free report is used in <report_path>"))
 def user_has_created_error_free_report(workflow_test, report_path):
@@ -96,6 +115,7 @@ def user_has_created_error_free_report(workflow_test, report_path):
     workflow_test.process_owners_file()
     workflow_test.process_report()
 
+
 @given(parsers.parse("a <report_path> is provided"))
 def user_generated_a_report(workflow_test, report_path):
     """report used in <report_path>"""
@@ -115,9 +135,12 @@ def the_user_creates_a_branch_to_add_a_new_chart_version(workflow_test):
     workflow_test.process_owners_file()
     workflow_test.process_chart(is_tarball=False)
     if workflow_test.secrets.bad_version:
-        workflow_test.update_chart_version_in_chart_yaml(workflow_test.secrets.bad_version)
+        workflow_test.update_chart_version_in_chart_yaml(
+            workflow_test.secrets.bad_version
+        )
     workflow_test.push_chart(is_tarball=False)
 
+
 @given(parsers.parse("chart source is used in <chart_path>"))
 def user_has_used_chart_src(workflow_test, chart_path):
     """chart source is used in <chart_path>."""
@@ -128,7 +151,10 @@ def user_has_used_chart_src(workflow_test, chart_path):
     workflow_test.process_owners_file()
     workflow_test.process_chart(is_tarball=False)
 
-@given(parsers.parse("a chart tarball is used in <chart_path> and report in <report_path>"))
+
+@given(
+    parsers.parse("a chart tarball is used in <chart_path> and report in <report_path>")
+)
 def user_has_created_a_chart_tarball_and_report(workflow_test, chart_path, report_path):
     """an error-free chart tarball is used in <chart_path> and report in <report_path>."""
     workflow_test.update_test_chart(chart_path)
     workflow_test.update_test_report(report_path)
@@ -140,27 +166,32 @@ def user_has_created_a_chart_tarball_and_report(workflow_test, chart_path, repor
     workflow_test.process_owners_file()
     workflow_test.process_chart(is_tarball=True)
 
+
 @given("README file is missing in the chart")
 def readme_file_is_missing(workflow_test):
     """README file is missing in the chart"""
     workflow_test.remove_readme_file()
 
+
 @given(parsers.parse("the report contains an <invalid_url>"))
 def sha_value_does_not_match(workflow_test, invalid_url):
     workflow_test.process_report(update_url=True, url=invalid_url)
 
+
 @given("user adds a non chart related file")
 def user_adds_a_non_chart_related_file(workflow_test):
     """user adds a non chart related file"""
     workflow_test.add_non_chart_related_file()
 
+
 @given(parsers.parse("the report contains an <error>"))
 def sha_value_does_not_match(workflow_test, error):
-    if error == 'sha_mismatch':
+    if error == "sha_mismatch":
         workflow_test.process_report(update_chart_sha=True)
     else:
         pytest.fail(f"This {error} handling is not implemented yet")
 
+
 ############### WHEN ####################
 @when("the user sends a pull request with the report")
 @when("the user sends a pull request with the chart")
@@ -176,47 +207,57 @@ def user_pushed_the_chart_and_created_pull_request_with_chart_src(workflow_test)
     workflow_test.push_chart(is_tarball=False)
     workflow_test.send_pull_request()
 
+
 @when("the user sends a pull request with both chart and non related file")
 def user_sends_pull_request_with_chart_and_non_related_file(workflow_test):
     """the user sends a pull request with both chart and non related file"""
     workflow_test.push_chart(is_tarball=False, add_non_chart_file=True)
     workflow_test.send_pull_request()
 
+
 @when("the user sends a pull request with the chart tar and report")
 def user_sends_pull_request_with_chart_tarball_and_report(workflow_test):
     """the user sends a pull request with the chart and report."""
     workflow_test.push_chart(is_tarball=True)
     workflow_test.send_pull_request()
 
+
 ################ THEN ################
 @then("the user sees the pull request is merged")
 def user_should_see_pull_request_getting_merged(workflow_test):
     """the user sees the pull request is merged."""
-    workflow_test.check_workflow_conclusion(expect_result='success')
+    workflow_test.check_workflow_conclusion(expect_result="success")
     workflow_test.check_pull_request_result(expect_merged=True)
     workflow_test.check_pull_request_labels()
 
+
 @then("the pull request is not merged")
 def the_pull_request_is_not_getting_merged(workflow_test):
     """the pull request is not merged"""
-    workflow_test.check_workflow_conclusion(expect_result='failure')
+    workflow_test.check_workflow_conclusion(expect_result="failure")
     workflow_test.check_pull_request_result(expect_merged=False)
 
+
 @then("the index.yaml file is updated with an entry for the submitted chart")
 def index_yaml_is_updated_with_new_entry(workflow_test):
     """the index.yaml file is updated with an entry for the submitted chart."""
     workflow_test.check_index_yaml()
 
-@then("the index.yaml file is updated with an entry for the submitted chart with correct providerType")
+
+@then(
+    "the index.yaml file is updated with an entry for the submitted chart with correct providerType"
+)
 def index_yaml_is_updated_with_new_entry_with_correct_provider_type(workflow_test):
     """the index.yaml file is updated with an entry for the submitted chart with correct providerType"""
     workflow_test.check_index_yaml(check_provider_type=True)
 
+
 @then("a release is published with corresponding report and chart tarball")
 def release_is_published(workflow_test):
     """a release is published with corresponding report and chart tarball."""
     workflow_test.check_release_result()
 
+
 @then(parsers.parse("user gets the <message> in the pull request comment"))
 def user_gets_the_message_in_the_pull_request_comment(workflow_test, message):
     """user gets the message in the pull request comment"""
diff --git a/tests/functional/step_defs/test_chart_src_with_report.py b/tests/functional/step_defs/test_chart_src_with_report.py
index 3c234d25..450d2bb4 100644
--- a/tests/functional/step_defs/test_chart_src_with_report.py
+++ b/tests/functional/step_defs/test_chart_src_with_report.py
@@ -9,20 +9,30 @@
 from functional.utils.chart_certification import ChartCertificationE2ETestSingle
 
+
 @pytest.fixture
 def workflow_test():
-    test_name = 'Test Chart Source With Report'
-    test_chart = 'tests/data/vault-0.17.0.tgz'
-    test_report = 'tests/data/report.yaml'
-    workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart, test_report=test_report)
+    test_name = "Test Chart Source With Report"
+    test_chart = "tests/data/vault-0.17.0.tgz"
+    test_report = "tests/data/report.yaml"
+    workflow_test = ChartCertificationE2ETestSingle(
+        test_name=test_name, test_chart=test_chart, test_report=test_report
+    )
     yield workflow_test
     workflow_test.cleanup()
 
-@scenario('../features/HC-07_report_and_chart_src.feature', "[HC-07-001] A partner or redhat associate submits an error-free chart source with report")
+@scenario(
+    "../features/HC-07_report_and_chart_src.feature",
+    "[HC-07-001] A partner or redhat associate submits an error-free chart source with report",
+)
 def test_partner_or_redhat_user_submits_chart_src_with_report():
     """A partner or redhat associate submits an error-free chart source with report."""
 
-@scenario('../features/HC-07_report_and_chart_src.feature', "[HC-07-002] A community user submits an error-free chart source with report")
+
+@scenario(
+    "../features/HC-07_report_and_chart_src.feature",
+    "[HC-07-002] A community user submits an error-free chart source with report",
+)
 def test_community_user_submits_chart_src_with_report():
     """A community user submits an error-free chart source with report"""
 
diff --git a/tests/functional/step_defs/test_chart_src_without_report.py b/tests/functional/step_defs/test_chart_src_without_report.py
index 0648024b..efa930a2 100644
--- a/tests/functional/step_defs/test_chart_src_without_report.py
+++ b/tests/functional/step_defs/test_chart_src_without_report.py
@@ -10,18 +10,29 @@
 from functional.utils.chart_certification import ChartCertificationE2ETestSingle
 
+
 @pytest.fixture
 def workflow_test():
-    test_name = 'Test Chart Source Without Report'
-    test_chart = 'tests/data/vault-0.17.0.tgz'
-    workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart)
+    test_name = "Test Chart Source Without Report"
+    test_chart = "tests/data/vault-0.17.0.tgz"
+    workflow_test = ChartCertificationE2ETestSingle(
+        test_name=test_name, test_chart=test_chart
+    )
     yield workflow_test
     workflow_test.cleanup()
 
-@scenario('../features/HC-01_chart_src_without_report.feature', "[HC-01-001] A partner or redhat associate submits an error-free chart source")
+
+@scenario( + "../features/HC-01_chart_src_without_report.feature", + "[HC-01-001] A partner or redhat associate submits an error-free chart source", +) def test_partner_or_redhat_user_submits_chart_src(): """A partner or redhat associate submits an error-free chart source.""" -@scenario('../features/HC-01_chart_src_without_report.feature', "[HC-01-002] A community user submits an error-free chart source without report") + +@scenario( + "../features/HC-01_chart_src_without_report.feature", + "[HC-01-002] A community user submits an error-free chart source without report", +) def test_community_user_submits_chart_src(): """A community user submits an error-free chart source without report""" diff --git a/tests/functional/step_defs/test_chart_tar_with_report.py b/tests/functional/step_defs/test_chart_tar_with_report.py index 57110abf..e19b2df0 100644 --- a/tests/functional/step_defs/test_chart_tar_with_report.py +++ b/tests/functional/step_defs/test_chart_tar_with_report.py @@ -9,21 +9,30 @@ from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Test Chart Tarball With Report' - test_chart = 'tests/data/vault-0.17.0.tgz' - test_report = 'tests/data/report.yaml' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart, test_report=test_report) + test_name = "Test Chart Tarball With Report" + test_chart = "tests/data/vault-0.17.0.tgz" + test_report = "tests/data/report.yaml" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart, test_report=test_report + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-08_report_and_chart_tar.feature', "[HC-08-001] A partner or redhat associate submits an error-free chart tarball with report") +@scenario( + "../features/HC-08_report_and_chart_tar.feature", + "[HC-08-001] A partner or redhat associate submits an error-free chart tarball with report", +) def test_partners_or_redhat_user_submits_chart_tarball_with_report(): """A partner or redhat associate submits an error-free chart tarball with report.""" -@scenario('../features/HC-08_report_and_chart_tar.feature', "[HC-08-002] A community user submits an error-free chart tarball with report") + +@scenario( + "../features/HC-08_report_and_chart_tar.feature", + "[HC-08-002] A community user submits an error-free chart tarball with report", +) def test_community_user_submits_chart_tarball_with_report(): """A community user submits an error-free chart tarball with report""" - diff --git a/tests/functional/step_defs/test_chart_tar_without_report.py b/tests/functional/step_defs/test_chart_tar_without_report.py index d5b00acc..abf8efe3 100644 --- a/tests/functional/step_defs/test_chart_tar_without_report.py +++ b/tests/functional/step_defs/test_chart_tar_without_report.py @@ -9,20 +9,29 @@ from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Test Chart Tarball Without Report' - test_chart = 'tests/data/vault-0.17.0.tgz' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart) + test_name = "Test Chart Tarball Without Report" + test_chart = "tests/data/vault-0.17.0.tgz" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-02_chart_tar_without_report.feature', "[HC-02-001] A partner or redhat associate 
submits an error-free chart tarball") +@scenario( + "../features/HC-02_chart_tar_without_report.feature", + "[HC-02-001] A partner or redhat associate submits an error-free chart tarball", +) def test_partner_or_redhat_user_submits_chart_tarball(): """A partner or redhat associate submits an error-free chart tarball.""" -@scenario('../features/HC-02_chart_tar_without_report.feature', "[HC-02-002] A community user submits an error-free chart tarball without report") + +@scenario( + "../features/HC-02_chart_tar_without_report.feature", + "[HC-02-002] A community user submits an error-free chart tarball without report", +) def test_community_user_submits_chart_tarball(): """A community user submits an error-free chart tarball without report""" - diff --git a/tests/functional/step_defs/test_chart_test_takes_more_than_30mins.py b/tests/functional/step_defs/test_chart_test_takes_more_than_30mins.py index 473d2628..a38356ca 100644 --- a/tests/functional/step_defs/test_chart_test_takes_more_than_30mins.py +++ b/tests/functional/step_defs/test_chart_test_takes_more_than_30mins.py @@ -2,18 +2,20 @@ """ Chart test takes longer time and exceeds default timeout Partners, redhat or community user submit charts which result in errors """ -import logging import datetime import pytest from pytest_bdd import scenario from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Chart test takes more than 30mins' - test_chart = 'tests/data/vault-test-timeout-0.17.0.tgz' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart) + test_name = "Chart test takes more than 30mins" + test_chart = "tests/data/vault-test-timeout-0.17.0.tgz" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart + ) start_time = datetime.datetime.now() yield workflow_test workflow_test.cleanup() @@ -24,10 +26,17 @@ def workflow_test(): pytest.fail(f"Timeout is not as expected: {total_diff_seconds}") -@scenario('../features/HC-16_chart_test_takes_more_than_30mins.feature', "[HC-16-001] A partner or community user submits chart that takes more than 30 mins") +@scenario( + "../features/HC-16_chart_test_takes_more_than_30mins.feature", + "[HC-16-001] A partner or community user submits chart that takes more than 30 mins", +) def test_partner_or_community_chart_test_takes_more_than_30mins(): - """ A partner or community submitted chart takes more than 30 mins""" + """A partner or community submitted chart takes more than 30 mins""" + -@scenario('../features/HC-16_chart_test_takes_more_than_30mins.feature', "[HC-16-002] A redhat associate submits a chart that takes more than 30 mins") +@scenario( + "../features/HC-16_chart_test_takes_more_than_30mins.feature", + "[HC-16-002] A redhat associate submits a chart that takes more than 30 mins", +) def test_redhat_chart_test_takes_more_than_30mins(): - """ A redhat submitted chart takes more than 30 mins""" \ No newline at end of file + """A redhat submitted chart takes more than 30 mins""" diff --git a/tests/functional/step_defs/test_chart_verifier_comes_back_with_failures.py b/tests/functional/step_defs/test_chart_verifier_comes_back_with_failures.py index 46a673bf..7529f0f6 100644 --- a/tests/functional/step_defs/test_chart_verifier_comes_back_with_failures.py +++ b/tests/functional/step_defs/test_chart_verifier_comes_back_with_failures.py @@ -8,19 +8,29 @@ from functional.utils.chart_certification import ChartCertificationE2ETestSingle + 
@pytest.fixture def workflow_test(): - test_name = 'Test Chart Submission Without Readme' - test_chart = 'tests/data/vault-0.17.0.tgz' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart) + test_name = "Test Chart Submission Without Readme" + test_chart = "tests/data/vault-0.17.0.tgz" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-03_chart_verifier_comes_back_with_failures.feature', "[HC-03-001] A partner or community user submits a chart which does not contain a readme file") + +@scenario( + "../features/HC-03_chart_verifier_comes_back_with_failures.feature", + "[HC-03-001] A partner or community user submits a chart which does not contain a readme file", +) def test_partner_or_community_user_submits_chart_without_readme(): """A partner or community user submits a chart which does not contain a readme file""" -@scenario('../features/HC-03_chart_verifier_comes_back_with_failures.feature', "[HC-03-002] A redhat user submits a chart which does not contain a readme file") + +@scenario( + "../features/HC-03_chart_verifier_comes_back_with_failures.feature", + "[HC-03-002] A redhat user submits a chart which does not contain a readme file", +) def test_redhat_user_submits_chart_without_readme(): """A redhat user submits a chart which does not contain a readme file""" - diff --git a/tests/functional/step_defs/test_invalid_url_in_the_report.py b/tests/functional/step_defs/test_invalid_url_in_the_report.py index 392a8822..d5691c5d 100644 --- a/tests/functional/step_defs/test_invalid_url_in_the_report.py +++ b/tests/functional/step_defs/test_invalid_url_in_the_report.py @@ -8,17 +8,21 @@ from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Invalid Chart URL' - test_report = 'tests/data/report.yaml' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_report=test_report) + test_name = "Invalid Chart URL" + test_report = "tests/data/report.yaml" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_report=test_report + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-04_invalid_url_in_the_report.feature', "[HC-04-001] A user submits a report with an invalid url") +@scenario( + "../features/HC-04_invalid_url_in_the_report.feature", + "[HC-04-001] A user submits a report with an invalid url", +) def test_report_submission_with_invalid_url(): """A user submits a report with an invalid url.""" - - diff --git a/tests/functional/step_defs/test_pr_includes_a_file_which_is_not_related.py b/tests/functional/step_defs/test_pr_includes_a_file_which_is_not_related.py index 371b99bb..cf64b378 100644 --- a/tests/functional/step_defs/test_pr_includes_a_file_which_is_not_related.py +++ b/tests/functional/step_defs/test_pr_includes_a_file_which_is_not_related.py @@ -8,15 +8,21 @@ from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Test PR Includes A Non Related File' - test_chart = 'tests/data/vault-0.17.0.tgz' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart) + test_name = "Test PR Includes A Non Related File" + test_chart = "tests/data/vault-0.17.0.tgz" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart + ) yield workflow_test 
     workflow_test.cleanup()
 
-@scenario('../features/HC-05_pr_includes_a_file_which_is_not_chart_related.feature', "[HC-05-001] A user submits a chart with non chart related file")
+
+@scenario(
+    "../features/HC-05_pr_includes_a_file_which_is_not_chart_related.feature",
+    "[HC-05-001] A user submits a chart with non chart related file",
+)
 def test_user_submits_chart_with_non_related_file():
     """A user submits a chart with non chart related file"""
-
diff --git a/tests/functional/step_defs/test_provider_delivery_control.py b/tests/functional/step_defs/test_provider_delivery_control.py
index 930bb8ba..383c8d1b 100644
--- a/tests/functional/step_defs/test_provider_delivery_control.py
+++ b/tests/functional/step_defs/test_provider_delivery_control.py
@@ -5,56 +5,86 @@
 error-free chart in tarball format with a report.
 """
 import pytest
-from pytest_bdd import (
-    scenario,
-    given,
-    parsers,
-    then
-)
+from pytest_bdd import scenario, given, parsers, then
 from functional.utils.chart_certification import ChartCertificationE2ETestSingle
 
+
 @pytest.fixture
 def workflow_test():
-    test_name = 'Test Porvider Delivery Control'
-    test_chart = 'tests/data/vault-0.17.0.tgz'
-    test_report = 'tests/data/report.yaml'
-    workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart, test_report=test_report)
+    test_name = "Test Provider Delivery Control"
+    test_chart = "tests/data/vault-0.17.0.tgz"
+    test_report = "tests/data/report.yaml"
+    workflow_test = ChartCertificationE2ETestSingle(
+        test_name=test_name, test_chart=test_chart, test_report=test_report
+    )
     yield workflow_test
     workflow_test.cleanup()
 
-@scenario('../features/HC-06_provider_delivery_control.feature', "[HC-06-001] A partner associate submits an error-free report with provider controlled delivery")
+@scenario(
+    "../features/HC-06_provider_delivery_control.feature",
+    "[HC-06-001] A partner associate submits an error-free report with provider controlled delivery",
+)
 def test_partners_submits_error_free_report_for_provider_controlled_delivery():
     """A partner submits an error-free report for provider controlled delivery."""
 
-@scenario('../features/HC-06_provider_delivery_control.feature', "[HC-06-002] A partner associate submits an error-free report and chart with provider controlled delivery")
+
+@scenario(
+    "../features/HC-06_provider_delivery_control.feature",
+    "[HC-06-002] A partner associate submits an error-free report and chart with provider controlled delivery",
+)
 def test_partners_submits_error_free_report_and_chart_for_provider_controlled_delivery():
     """A partner submits an error-free report and chart for provider controlled delivery."""
 
-@scenario('../features/HC-06_provider_delivery_control.feature', "[HC-06-003] A partner associate submits an error-free report with inconsistent provider controlled delivery setting")
+
+@scenario(
+    "../features/HC-06_provider_delivery_control.feature",
+    "[HC-06-003] A partner associate submits an error-free report with inconsistent provider controlled delivery setting",
+)
 def test_partners_submits_error_free_report_with_inconsistent_provider_controlled_delivery_settings():
     """A partner submits an error-free report with inconsistent settings for provider controlled delivery."""
 
-@given(parsers.parse("provider delivery control is set to <provider_control_owners> in the OWNERS file"))
-def provider_delivery_control_set_in_owners(workflow_test,provider_control_owners):
+
+@given(
+    parsers.parse(
+        "provider delivery control is set to <provider_control_owners> in the OWNERS file"
+    )
+)
+def provider_delivery_control_set_in_owners(workflow_test, provider_control_owners):
     if provider_control_owners == "true":
         print("[INFO] set provider delivery control_in owners file")
-        workflow_test.secrets.provider_delivery=True
+        workflow_test.secrets.provider_delivery = True
     else:
         print("[INFO] un-set provider delivery control_in owners file")
-        workflow_test.secrets.provider_delivery=False
+        workflow_test.secrets.provider_delivery = False
+
 
-@given(parsers.parse("provider delivery control is set to <provider_control_report> in the report"))
-def provider_delivery_control_set_in_report(workflow_test,provider_control_report):
+@given(
+    parsers.parse(
+        "provider delivery control is set to <provider_control_report> in the report"
+    )
+)
+def provider_delivery_control_set_in_report(workflow_test, provider_control_report):
     if provider_control_report == "true":
         print("[INFO] set provider delivery control_in report")
-        workflow_test.process_report(update_provider_delivery=True,provider_delivery=True)
+        workflow_test.process_report(
+            update_provider_delivery=True, provider_delivery=True
+        )
     else:
         print("[INFO] un-set provider delivery control_in report")
-        workflow_test.process_report(update_provider_delivery=True,provider_delivery=False)
+        workflow_test.process_report(
+            update_provider_delivery=True, provider_delivery=False
+        )
+
 
-@given(parsers.parse("provider delivery control is set to <provider_control_report> and a package digest is <package_digest_set> in the report"))
-def provider_delivery_control_and_package_digest_set_in_report(workflow_test,provider_control_report,package_digest_set=True):
+@given(
+    parsers.parse(
+        "provider delivery control is set to <provider_control_report> and a package digest is <package_digest_set> in the report"
+    )
+)
+def provider_delivery_control_and_package_digest_set_in_report(
+    workflow_test, provider_control_report, package_digest_set=True
+):
     if package_digest_set == "true":
         no_package_digest = False
     else:
@@ -62,12 +92,23 @@ def provider_delivery_control_and_package_digest_set_in_report(workflow_test,pro
 
     if provider_control_report == "true":
         print("[INFO] set provider delivery control_in report")
-        workflow_test.process_report(update_provider_delivery=True,provider_delivery=True,unset_package_digest=no_package_digest)
+        workflow_test.process_report(
+            update_provider_delivery=True,
+            provider_delivery=True,
+            unset_package_digest=no_package_digest,
+        )
     else:
         print("[INFO] un-set provider delivery control_in report")
-        workflow_test.process_report(update_provider_delivery=True,provider_delivery=False,unset_package_digest=no_package_digest)
+        workflow_test.process_report(
+            update_provider_delivery=True,
+            provider_delivery=False,
+            unset_package_digest=no_package_digest,
+        )
+
 
-@then(parsers.parse("the <index_file> is updated with an entry for the submitted chart"))
-def index_file_is_updated(workflow_test,index_file):
+@then(
+    parsers.parse("the <index_file> is updated with an entry for the submitted chart")
+)
+def index_file_is_updated(workflow_test, index_file):
     workflow_test.secrets.index_file = index_file
     workflow_test.check_index_yaml(True)
diff --git a/tests/functional/step_defs/test_report_in_json_format.py b/tests/functional/step_defs/test_report_in_json_format.py
index af9f388b..3bf59b64 100644
--- a/tests/functional/step_defs/test_report_in_json_format.py
+++ b/tests/functional/step_defs/test_report_in_json_format.py
@@ -8,16 +8,21 @@
 from functional.utils.chart_certification import ChartCertificationE2ETestSingle
 
+
 @pytest.fixture
 def workflow_test():
-    test_name = 'Test Report in Json Format'
-    test_report = 'tests/data/report.json'
-    workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_report=test_report)
+    test_name = "Test Report in Json Format"
+    test_report = "tests/data/report.json"
+    workflow_test = ChartCertificationE2ETestSingle(
+        test_name=test_name, test_report=test_report
+    )
     yield workflow_test
     workflow_test.cleanup()
 
-@scenario('../features/HC-09_report_in_json_format.feature', "[HC-09-001] An user submits an report in json format")
+@scenario(
+    "../features/HC-09_report_in_json_format.feature",
+    "[HC-09-001] An user submits an report in json format",
+)
 def an_user_submits_report_in_json_format():
     """An user submits an report in json format."""
-
diff --git a/tests/functional/step_defs/test_report_only_edited.py b/tests/functional/step_defs/test_report_only_edited.py
index 1b5c93ad..684eca9a 100644
--- a/tests/functional/step_defs/test_report_only_edited.py
+++ b/tests/functional/step_defs/test_report_only_edited.py
@@ -1,29 +1,37 @@
 import pytest
-from pytest_bdd import (
-    scenario,
-    given,
-    parsers
-)
+from pytest_bdd import scenario, given, parsers
 from functional.utils.chart_certification import ChartCertificationE2ETestSingle
 
+
 @pytest.fixture
 def workflow_test():
-    test_name = 'Test Edited Report Failures'
-    test_report = 'tests/data/report.yaml'
-    workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_report=test_report)
+    test_name = "Test Edited Report Failures"
+    test_report = "tests/data/report.yaml"
+    workflow_test = ChartCertificationE2ETestSingle(
+        test_name=test_name, test_report=test_report
+    )
     yield workflow_test
     workflow_test.cleanup()
 
-@scenario('../features/HC-10_report_only_edited.feature', "[HC-10-001] A partner or redhat associate submits an edited report")
+@scenario(
+    "../features/HC-10_report_only_edited.feature",
+    "[HC-10-001] A partner or redhat associate submits an edited report",
+)
 def test_partner_or_redhat_user_submits_edited_report():
     """A partner or redhat associate submits an edited report."""
 
-@given(parsers.parse("the report includes <tested> and <supported> OpenshiftVersion values and chart <kubeversion> value"))
-def report_includes_specified_versions(workflow_test,tested,supported,kubeversion):
-    workflow_test.process_report(update_versions=True,supported_versions=supported,tested_version=tested,kube_version=kubeversion)
-
-
-
+@given(
+    parsers.parse(
+        "the report includes <tested> and <supported> OpenshiftVersion values and chart <kubeversion> value"
+    )
+)
+def report_includes_specified_versions(workflow_test, tested, supported, kubeversion):
+    workflow_test.process_report(
+        update_versions=True,
+        supported_versions=supported,
+        tested_version=tested,
+        kube_version=kubeversion,
+    )
diff --git a/tests/functional/step_defs/test_report_only_no_errors.py b/tests/functional/step_defs/test_report_only_no_errors.py
index 8984f148..a77e5ae7 100644
--- a/tests/functional/step_defs/test_report_only_no_errors.py
+++ b/tests/functional/step_defs/test_report_only_no_errors.py
@@ -9,19 +9,29 @@
 from functional.utils.chart_certification import ChartCertificationE2ETestSingle
 
+
 @pytest.fixture
 def workflow_test():
-    test_name = 'Test Chart Report Only'
-    test_report = 'tests/data/report.yaml'
-    workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_report=test_report)
+    test_name = "Test Chart Report Only"
+    test_report = "tests/data/report.yaml"
+    workflow_test = ChartCertificationE2ETestSingle(
+        test_name=test_name, test_report=test_report
+    )
     yield workflow_test
     workflow_test.cleanup()
 
-@scenario('../features/HC-12_report_without_chart.feature', "[HC-12-001] A partner or redhat associate submits an error-free report")
+@scenario(
+
"../features/HC-12_report_without_chart.feature", + "[HC-12-001] A partner or redhat associate submits an error-free report", +) def test_partner_or_redhat_user_submits_report(): """A partner or redhat associate submits an error-free report.""" -@scenario('../features/HC-12_report_without_chart.feature', "[HC-12-002] A community user submits an error-free report") + +@scenario( + "../features/HC-12_report_without_chart.feature", + "[HC-12-002] A community user submits an error-free report", +) def test_community_user_submits_report(): """A community user submits an error-free report""" diff --git a/tests/functional/step_defs/test_report_with_missing_checks.py b/tests/functional/step_defs/test_report_with_missing_checks.py index 60c7e43d..5e3fa6b9 100644 --- a/tests/functional/step_defs/test_report_with_missing_checks.py +++ b/tests/functional/step_defs/test_report_with_missing_checks.py @@ -4,24 +4,26 @@ Partners, redhat and community users submits only report which does not include full set of checks """ import pytest -from pytest_bdd import ( - scenario, - given, - parsers -) +from pytest_bdd import scenario, given, parsers from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Report with missing checks' - test_report = 'tests/data/report.yaml' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_report=test_report) + test_name = "Report with missing checks" + test_report = "tests/data/report.yaml" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_report=test_report + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-11_report_with_missing_checks.feature', "[HC-11-001] A user submits a report with missing checks") +@scenario( + "../features/HC-11_report_with_missing_checks.feature", + "[HC-11-001] A user submits a report with missing checks", +) def test_report_submission_with_missing_checks(): """A user submits a report with missing checks.""" diff --git a/tests/functional/step_defs/test_sha_value_does_not_match.py b/tests/functional/step_defs/test_sha_value_does_not_match.py index f4fd332e..373dc227 100644 --- a/tests/functional/step_defs/test_sha_value_does_not_match.py +++ b/tests/functional/step_defs/test_sha_value_does_not_match.py @@ -8,17 +8,22 @@ from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'SHA Value Does Not Match' - test_chart = 'tests/data/vault-0.17.0.tgz' - test_report = 'tests/data/report.yaml' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart, test_report=test_report) + test_name = "SHA Value Does Not Match" + test_chart = "tests/data/vault-0.17.0.tgz" + test_report = "tests/data/report.yaml" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart, test_report=test_report + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-13_sha_value_does_not_match.feature', "[HC-13-001] A user submits a chart tarball with report") +@scenario( + "../features/HC-13_sha_value_does_not_match.feature", + "[HC-13-001] A user submits a chart tarball with report", +) def test_chart_submission_with_report(): """A user submits a chart tarball with report.""" - diff --git a/tests/functional/step_defs/test_smoke_scenarios.py b/tests/functional/step_defs/test_smoke_scenarios.py index 0ce8d278..12dbbc2a 100644 --- 
a/tests/functional/step_defs/test_smoke_scenarios.py +++ b/tests/functional/step_defs/test_smoke_scenarios.py @@ -4,107 +4,200 @@ from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Smoke Test' - test_chart = 'tests/data/vault-0.17.0.tgz' - test_report = 'tests/data/report.yaml' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart, test_report=test_report) + test_name = "Smoke Test" + test_chart = "tests/data/vault-0.17.0.tgz" + test_report = "tests/data/report.yaml" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart, test_report=test_report + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/smoke/chart_src_without_report.feature', "A partner or redhat associate submits an error-free chart source") + +@scenario( + "../features/smoke/chart_src_without_report.feature", + "A partner or redhat associate submits an error-free chart source", +) def test_partner_or_redhat_user_submits_chart_src(): """A partner or redhat associate submits an error-free chart source.""" -@scenario('../features/smoke/chart_tar_without_report.feature', "A partner or redhat associate submits an error-free chart tarball") + +@scenario( + "../features/smoke/chart_tar_without_report.feature", + "A partner or redhat associate submits an error-free chart tarball", +) def test_partner_or_redhat_user_submits_chart_tarball(): """A partner or redhat associate submits an error-free chart tarball.""" -@scenario('../features/smoke/report_and_chart_src.feature', "A partner or redhat associate submits an error-free chart source with report") + +@scenario( + "../features/smoke/report_and_chart_src.feature", + "A partner or redhat associate submits an error-free chart source with report", +) def test_partner_or_redhat_user_submits_chart_src_with_report(): """A partner or redhat associate submits an error-free chart source with report.""" -@scenario('../features/smoke/report_and_chart_src.feature', "A community user submits an error-free chart source with report") + +@scenario( + "../features/smoke/report_and_chart_src.feature", + "A community user submits an error-free chart source with report", +) def test_community_user_submits_chart_src_with_report(): """A community user submits an error-free chart source with report""" -@scenario('../features/smoke/report_and_chart_tar.feature', "A partner or redhat associate submits an error-free chart tarball with report") + +@scenario( + "../features/smoke/report_and_chart_tar.feature", + "A partner or redhat associate submits an error-free chart tarball with report", +) def test_partners_or_redhat_user_submits_chart_tarball_with_report(): """A partner or redhat associate submits an error-free chart tarball with report.""" -@scenario('../features/smoke/report_and_chart_tar.feature', "A community user submits an error-free chart tarball with report") + +@scenario( + "../features/smoke/report_and_chart_tar.feature", + "A community user submits an error-free chart tarball with report", +) def test_community_user_submits_chart_tarball_with_report(): """A community user submits an error-free chart tarball with report""" -@scenario('../features/smoke/report_without_chart.feature', "A partner or redhat associate submits an error-free report") + +@scenario( + "../features/smoke/report_without_chart.feature", + "A partner or redhat associate submits an error-free report", +) def 
test_partner_or_redhat_user_submits_report(): """A partner or redhat associate submits an error-free report.""" -@scenario('../features/smoke/report_without_chart.feature', "A community user submits an error-free report") + +@scenario( + "../features/smoke/report_without_chart.feature", + "A community user submits an error-free report", +) def test_community_user_submits_report(): """A community user submits an error-free report""" -@scenario('../features/smoke/chart_verifier_comes_back_with_failures.feature', "A partner or community user submits a chart which does not contain a readme file") + +@scenario( + "../features/smoke/chart_verifier_comes_back_with_failures.feature", + "A partner or community user submits a chart which does not contain a readme file", +) def test_partner_or_community_user_submits_chart_without_readme(): """A partner or community user submits a chart which does not contain a readme file""" -@scenario('../features/smoke/invalid_url_in_the_report.feature', "A user submits a report with an invalid url") + +@scenario( + "../features/smoke/invalid_url_in_the_report.feature", + "A user submits a report with an invalid url", +) def test_report_submission_with_invalid_url(): """A user submits a report with an invalid url.""" -@scenario('../features/smoke/pr_includes_a_file_which_is_not_chart_related.feature', "A user submits a chart with non chart related file") + +@scenario( + "../features/smoke/pr_includes_a_file_which_is_not_chart_related.feature", + "A user submits a chart with non chart related file", +) def test_user_submits_chart_with_non_related_file(): """A user submits a chart with non chart related file""" -@scenario('../features/smoke/report_only_edited.feature', "A partner or redhat associate submits an edited report") + +@scenario( + "../features/smoke/report_only_edited.feature", + "A partner or redhat associate submits an edited report", +) def test_partner_or_redhat_user_submits_edited_report(): """A partner or redhat associate submits an edited report.""" -@scenario('../features/smoke/report_with_missing_checks.feature', "A user submits a report with missing checks") + +@scenario( + "../features/smoke/report_with_missing_checks.feature", + "A user submits a report with missing checks", +) def test_report_submission_with_missing_checks(): """A user submits a report with missing checks.""" -@scenario('../features/smoke/user_submits_chart_with_errors.feature', "An unauthorized user submits a chart") + +@scenario( + "../features/smoke/user_submits_chart_with_errors.feature", + "An unauthorized user submits a chart", +) def test_chart_submission_by_unauthorized_user(): """An unauthorized user submits a chart""" -@scenario('../features/smoke/user_submits_chart_with_errors.feature', "An authorized user submits a chart with incorrect version") + +@scenario( + "../features/smoke/user_submits_chart_with_errors.feature", + "An authorized user submits a chart with incorrect version", +) def test_chart_submission_with_incorrect_version(): - """ An authorized user submits a chart with incorrect version """ + """An authorized user submits a chart with incorrect version""" + -@scenario('../features/smoke/provider_delivery_control.feature', "A partner associate submits an error-free report with provider controlled delivery") +@scenario( + "../features/smoke/provider_delivery_control.feature", + "A partner associate submits an error-free report with provider controlled delivery", +) def test_partners_submits_error_free_report_for_provider_controlled_delivery(): """A 
partner submits an error-free report.""" + @given(parsers.parse("the report has a missing <check>")) def report_has_a_check_missing(workflow_test, check): workflow_test.process_report(missing_check=check) -@given(parsers.parse("the report includes <tested> and <supported> OpenshiftVersion values and chart <kubeversion> value")) -def report_includes_specified_versions(workflow_test,tested,supported,kubeversion): - workflow_test.process_report(update_versions=True,supported_versions=supported,tested_version=tested,kube_version=kubeversion) -@given(parsers.parse("provider delivery control is set to <provider_control_owners> in the OWNERS file")) -def provider_delivery_control_set_in_owners(workflow_test,provider_control_owners): +@given( + parsers.parse( + "the report includes <tested> and <supported> OpenshiftVersion values and chart <kubeversion> value" + ) +) +def report_includes_specified_versions(workflow_test, tested, supported, kubeversion): + workflow_test.process_report( + update_versions=True, + supported_versions=supported, + tested_version=tested, + kube_version=kubeversion, + ) + + +@given( + parsers.parse( + "provider delivery control is set to <provider_control_owners> in the OWNERS file" + ) +) +def provider_delivery_control_set_in_owners(workflow_test, provider_control_owners): if provider_control_owners == "true": print("[INFO] set provider delivery control in owners file") - workflow_test.secrets.provider_delivery=True + workflow_test.secrets.provider_delivery = True else: print("[INFO] un-set provider delivery control in owners file") - workflow_test.secrets.provider_delivery=False + workflow_test.secrets.provider_delivery = False + -@given(parsers.parse("provider delivery control is set to <provider_control_report> in the report")) -def provider_delivery_control_set_in_report(workflow_test,provider_control_report): +@given( + parsers.parse( + "provider delivery control is set to <provider_control_report> in the report" + ) +) +def provider_delivery_control_set_in_report(workflow_test, provider_control_report): if provider_control_report == "true": print("[INFO] set provider delivery control in report") - workflow_test.process_report(update_provider_delivery=True,provider_delivery=True) + workflow_test.process_report( + update_provider_delivery=True, provider_delivery=True + ) else: print("[INFO] un-set provider delivery control in report") - workflow_test.process_report(update_provider_delivery=True,provider_delivery=False) - -@then(parsers.parse("the <index_file> is updated with an entry for the submitted chart")) -def index_file_is_updated(workflow_test,index_file): - workflow_test.secrets.index_file = index_file + workflow_test.process_report( + update_provider_delivery=True, provider_delivery=False + ) +@then( + parsers.parse("the <index_file> is updated with an entry for the submitted chart") +) +def index_file_is_updated(workflow_test, index_file): + workflow_test.secrets.index_file = index_file
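The angle-bracket tokens restored in the step patterns above are Scenario Outline placeholders: with the pytest-bdd idiom used in this suite, the pattern is matched against the raw step text and the value from the Examples table arrives as the like-named function argument (newer pytest-bdd releases substitute the values into the step text before matching, so the exact mechanics depend on the pinned version). A sketch against a hypothetical feature file:

    # Feature file (hypothetical):
    #   Scenario Outline: A user submits a report with missing checks
    #     Given the report has a missing <check>
    #     Examples:
    #       | check              |
    #       | v1.0/chart-testing |
    from pytest_bdd import given, parsers

    @given(parsers.parse("the report has a missing <check>"))
    def report_has_a_check_missing(workflow_test, check):
        # For the example row above, check == "v1.0/chart-testing".
        workflow_test.process_report(missing_check=check)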
diff --git a/tests/functional/step_defs/test_submitted_charts.py b/tests/functional/step_defs/test_submitted_charts.py index c3d8254e..3e6aadd0 100644 --- a/tests/functional/step_defs/test_submitted_charts.py +++ b/tests/functional/step_defs/test_submitted_charts.py @@ -17,13 +17,18 @@ ) from functional.utils.chart_certification import ChartCertificationE2ETestMultiple + @pytest.fixture def workflow_test(): workflow_test = ChartCertificationE2ETestMultiple() yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-15_check_submitted_charts.feature', "[HC-15-001] A new Openshift or chart-verifier version is specified either by a cron job or manually") + +@scenario( + "../features/HC-15_check_submitted_charts.feature", + "[HC-15-001] A new Openshift or chart-verifier version is specified either by a cron job or manually", +) def test_submitted_charts(): """A new Openshift or chart-verifier version is specified either by a cron job or manually.""" diff --git a/tests/functional/step_defs/test_user_submits_chart_with_errors.py b/tests/functional/step_defs/test_user_submits_chart_with_errors.py index 529f3feb..4d7d4cf3 100644 --- a/tests/functional/step_defs/test_user_submits_chart_with_errors.py +++ b/tests/functional/step_defs/test_user_submits_chart_with_errors.py @@ -2,26 +2,34 @@ """ Chart submission with errors Partners, redhat or community users submit charts which result in errors """ -import logging import pytest from pytest_bdd import scenario from functional.utils.chart_certification import ChartCertificationE2ETestSingle + @pytest.fixture def workflow_test(): - test_name = 'Chart Submission with Errors' - test_chart = 'tests/data/vault-0.17.0.tgz' - workflow_test = ChartCertificationE2ETestSingle(test_name=test_name, test_chart=test_chart) + test_name = "Chart Submission with Errors" + test_chart = "tests/data/vault-0.17.0.tgz" + workflow_test = ChartCertificationE2ETestSingle( + test_name=test_name, test_chart=test_chart + ) yield workflow_test workflow_test.cleanup() -@scenario('../features/HC-14_user_submits_chart_with_errors.feature', "[HC-14-001] An unauthorized user submits a chart") +@scenario( + "../features/HC-14_user_submits_chart_with_errors.feature", + "[HC-14-001] An unauthorized user submits a chart", +) def test_chart_submission_by_unauthorized_user(): """An unauthorized user submits a chart""" -@scenario('../features/HC-14_user_submits_chart_with_errors.feature', "[HC-14-002] An authorized user submits a chart with incorrect version") -def test_chart_submission_with_incorrect_version(): - """ An authorized user submits a chart with incorrect version """ +@scenario( + "../features/HC-14_user_submits_chart_with_errors.feature", + "[HC-14-002] An authorized user submits a chart with incorrect version", +) +def test_chart_submission_with_incorrect_version(): + """An authorized user submits a chart with incorrect version""" diff --git a/tests/functional/utils/chart.py b/tests/functional/utils/chart.py index 66e24a61..c5656721 100644 --- a/tests/functional/utils/chart.py +++ b/tests/functional/utils/chart.py @@ -7,6 +7,7 @@ import yaml import shutil + def get_name_and_version_from_report(path): """ Parameters: path (str): path to the report file Returns: str: chart name str: chart version """ - with open(path, 'r') as fd: + with open(path, "r") as fd: try: report = yaml.safe_load(fd) except yaml.YAMLError as err: pytest.fail(f"error parsing '{path}': {err}") - chart = report['metadata']['chart'] - return chart['name'], chart['version'] + chart = report["metadata"]["chart"] + return chart["name"], chart["version"] def get_name_and_version_from_chart_tar(path): """ Parameters: path (str): path to the chart tar file Returns: str: chart name str: chart version """ tar = tarfile.open(path) for member in tar.getmembers(): - if member.name.split('/')[-1] == 'Chart.yaml': + if member.name.split("/")[-1] == "Chart.yaml": chart = tar.extractfile(member) if chart is not None: content = chart.read() try: chart_yaml = yaml.safe_load(content) - return chart_yaml['name'], chart_yaml['version'] + return chart_yaml["name"], chart_yaml["version"] except yaml.YAMLError as err: pytest.fail(f"error parsing '{path}': {err}") else:
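A usage sketch for the two helpers above, against the vault tarball the fixtures already reference (the expected values assume the tarball's Chart.yaml matches its filename):

    from functional.utils.chart import (
        get_name_and_version_from_chart_tar,
        get_name_and_version_from_report,
    )

    # From a verifier report: reads report["metadata"]["chart"].
    name, version = get_name_and_version_from_report("tests/data/report.yaml")
    # From a chart tarball: reads the Chart.yaml packed inside the archive.
    name, version = get_name_and_version_from_chart_tar("tests/data/vault-0.17.0.tgz")
    # -> ("vault", "0.17.0")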
@@ -58,13 +59,14 @@ def get_name_and_version_from_chart_src(path): """ Parameters: path (str): path to the chart src Returns: str: chart name str: chart version """ - chart_path = os.path.join(path, 'Chart.yaml') - with open(chart_path, 'r') as fd: + chart_path = os.path.join(path, "Chart.yaml") + with open(chart_path, "r") as fd: try: chart_yaml = yaml.safe_load(fd) except yaml.YAMLError as err: pytest.fail(f"error parsing '{path}': {err}") - return chart_yaml['name'], chart_yaml['version'] + return chart_yaml["name"], chart_yaml["version"] + def extract_chart_tgz(src, dst, secrets, logger): """Extracts the chart tgz file into the target location under 'charts/' for PR submission tests Parameters: src (str): path to the test chart tgz dst (str): path to the extract destination, e.g. 'charts/partners/hashicorp/vault' """ try: logger.info(f"Remove existing local '{dst}/src'") - shutil.rmtree(f'{dst}/src') + shutil.rmtree(f"{dst}/src") except FileNotFoundError: logger.info(f"'{dst}/src' does not exist") finally: - with tarfile.open(src, 'r') as fd: + with tarfile.open(src, "r") as fd: fd.extractall(dst) - os.rename(f'{dst}/{secrets.chart_name}', f'{dst}/src') + os.rename(f"{dst}/{secrets.chart_name}", f"{dst}/src") + def get_all_charts(charts_path: str, vendor_types: str) -> list: # TODO: Support `community` as vendor_type. """ Parameters: charts_path (str): path to the `charts/` directory vendor_types (str): vendor type to look for, any combination of `partner`, `redhat`, separated by commas, or `all` Returns: list: list of (vendor_type, vendor, chart_name, chart_version) tuples """ ret = [] # Pre-process vendor types - vendor_types = vendor_types.replace('partner', 'partners') - vendor_types = [vt.strip() for vt in vendor_types.split(',')] - vendor_types = list( - {'partners', 'redhat', 'all'}.intersection(set(vendor_types))) - vendor_types = ['partners', - 'redhat'] if 'all' in vendor_types else vendor_types + vendor_types = vendor_types.replace("partner", "partners") + vendor_types = [vt.strip() for vt in vendor_types.split(",")] + vendor_types = list({"partners", "redhat", "all"}.intersection(set(vendor_types))) + vendor_types = ["partners", "redhat"] if "all" in vendor_types else vendor_types # Iterate through `charts/` to find chart submission with src or tgz for vt in vendor_types: - charts_path_vt = f'{charts_path}/{vt}' - vendor_names = [name for name in os.listdir( - charts_path_vt) if os.path.isdir(f'{charts_path_vt}/{name}')] + charts_path_vt = f"{charts_path}/{vt}" + vendor_names = [ + name + for name in os.listdir(charts_path_vt) + if os.path.isdir(f"{charts_path_vt}/{name}") + ] for vn in vendor_names: - charts_path_vt_vn = f'{charts_path_vt}/{vn}' - chart_names = [name for name in os.listdir( - charts_path_vt_vn) if os.path.isdir(f'{charts_path_vt_vn}/{name}')] + charts_path_vt_vn = f"{charts_path_vt}/{vn}" + chart_names = [ + name + for name in os.listdir(charts_path_vt_vn) + if os.path.isdir(f"{charts_path_vt_vn}/{name}") + ] for cn in chart_names: - charts_path_vt_vn_cn = f'{charts_path_vt_vn}/{cn}' - file_names = [name for name in os.listdir( - charts_path_vt_vn_cn)] - if 'OWNERS' not in file_names: + charts_path_vt_vn_cn = f"{charts_path_vt_vn}/{cn}" + file_names = [name for name in os.listdir(charts_path_vt_vn_cn)] + if "OWNERS" not in file_names: continue - chart_versions = [name for name in os.listdir( - charts_path_vt_vn_cn) if os.path.isdir(f'{charts_path_vt_vn_cn}/{name}')] + chart_versions = [ + name + for name in os.listdir(charts_path_vt_vn_cn) + if os.path.isdir(f"{charts_path_vt_vn_cn}/{name}") + ] # Only interested in the latest chart version if len(chart_versions) == 0: continue cv = max(chart_versions) - charts_path_vt_vn_cn_cv = f'{charts_path_vt_vn_cn}/{cv}' - file_names = [name for name in os.listdir( - charts_path_vt_vn_cn_cv)] - if 'report.yaml' not in file_names and (f'{cn}-{cv}.tgz' in file_names or 'src' in file_names): + charts_path_vt_vn_cn_cv = f"{charts_path_vt_vn_cn}/{cv}" + file_names = [name for name in os.listdir(charts_path_vt_vn_cn_cv)] + if "report.yaml" not in file_names and ( + f"{cn}-{cv}.tgz" in file_names or "src" in file_names + ): ret.append((vt, vn, cn, cv)) return ret
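get_all_charts walks charts/&lt;vendor_type&gt;/&lt;vendor&gt;/&lt;chart&gt;/&lt;version&gt; and returns a tuple for the newest version of each chart that has an OWNERS file and a tarball or src/ directory but no report.yaml. A sketch against a hypothetical tree; note that max() compares version strings lexicographically, which diverges from semver ordering once versions such as 0.9.0 and 0.10.0 coexist:

    from functional.utils.chart import get_all_charts

    # Given a tree like:
    #   charts/partners/hashicorp/vault/OWNERS
    #   charts/partners/hashicorp/vault/0.17.0/vault-0.17.0.tgz
    charts = get_all_charts("charts", "partner,redhat")
    # -> [("partners", "hashicorp", "vault", "0.17.0")]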
f"{charts_path_vt_vn_cn}/{cv}" + file_names = [name for name in os.listdir(charts_path_vt_vn_cn_cv)] + if "report.yaml" not in file_names and ( + f"{cn}-{cv}.tgz" in file_names or "src" in file_names + ): ret.append((vt, vn, cn, cv)) return ret diff --git a/tests/functional/utils/chart_certification.py b/tests/functional/utils/chart_certification.py index b9b4e424..5d52c035 100644 --- a/tests/functional/utils/chart_certification.py +++ b/tests/functional/utils/chart_certification.py @@ -24,6 +24,7 @@ from functional.utils.setttings import * from functional.utils.chart import * + @dataclass class ChartCertificationE2ETest: owners_file_content: str = """\ @@ -70,97 +71,140 @@ def get_bot_name_and_token(self): raise Exception("BOT_NAME set but BOT_TOKEN not specified") return bot_name, bot_token - def remove_chart(self, chart_directory, chart_version, remote_repo, base_branch, bot_token): + def remove_chart( + self, chart_directory, chart_version, remote_repo, base_branch, bot_token + ): # Remove chart files from base branch logging.info( - f"Remove {chart_directory}/{chart_version} from {remote_repo}:{base_branch}") + f"Remove {chart_directory}/{chart_version} from {remote_repo}:{base_branch}" + ) try: - self.temp_repo.git.rm('-rf', '--cached', f'{chart_directory}/{chart_version}') - self.temp_repo.git.commit( - '-m', f'Remove {chart_directory}/{chart_version}') - self.temp_repo.git.push(f'https://x-access-token:{bot_token}@github.com/{remote_repo}', - f'HEAD:refs/heads/{base_branch}') + self.temp_repo.git.rm( + "-rf", "--cached", f"{chart_directory}/{chart_version}" + ) + self.temp_repo.git.commit("-m", f"Remove {chart_directory}/{chart_version}") + self.temp_repo.git.push( + f"https://x-access-token:{bot_token}@github.com/{remote_repo}", + f"HEAD:refs/heads/{base_branch}", + ) except git.exc.GitCommandError: logging.info( - f"{chart_directory}/{chart_version} not exist on {remote_repo}:{base_branch}") + f"{chart_directory}/{chart_version} not exist on {remote_repo}:{base_branch}" + ) def remove_owners_file(self, chart_directory, remote_repo, base_branch, bot_token): # Remove the OWNERS file from base branch logging.info( - f"Remove {chart_directory}/OWNERS from {remote_repo}:{base_branch}") + f"Remove {chart_directory}/OWNERS from {remote_repo}:{base_branch}" + ) try: - self.temp_repo.git.rm('-rf', '--cached', f'{chart_directory}/OWNERS') - self.temp_repo.git.commit( - '-m', f'Remove {chart_directory}/OWNERS') - self.temp_repo.git.push(f'https://x-access-token:{bot_token}@github.com/{remote_repo}', - f'HEAD:refs/heads/{base_branch}') + self.temp_repo.git.rm("-rf", "--cached", f"{chart_directory}/OWNERS") + self.temp_repo.git.commit("-m", f"Remove {chart_directory}/OWNERS") + self.temp_repo.git.push( + f"https://x-access-token:{bot_token}@github.com/{remote_repo}", + f"HEAD:refs/heads/{base_branch}", + ) except git.exc.GitCommandError: logging.info( - f"{chart_directory}/OWNERS not exist on {remote_repo}:{base_branch}") + f"{chart_directory}/OWNERS not exist on {remote_repo}:{base_branch}" + ) def create_test_gh_pages_branch(self, remote_repo, base_branch, bot_token): # Get SHA from 'dev-gh-pages' branch logging.info( - f"Create '{remote_repo}:{base_branch}-gh-pages' from '{remote_repo}:dev-gh-pages'") + f"Create '{remote_repo}:{base_branch}-gh-pages' from '{remote_repo}:dev-gh-pages'" + ) r = github_api( - 'get', f'repos/{remote_repo}/git/ref/heads/dev-gh-pages', bot_token) + "get", f"repos/{remote_repo}/git/ref/heads/dev-gh-pages", bot_token + ) j = json.loads(r.text) - sha = 
j['object']['sha'] + sha = j["object"]["sha"] # Create a new gh-pages branch for testing - data = {'ref': f'refs/heads/{base_branch}-gh-pages', 'sha': sha} - r = github_api( - 'post', f'repos/{remote_repo}/git/refs', bot_token, json=data) + data = {"ref": f"refs/heads/{base_branch}-gh-pages", "sha": sha} + r = github_api("post", f"repos/{remote_repo}/git/refs", bot_token, json=data) - logging.info(f'gh-pages branch created: {base_branch}-gh-pages') + logging.info(f"gh-pages branch created: {base_branch}-gh-pages") def setup_git_context(self, repo: git.Repo): - self.set_git_username_email(repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL) - if os.environ.get('WORKFLOW_DEVELOPMENT'): + self.set_git_username_email( + repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL + ) + if os.environ.get("WORKFLOW_DEVELOPMENT"): logging.info("Workflow development enabled") repo.git.add(A=True) - repo.git.commit('-m', 'Checkpoint') + repo.git.commit("-m", "Checkpoint") def send_pull_request(self, remote_repo, base_branch, pr_branch, bot_token): - data = {'head': pr_branch, 'base': base_branch, - 'title': base_branch, 'body': os.environ.get('PR_BODY')} - - logging.info( - f"Create PR from '{remote_repo}:{pr_branch}'") - r = github_api( - 'post', f'repos/{remote_repo}/pulls', bot_token, json=data) + data = { + "head": pr_branch, + "base": base_branch, + "title": base_branch, + "body": os.environ.get("PR_BODY"), + } + + logging.info(f"Create PR from '{remote_repo}:{pr_branch}'") + r = github_api("post", f"repos/{remote_repo}/pulls", bot_token, json=data) j = json.loads(r.text) - if not 'number' in j: + if "number" not in j: pytest.fail(f"error sending pull request, response was: {r.text}") - return j['number'] - - def create_and_push_owners_file(self, chart_directory, base_branch, vendor_name, vendor_type, chart_name, provider_delivery=False): + return j["number"] + + def create_and_push_owners_file( + self, + chart_directory, + base_branch, + vendor_name, + vendor_type, + chart_name, + provider_delivery=False, + ): with SetDirectory(Path(self.temp_dir.name)): # Create the OWNERS file from the string template - values = {'bot_name': self.secrets.bot_name, - 'vendor': vendor_name, 'chart_name': chart_name, - "provider_delivery" : provider_delivery} + values = { + "bot_name": self.secrets.bot_name, + "vendor": vendor_name, + "chart_name": chart_name, + "provider_delivery": provider_delivery, + } content = Template(self.secrets.owners_file_content).substitute(values) - with open(f'{chart_directory}/OWNERS', 'w') as fd: + with open(f"{chart_directory}/OWNERS", "w") as fd: fd.write(content) # Push OWNERS file to the test_repo logging.info( - f"Push OWNERS file to '{self.secrets.test_repo}:{base_branch}'") - self.temp_repo.git.add(f'{chart_directory}/OWNERS') + f"Push OWNERS file to '{self.secrets.test_repo}:{base_branch}'" + ) + self.temp_repo.git.add(f"{chart_directory}/OWNERS") self.temp_repo.git.commit( - '-m', f"Add {vendor_type} {vendor_name} {chart_name} OWNERS file") - self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}', - f'HEAD:refs/heads/{base_branch}', '-f') - - def check_index_yaml(self,base_branch, vendor, chart_name, chart_version, index_file="index.yaml", check_provider_type=False, logger=pytest.fail): + "-m", f"Add {vendor_type} {vendor_name} {chart_name} OWNERS file" + ) + self.temp_repo.git.push( + f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}", + f"HEAD:refs/heads/{base_branch}", + "-f", + ) + 
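create_and_push_owners_file renders the OWNERS file from the string.Template held in owners_file_content (the template body itself is elided from this hunk). A minimal sketch of the same substitution with a made-up template:

    from string import Template

    # Hypothetical template; the real one is the owners_file_content class attribute.
    owners_tmpl = Template(
        "chart:\n"
        "  name: ${chart_name}\n"
        "vendor:\n"
        "  label: ${vendor}\n"
        "providerDelivery: ${provider_delivery}\n"
        "users:\n"
        "  - githubUsername: ${bot_name}\n"
    )
    content = owners_tmpl.substitute(
        bot_name="example-bot",
        vendor="acme",
        chart_name="sample-chart",
        provider_delivery=False,  # substituted as the string "False"
    )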
+ def check_index_yaml( + self, + base_branch, + vendor, + chart_name, + chart_version, + index_file="index.yaml", + check_provider_type=False, + logger=pytest.fail, + ): old_branch = self.repo.active_branch.name - self.repo.git.fetch(f'https://github.com/{self.secrets.test_repo}.git', - '{0}:{0}'.format(f'{base_branch}-gh-pages'), '-f') + self.repo.git.fetch( + f"https://github.com/{self.secrets.test_repo}.git", + "{0}:{0}".format(f"{base_branch}-gh-pages"), + "-f", + ) - self.repo.git.checkout(f'{base_branch}-gh-pages') + self.repo.git.checkout(f"{base_branch}-gh-pages") - with open(index_file, 'r') as fd: + with open(index_file, "r") as fd: try: index = yaml.safe_load(fd) except yaml.YAMLError as err: @@ -169,42 +213,47 @@ def check_index_yaml(self,base_branch, vendor, chart_name, chart_version, index_ if index: entry = f"{vendor}-{chart_name}" - if "entries" not in index or entry not in index['entries']: + if "entries" not in index or entry not in index["entries"]: logger(f"{entry} not added in entries to {index_file}") logger(f"Index.yaml entries: {index['entries']}") return False - version_list = [release['version'] for release in index['entries'][entry]] + version_list = [release["version"] for release in index["entries"][entry]] if chart_version not in version_list: logger(f"{chart_version} not added to {index_file}") logger(f"Index.yaml entry content: {index['entries'][entry]}") return False - #This check is applicable for charts submitted in redhat path when one of the chart-verifier check fails - #Check whether providerType annotations is community in index.yaml when vendor_type is redhat - if check_provider_type and self.secrets.vendor_type == 'redhat': - provider_type_in_index_yaml = index['entries'][entry][0]['annotations']['charts.openshift.io/providerType'] - if provider_type_in_index_yaml != 'community': - logger(f"{provider_type_in_index_yaml} is not correct as providerType in index.yaml") - + # This check is applicable for charts submitted in redhat path when one of the chart-verifier check fails + # Check whether providerType annotations is community in index.yaml when vendor_type is redhat + if check_provider_type and self.secrets.vendor_type == "redhat": + provider_type_in_index_yaml = index["entries"][entry][0]["annotations"][ + "charts.openshift.io/providerType" + ] + if provider_type_in_index_yaml != "community": + logger( + f"{provider_type_in_index_yaml} is not correct as providerType in index.yaml" + ) logging.info("Index updated correctly, cleaning up local branch") self.repo.git.checkout(old_branch) - self.repo.git.branch('-D', f'{base_branch}-gh-pages') + self.repo.git.branch("-D", f"{base_branch}-gh-pages") return True else: return False - def check_release_result(self, vendor, chart_name, chart_version, chart_tgz, logger=pytest.fail): - expected_tag = f'{vendor}-{chart_name}-{chart_version}' + def check_release_result( + self, vendor, chart_name, chart_version, chart_tgz, logger=pytest.fail + ): + expected_tag = f"{vendor}-{chart_name}-{chart_version}" try: release = get_release_by_tag(self.secrets, expected_tag) logging.info(f"Released '{expected_tag}' successfully") - expected_chart_asset = f'{vendor}-{chart_tgz}' + expected_chart_asset = f"{vendor}-{chart_tgz}" required_assets = [expected_chart_asset] logging.info(f"Check '{required_assets}' is in release assets") - release_id = release['id'] + release_id = release["id"] get_release_assets(self.secrets, release_id, required_assets) return True except Exception as e: @@ -213,33 +262,49 @@ def 
check_release_result(self, vendor, chart_name, chart_version, chart_tgz, log finally: logging.info(f"Delete release '{expected_tag}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/releases/{release_id}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/releases/{release_id}", + self.secrets.bot_token, + ) logging.info(f"Delete release tag '{expected_tag}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}", + self.secrets.bot_token, + ) # expect_result: a string representation of expected result, e.g. 'success' - def check_workflow_conclusion(self, pr_number, expect_result: str, logger=pytest.fail): + def check_workflow_conclusion( + self, pr_number, expect_result: str, logger=pytest.fail + ): try: # Check workflow conclusion run_id = get_run_id(self.secrets, pr_number) conclusion = get_run_result(self.secrets, run_id) if conclusion == expect_result: - logging.info(f"PR{pr_number} Workflow run was '{expect_result}' which is expected") + logging.info( + f"PR{pr_number} Workflow run was '{expect_result}' which is expected" + ) else: logger( - f"PR{pr_number if pr_number else self.secrets.pr_number} Workflow run was '{conclusion}' which is unexpected, run id: {run_id}") + f"PR{pr_number if pr_number else self.secrets.pr_number} Workflow run was '{conclusion}' which is unexpected, run id: {run_id}" + ) return run_id, conclusion except Exception as e: logger(e) return None, None # expect_merged: boolean representing whether the PR should be merged - def check_pull_request_result(self, pr_number, expect_merged: bool, logger=pytest.fail): + def check_pull_request_result( + self, pr_number, expect_merged: bool, logger=pytest.fail + ): # Check if PR merged r = github_api( - 'get', f'repos/{self.secrets.test_repo}/pulls/{pr_number}/merge', self.secrets.bot_token) + "get", + f"repos/{self.secrets.test_repo}/pulls/{pr_number}/merge", + self.secrets.bot_token, + ) logging.info(f"PR{pr_number} result status_code : {r.status_code}") if r.status_code == 204 and expect_merged: logging.info(f"PR{pr_number} merged successfully as expected") @@ -257,53 +322,66 @@ def check_pull_request_result(self, pr_number, expect_merged: bool, logger=pytes logger(f"PR{pr_number} Got unexpected status code from PR: {r.status_code}") return False
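The 204/404 handling above leans on GitHub's "check if a pull request has been merged" endpoint, which returns 204 when the PR is merged and 404 when it is not. A standalone sketch with requests (owner, repo, PR number, and token are placeholders):

    import requests

    r = requests.get(
        "https://api.github.com/repos/OWNER/REPO/pulls/1/merge",
        headers={"Authorization": "Bearer BOT_TOKEN"},
    )
    merged = r.status_code == 204  # 404: the PR exists but has not been merged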
- def check_pull_request_labels(self,pr_number,logger=pytest.fail): + def check_pull_request_labels(self, pr_number, logger=pytest.fail): r = github_api( - 'get', f'repos/{self.secrets.test_repo}/issues/{pr_number}/labels', self.secrets.bot_token) + "get", + f"repos/{self.secrets.test_repo}/issues/{pr_number}/labels", + self.secrets.bot_token, + ) labels = json.loads(r.text) authorized_request = False content_ok = False for label in labels: logging.info(f"PR{pr_number} found label {label['name']}") - if label['name'] == "authorized-request": + if label["name"] == "authorized-request": authorized_request = True - if label['name'] == "content-ok": + if label["name"] == "content-ok": content_ok = True - if authorized_request and content_ok: - logging.info(f"PR{pr_number} authorized request and content-ok labels were found as expected") + logging.info( + f"PR{pr_number} authorized request and content-ok labels were found as expected" + ) return True else: - logger(f"PR{pr_number} authorized request and/or content-ok labels were not found as expected") + logger( + f"PR{pr_number} authorized request and/or content-ok labels were not found as expected" + ) return False - def cleanup_release(self, expected_tag): """Cleanup the release and release tag. Releases might be left behind if check_index_yaml() ran before check_release_result() and fails the test. """ r = github_api( - 'get', f'repos/{self.secrets.test_repo}/releases', self.secrets.bot_token) + "get", f"repos/{self.secrets.test_repo}/releases", self.secrets.bot_token + ) releases = json.loads(r.text) for release in releases: - if release['tag_name'] == expected_tag: - release_id = release['id'] + if release["tag_name"] == expected_tag: + release_id = release["id"] logging.info(f"Delete release '{expected_tag}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/releases/{release_id}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/releases/{release_id}", + self.secrets.bot_token, + ) logging.info(f"Delete release tag '{expected_tag}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/tags/{expected_tag}", + self.secrets.bot_token, + ) + @dataclass class ChartCertificationE2ETestSingle(ChartCertificationE2ETest): - test_name: str = '' # Meaningful test name for this test, displayed in PR title - test_chart: str = '' - test_report: str = '' - chart_directory: str = '' + test_name: str = "" # Meaningful test name for this test, displayed in PR title + test_chart: str = "" + test_report: str = "" + chart_directory: str = "" secrets: E2ETestSecretOneShot = E2ETestSecretOneShot() def __post_init__(self) -> None: @@ -313,33 +391,43 @@ def __post_init__(self) -> None: self.uuid = uuid.uuid4().hex if self.test_report or self.test_chart: - self.secrets.chart_name, self.secrets.chart_version = self.get_chart_name_version() - self.chart_directory = f'charts/{self.secrets.vendor_type}/{self.secrets.vendor}/{self.secrets.chart_name}' + ( + self.secrets.chart_name, + self.secrets.chart_version, + ) = self.get_chart_name_version() + self.chart_directory = f"charts/{self.secrets.vendor_type}/{self.secrets.vendor}/{self.secrets.chart_name}" bot_name, bot_token = self.get_bot_name_and_token() test_repo = TEST_REPO # Create a new branch locally from detached HEAD - head_sha = self.repo.git.rev_parse('--short', 'HEAD') - unique_branch = f'{head_sha}-{self.uuid}' + head_sha = self.repo.git.rev_parse("--short", "HEAD") + unique_branch = f"{head_sha}-{self.uuid}" local_branches = [h.name for h in self.repo.heads] if unique_branch not in local_branches: - self.repo.git.checkout('-b', f'{unique_branch}') current_branch = self.repo.active_branch.name - r = github_api( - 'get', f'repos/{test_repo}/branches', bot_token) + r = github_api("get", f"repos/{test_repo}/branches", bot_token) branches = json.loads(r.text) - branch_names = [branch['name'] for branch in branches] + branch_names = [branch["name"] for branch in branches] if current_branch not in branch_names: logging.info( - f"{test_repo}:{current_branch} does not exists, creating with local branch") - self.repo.git.push(f'https://x-access-token:{bot_token}@github.com/{test_repo}', - f'HEAD:refs/heads/{current_branch}', '-f') - - pretty_test_name = self.test_name.strip().lower().replace(' ', '-') - base_branch = f'{self.uuid}-{pretty_test_name}-{current_branch}' if pretty_test_name else f'{self.uuid}-test-{current_branch}' - pr_branch = base_branch + '-pr-branch' + f"{test_repo}:{current_branch} does not exist, creating 
with local branch" + ) + self.repo.git.push( + f"https://x-access-token:{bot_token}@github.com/{test_repo}", + f"HEAD:refs/heads/{current_branch}", + "-f", + ) + + pretty_test_name = self.test_name.strip().lower().replace(" ", "-") + base_branch = ( + f"{self.uuid}-{pretty_test_name}-{current_branch}" + if pretty_test_name + else f"{self.uuid}-test-{current_branch}" + ) + pr_branch = base_branch + "-pr-branch" self.secrets.owners_file_content = self.owners_file_content self.secrets.test_chart = self.test_chart @@ -352,41 +440,55 @@ def __post_init__(self) -> None: self.secrets.index_file = "index.yaml" self.secrets.provider_delivery = False - def cleanup (self): + def cleanup(self): # Cleanup releases and release tags self.cleanup_release() # Teardown step to cleanup branches if self.temp_dir is not None: self.temp_dir.cleanup() - self.repo.git.worktree('prune') + self.repo.git.worktree("prune") - head_sha = self.repo.git.rev_parse('--short', 'HEAD') - current_branch = f'{head_sha}-{self.uuid}' + head_sha = self.repo.git.rev_parse("--short", "HEAD") + current_branch = f"{head_sha}-{self.uuid}" logging.info(f"Delete remote '{current_branch}' branch") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{current_branch}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{current_branch}", + self.secrets.bot_token, + ) logging.info(f"Delete '{self.secrets.test_repo}:{self.secrets.base_branch}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}", + self.secrets.bot_token, + ) - logging.info(f"Delete '{self.secrets.test_repo}:{self.secrets.base_branch}-gh-pages'") + logging.info( + f"Delete '{self.secrets.test_repo}:{self.secrets.base_branch}-gh-pages'" + ) github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}-gh-pages', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.base_branch}-gh-pages", + self.secrets.bot_token, + ) logging.info(f"Delete '{self.secrets.test_repo}:{self.secrets.pr_branch}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.pr_branch}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{self.secrets.pr_branch}", + self.secrets.bot_token, + ) logging.info(f"Delete local '{self.secrets.base_branch}'") try: - self.repo.git.branch('-D', self.secrets.base_branch) + self.repo.git.branch("-D", self.secrets.base_branch) except git.exc.GitCommandError: logging.info(f"Local '{self.secrets.base_branch}' does not exist") logging.info(f"Delete local '{current_branch}'") try: - self.repo.git.branch('-D', current_branch) + self.repo.git.branch("-D", current_branch) except git.exc.GitCommandError: logging.info(f"Local '{current_branch}' does not exist") @@ -417,9 +519,13 @@ def get_chart_name_version(self): if not self.test_report and not self.test_chart: pytest.fail("Provide at least one of test report or test chart.") if self.test_report: - chart_name, chart_version = get_name_and_version_from_report(self.test_report) + chart_name, chart_version = get_name_and_version_from_report( + self.test_report + ) else: - chart_name, chart_version = get_name_and_version_from_chart_tar(self.test_chart) + chart_name, chart_version = get_name_and_version_from_chart_tar( + self.test_chart + ) return chart_name, 
chart_version def set_vendor(self, vendor, vendor_type): @@ -428,88 +534,132 @@ def set_vendor(self, vendor, vendor_type): self.secrets.vendor_type = vendor_type base_branch_without_uuid = "-".join(self.secrets.base_branch.split("-")[:-1]) vendor_without_suffix = self.secrets.vendor.split("-")[0] - self.secrets.base_branch = f'{base_branch_without_uuid}-{self.secrets.vendor_type}-{vendor_without_suffix}-{self.secrets.chart_name}-{self.secrets.chart_version}' - self.secrets.pr_branch = f'{self.secrets.base_branch}-pr-branch' - self.chart_directory = f'charts/{self.secrets.vendor_type}/{self.secrets.vendor}/{self.secrets.chart_name}' - + self.secrets.base_branch = f"{base_branch_without_uuid}-{self.secrets.vendor_type}-{vendor_without_suffix}-{self.secrets.chart_name}-{self.secrets.chart_version}" + self.secrets.pr_branch = f"{self.secrets.base_branch}-pr-branch" + self.chart_directory = f"charts/{self.secrets.vendor_type}/{self.secrets.vendor}/{self.secrets.chart_name}" def setup_git_context(self): super().setup_git_context(self.repo) def setup_gh_pages_branch(self): - self.create_test_gh_pages_branch(self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token) + self.create_test_gh_pages_branch( + self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token + ) def setup_temp_dir(self): - self.temp_dir = TemporaryDirectory(prefix='tci-') + self.temp_dir = TemporaryDirectory(prefix="tci-") with SetDirectory(Path(self.temp_dir.name)): # Make PR's from a temporary directory - logging.info(f'Worktree directory: {self.temp_dir.name}') - self.repo.git.worktree('add', '--detach', self.temp_dir.name, f'HEAD') + logging.info(f"Worktree directory: {self.temp_dir.name}") + self.repo.git.worktree("add", "--detach", self.temp_dir.name, "HEAD") self.temp_repo = git.Repo(self.temp_dir.name) - self.set_git_username_email(self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL) - self.temp_repo.git.checkout('-b', self.secrets.base_branch) - pathlib.Path( - f'{self.chart_directory}/{self.secrets.chart_version}').mkdir(parents=True, exist_ok=True) - - self.remove_chart(self.chart_directory, self.secrets.chart_version, self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token) - self.remove_owners_file(self.chart_directory, self.secrets.test_repo, self.secrets.base_branch, self.secrets.bot_token) + self.set_git_username_email( + self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL + ) + self.temp_repo.git.checkout("-b", self.secrets.base_branch) + pathlib.Path(f"{self.chart_directory}/{self.secrets.chart_version}").mkdir( + parents=True, exist_ok=True + ) + + self.remove_chart( + self.chart_directory, + self.secrets.chart_version, + self.secrets.test_repo, + self.secrets.base_branch, + self.secrets.bot_token, + ) + self.remove_owners_file( + self.chart_directory, + self.secrets.test_repo, + self.secrets.base_branch, + self.secrets.bot_token, + ) def update_chart_version_in_chart_yaml(self, new_version): with SetDirectory(Path(self.temp_dir.name)): - path = f'{self.chart_directory}/{self.secrets.chart_version}/src/Chart.yaml' - with open(path, 'r') as fd: + path = f"{self.chart_directory}/{self.secrets.chart_version}/src/Chart.yaml" + with open(path, "r") as fd: try: chart = yaml.safe_load(fd) except yaml.YAMLError as err: pytest.fail(f"error parsing '{path}': {err}") - current_version = chart['version'] + current_version = chart["version"] if current_version != new_version: - chart['version'] = new_version + chart["version"] = new_version try: - with 
open(path, 'w') as fd: + with open(path, "w") as fd: fd.write(yaml.dump(chart)) except Exception as e: pytest.fail("Failed to update version in yaml file") - + def remove_readme_file(self): with SetDirectory(Path(self.temp_dir.name)): - path = f'{self.chart_directory}/{self.secrets.chart_version}/src/README.md' + path = f"{self.chart_directory}/{self.secrets.chart_version}/src/README.md" try: os.remove(path) except Exception as e: pytest.fail(f"Failed to remove readme file : {e}") def process_owners_file(self): - super().create_and_push_owners_file(self.chart_directory, self.secrets.base_branch, self.secrets.vendor, self.secrets.vendor_type, self.secrets.chart_name,self.secrets.provider_delivery) + super().create_and_push_owners_file( + self.chart_directory, + self.secrets.base_branch, + self.secrets.vendor, + self.secrets.vendor_type, + self.secrets.chart_name, + self.secrets.provider_delivery, + ) def process_chart(self, is_tarball: bool): with SetDirectory(Path(self.temp_dir.name)): if is_tarball: # Copy the chart tar into temporary directory for PR submission - chart_tar = self.secrets.test_chart.split('/')[-1] - shutil.copyfile(f'{self.old_cwd}/{self.secrets.test_chart}', - f'{self.chart_directory}/{self.secrets.chart_version}/{chart_tar}') + chart_tar = self.secrets.test_chart.split("/")[-1] + shutil.copyfile( + f"{self.old_cwd}/{self.secrets.test_chart}", + f"{self.chart_directory}/{self.secrets.chart_version}/{chart_tar}", + ) else: # Unzip files into temporary directory for PR submission - extract_chart_tgz(self.secrets.test_chart, f'{self.chart_directory}/{self.secrets.chart_version}', self.secrets, logging) - - - def process_report(self, update_chart_sha=False, update_url=False, url=None, - update_versions=False,supported_versions=None,tested_version=None,kube_version=None, - update_provider_delivery=False, provider_delivery=False, missing_check=None,unset_package_digest=False): - + extract_chart_tgz( + self.secrets.test_chart, + f"{self.chart_directory}/{self.secrets.chart_version}", + self.secrets, + logging, + ) + + def process_report( + self, + update_chart_sha=False, + update_url=False, + url=None, + update_versions=False, + supported_versions=None, + tested_version=None, + kube_version=None, + update_provider_delivery=False, + provider_delivery=False, + missing_check=None, + unset_package_digest=False, + ): with SetDirectory(Path(self.temp_dir.name)): # Copy report to temporary location and push to test_repo:pr_branch logging.info( - f"Push report to '{self.secrets.test_repo}:{self.secrets.pr_branch}'") + f"Push report to '{self.secrets.test_repo}:{self.secrets.pr_branch}'" + ) tmpl = open(self.secrets.test_report).read() - values = {'repository': self.secrets.test_repo, - 'branch': self.secrets.base_branch} + values = { + "repository": self.secrets.test_repo, + "branch": self.secrets.base_branch, + } content = Template(tmpl).substitute(values) - report_path = f'{self.chart_directory}/{self.secrets.chart_version}/' + self.secrets.test_report.split('/')[-1] + report_path = ( + f"{self.chart_directory}/{self.secrets.chart_version}/" + + self.secrets.test_report.split("/")[-1] + ) try: report = yaml.safe_load(content) @@ -517,51 +667,75 @@ def process_report(self, update_chart_sha=False, update_url=False, url=None, pytest.fail(f"error parsing '{report_path}': {err}") if self.secrets.vendor_type != "partners": - report["metadata"]["tool"]["profile"]["VendorType"] = self.secrets.vendor_type - logging.info(f'VendorType set to {report["metadata"]["tool"]["profile"]["VendorType"]} 
in report.yaml') - - if update_chart_sha or update_url or update_versions or update_provider_delivery or unset_package_digest: - #For updating the report.yaml, for chart sha mismatch scenario + report["metadata"]["tool"]["profile"][ + "VendorType" + ] = self.secrets.vendor_type + logging.info( + f'VendorType set to {report["metadata"]["tool"]["profile"]["VendorType"]} in report.yaml' + ) + + if ( + update_chart_sha + or update_url + or update_versions + or update_provider_delivery + or unset_package_digest + ): + # For updating the report.yaml, for chart sha mismatch scenario if update_chart_sha: - new_sha_value = 'sha256:5b85ae00b9ca2e61b2d70a59f98fd72136453b1a185676b29d4eb862981c1xyz' - logging.info(f"Current SHA Value in report: {report['metadata']['tool']['digests']['chart']}") - report['metadata']['tool']['digests']['chart'] = new_sha_value + new_sha_value = "sha256:5b85ae00b9ca2e61b2d70a59f98fd72136453b1a185676b29d4eb862981c1xyz" + logging.info( + f"Current SHA Value in report: {report['metadata']['tool']['digests']['chart']}" + ) + report["metadata"]["tool"]["digests"]["chart"] = new_sha_value logging.info(f"Updated sha value in report: {new_sha_value}") - #For updating the report.yaml, for invalid_url sceanrio + # For updating the report.yaml, for invalid_url scenario if update_url: - logging.info(f"Current chart-uri in report: {report['metadata']['tool']['chart-uri']}") - report['metadata']['tool']['chart-uri'] = url + logging.info( + f"Current chart-uri in report: {report['metadata']['tool']['chart-uri']}" + ) + report["metadata"]["tool"]["chart-uri"] = url logging.info(f"Updated chart-uri value in report: {url}") if update_versions: - report['metadata']['tool']['testedOpenShiftVersion'] = tested_version - report['metadata']['tool']['supportedOpenShiftVersions'] = supported_versions - report['metadata']['chart']['kubeversion'] = kube_version - logging.info(f"Updated testedOpenShiftVersion value in report: {tested_version}") - logging.info(f"Updated supportedOpenShiftVersions value in report: {supported_versions}") + report["metadata"]["tool"][ + "testedOpenShiftVersion" + ] = tested_version + report["metadata"]["tool"][ + "supportedOpenShiftVersions" + ] = supported_versions + report["metadata"]["chart"]["kubeversion"] = kube_version + logging.info( + f"Updated testedOpenShiftVersion value in report: {tested_version}" + ) + logging.info( + f"Updated supportedOpenShiftVersions value in report: {supported_versions}" + ) logging.info(f"Updated kubeversion value in report: {kube_version}") if update_provider_delivery: - report['metadata']['tool']['providerControlledDelivery'] = provider_delivery + report["metadata"]["tool"][ + "providerControlledDelivery" + ] = provider_delivery if unset_package_digest: - del report['metadata']['tool']['digests']['package'] + del report["metadata"]["tool"]["digests"]["package"] - with open(report_path, 'w') as fd: + with open(report_path, "w") as fd: try: fd.write(yaml.dump(report)) logging.info("Report updated with new values") except Exception as e: pytest.fail("Failed to update report yaml with new values") - #For removing the check for missing check scenario + # For removing the check for missing check scenario if missing_check: logging.info(f"Updating report with {missing_check}") - with open(report_path, 'r+') as fd: + with open(report_path, "r+") as fd: report_content = yaml.safe_load(fd) results = report_content["results"] - new_results = filter(lambda x: x['check'] != missing_check, results) + new_results = filter(lambda x: x["check"] != 
missing_check, results) report_content["results"] = list(new_results) fd.seek(0) yaml.dump(report_content, fd) @@ -569,34 +743,53 @@ def process_report(self, update_chart_sha=False, update_url=False, url=None, self.temp_repo.git.add(report_path) self.temp_repo.git.commit( - '-m', f"Add {self.secrets.vendor} {self.secrets.chart_name} {self.secrets.chart_version} report") - self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}', - f'HEAD:refs/heads/{self.secrets.pr_branch}', '-f') + "-m", + f"Add {self.secrets.vendor} {self.secrets.chart_name} {self.secrets.chart_version} report", + ) + self.temp_repo.git.push( + f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}", + f"HEAD:refs/heads/{self.secrets.pr_branch}", + "-f", + ) def add_non_chart_related_file(self): with SetDirectory(Path(self.temp_dir.name)): - path = f'{self.chart_directory}/Notes.txt' - with open(path, 'w') as fd: + path = f"{self.chart_directory}/Notes.txt" + with open(path, "w") as fd: fd.write("This is a test file") def push_chart(self, is_tarball: bool, add_non_chart_file=False): # Push chart to test_repo:pr_branch if is_tarball: - chart_tar = self.secrets.test_chart.split('/')[-1] - self.temp_repo.git.add(f'{self.chart_directory}/{self.secrets.chart_version}/{chart_tar}') + chart_tar = self.secrets.test_chart.split("/")[-1] + self.temp_repo.git.add( + f"{self.chart_directory}/{self.secrets.chart_version}/{chart_tar}" + ) else: if add_non_chart_file: - self.temp_repo.git.add(f'{self.chart_directory}/') + self.temp_repo.git.add(f"{self.chart_directory}/") else: - self.temp_repo.git.add(f'{self.chart_directory}/{self.secrets.chart_version}/src') + self.temp_repo.git.add( + f"{self.chart_directory}/{self.secrets.chart_version}/src" + ) self.temp_repo.git.commit( - '-m', f"Add {self.secrets.vendor} {self.secrets.chart_name} {self.secrets.chart_version} chart") + "-m", + f"Add {self.secrets.vendor} {self.secrets.chart_name} {self.secrets.chart_version} chart", + ) - self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}', - f'HEAD:refs/heads/{self.secrets.pr_branch}', '-f') + self.temp_repo.git.push( + f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}", + f"HEAD:refs/heads/{self.secrets.pr_branch}", + "-f", + ) def send_pull_request(self): - self.secrets.pr_number = super().send_pull_request(self.secrets.test_repo, self.secrets.base_branch, self.secrets.pr_branch, self.secrets.bot_token) + self.secrets.pr_number = super().send_pull_request( + self.secrets.test_repo, + self.secrets.base_branch, + self.secrets.pr_branch, + self.secrets.bot_token, + ) logging.info(f"[INFO] PR number: {self.secrets.pr_number}") # expect_result: a string representation of expected result, e.g. 
'success' @@ -606,37 +799,58 @@ def check_workflow_conclusion(self, expect_result: str): # expect_merged: boolean representing whether the PR should be merged def check_pull_request_result(self, expect_merged: bool): - super().check_pull_request_result(self.secrets.pr_number, expect_merged, pytest.fail) + super().check_pull_request_result( + self.secrets.pr_number, expect_merged, pytest.fail + ) # Check that the expected labels are present on the PR def check_pull_request_labels(self): super().check_pull_request_labels(self.secrets.pr_number, pytest.fail) - def check_pull_request_comments(self, expect_message: str): r = github_api( - 'get', f'repos/{self.secrets.test_repo}/issues/{self.secrets.pr_number}/comments', self.secrets.bot_token) - logging.info(f'STATUS_CODE: {r.status_code}') + "get", + f"repos/{self.secrets.test_repo}/issues/{self.secrets.pr_number}/comments", + self.secrets.bot_token, + ) + logging.info(f"STATUS_CODE: {r.status_code}") response = json.loads(r.text) - complete_comment = response[0]['body'] + complete_comment = response[0]["body"] if expect_message in complete_comment: logging.info("Found the expected comment in the PR") else: - pytest.fail(f"Was expecting '{expect_message}' in the comment {complete_comment}") + pytest.fail( + f"Was expecting '{expect_message}' in the comment {complete_comment}" + ) def check_index_yaml(self, check_provider_type=False): - super().check_index_yaml(self.secrets.base_branch, self.secrets.vendor, self.secrets.chart_name, self.secrets.chart_version, self.secrets.index_file,check_provider_type, pytest.fail) + super().check_index_yaml( + self.secrets.base_branch, + self.secrets.vendor, + self.secrets.chart_name, + self.secrets.chart_version, + self.secrets.index_file, + check_provider_type, + pytest.fail, + ) def check_release_result(self): - chart_tgz = self.secrets.test_chart.split('/')[-1] - super().check_release_result(self.secrets.vendor, self.secrets.chart_name, self.secrets.chart_version, chart_tgz, pytest.fail) + chart_tgz = self.secrets.test_chart.split("/")[-1] + super().check_release_result( + self.secrets.vendor, + self.secrets.chart_name, + self.secrets.chart_version, + chart_tgz, + pytest.fail, + ) def cleanup_release(self): - expected_tag = f'{self.secrets.vendor}-{self.secrets.chart_name}-{self.secrets.chart_version}' + expected_tag = f"{self.secrets.vendor}-{self.secrets.chart_name}-{self.secrets.chart_version}" super().cleanup_release(expected_tag) + @dataclass class ChartCertificationE2ETestMultiple(ChartCertificationE2ETest): secrets: E2ETestSecretRecursive = E2ETestSecretRecursive() @@ -653,15 +867,18 @@ def __post_init__(self) -> None: pr_branches = [] pr_base_branch = self.repo.active_branch.name - r = github_api( - 'get', f'repos/{test_repo}/branches', bot_token) + r = github_api("get", f"repos/{test_repo}/branches", bot_token) branches = json.loads(r.text) - branch_names = [branch['name'] for branch in branches] + branch_names = [branch["name"] for branch in branches] if pr_base_branch not in branch_names: logging.info( - f"{test_repo}:{pr_base_branch} does not exists, creating with local branch") - self.repo.git.push(f'https://x-access-token:{bot_token}@github.com/{test_repo}', - f'HEAD:refs/heads/{pr_base_branch}', '-f') + f"{test_repo}:{pr_base_branch} does not exist, creating with local branch" + ) + self.repo.git.push( + f"https://x-access-token:{bot_token}@github.com/{test_repo}", + f"HEAD:refs/heads/{pr_base_branch}", + "-f", + ) self.secrets = E2ETestSecretRecursive() 
self.secrets.software_name = software_name @@ -678,45 +895,54 @@ def __post_init__(self) -> None: self.secrets.owners_file_content = self.owners_file_content self.secrets.release_tags = list() - def cleanup (self): + def cleanup(self): # Teardown step to cleanup releases and branches for release_tag in self.secrets.release_tags: self.cleanup_release(release_tag) - self.repo.git.worktree('prune') + self.repo.git.worktree("prune") for base_branch in self.secrets.base_branches: logging.info(f"Delete '{self.secrets.test_repo}:{base_branch}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}", + self.secrets.bot_token, + ) logging.info(f"Delete '{self.secrets.test_repo}:{base_branch}-gh-pages'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}-gh-pages', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{base_branch}-gh-pages", + self.secrets.bot_token, + ) logging.info(f"Delete local '{base_branch}'") try: - self.repo.git.branch('-D', base_branch) + self.repo.git.branch("-D", base_branch) except git.exc.GitCommandError: logging.info(f"Local '{base_branch}' does not exist") for pr_branch in self.secrets.pr_branches: logging.info(f"Delete '{self.secrets.test_repo}:{pr_branch}'") github_api( - 'delete', f'repos/{self.secrets.test_repo}/git/refs/heads/{pr_branch}', self.secrets.bot_token) + "delete", + f"repos/{self.secrets.test_repo}/git/refs/heads/{pr_branch}", + self.secrets.bot_token, + ) try: logging.info("Delete local 'tmp' branch") - self.temp_repo.git.branch('-D', 'tmp') + self.temp_repo.git.branch("-D", "tmp") except git.exc.GitCommandError: - logging.info(f"Local 'tmp' branch does not exist") + logging.info("Local 'tmp' branch does not exist") def get_dry_run(self): # Accepts 'true' or 'false', depending on whether we want to notify # Don't notify on dry runs, default to True - dry_run = False if os.environ.get("DRY_RUN") == 'false' else True + dry_run = False if os.environ.get("DRY_RUN") == "false" else True # Don't notify if not triggered on PROD_REPO and PROD_BRANCH if not dry_run: - triggered_branch = os.environ.get("GITHUB_REF").split('/')[-1] + triggered_branch = os.environ.get("GITHUB_REF").split("/")[-1] triggered_repo = os.environ.get("GITHUB_REPOSITORY") if triggered_repo != PROD_REPO or triggered_branch != PROD_BRANCH: dry_run = True @@ -726,9 +952,9 @@ def get_notify_id(self): # Accepts comma separated Github IDs or empty strings to override people to tag in notifications notify_id = os.environ.get("NOTIFY_ID") if notify_id: - notify_id = [vt.strip() for vt in notify_id.split(',')] + notify_id = [vt.strip() for vt in notify_id.split(",")] else: - notify_id = ["dperaza","mmulholla"] + notify_id = ["dperaza", "mmulholla"] return notify_id def get_software_name_version(self): @@ -736,7 +962,7 @@ def get_software_name_version(self): if not software_name: raise Exception("SOFTWARE_NAME environment variable not defined") - software_version = os.environ.get("SOFTWARE_VERSION").strip('\"') + software_version = os.environ.get("SOFTWARE_VERSION").strip('"') if not software_version: raise Exception("SOFTWARE_VERSION environment variable not defined") elif software_version.startswith("sha256"): @@ -748,176 +974,300 @@ def get_vendor_type(self): vendor_type = os.environ.get("VENDOR_TYPE") if not vendor_type: logging.info( - f"VENDOR_TYPE environment variable not defined, default to 
`all`") - vendor_type = 'all' + "VENDOR_TYPE environment variable not defined, default to `all`" + ) + vendor_type = "all" return vendor_type def setup_temp_dir(self): - self.temp_dir = TemporaryDirectory(prefix='tci-') + self.temp_dir = TemporaryDirectory(prefix="tci-") with SetDirectory(Path(self.temp_dir.name)): # Make PR's from a temporary directory - logging.info(f'Worktree directory: {self.temp_dir.name}') - self.repo.git.worktree('add', '--detach', self.temp_dir.name, f'HEAD') + logging.info(f"Worktree directory: {self.temp_dir.name}") + self.repo.git.worktree("add", "--detach", self.temp_dir.name, "HEAD") self.temp_repo = git.Repo(self.temp_dir.name) # Run submission flow test with charts in PROD_REPO:PROD_BRANCH - self.set_git_username_email(self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL) - self.temp_repo.git.checkout(PROD_BRANCH, 'charts') - self.temp_repo.git.restore('--staged', 'charts') + self.set_git_username_email( + self.temp_repo, self.secrets.bot_name, GITHUB_ACTIONS_BOT_EMAIL + ) + self.temp_repo.git.checkout(PROD_BRANCH, "charts") + self.temp_repo.git.restore("--staged", "charts") self.secrets.submitted_charts = get_all_charts( - 'charts', self.secrets.vendor_type) + "charts", self.secrets.vendor_type + ) logging.info( - f"Found charts for {self.secrets.vendor_type}: {self.secrets.submitted_charts}") - self.temp_repo.git.checkout('-b', 'tmp') + f"Found charts for {self.secrets.vendor_type}: {self.secrets.submitted_charts}" + ) + self.temp_repo.git.checkout("-b", "tmp") def get_owner_ids(self, chart_directory, owners_table): - - with open(f'{chart_directory}/OWNERS', 'r') as fd: + with open(f"{chart_directory}/OWNERS", "r") as fd: try: owners = yaml.safe_load(fd) # Pick owner ids for notification owners_table[chart_directory] = [ - owner.get('githubUsername', '') for owner in owners['users']] + owner.get("githubUsername", "") for owner in owners["users"] + ] except yaml.YAMLError as err: - logging.warning( - f"Error parsing OWNERS of {chart_directory}: {err}") - - def push_chart(self, chart_directory, chart_name, chart_version, vendor_name, vendor_type, pr_branch): + logging.warning(f"Error parsing OWNERS of {chart_directory}: {err}") + + def push_chart( + self, + chart_directory, + chart_name, + chart_version, + vendor_name, + vendor_type, + pr_branch, + ): # Push chart files to test_repo:pr_branch - self.temp_repo.git.add(f'{chart_directory}/{chart_version}') + self.temp_repo.git.add(f"{chart_directory}/{chart_version}") self.temp_repo.git.commit( - '-m', f"Add {vendor_type} {vendor_name} {chart_name} {chart_version} chart files") - self.temp_repo.git.push(f'https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}', - f'HEAD:refs/heads/{pr_branch}', '-f') - - def report_failure(self,chart,chart_owners,failure_type,pr_html_url=None,run_html_url=None): - - os.environ['GITHUB_REPO'] = PROD_REPO.split('/')[1] - os.environ['GITHUB_AUTH_TOKEN'] = self.secrets.bot_token + "-m", + f"Add {vendor_type} {vendor_name} {chart_name} {chart_version} chart files", + ) + self.temp_repo.git.push( + f"https://x-access-token:{self.secrets.bot_token}@github.com/{self.secrets.test_repo}", + f"HEAD:refs/heads/{pr_branch}", + "-f", + ) + + def report_failure( + self, chart, chart_owners, failure_type, pr_html_url=None, run_html_url=None + ): + os.environ["GITHUB_REPO"] = PROD_REPO.split("/")[1] + os.environ["GITHUB_AUTH_TOKEN"] = self.secrets.bot_token if not self.secrets.dry_run: - os.environ['GITHUB_REPO'] = PROD_REPO.split('/')[1] - 
os.environ['GITHUB_AUTH_TOKEN'] = self.secrets.bot_token - os.environ['GITHUB_ORGANIZATION'] = PROD_REPO.split('/')[0] - logging.info(f"Send notification to '{self.secrets.notify_id}' about verification result of '{chart}'") - create_verification_issue(chart, chart_owners, failure_type,self.secrets.notify_id, pr_html_url, run_html_url, self.secrets.software_name, - self.secrets.software_version, self.secrets.bot_token, self.secrets.dry_run) + os.environ["GITHUB_REPO"] = PROD_REPO.split("/")[1] + os.environ["GITHUB_AUTH_TOKEN"] = self.secrets.bot_token + os.environ["GITHUB_ORGANIZATION"] = PROD_REPO.split("/")[0] + logging.info( + f"Send notification to '{self.secrets.notify_id}' about verification result of '{chart}'" + ) + create_verification_issue( + chart, + chart_owners, + failure_type, + self.secrets.notify_id, + pr_html_url, + run_html_url, + self.secrets.software_name, + self.secrets.software_version, + self.secrets.bot_token, + self.secrets.dry_run, + ) else: - os.environ['GITHUB_ORGANIZATION'] = PROD_REPO.split('/')[0] - os.environ['GITHUB_REPO'] = "sandbox" - os.environ['GITHUB_AUTH_TOKEN'] = self.secrets.bot_token - logging.info(f"Send notification to '{self.secrets.notify_id}' about dry run verification result of '{chart}'") - create_verification_issue(chart, chart_owners, failure_type,self.secrets.notify_id, pr_html_url, run_html_url, self.secrets.software_name, - self.secrets.software_version, self.secrets.bot_token, self.secrets.dry_run) - logging.info(f"Dry Run - send sandbox notification to '{chart_owners}' about verification result of '{chart}'") - - - def check_single_chart_result(self, vendor_type, vendor_name, chart_name, chart_version, pr_number, owners_table): - base_branch = f'{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}' + os.environ["GITHUB_ORGANIZATION"] = PROD_REPO.split("/")[0] + os.environ["GITHUB_REPO"] = "sandbox" + os.environ["GITHUB_AUTH_TOKEN"] = self.secrets.bot_token + logging.info( + f"Send notification to '{self.secrets.notify_id}' about dry run verification result of '{chart}'" + ) + create_verification_issue( + chart, + chart_owners, + failure_type, + self.secrets.notify_id, + pr_html_url, + run_html_url, + self.secrets.software_name, + self.secrets.software_version, + self.secrets.bot_token, + self.secrets.dry_run, + ) + logging.info( + f"Dry Run - send sandbox notification to '{chart_owners}' about verification result of '{chart}'" + ) + + def check_single_chart_result( + self, + vendor_type, + vendor_name, + chart_name, + chart_version, + pr_number, + owners_table, + ): + base_branch = f"{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}" # Check workflow conclusion - chart = f'{vendor_name} {chart_name} {chart_version}' - run_id, conclusion = super().check_workflow_conclusion(pr_number, 'success', logging.warning) + chart = f"{vendor_name} {chart_name} {chart_version}" + run_id, conclusion = super().check_workflow_conclusion( + pr_number, "success", logging.warning + ) if conclusion and run_id: - if conclusion != 'success': + if conclusion != "success": # Send notification to owner through GitHub issues r = github_api( - 'get', f'repos/{self.secrets.test_repo}/actions/runs/{run_id}', self.secrets.bot_token) + "get", + f"repos/{self.secrets.test_repo}/actions/runs/{run_id}", + self.secrets.bot_token, + ) run = r.json() - run_html_url = 
run['html_url']
+                run_html_url = run["html_url"]

-                pr = get_pr(self.secrets,pr_number)
+                pr = get_pr(self.secrets, pr_number)
                 pr_html_url = pr["html_url"]
-                chart_directory = f'charts/{vendor_type}/{vendor_name}/{chart_name}'
+                chart_directory = f"charts/{vendor_type}/{vendor_name}/{chart_name}"
                 chart_owners = owners_table[chart_directory]

-                self.report_failure(chart,chart_owners,CHECKS_FAILED,pr_html_url,run_html_url)
+                self.report_failure(
+                    chart, chart_owners, CHECKS_FAILED, pr_html_url, run_html_url
+                )

-                logging.warning(f"PR{pr_number} workflow failed: {vendor_name}, {chart_name}, {chart_version}")
+                logging.warning(
+                    f"PR{pr_number} workflow failed: {vendor_name}, {chart_name}, {chart_version}"
+                )
                 return
             else:
-                logging.info(f"PR{pr_number} workflow passed: {vendor_name}, {chart_name}, {chart_version}")
+                logging.info(
+                    f"PR{pr_number} workflow passed: {vendor_name}, {chart_name}, {chart_version}"
+                )
         else:
-            logging.warning(f"PR{pr_number} workflow did not complete: {vendor_name}, {chart_name}, {chart_version}")
+            logging.warning(
+                f"PR{pr_number} workflow did not complete: {vendor_name}, {chart_name}, {chart_version}"
+            )
             return
-
        # Check PRs are merged
         if not super().check_pull_request_result(pr_number, True, logging.warning):
-            logging.warning(f"PR{pr_number} pull request was not merged: {vendor_name}, {chart_name}, {chart_version}")
+            logging.warning(
+                f"PR{pr_number} pull request was not merged: {vendor_name}, {chart_name}, {chart_version}"
+            )
             return
-        logging.info(f"PR{pr_number} pull request was merged: {vendor_name}, {chart_name}, {chart_version}")
+        logging.info(
+            f"PR{pr_number} pull request was merged: {vendor_name}, {chart_name}, {chart_version}"
+        )

         # Check index.yaml is updated
-        if not super().check_index_yaml(base_branch, vendor_name, chart_name, chart_version, check_provider_type=False, logger=logging.warning):
-            logging.warning(f"PR{pr_number} - Chart was not found in Index file: {vendor_name}, {chart_name}, {chart_version}")
-        logging.info(f"PR{pr_number} - Chart was found in Index file: {vendor_name}, {chart_name}, {chart_version}")
+        if not super().check_index_yaml(
+            base_branch,
+            vendor_name,
+            chart_name,
+            chart_version,
+            check_provider_type=False,
+            logger=logging.warning,
+        ):
+            logging.warning(
+                f"PR{pr_number} - Chart was not found in Index file: {vendor_name}, {chart_name}, {chart_version}"
+            )
+        else:
+            logging.info(
+                f"PR{pr_number} - Chart was found in Index file: {vendor_name}, {chart_name}, {chart_version}"
+            )

         # Check release is published
-        chart_tgz = f'{chart_name}-{chart_version}.tgz'
-        if not super().check_release_result(vendor_name, chart_name, chart_version, chart_tgz, logging.warning):
-            logging.warning(f"PR{pr_number} - Release was not created: {vendor_name}, {chart_name}, {chart_version}")
-        logging.info(f"PR{pr_number} - Release was created: {vendor_name}, {chart_name}, {chart_version}")
+        chart_tgz = f"{chart_name}-{chart_version}.tgz"
+        if not super().check_release_result(
+            vendor_name, chart_name, chart_version, chart_tgz, logging.warning
+        ):
+            logging.warning(
+                f"PR{pr_number} - Release was not created: {vendor_name}, {chart_name}, {chart_version}"
+            )
+        else:
+            logging.info(
+                f"PR{pr_number} - Release was created: {vendor_name}, {chart_name}, {chart_version}"
+            )

-    def process_single_chart(self, vendor_type, vendor_name, chart_name, chart_version, pr_number_list, owners_table):
+    def process_single_chart(
+        self,
+        vendor_type,
+        vendor_name,
+        chart_name,
+        chart_version,
+        pr_number_list,
+        owners_table,
+    ):
        # Get SHA from
'pr_base_branch' branch - logging.info(f"Process chart: {vendor_type}/{vendor_name}/{chart_name}/{chart_version}") + logging.info( + f"Process chart: {vendor_type}/{vendor_name}/{chart_name}/{chart_version}" + ) r = github_api( - 'get', f'repos/{self.secrets.test_repo}/git/ref/heads/{self.secrets.pr_base_branch}', self.secrets.bot_token) + "get", + f"repos/{self.secrets.test_repo}/git/ref/heads/{self.secrets.pr_base_branch}", + self.secrets.bot_token, + ) j = json.loads(r.text) - pr_base_branch_sha = j['object']['sha'] + pr_base_branch_sha = j["object"]["sha"] - chart_directory = f'charts/{vendor_type}/{vendor_name}/{chart_name}' - base_branch = f'{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}' - base_branch = base_branch.replace(":","-") - pr_branch = f'{base_branch}-pr-branch' + chart_directory = f"charts/{vendor_type}/{vendor_name}/{chart_name}" + base_branch = f"{self.secrets.software_name}-{self.secrets.software_version}-{self.secrets.pr_base_branch}-{vendor_type}-{vendor_name}-{chart_name}-{chart_version}" + base_branch = base_branch.replace(":", "-") + pr_branch = f"{base_branch}-pr-branch" self.secrets.base_branches.append(base_branch) self.secrets.pr_branches.append(pr_branch) - self.temp_repo.git.checkout('tmp') - self.temp_repo.git.checkout('-b', base_branch) + self.temp_repo.git.checkout("tmp") + self.temp_repo.git.checkout("-b", base_branch) # Create test gh-pages branch for checking index.yaml - self.create_test_gh_pages_branch(self.secrets.test_repo, base_branch, self.secrets.bot_token) + self.create_test_gh_pages_branch( + self.secrets.test_repo, base_branch, self.secrets.bot_token + ) # Create a new base branch for testing current chart - logging.info( - f"Create {self.secrets.test_repo}:{base_branch} for testing") + logging.info(f"Create {self.secrets.test_repo}:{base_branch} for testing") r = github_api( - 'get', f'repos/{self.secrets.test_repo}/branches', self.secrets.bot_token) + "get", f"repos/{self.secrets.test_repo}/branches", self.secrets.bot_token + ) branches = json.loads(r.text) - branch_names = [branch['name'] for branch in branches] + branch_names = [branch["name"] for branch in branches] if base_branch in branch_names: - logging.warning( - f"{self.secrets.test_repo}:{base_branch} already exists") + logging.warning(f"{self.secrets.test_repo}:{base_branch} already exists") return - data = {'ref': f'refs/heads/{base_branch}', - 'sha': pr_base_branch_sha} + data = {"ref": f"refs/heads/{base_branch}", "sha": pr_base_branch_sha} r = github_api( - 'post', f'repos/{self.secrets.test_repo}/git/refs', self.secrets.bot_token, json=data) + "post", + f"repos/{self.secrets.test_repo}/git/refs", + self.secrets.bot_token, + json=data, + ) # Remove chart and owners file from git - self.remove_chart(chart_directory, chart_version, self.secrets.test_repo, base_branch, self.secrets.bot_token) - self.remove_owners_file(chart_directory, self.secrets.test_repo, base_branch, self.secrets.bot_token) + self.remove_chart( + chart_directory, + chart_version, + self.secrets.test_repo, + base_branch, + self.secrets.bot_token, + ) + self.remove_owners_file( + chart_directory, self.secrets.test_repo, base_branch, self.secrets.bot_token + ) # Get owners id for notifications self.get_owner_ids(chart_directory, owners_table) # Create and push test owners file - super().create_and_push_owners_file(chart_directory, base_branch, vendor_name, vendor_type, chart_name) + 
super().create_and_push_owners_file( + chart_directory, base_branch, vendor_name, vendor_type, chart_name + ) # Push test chart to pr_branch - self.push_chart(chart_directory, chart_name, chart_version, vendor_name, vendor_type, pr_branch) + self.push_chart( + chart_directory, + chart_name, + chart_version, + vendor_name, + vendor_type, + pr_branch, + ) # Create PR from pr_branch to base_branch logging.info("sleep for 5 seconds to avoid secondary api limit") time.sleep(5) - pr_number = super().send_pull_request(self.secrets.test_repo, base_branch, pr_branch, self.secrets.bot_token) - pr_number_list.append((vendor_type, vendor_name, chart_name, chart_version, pr_number)) - logging.info(f"PR{pr_number} created in {self.secrets.test_repo} into {base_branch} from {pr_branch}") + pr_number = super().send_pull_request( + self.secrets.test_repo, base_branch, pr_branch, self.secrets.bot_token + ) + pr_number_list.append( + (vendor_type, vendor_name, chart_name, chart_version, pr_number) + ) + logging.info( + f"PR{pr_number} created in {self.secrets.test_repo} into {base_branch} from {pr_branch}" + ) # Record expected release tags - self.secrets.release_tags.append(f'{vendor_name}-{chart_name}-{chart_version}') + self.secrets.release_tags.append(f"{vendor_name}-{chart_name}-{chart_version}") def process_all_charts(self): self.setup_git_context(self.repo) @@ -928,34 +1278,69 @@ def process_all_charts(self): skip_charts = list() - logging.info(f"Running tests for : {self.secrets.software_name} {self.secrets.software_version} :") + logging.info( + f"Running tests for : {self.secrets.software_name} {self.secrets.software_version} :" + ) # First look for charts in index.yaml to see if kubeVersion is good: if self.secrets.software_name == "OpenShift": logging.info("check index file for invalid kubeVersions") failed_charts = check_index_entries(self.secrets.software_version) if failed_charts: for chart in failed_charts: - providerDir = chart["providerType"].replace("partner","partners") - chart_directory = f'charts/{providerDir}/{chart["provider"]}/{chart["name"]}' - self.get_owner_ids(chart_directory,owners_table) + providerDir = chart["providerType"].replace("partner", "partners") + chart_directory = ( + f'charts/{providerDir}/{chart["provider"]}/{chart["name"]}' + ) + self.get_owner_ids(chart_directory, owners_table) chart_owners = owners_table[chart_directory] chart_id = f'{chart["provider"]} {chart["name"]} {chart["version"]}' - self.report_failure(chart_id,chart_owners,chart["message"],"","") + self.report_failure( + chart_id, chart_owners, chart["message"], "", "" + ) skip_charts.append(f'{chart["name"]}-{chart["version"]}') - # Process test charts and send PRs from temporary directory with SetDirectory(Path(self.temp_dir.name)): - for vendor_type, vendor_name, chart_name, chart_version in self.secrets.submitted_charts: - if f'{chart_name}-{chart_version}' in skip_charts: - logging.info(f"Skip already failed chart: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}") + for ( + vendor_type, + vendor_name, + chart_name, + chart_version, + ) in self.secrets.submitted_charts: + if f"{chart_name}-{chart_version}" in skip_charts: + logging.info( + f"Skip already failed chart: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}" + ) else: - logging.info(f"Process chart: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}") - self.process_single_chart(vendor_type, vendor_name, chart_name, chart_version, pr_number_list, owners_table) + logging.info( + f"Process chart: {vendor_type}, 
{vendor_name}, {chart_name}, {chart_version}"
+                    )
+                    self.process_single_chart(
+                        vendor_type,
+                        vendor_name,
+                        chart_name,
+                        chart_version,
+                        pr_number_list,
+                        owners_table,
+                    )
                logging.info("sleep for 5 seconds to avoid secondary api limit")
                time.sleep(5)

-        for vendor_type, vendor_name, chart_name, chart_version, pr_number in pr_number_list:
-            logging.info(f"PR{pr_number} Check result: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}")
-            self.check_single_chart_result(vendor_type, vendor_name, chart_name, chart_version, pr_number, owners_table)
-
+        for (
+            vendor_type,
+            vendor_name,
+            chart_name,
+            chart_version,
+            pr_number,
+        ) in pr_number_list:
+            logging.info(
+                f"PR{pr_number} Check result: {vendor_type}, {vendor_name}, {chart_name}, {chart_version}"
+            )
+            self.check_single_chart_result(
+                vendor_type,
+                vendor_name,
+                chart_name,
+                chart_version,
+                pr_number,
+                owners_table,
+            )
diff --git a/tests/functional/utils/github.py b/tests/functional/utils/github.py
index 73a816d5..f91ecc52 100644
--- a/tests/functional/utils/github.py
+++ b/tests/functional/utils/github.py
@@ -2,45 +2,50 @@
 """Utility class for setting up and manipulating GitHub operations."""
 import json
-import pytest
 import requests
 from retrying import retry

 from functional.utils.setttings import *

+
 @retry(stop_max_delay=30_000, wait_fixed=1000)
 def get_run_id(secrets, pr_number=None):
     pr = get_pr(secrets, pr_number)
-    r = github_api(
-        'get', f'repos/{secrets.test_repo}/actions/runs', secrets.bot_token)
+    r = github_api("get", f"repos/{secrets.test_repo}/actions/runs", secrets.bot_token)
     runs = json.loads(r.text)
-    for run in runs['workflow_runs']:
-        if run['head_sha'] == pr['head']['sha'] and run['name'] == CERTIFICATION_CI_NAME:
-            return run['id']
+    for run in runs["workflow_runs"]:
+        if (
+            run["head_sha"] == pr["head"]["sha"]
+            and run["name"] == CERTIFICATION_CI_NAME
+        ):
+            return run["id"]
     else:
         raise Exception("Workflow for the submitted PR did not run.")


-@retry(stop_max_delay=60_000*40, wait_fixed=2000)
+@retry(stop_max_delay=60_000 * 40, wait_fixed=2000)
 def get_run_result(secrets, run_id):
     r = github_api(
-        'get', f'repos/{secrets.test_repo}/actions/runs/{run_id}', secrets.bot_token)
+        "get", f"repos/{secrets.test_repo}/actions/runs/{run_id}", secrets.bot_token
+    )
     run = json.loads(r.text)
-    if run['conclusion'] is None:
+    if run["conclusion"] is None:
         raise Exception(f"Workflow {run_id} is still running, PR: {secrets.pr_number} ")
-    return run['conclusion']
+    return run["conclusion"]


 @retry(stop_max_delay=10_000, wait_fixed=1000)
 def get_release_assets(secrets, release_id, required_assets):
     r = github_api(
-        'get', f'repos/{secrets.test_repo}/releases/{release_id}/assets', secrets.bot_token)
+        "get",
+        f"repos/{secrets.test_repo}/releases/{release_id}/assets",
+        secrets.bot_token,
+    )
     asset_list = json.loads(r.text)
-    asset_names = [asset['name'] for asset in asset_list]
+    asset_names = [asset["name"] for asset in asset_list]
     missing_assets = list()
     for asset in required_assets:
         if asset not in asset_names:
@@ -51,11 +56,10 @@ def get_release_assets(secrets, release_id, required_assets):

 @retry(stop_max_delay=15_000, wait_fixed=1000)
 def get_release_by_tag(secrets, release_tag):
-    r = github_api(
-        'get', f'repos/{secrets.test_repo}/releases', secrets.bot_token)
+    r = github_api("get", f"repos/{secrets.test_repo}/releases", secrets.bot_token)
     releases = json.loads(r.text)
     for release in releases:
-        if release['tag_name'] == release_tag:
+        if release["tag_name"] == release_tag:
             return release

raise Exception("Release not published") @@ -63,46 +67,51 @@ def get_release_by_tag(secrets, release_tag): def get_pr(secrets, pr_number=None): pr_number = secrets.pr_number if pr_number is None else pr_number r = github_api( - 'post', f'repos/{secrets.test_repo}/pulls/{pr_number}', secrets.bot_token) + "post", f"repos/{secrets.test_repo}/pulls/{pr_number}", secrets.bot_token + ) pr = json.loads(r.text) return pr def github_api_get(endpoint, bot_token, headers={}): if not headers: - headers = {'Accept': 'application/vnd.github.v3+json', - 'Authorization': f'Bearer {bot_token}'} - r = requests.get(f'{GITHUB_BASE_URL}/{endpoint}', headers=headers) + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f"Bearer {bot_token}", + } + r = requests.get(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers) return r def github_api_delete(endpoint, bot_token, headers={}): if not headers: - headers = {'Accept': 'application/vnd.github.v3+json', - 'Authorization': f'Bearer {bot_token}'} - r = requests.delete(f'{GITHUB_BASE_URL}/{endpoint}', headers=headers) + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f"Bearer {bot_token}", + } + r = requests.delete(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers) return r def github_api_post(endpoint, bot_token, headers={}, json={}): if not headers: - headers = {'Accept': 'application/vnd.github.v3+json', - 'Authorization': f'Bearer {bot_token}'} - r = requests.post(f'{GITHUB_BASE_URL}/{endpoint}', - headers=headers, json=json) + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": f"Bearer {bot_token}", + } + r = requests.post(f"{GITHUB_BASE_URL}/{endpoint}", headers=headers, json=json) return r def github_api(method, endpoint, bot_token, headers={}, data={}, json={}): - if method == 'get': + if method == "get": return github_api_get(endpoint, bot_token, headers=headers) - elif method == 'post': + elif method == "post": return github_api_post(endpoint, bot_token, headers=headers, json=json) - elif method == 'delete': + elif method == "delete": return github_api_delete(endpoint, bot_token, headers=headers) else: - raise ValueError( - "Github API method not implemented in helper function") + raise ValueError("Github API method not implemented in helper function") diff --git a/tests/functional/utils/index.py b/tests/functional/utils/index.py index 8ca01488..45ad4ab8 100644 --- a/tests/functional/utils/index.py +++ b/tests/functional/utils/index.py @@ -1,43 +1,49 @@ - import logging import semantic_version import sys -sys.path.append('../../../scripts/src') +sys.path.append("../../../scripts/src") from chartrepomanager import indexannotations from indexfile import index - def check_index_entries(ocpVersion): - all_chart_list = index.get_latest_charts() failed_chart_list = [] OCP_VERSION = semantic_version.Version.coerce(ocpVersion) for chart in all_chart_list: - if "supportedOCP" in chart and chart["supportedOCP"] != "N/A" and chart["supportedOCP"] != "": + if ( + "supportedOCP" in chart + and chart["supportedOCP"] != "N/A" + and chart["supportedOCP"] != "" + ): if OCP_VERSION in semantic_version.NpmSpec(chart["supportedOCP"]): - logging.info(f'PASS: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} includes: {OCP_VERSION}') + logging.info( + f'PASS: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} includes: {OCP_VERSION}' + ) else: - chart["message"] = f'chart {chart["name"]} {chart["version"]} supported OCP version 
{chart["supportedOCP"]} does not include latest OCP version {OCP_VERSION}' - logging.info(f' ERROR: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} does not include {OCP_VERSION}') + chart[ + "message" + ] = f'chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} does not include latest OCP version {OCP_VERSION}' + logging.info( + f' ERROR: Chart {chart["name"]} {chart["version"]} supported OCP version {chart["supportedOCP"]} does not include {OCP_VERSION}' + ) failed_chart_list.append(chart) elif "kubeVersion" in chart and chart["kubeVersion"] != "": supportedOCPVersion = indexannotations.getOCPVersions(chart["kubeVersion"]) if OCP_VERSION in semantic_version.NpmSpec(supportedOCPVersion): - logging.info(f'PASS: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) includes OCP version: {OCP_VERSION}') + logging.info( + f'PASS: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) includes OCP version: {OCP_VERSION}' + ) else: - chart["message"] = f'chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include latest OCP version {OCP_VERSION}' - logging.info(f' ERROR: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include {OCP_VERSION}') + chart[ + "message" + ] = f'chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include latest OCP version {OCP_VERSION}' + logging.info( + f' ERROR: Chart {chart["name"]} {chart["version"]} kubeVersion {chart["kubeVersion"]} (OCP: {supportedOCPVersion}) does not include {OCP_VERSION}' + ) failed_chart_list.append(chart) return failed_chart_list - - - - - - - diff --git a/tests/functional/utils/notifier.py b/tests/functional/utils/notifier.py index d44279e7..923859fd 100755 --- a/tests/functional/utils/notifier.py +++ b/tests/functional/utils/notifier.py @@ -13,13 +13,15 @@ CHECKS_FAILED = "checks failed" + def _set_endpoint_key(key, env_var): if key not in endpoint_data: if env_var in os.environ: endpoint_data[key] = os.environ[env_var] else: raise Exception( - f"Environment variables {env_var} is required to connect to github") + f"Environment variables {env_var} is required to connect to github" + ) def _set_endpoint(): @@ -29,17 +31,23 @@ def _set_endpoint(): def _make_gihub_request(method, uri, body=None, params={}, headers={}, verbose=False): - headers.update({"Authorization": f'Bearer {endpoint_data["access_token"]}', - "Accept": "application/vnd.github.v3+json"}) + headers.update( + { + "Authorization": f'Bearer {endpoint_data["access_token"]}', + "Accept": "application/vnd.github.v3+json", + } + ) url = f'{GITHUB_BASE_URL}/repos/{endpoint_data["organization"]}/{endpoint_data["repo"]}/{uri}' print(f"API url: {url}") - method_map = {"get": requests.get, - "post": requests.post, - "put": requests.put, - "delete": requests.delete, - "patch": requests.patch} + method_map = { + "get": requests.get, + "post": requests.post, + "put": requests.put, + "delete": requests.delete, + "patch": requests.patch, + } request_method = method_map[method] response = request_method(url, params=params, headers=headers, json=body) if verbose: @@ -56,14 +64,17 @@ def _make_gihub_request(method, uri, body=None, params={}, headers={}, verbose=F print(json.dumps(resp_json, indent=4, sort_keys=True)) return resp_json + 
# Call this method directly if you are not creating a verification issue or a version change issue.
 def create_an_issue(title, description, assignees=[], labels=[]):
     uri = "issues"
     method = "post"
-    body = {"title": title,
-            "body": description,
-            "assignees": assignees,
-            "labels": labels}
+    body = {
+        "title": title,
+        "body": description,
+        "assignees": assignees,
+        "labels": labels,
+    }
     _make_gihub_request(method, uri, body=body, verbose=False)


@@ -78,7 +89,18 @@ def _verify_endpoint(access_token):
     endpoint_data["access_token"] = access_token


-def create_verification_issue(chart, chart_owners, failure_type, notify_developers, pr_url, report_url, software_name, software_version, access_token=None, dry_run=False):
+def create_verification_issue(
+    chart,
+    chart_owners,
+    failure_type,
+    notify_developers,
+    pr_url,
+    report_url,
+    software_name,
+    software_version,
+    access_token=None,
+    dry_run=False,
+):
     """Create an issue with chart-verifier findings after a version change trigger.

     chart_name -- Name of the chart that was verified. Include version for more verbose information\n
@@ -92,7 +114,6 @@ def create_verification_issue(chart, chart_owners, failure_type, notify_develope
     dry-run -- Set if the test run is a dry-run.
     """
-
     title = f"Chart {chart}"
     if dry_run:
         title = f"Dry Run: Chart {chart}"
@@ -100,22 +121,26 @@ def create_verification_issue(chart, chart_owners, failure_type, notify_develope
     if failure_type == CHECKS_FAILED:
         title = f"{title} has failures with {software_name} version {software_version}"
         report_result = "some chart checks have failed. Please review the failures and, if required, consider submitting a new chart version with the appropriate additions/corrections."
-        body = (f"FYI @{' @'.join(notify_developers)}, in PR {pr_url} we triggered the chart certification workflow against chart {chart} because the workflow "
-                f"now supports {software_name} version {software_version}. We have found that {report_result}. Check details in the report: "
-                f"{report_url}, Chart owners are: {chart_owners}")
+        body = (
+            f"FYI @{' @'.join(notify_developers)}, in PR {pr_url} we triggered the chart certification workflow against chart {chart} because the workflow "
+            f"now supports {software_name} version {software_version}. We have found that {report_result}. Check details in the report: "
+            f"{report_url}, Chart owners are: {chart_owners}"
+        )
     else:
         title = f"{title} does not support {software_name} version {software_version}"
-        body = (f"FYI @{' @'.join(notify_developers)}, we checked the OCP versions supported by {chart} because the workflow "
-                f"now supports {software_name} version {software_version}. We have found that {failure_type}. Chart owners are: {chart_owners}")
+        body = (
+            f"FYI @{' @'.join(notify_developers)}, we checked the OCP versions supported by {chart} because the workflow "
+            f"now supports {software_name} version {software_version}. We have found that {failure_type}. Chart owners are: {chart_owners}"
+        )

     _set_endpoint()
     _verify_endpoint(access_token)

     create_an_issue(title, body)


-def create_version_change_issue(chart_name, chart_owners, software_name, software_version, access_token=None):
+def create_version_change_issue(
+    chart_name, chart_owners, software_name, software_version, access_token=None
+):
     """Create an issue with the new version of software dependencies supported by the certification program.

     chart_name -- Name of the chart affected. Include version for more verbose information

@@ -127,8 +152,10 @@ def create_version_change_issue(chart_name, chart_owners, software_name, softwar

     title = f"Action needed for {chart_name} after a certification dependency change"

-    body = (f"FYI @{' @'.join(chart_owners)}, {software_name} {software_version} is now supported by the certification program. "
-            "Consider submiting a new chart version.")
+    body = (
+        f"FYI @{' @'.join(chart_owners)}, {software_name} {software_version} is now supported by the certification program. "
+        "Consider submitting a new chart version."
+    )

     _set_endpoint()
     _verify_endpoint(access_token)
@@ -165,8 +192,15 @@ def create_version_change_issue(chart_name, chart_owners, software_name, softwar
         print("Did the chart verification pass (yes/no)?: ")
         pass_answer = sys.stdin.readline().strip()
         pass_verification = pass_answer == "yes"
-        create_verification_issue(chart_name, chart_owners, report_url,
-                                  software_name, software_version, pass_verification=pass_verification)
+        create_verification_issue(
+            chart_name,
+            chart_owners,
+            report_url,
+            software_name,
+            software_version,
+            pass_verification=pass_verification,
+        )
     else:
         create_version_change_issue(
-            chart_name, chart_owners, software_name, software_version)
+            chart_name, chart_owners, software_name, software_version
+        )
diff --git a/tests/functional/utils/secret.py b/tests/functional/utils/secret.py
index 520d537c..91a9d49f 100644
--- a/tests/functional/utils/secret.py
+++ b/tests/functional/utils/secret.py
@@ -3,37 +3,40 @@

 from dataclasses import dataclass

+
 @dataclass
 class E2ETestSecret:
     # common secrets between one-shot and recursive tests
-    test_repo: str = ''
-    bot_name: str = ''
-    bot_token: str = ''
+    test_repo: str = ""
+    bot_name: str = ""
+    bot_token: str = ""
     pr_number: int = -1
-    vendor_type: str = ''
-    owners_file_content: str = ''
-    test_chart: str = ''
-    test_report: str = ''
-    chart_name: str = ''
-    chart_version: str = ''
+    vendor_type: str = ""
+    owners_file_content: str = ""
+    test_chart: str = ""
+    test_report: str = ""
+    chart_name: str = ""
+    chart_version: str = ""
+

 @dataclass
 class E2ETestSecretOneShot(E2ETestSecret):
     # one-shot testing
-    base_branch: str = ''
-    pr_branch: str = ''
+    base_branch: str = ""
+    pr_branch: str = ""
     pr_number: int = -1
-    vendor: str = ''
-    bad_version: str = ''
+    vendor: str = ""
+    bad_version: str = ""
     provider_delivery: bool = False
     index_file: str = "index.yaml"

+
 @dataclass
 class E2ETestSecretRecursive(E2ETestSecret):
     # recursive testing
-    software_name: str = ''
-    software_version: str = ''
-    pr_base_branch: str = ''
+    software_name: str = ""
+    software_version: str = ""
+    pr_base_branch: str = ""
     base_branches: list = None
     pr_branches: list = None
     dry_run: bool = True
diff --git a/tests/functional/utils/set_directory.py b/tests/functional/utils/set_directory.py
index 921c295f..becb220e 100644
--- a/tests/functional/utils/set_directory.py
+++ b/tests/functional/utils/set_directory.py
@@ -8,16 +8,19 @@
 from dataclasses import dataclass
 from pathlib import Path

+
 @dataclass
 class SetDirectory(object):
     """
     Args:
         path (Path): The path to the cwd
     """
+
     path: Path
     origin: Path = Path().absolute()

     def __enter__(self):
         os.chdir(self.path)

+
     def __exit__(self, exc_type, exc_value, traceback):
         os.chdir(self.origin)
diff --git a/tests/functional/utils/setttings.py b/tests/functional/utils/setttings.py
index 5f2439f1..45caafc8 100644
--- a/tests/functional/utils/setttings.py
+++ b/tests/functional/utils/setttings.py
@@ -1,14 +1,14 @@
 # -*- coding: utf-8 -*-
"""Settings and global variables for e2e tests""" -GITHUB_BASE_URL = 'https://api.github.com' +GITHUB_BASE_URL = "https://api.github.com" # The sandbox repository where we run all our tests on -TEST_REPO = 'openshift-helm-charts/sandbox' +TEST_REPO = "openshift-helm-charts/sandbox" # The prod repository where we create notification issues -PROD_REPO = 'openshift-helm-charts/charts' +PROD_REPO = "openshift-helm-charts/charts" # The prod branch where we store all chart files -PROD_BRANCH = 'main' +PROD_BRANCH = "main" # This is used to find chart certification workflow run id -CERTIFICATION_CI_NAME = 'CI' +CERTIFICATION_CI_NAME = "CI" # GitHub actions bot email for git email -GITHUB_ACTIONS_BOT_EMAIL = '41898282+github-actions[bot]@users.noreply.github.com' +GITHUB_ACTIONS_BOT_EMAIL = "41898282+github-actions[bot]@users.noreply.github.com"