diff --git a/monitoring/uss_qualifier/.gitignore b/monitoring/uss_qualifier/.gitignore
index 3159fa8e0..5df253d20 100644
--- a/monitoring/uss_qualifier/.gitignore
+++ b/monitoring/uss_qualifier/.gitignore
@@ -1,4 +1,6 @@
-config_run_locally.json
-config_test_fully_mocked_local_system.json
-report*.json
+/config_run_locally.json
+/config_test_fully_mocked_local_system.json
+/report*.json
 client_secret.json
+/tested_requirements.html
+/report.gv
diff --git a/monitoring/uss_qualifier/README.md b/monitoring/uss_qualifier/README.md
index 5b1c6be27..ac270b341 100644
--- a/monitoring/uss_qualifier/README.md
+++ b/monitoring/uss_qualifier/README.md
@@ -52,3 +52,4 @@ Note: We are currently in the process of migrating the technical implementation
 * Partial test configurations, including RID telemetry to inject, operational intents to inject, etc, can be tracked in the InterUSS repository, but they could not be used without specifying the missing resources describing systems under test.
 
 ### [Test resources](resources/README.md)
+
diff --git a/monitoring/uss_qualifier/bin/visualize_configuration.sh b/monitoring/uss_qualifier/bin/visualize_configuration.sh
deleted file mode 100755
index 2dcf11ba0..000000000
--- a/monitoring/uss_qualifier/bin/visualize_configuration.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-# Find and change to repo root directory
-OS=$(uname)
-if [[ "$OS" == "Darwin" ]]; then
-  # OSX uses BSD readlink
-  BASEDIR="$(dirname "$0")"
-else
-  BASEDIR=$(readlink -e "$(dirname "$0")")
-fi
-cd "${BASEDIR}/../../.." || exit 1
-
-monitoring/build.sh || exit 1
-
-REPORT_FILE="$(pwd)/monitoring/uss_qualifier/report.json"
-
-QUALIFIER_OPTIONS="--report /app/monitoring/uss_qualifier/report.json"
-
-if [ "$CI" == "true" ]; then
-  docker_args="--add-host host.docker.internal:host-gateway" # Required to reach other containers in Ubuntu (used for Github Actions)
-else
-  docker_args="-it"
-fi
-
-# shellcheck disable=SC2086
-docker run ${docker_args} --name uss_qualifier \
-  --rm \
-  -e QUALIFIER_OPTIONS="${QUALIFIER_OPTIONS}" \
-  -e PYTHONBUFFERED=1 \
-  -v "${REPORT_FILE}:/app/monitoring/uss_qualifier/report.json" \
-  -w /app/monitoring/uss_qualifier \
-  interuss/monitoring \
-  python visualize_configuration.py $QUALIFIER_OPTIONS
diff --git a/monitoring/uss_qualifier/configurations/dev/non_docker/local_test.json b/monitoring/uss_qualifier/configurations/dev/non_docker/local_test.json
new file mode 100644
index 000000000..e99cbca69
--- /dev/null
+++ b/monitoring/uss_qualifier/configurations/dev/non_docker/local_test.json
@@ -0,0 +1,24 @@
+{
+  "resources": {
+    "resource_declarations": {
+      "$ref": "resources.yaml#/all"
+    }
+  },
+  "test_suite": {
+    "suite_type": "suites.dev.local_test",
+    "resources": {
+      "adjacent_circular_flights_data": "adjacent_circular_flights_data",
+      "adjacent_circular_storage_config": "adjacent_circular_storage_config",
+      "kml_flights_data": "kml_flights_data",
+      "kml_storage_config": "kml_storage_config",
+      "service_providers": "netrid_service_providers",
+      "observers": "netrid_observers",
+      "evaluation_configuration": "netrid_observation_evaluation_configuration",
+      "flight_planners": "flight_planners",
+      "conflicting_flights": "conflicting_flights",
+      "priority_preemption_flights": "priority_preemption_flights",
+      "invalid_flight_auth_flights": "invalid_flight_auth_flights",
+      "dss": "dss"
+    }
+  }
+}
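This configuration only wires the suite's resource names to declared resource IDs; the declarations themselves come from resources.yaml (next file) via the `$ref` under `resource_declarations`. A small illustrative sketch (repo-root-relative path assumed) that prints this wiring:

```python
# Sketch only: lists which resource IDs the dev suite expects; the real
# resolution of the "$ref" into resources.yaml is done by uss_qualifier.
import json

with open("monitoring/uss_qualifier/configurations/dev/non_docker/local_test.json") as f:
    config = json.load(f)

# Suite resource name -> declared resource ID from resources.yaml
for suite_resource, declared_id in config["test_suite"]["resources"].items():
    print(f"{suite_resource} <- {declared_id}")
```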
diff --git a/monitoring/uss_qualifier/configurations/dev/non_docker/resources.yaml b/monitoring/uss_qualifier/configurations/dev/non_docker/resources.yaml
new file mode 100644
index 000000000..b25e00535
--- /dev/null
+++ b/monitoring/uss_qualifier/configurations/dev/non_docker/resources.yaml
@@ -0,0 +1,94 @@
+all:
+  allOf:
+    - $ref: '#/uspace'
+    - $ref: '#/net_rid_sims'
+
+uspace:
+  allOf:
+    - $ref: '#/net_rid'
+    - $ref: '#/flight_auth'
+
+net_rid:
+  $ref: '#/common'
+  netrid_service_providers:
+    resource_type: resources.netrid.NetRIDServiceProviders
+    dependencies:
+      auth_adapter: utm_auth
+    specification:
+      service_providers:
+        - participant_id: uss1
+          injection_base_url: http://localhost:8071/ridsp/injection
+  netrid_observers:
+    resource_type: resources.netrid.NetRIDObserversResource
+    dependencies:
+      auth_adapter: utm_auth
+    specification:
+      observers:
+        - participant_id: uss2
+          observation_base_url: http://localhost:8073/riddp/observation
+  netrid_observation_evaluation_configuration:
+    resource_type: resources.netrid.EvaluationConfigurationResource
+    specification: {}
+
+net_rid_sims:
+  adjacent_circular_flights_data:
+    resource_type: resources.netrid.FlightDataResource
+    specification:
+      adjacent_circular_flights_simulation_source: {}
+  adjacent_circular_storage_config:
+    resource_type: resources.netrid.FlightDataStorageResource
+    specification:
+      flight_record_collection_path: "./test_data/che/netrid/circular_flights.json"
+  kml_flights_data:
+    resource_type: resources.netrid.FlightDataResource
+    specification:
+      kml_source:
+        kml_location: file://./test_data/usa/netrid/dcdemo.kml
+  kml_storage_config:
+    resource_type: resources.netrid.FlightDataStorageResource
+    specification:
+      flight_record_collection_path: "./test_data/usa/netrid/dcdemo_flights.json"
+
+flight_auth:
+  $ref: '#/f3548'
+  invalid_flight_auth_flights:
+    resource_type: resources.flight_planning.FlightIntentsResource
+    specification:
+      planning_time: '0:05:00'
+      file_source: file://./test_data/che/flight_intents/invalid_flight_auths.json
+
+f3548:
+  $ref: '#/common'
+  flight_planners:
+    resource_type: resources.flight_planning.FlightPlannersResource
+    dependencies:
+      auth_adapter: utm_auth
+    specification:
+      flight_planners:
+        - participant_id: uss1
+          injection_base_url: http://localhost:8074/scdsc
+        - participant_id: uss2
+          injection_base_url: http://localhost:8074/scdsc
+  conflicting_flights:
+    resource_type: resources.flight_planning.FlightIntentsResource
+    specification:
+      planning_time: '0:05:00'
+      file_source: file://./test_data/che/flight_intents/conflicting_flights.json
+  priority_preemption_flights:
+    resource_type: resources.flight_planning.FlightIntentsResource
+    specification:
+      planning_time: '0:05:00'
+      file_source: test_data.che.flight_intents.priority_preemption
+  dss:
+    resource_type: resources.astm.f3548.v21.DSSInstanceResource
+    dependencies:
+      auth_adapter: utm_auth
+    specification:
+      participant_id: uss1
+      base_url: http://localhost:8082
+
+common:
+  utm_auth:
+    resource_type: resources.communications.AuthAdapterResource
+    specification:
+      environment_variable_containing_auth_spec: AUTH_SPEC
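The top-level blocks compose through JSON Schema-style `allOf`/`$ref` pointers within this file, so `all` expands to the union of the U-space and NetRID-simulation declarations. A standalone sketch of how that composition flattens, assuming PyYAML is available; the real resolution is performed by uss_qualifier's configuration loading, so this is only illustrative:

```python
# Sketch only: illustrates how the allOf/$ref composition in resources.yaml
# flattens into one dict of resource declarations; uss_qualifier's own loader
# is authoritative.
import yaml

with open("monitoring/uss_qualifier/configurations/dev/non_docker/resources.yaml") as f:
    doc = yaml.safe_load(f)

def flatten(node: dict) -> dict:
    result = {}
    for key, value in node.items():
        if key == "$ref":
            # '#/common' -> merge in the declarations of the referenced top-level block
            result.update(flatten(doc[value.split("/")[-1]]))
        elif key == "allOf":
            for item in value:
                result.update(flatten(item))
        else:
            result[key] = value
    return result

# The "all" view referenced by local_test.json's resource_declarations
declarations = flatten(doc["all"])
print(sorted(declarations))  # e.g. ['adjacent_circular_flights_data', ..., 'utm_auth']
```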
diff --git a/monitoring/uss_qualifier/main.py b/monitoring/uss_qualifier/main.py
index a11556aa5..08bd97474 100644
--- a/monitoring/uss_qualifier/main.py
+++ b/monitoring/uss_qualifier/main.py
@@ -5,10 +5,16 @@
 import os
 import sys
 
+from implicitdict import ImplicitDict
 from monitoring.monitorlib.versioning import get_code_version
 from monitoring.uss_qualifier.configurations.configuration import TestConfiguration
+from monitoring.uss_qualifier.reports.documents import render_requirement_table
+from monitoring.uss_qualifier.reports.graphs import make_graph
 from monitoring.uss_qualifier.reports.report import TestRunReport
 from monitoring.uss_qualifier.resources.resource import create_resources
+from monitoring.uss_qualifier.scenarios.documentation.requirements import (
+    evaluate_requirements,
+)
 from monitoring.uss_qualifier.suites.suite import (
     TestSuite,
 )
@@ -19,10 +25,24 @@ def parseArgs() -> argparse.Namespace:
 
     parser.add_argument(
         "--config",
-        required=True,
         help="Configuration string according to monitoring/uss_qualifier/configurations/README.md",
     )
 
+    parser.add_argument(
+        "--report",
+        help="File name of the report to write (if --config provided) or read (if --config not provided)",
+    )
+
+    parser.add_argument(
+        "--dot",
+        help="File name to create for a GraphViz dot text file summarizing the test run",
+    )
+
+    parser.add_argument(
+        "--tested_requirements",
+        help="File name to create for a tested requirements HTML summary",
+    )
+
     return parser.parse_args()
 
 
@@ -45,9 +65,29 @@ def main() -> int:
     args = parseArgs()
 
-    reports = uss_test_executor(args.config)
-    with open("report.json", "w") as f:
-        json.dump(reports, f, indent=2)
+    if args.config is not None:
+        report = uss_test_executor(args.config)
+        if args.report is not None:
+            print(f"Writing report to {args.report}")
+            with open(args.report, "w") as f:
+                json.dump(report, f, indent=2)
+    elif args.report is not None:
+        with open(args.report, "r") as f:
+            report = ImplicitDict.parse(json.load(f), TestRunReport)
+    else:
+        raise ValueError("No input provided; --config or --report must be specified")
+
+    if args.dot is not None:
+        print(f"Writing GraphViz dot source to {args.dot}")
+        with open(args.dot, "w") as f:
+            f.write(make_graph(report).source)
+
+    if args.tested_requirements is not None:
+        print(f"Writing tested requirements summary to {args.tested_requirements}")
+        requirements = evaluate_requirements(report)
+        with open(args.tested_requirements, "w") as f:
+            f.write(render_requirement_table(requirements))
+
     return os.EX_OK
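With these arguments, artifact generation can also be driven from an existing report without re-running any tests (`--report` without `--config`). A minimal sketch of that flow using the functions added here, assuming a report.json from a previous run is available in the working directory:

```python
# Sketch of the report post-processing path added to main.py above.
import json

from implicitdict import ImplicitDict

from monitoring.uss_qualifier.reports.documents import render_requirement_table
from monitoring.uss_qualifier.reports.graphs import make_graph
from monitoring.uss_qualifier.reports.report import TestRunReport
from monitoring.uss_qualifier.scenarios.documentation.requirements import (
    evaluate_requirements,
)

# Load a report produced by a previous run (equivalent to --report report.json without --config)
with open("report.json", "r") as f:
    report = ImplicitDict.parse(json.load(f), TestRunReport)

# GraphViz source summarizing the test run (equivalent to --dot report.gv)
with open("report.gv", "w") as f:
    f.write(make_graph(report).source)

# HTML table of tested requirements (equivalent to --tested_requirements tested_requirements.html)
with open("tested_requirements.html", "w") as f:
    f.write(render_requirement_table(evaluate_requirements(report)))
```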
diff --git a/monitoring/uss_qualifier/reports/documents.py b/monitoring/uss_qualifier/reports/documents.py
new file mode 100644
index 000000000..5a04f82c2
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/documents.py
@@ -0,0 +1,36 @@
+from typing import List
+
+from jinja2 import Environment, PackageLoader
+
+from monitoring.uss_qualifier.reports.report import ParticipantID
+from monitoring.uss_qualifier.scenarios.documentation.requirements import Requirement
+
+
+def _all_participants(requirements: List[Requirement]) -> List[ParticipantID]:
+    participants = set()
+    for requirement in requirements:
+        for participant_id in requirement.participant_performance:
+            participants.add(participant_id)
+    result = list(participants)
+    result.sort()
+    return result
+
+
+def render_requirement_table(requirements: List[Requirement]) -> str:
+    all_participants = _all_participants(requirements)
+    rows = [["Requirement"] + all_participants]
+    for requirement in requirements:
+        cols = [requirement.requirement_id]
+        for participant in all_participants:
+            performance = requirement.participant_performance.get(participant, None)
+            if performance is None:
+                cols.append("")
+            else:
+                n_total = len(performance.successes) + len(performance.failures)
+                percentage_successful = 100 * len(performance.successes) / n_total
+                cols.append("{:.0f}%".format(percentage_successful))
+        rows.append(cols)
+
+    env = Environment(loader=PackageLoader(__name__))
+    template = env.get_template("tested_requirements.html")
+    return template.render(rows=rows)
diff --git a/monitoring/uss_qualifier/reports/graphs.py b/monitoring/uss_qualifier/reports/graphs.py
index 608b4c0f3..ec73ba591 100644
--- a/monitoring/uss_qualifier/reports/graphs.py
+++ b/monitoring/uss_qualifier/reports/graphs.py
@@ -4,8 +4,12 @@
 
 from implicitdict import ImplicitDict
 
-from monitoring.uss_qualifier.reports import TestSuiteReport, TestScenarioReport
-from monitoring.uss_qualifier.reports.report import ActionGeneratorReport, TestRunReport
+from monitoring.uss_qualifier.reports.report import (
+    ActionGeneratorReport,
+    TestRunReport,
+    TestSuiteReport,
+    TestScenarioReport,
+)
 from monitoring.uss_qualifier.resources.definitions import (
     ResourceID,
     ResourceCollection,
diff --git a/monitoring/uss_qualifier/reports/templates/tested_requirements.html b/monitoring/uss_qualifier/reports/templates/tested_requirements.html
new file mode 100644
index 000000000..bd57a545a
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/templates/tested_requirements.html
@@ -0,0 +1,16 @@
+<html>
+<body>
+  Note: Only requirements that were tested appear in the table below. If checks are skipped or otherwise not performed, they may not appear below.
+  <table>
+    {% for row in rows %}
+    <tr>
+      {% for cell in row %}
+      <td>
+        {{ cell }}
+      </td>
+      {% endfor %}
+    </tr>
+    {% endfor %}
+  </table>
+</body>
+</html>
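render_requirement_table feeds the rows computed in documents.py into this template: one row per requirement, one column per participant, and each cell showing the percentage of that participant's checks that passed. A sketch with hand-built, illustrative data (assumes the monitoring package and jinja2 are importable so PackageLoader can find the template; the requirement ID and JSONPath values are made up):

```python
# Sketch only: exercises render_requirement_table with hand-built data.
from monitoring.uss_qualifier.reports.documents import render_requirement_table
from monitoring.uss_qualifier.scenarios.documentation.requirements import (
    ParticipantRequirementPerformance,
    Requirement,
)

requirements = [
    Requirement(
        requirement_id="astm.f3548.v21.SCD0035",  # illustrative requirement ID
        participant_performance={
            "uss1": ParticipantRequirementPerformance(
                successes=["$.report.actions[0].test_scenario.cases[0].steps[0].passed_checks[0]"],
                failures=[],
            ),
            "uss2": ParticipantRequirementPerformance(
                successes=[],
                failures=["$.report.actions[0].test_scenario.cases[0].steps[1].failed_checks[0]"],
            ),
        },
    )
]

# Produces an HTML table with columns uss1 (100%) and uss2 (0%) for this requirement
html = render_requirement_table(requirements)
print(html)
```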
diff --git a/monitoring/uss_qualifier/run_locally.sh b/monitoring/uss_qualifier/run_locally.sh
index 721436eaa..e40939abd 100755
--- a/monitoring/uss_qualifier/run_locally.sh
+++ b/monitoring/uss_qualifier/run_locally.sh
@@ -33,6 +33,12 @@ REPORT_FILE="$(pwd)/monitoring/uss_qualifier/report.json"
 # Report file must already exist to share correctly with the Docker container
 touch "${REPORT_FILE}"
 
+TESTED_REQS_FILE="$(pwd)/monitoring/uss_qualifier/tested_requirements.html"
+touch "${TESTED_REQS_FILE}"
+
+DOT_FILE="$(pwd)/monitoring/uss_qualifier/report.gv"
+touch "${DOT_FILE}"
+
 if [ "$CI" == "true" ]; then
   docker_args="--add-host host.docker.internal:host-gateway" # Required to reach other containers in Ubuntu (used for Github Actions)
 else
@@ -46,7 +52,12 @@ docker run ${docker_args} --name uss_qualifier \
   -e PYTHONBUFFERED=1 \
   -e AUTH_SPEC=${AUTH_SPEC} \
   -v "${REPORT_FILE}:/app/monitoring/uss_qualifier/report.json" \
+  -v "${TESTED_REQS_FILE}:/app/monitoring/uss_qualifier/tested_requirements.html" \
+  -v "${DOT_FILE}:/app/monitoring/uss_qualifier/report.gv" \
   -v "$(pwd):/app" \
   -w /app/monitoring/uss_qualifier \
   interuss/monitoring \
-  python main.py $QUALIFIER_OPTIONS
+  python main.py $QUALIFIER_OPTIONS \
+    --report report.json \
+    --tested_requirements tested_requirements.html \
+    --dot report.gv
diff --git a/monitoring/uss_qualifier/scenarios/documentation/__init__.py b/monitoring/uss_qualifier/scenarios/documentation/__init__.py
index 3d1a39757..69f72a8d5 100644
--- a/monitoring/uss_qualifier/scenarios/documentation/__init__.py
+++ b/monitoring/uss_qualifier/scenarios/documentation/__init__.py
@@ -7,7 +7,8 @@
 import marko.element
 import marko.inline
 
-from monitoring.monitorlib.inspection import fullname
+from monitoring import uss_qualifier as uss_qualifier_module
+from monitoring.monitorlib.inspection import fullname, get_module_object_by_name
 from monitoring.uss_qualifier.reports.report import RequirementID
 
 RESOURCES_HEADING = "resources"
@@ -35,6 +36,12 @@ class TestCaseDocumentation(ImplicitDict):
     url: Optional[str] = None
     steps: List[TestStepDocumentation]
 
+    def get_step_by_name(self, step_name: str) -> Optional[TestStepDocumentation]:
+        for step in self.steps:
+            if step.name == step_name:
+                return step
+        return None
+
 
 class TestScenarioDocumentation(ImplicitDict):
     name: str
@@ -43,6 +50,12 @@ class TestScenarioDocumentation(ImplicitDict):
     cases: List[TestCaseDocumentation]
     cleanup: Optional[TestStepDocumentation]
 
+    def get_case_by_name(self, case_name: str) -> Optional[TestCaseDocumentation]:
+        for case in self.cases:
+            if case.name == case_name:
+                return case
+        return None
+
 
 _test_step_cache: Dict[str, TestStepDocumentation] = {}
 
@@ -191,7 +204,7 @@ def _parse_resources(values) -> List[str]:
     return resources
 
 
-def parse_documentation(scenario: Type) -> TestScenarioDocumentation:
+def _parse_documentation(scenario: Type) -> TestScenarioDocumentation:
     # Load the .md file matching the Python file where this scenario type is defined
     doc_filename = os.path.splitext(inspect.getfile(scenario))[0] + ".md"
     if not os.path.exists(doc_filename):
@@ -263,3 +276,15 @@ def parse_documentation(scenario: Type) -> TestScenarioDocumentation:
     if cleanup is not None:
         kwargs["cleanup"] = cleanup
     return TestScenarioDocumentation(**kwargs)
+
+
+def get_documentation(scenario: Type) -> TestScenarioDocumentation:
+    DOC_CACHE_ATTRIBUTE = "_md_documentation"
+    if not hasattr(scenario, DOC_CACHE_ATTRIBUTE):
+        setattr(scenario, DOC_CACHE_ATTRIBUTE, _parse_documentation(scenario))
+    return getattr(scenario, DOC_CACHE_ATTRIBUTE)
+
+
+def get_documentation_by_name(scenario_type_name: str) -> TestScenarioDocumentation:
+    scenario_type = get_module_object_by_name(uss_qualifier_module, scenario_type_name)
+    return get_documentation(scenario_type)
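get_documentation caches the parsed markdown on the scenario class itself (the `_md_documentation` attribute), so constructing many instances of the same TestScenario parses its documentation only once. A standalone sketch of that caching pattern, with a stub standing in for the real markdown parsing:

```python
# Pattern illustration only: mirrors get_documentation's class-attribute cache
# with a stub parser instead of the real markdown parsing.
from typing import Type

_DOC_CACHE_ATTRIBUTE = "_md_documentation"


def _parse_documentation_stub(scenario: Type) -> str:
    print(f"Parsing documentation for {scenario.__name__}")  # happens once per class
    return f"documentation for {scenario.__name__}"


def get_documentation_cached(scenario: Type) -> str:
    if not hasattr(scenario, _DOC_CACHE_ATTRIBUTE):
        setattr(scenario, _DOC_CACHE_ATTRIBUTE, _parse_documentation_stub(scenario))
    return getattr(scenario, _DOC_CACHE_ATTRIBUTE)


class SomeScenario:
    pass


get_documentation_cached(SomeScenario)  # parses
get_documentation_cached(SomeScenario)  # served from the cached class attribute
```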
diff --git a/monitoring/uss_qualifier/scenarios/documentation/requirements.py b/monitoring/uss_qualifier/scenarios/documentation/requirements.py
new file mode 100644
index 000000000..1c078d618
--- /dev/null
+++ b/monitoring/uss_qualifier/scenarios/documentation/requirements.py
@@ -0,0 +1,187 @@
+from typing import List, Dict, Union
+
+from implicitdict import ImplicitDict
+
+from monitoring.monitorlib.inspection import (
+    get_module_object_by_name,
+    import_submodules,
+)
+from monitoring import uss_qualifier as uss_qualifier_module
+from monitoring.uss_qualifier import scenarios as scenarios_module
+from monitoring.uss_qualifier.reports.report import (
+    TestRunReport,
+    TestSuiteReport,
+    ActionGeneratorReport,
+    TestSuiteActionReport,
+    TestScenarioReport,
+    RequirementID,
+    ParticipantID,
+    TestCaseReport,
+    TestStepReport,
+    PassedCheck,
+    FailedCheck,
+)
+from monitoring.uss_qualifier.scenarios.documentation import (
+    get_documentation_by_name,
+    TestScenarioDocumentation,
+    TestCaseDocumentation,
+    TestStepDocumentation,
+)
+
+JSONPath = str
+
+
+class ParticipantRequirementPerformance(ImplicitDict):
+    successes: List[JSONPath]
+    """List of passed checks involving the requirement"""
+
+    failures: List[JSONPath]
+    """List of failed checks involving the requirement"""
+
+
+class Requirement(ImplicitDict):
+    requirement_id: RequirementID
+    """Identity of the requirement"""
+
+    participant_performance: Dict[ParticipantID, ParticipantRequirementPerformance]
+    """The performance of each involved participant on the requirement"""
+
+
+def _add_check(
+    check: Union[PassedCheck, FailedCheck],
+    path: JSONPath,
+    scenario_docs: TestScenarioDocumentation,
+    case_docs: TestCaseDocumentation,
+    step_docs: TestStepDocumentation,
+    requirements: Dict[RequirementID, Requirement],
+):
+    if not check.requirements:
+        # Generate an implied requirement ID
+        tested_requirements = [
+            f"{scenario_docs.name.title()}.{case_docs.name.title()}.{step_docs.name.title()}.{check.name.title()}".replace(
+                " ", ""
+            )
+        ]
+    else:
+        tested_requirements = check.requirements
+    for requirement_id in tested_requirements:
+        if requirement_id not in requirements:
+            requirements[requirement_id] = Requirement(
+                requirement_id=requirement_id, participant_performance={}
+            )
+        requirement = requirements[requirement_id]
+
+        participants = check.participants if check.participants else [""]
+        for participant_id in participants:
+            if participant_id not in requirement.participant_performance:
+                requirement.participant_performance[
+                    participant_id
+                ] = ParticipantRequirementPerformance(successes=[], failures=[])
+            performance = requirement.participant_performance[participant_id]
+            if isinstance(check, PassedCheck):
+                performance.successes.append(path)
+            elif isinstance(check, FailedCheck):
+                performance.failures.append(path)
+            else:
+                raise ValueError("Provided check was not a PassedCheck or FailedCheck")
+
+
+def _evaluate_requirements_in_step(
+    report: TestStepReport,
+    scenario_docs: TestScenarioDocumentation,
+    case_docs: TestCaseDocumentation,
+    path: JSONPath,
+    requirements: Dict[RequirementID, Requirement],
+) -> None:
+    step_docs = case_docs.get_step_by_name(report.name)
+    for i, check in enumerate(report.passed_checks):
+        _add_check(
+            check,
+            path + f".passed_checks[{i}]",
+            scenario_docs,
+            case_docs,
+            step_docs,
+            requirements,
+        )
+    for i, check in enumerate(report.failed_checks):
+        _add_check(
+            check,
+            path + f".failed_checks[{i}]",
+            scenario_docs,
+            case_docs,
+            step_docs,
+            requirements,
+        )
+
+
+def _evaluate_requirements_in_case(
+    report: TestCaseReport,
+    scenario_docs: TestScenarioDocumentation,
+    path: JSONPath,
+    requirements: Dict[RequirementID, Requirement],
+) -> None:
+    case_docs = scenario_docs.get_case_by_name(report.name)
+    for i, step in enumerate(report.steps):
+        _evaluate_requirements_in_step(
+            step, scenario_docs, case_docs, path + f".steps[{i}]", requirements
+        )
+
+
+def _evaluate_requirements_in_scenario(
+    report: TestScenarioReport,
+    path: JSONPath,
+    requirements: Dict[RequirementID, Requirement],
+) -> None:
+    scenario_docs = get_documentation_by_name(report.scenario_type)
+    for i, case in enumerate(report.cases):
+        _evaluate_requirements_in_case(
+            case, scenario_docs, path + f".cases[{i}]", requirements
+        )
+
+
+def _evaluate_requirements_in_action(
+    report: TestSuiteActionReport,
+    path: JSONPath,
+    requirements: Dict[RequirementID, Requirement],
+) -> None:
+    if "test_suite" in report:
+        _evaluate_requirements_in_suite(
+            report.test_suite, path + ".test_suite", requirements
+        )
+    elif "action_generator" in report:
+        _evaluate_requirements_in_generator(
+            report.action_generator, path + ".action_generator", requirements
+        )
+    elif "test_scenario" in report:
+        _evaluate_requirements_in_scenario(
+            report.test_scenario, path + ".test_scenario", requirements
+        )
+    else:
+        raise ValueError("Unsupported action type found in TestSuiteActionReport")
+
+
+def _evaluate_requirements_in_generator(
+    report: ActionGeneratorReport,
+    path: JSONPath,
+    requirements: Dict[RequirementID, Requirement],
+) -> None:
+    for i, action in enumerate(report.actions):
+        _evaluate_requirements_in_action(action, path + f".actions[{i}]", requirements)
+
+
+def _evaluate_requirements_in_suite(
+    report: TestSuiteReport,
+    path: JSONPath,
+    requirements: Dict[RequirementID, Requirement],
+) -> None:
+    for i, action in enumerate(report.actions):
+        _evaluate_requirements_in_action(action, path + f".actions[{i}]", requirements)
+
+
+def evaluate_requirements(report: TestRunReport) -> List[Requirement]:
+    import_submodules(scenarios_module)
+    reqs = {}
+    _evaluate_requirements_in_suite(report.report, "$.report", reqs)
+    sorted_ids = list(reqs.keys())
+    sorted_ids.sort()
+    return [reqs[k] for k in sorted_ids]
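When a check declares no requirements, `_add_check` derives an implied requirement ID by title-casing the scenario, case, step, and check names and stripping spaces. A quick illustration of that string transformation with made-up names:

```python
# Illustration of the implied requirement ID generated by _add_check when
# check.requirements is empty (all names below are hypothetical).
scenario_name = "ASTM NetRID nominal behavior"
case_name = "Nominal flight"
step_name = "Injection"
check_name = "Valid flight"

implied_id = (
    f"{scenario_name.title()}.{case_name.title()}.{step_name.title()}.{check_name.title()}"
).replace(" ", "")
print(implied_id)  # AstmNetridNominalBehavior.NominalFlight.Injection.ValidFlight
```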
diff --git a/monitoring/uss_qualifier/scenarios/documentation/validation.py b/monitoring/uss_qualifier/scenarios/documentation/validation.py
index 9932ff7c1..6ccfaf86e 100644
--- a/monitoring/uss_qualifier/scenarios/documentation/validation.py
+++ b/monitoring/uss_qualifier/scenarios/documentation/validation.py
@@ -9,7 +9,7 @@
 def validate(test_scenarios: List[TestScenarioType]):
     for test_scenario in test_scenarios:
         # Verify that documentation parses
-        docs = documentation.parse_documentation(test_scenario)
+        docs = documentation.get_documentation(test_scenario)
 
         # Verify that all resources are documented
         constructor_signature = inspect.signature(test_scenario.__init__)
diff --git a/monitoring/uss_qualifier/scenarios/scenario.py b/monitoring/uss_qualifier/scenarios/scenario.py
index 8389f2b7e..af1ab2b38 100644
--- a/monitoring/uss_qualifier/scenarios/scenario.py
+++ b/monitoring/uss_qualifier/scenarios/scenario.py
@@ -24,10 +24,10 @@
 )
 from monitoring.uss_qualifier.scenarios.definitions import TestScenarioDeclaration
 from monitoring.uss_qualifier.scenarios.documentation import (
+    get_documentation,
     TestScenarioDocumentation,
     TestCaseDocumentation,
     TestStepDocumentation,
-    parse_documentation,
     TestCheckDocumentation,
 )
 from monitoring.uss_qualifier.resources.definitions import ResourceTypeName, ResourceID
@@ -145,7 +145,7 @@ class TestScenario(ABC):
     _step_report: Optional[TestStepReport] = None
 
     def __init__(self):
-        self.documentation = parse_documentation(self.__class__)
+        self.documentation = get_documentation(self.__class__)
         self._phase = ScenarioPhase.NotStarted
 
     @staticmethod
diff --git a/monitoring/uss_qualifier/visualize_configuration.py b/monitoring/uss_qualifier/visualize_configuration.py
deleted file mode 100644
index ea146093a..000000000
--- a/monitoring/uss_qualifier/visualize_configuration.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!env/bin/python3
-
-import argparse
-import json
-import os
-import sys
-
-from implicitdict import ImplicitDict
-from monitoring.uss_qualifier.reports.graphs import make_graph
-from monitoring.uss_qualifier.reports.report import TestRunReport
-
-
-def parseArgs() -> argparse.Namespace:
-    parser = argparse.ArgumentParser(description="Visualize a complete test run")
-
-    parser.add_argument(
-        "--report",
-        required=True,
-        help="Path to file containing a JSON representation of a TestRunReport",
-    )
-
-    return parser.parse_args()
-
-
-def main() -> int:
-    args = parseArgs()
-
-    with open(args.report, "r") as f:
-        report = ImplicitDict.parse(json.load(f), TestRunReport)
-    print(make_graph(report).source)
-
-    return os.EX_OK
-
-
-if __name__ == "__main__":
-    sys.exit(main())
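With visualize_configuration.py and its wrapper script removed, the GraphViz source now comes out of main.py via `--dot` (report.gv in run_locally.sh). A sketch of rendering that file to an image, assuming the `graphviz` Python package and the Graphviz binaries are installed:

```python
# Sketch only: render the report.gv emitted by `main.py --dot report.gv`.
# Output filename and format below are illustrative choices.
import graphviz

with open("monitoring/uss_qualifier/report.gv", "r") as f:
    dot_source = f.read()

# Writes monitoring/uss_qualifier/report.png and removes the intermediate dot file copy
graphviz.Source(dot_source).render(
    "monitoring/uss_qualifier/report", format="png", cleanup=True
)
```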