diff --git a/.gitlab/ci/.gitlab-ci.global.yml b/.gitlab/ci/.gitlab-ci.global.yml
index ff29f1fc3e5c..df36af92b74b 100644
--- a/.gitlab/ci/.gitlab-ci.global.yml
+++ b/.gitlab/ci/.gitlab-ci.global.yml
@@ -95,7 +95,7 @@
   - section_end "Stop contrib external build"
 
 .create_artifacts_and_server_type_instance_folders: &create_artifacts_and_server_type_instance_folders
-  - section_start "Create Artifacts & Server Instance & Server Type folders" --collapsed
+  - section_start "Create Artifacts, Server Instance, Server Type folders" --collapsed
   - |
     if [[ -n "${ARTIFACTS_FOLDER}" ]] && [[ ! -d "${ARTIFACTS_FOLDER}" ]]; then
       echo "Creating Artifacts folder: ${ARTIFACTS_FOLDER}"
@@ -113,7 +113,7 @@
       mkdir -p -m 777 "${ARTIFACTS_FOLDER_SERVER_TYPE}/logs"  # using the -p to create the logs folder as well.
       echo "${SERVER_TYPE}" > "${ARTIFACTS_FOLDER_SERVER_TYPE}/server_type.txt"
     fi
-  - section_end "Create Artifacts & Server Instance & Server Type folders"
+  - section_end "Create Artifacts, Server Instance, Server Type folders"
 
 .clone_and_export_variables: &clone_and_export_variables
   - section_start "Git - Job Start Actions" --collapsed
diff --git a/.gitlab/ci/.gitlab-ci.on-push.yml b/.gitlab/ci/.gitlab-ci.on-push.yml
index b59fae70369d..32af242d1a9b 100644
--- a/.gitlab/ci/.gitlab-ci.on-push.yml
+++ b/.gitlab/ci/.gitlab-ci.on-push.yml
@@ -291,6 +291,7 @@ xpanse-prepare-testing-bucket:
       echo "Instance role:${INSTANCE_ROLE} Product type:${PRODUCT_TYPE}"
       if [ -n "${NIGHTLY}" ] && [ "${INSTANCE_ROLE}" != "Server Master" ]; then
         echo "Not running the instance flow, not a Server Master instance and we are in a nightly build."
+        rm -f "${ARTIFACTS_FOLDER_INSTANCE}/instance_role.txt"  # delete the instance_role.txt file so the job will not be collected by the Slack notifier.
        job-done
        exit 0
      fi
diff --git a/Tests/Marketplace/print_test_modeling_rule_summary.py b/Tests/Marketplace/print_test_modeling_rule_summary.py
index 1c0496194cad..e7fb60030707 100644
--- a/Tests/Marketplace/print_test_modeling_rule_summary.py
+++ b/Tests/Marketplace/print_test_modeling_rule_summary.py
@@ -11,7 +11,8 @@
     TEST_SUITE_CELL_EXPLANATION
 from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, \
     JIRA_API_KEY, jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type, JIRA_LABELS
-from Tests.scripts.test_modeling_rule_report import TEST_MODELING_RULES_BASE_HEADERS, calculate_test_modeling_rule_results
+from Tests.scripts.test_modeling_rule_report import TEST_MODELING_RULES_BASE_HEADERS, calculate_test_modeling_rule_results, \
+    write_test_modeling_rule_to_jira_mapping
 from Tests.scripts.utils import logging_wrapper as logging
 from Tests.scripts.utils.log_util import install_logging
 
@@ -55,17 +56,19 @@ def print_test_modeling_rule_summary(artifacts_path: Path, without_jira: bool) -
         calculate_test_modeling_rule_results(test_modeling_rules_results_files, issues)
     )
 
+    write_test_modeling_rule_to_jira_mapping(artifacts_path, jira_tickets_for_modeling_rule)
+
     if modeling_rules_to_test_suite:
         logging.info(f"Found {len(jira_tickets_for_modeling_rule)} Jira tickets out of {len(modeling_rules_to_test_suite)} "
                      "Test modeling rules")
 
-        headers, column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_modeling_rule,
-                                                                                          modeling_rules_to_test_suite,
-                                                                                          server_versions,
-                                                                                          TEST_MODELING_RULES_BASE_HEADERS,
-                                                                                          without_jira=without_jira)
+        column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_modeling_rule,
+                                                                                 modeling_rules_to_test_suite,
+                                                                                 server_versions,
+                                                                                 TEST_MODELING_RULES_BASE_HEADERS,
+                                                                                 without_jira=without_jira)
 
-        table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
+        table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
         logging.info(f"Test Modeling rule Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
 
     return total_errors != 0
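
Throughout this change, calculate_results_table returns the header row embedded as row 0 of tabulate_data instead of a separate headers list, so every caller switches tabulate to its "firstrow" mode. A minimal sketch of that mode, with toy values standing in for the real table:

    from tabulate import tabulate

    # The header row rides along as row 0; tabulate peels it off when headers="firstrow".
    tabulate_data = [
        ["Test Modeling Rule", "XSIAM (Test Suites)"],  # hypothetical header row
        ["MyRule (SomePack)", "1/0/0"],                 # hypothetical data row
    ]
    print(tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=("center", "center")))
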
diff --git a/Tests/Marketplace/print_test_playbook_summary.py b/Tests/Marketplace/print_test_playbook_summary.py
index 8b521a0735ff..d4b5ade575af 100644
--- a/Tests/Marketplace/print_test_playbook_summary.py
+++ b/Tests/Marketplace/print_test_playbook_summary.py
@@ -13,7 +13,7 @@
 from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, \
     JIRA_API_KEY, jira_server_information, generate_query_by_component_and_issue_type, jira_search_all_by_query, JIRA_LABELS
 from Tests.scripts.test_playbooks_report import calculate_test_playbooks_results, \
-    TEST_PLAYBOOKS_BASE_HEADERS, get_jira_tickets_for_playbooks
+    TEST_PLAYBOOKS_BASE_HEADERS, get_jira_tickets_for_playbooks, write_test_playbook_to_jira_mapping
 from Tests.scripts.utils import logging_wrapper as logging
 from Tests.scripts.utils.log_util import install_logging
 
@@ -39,41 +39,6 @@ def read_file_contents(file_path: Path) -> list | None:
         return None
 
 
-def print_test_playbooks_summary_without_junit_report(artifacts_path: Path) -> bool:
-    """
-    Takes the information stored in the files and prints it in a human-readable way.
-    """
-    instance_path = Path(artifacts_path) / "instance_Server Master"
-    failed_tests_path = instance_path / "failed_tests.txt"
-    succeeded_tests_path = instance_path / "succeeded_tests.txt"
-    succeeded_playbooks = read_file_contents(succeeded_tests_path)
-    failed_playbooks = read_file_contents(failed_tests_path)
-
-    # if one of the files isn't existing, we want to fail.
-    if succeeded_playbooks is None or failed_playbooks is None:
-        return True
-
-    succeeded_count = len(succeeded_playbooks)
-    failed_count = len(failed_playbooks)
-
-    logging.info("TEST RESULTS:")
-    logging.info(f"Number of playbooks tested - {succeeded_count + failed_count}")
-
-    if succeeded_count:
-        logging.success(f"Number of succeeded tests - {succeeded_count}")
-        logging.success("Successful Tests:")
-        for playbook_id in succeeded_playbooks:
-            logging.success(f"\t- {playbook_id}")
-
-    if failed_count:
-        logging.error(f"Number of failed tests - {failed_count}:")
-        logging.error("Failed Tests:")
-        for playbook_id in failed_playbooks:
-            logging.error(f"\t- {playbook_id}")
-        return True
-    return False
-
-
 def filter_skipped_playbooks(playbooks_results: dict[str, dict[str, TestSuite]]) -> list[str]:
     filtered_playbooks_ids = []
     for playbook_id, playbook_results in playbooks_results.items():
@@ -90,7 +55,7 @@ def filter_skipped_playbooks(playbooks_results: dict[str, dict[str, TestSuite]])
     return filtered_playbooks_ids
 
 
-def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> tuple[bool, bool]:
+def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> bool:
     test_playbooks_report = artifacts_path / TEST_PLAYBOOKS_REPORT_FILE_NAME
 
     # iterate over the artifacts path and find all the test playbook result files
@@ -98,7 +63,7 @@ def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> tu
         # Write an empty report file to avoid failing the build artifacts collection.
         JUnitXml().write(test_playbooks_report.as_posix(), pretty=True)
         logging.error(f"Could not find any test playbook result files in {artifacts_path}")
-        return False, False
+        return True
 
     logging.info(f"Found {len(test_playbooks_result_files_list)} test playbook result files")
     playbooks_results, server_versions = calculate_test_playbooks_results(test_playbooks_result_files_list)
@@ -124,16 +89,18 @@ def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> tu
     jira_tickets_for_playbooks = get_jira_tickets_for_playbooks(playbooks_ids, issues)
     logging.info(f"Found {len(jira_tickets_for_playbooks)} Jira tickets out of {len(playbooks_ids)} filtered playbooks")
 
-    headers, column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
-                                                                                      playbooks_results,
-                                                                                      server_versions,
-                                                                                      TEST_PLAYBOOKS_BASE_HEADERS,
-                                                                                      without_jira=without_jira)
+    column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
+                                                                             playbooks_results,
+                                                                             server_versions,
+                                                                             TEST_PLAYBOOKS_BASE_HEADERS,
+                                                                             without_jira=without_jira)
 
     logging.info(f"Writing test playbook report to {test_playbooks_report}")
     xml.write(test_playbooks_report.as_posix(), pretty=True)
-    table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
+    write_test_playbook_to_jira_mapping(artifacts_path, jira_tickets_for_playbooks)
+
+    table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
     logging.info(f"Test Playbook Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
-    return True, total_errors != 0
+    return total_errors != 0
 
 
 def main():
@@ -142,10 +109,8 @@ def main():
     options = options_handler()
     artifacts_path = Path(options.artifacts_path)
     logging.info(f"Printing test playbook summary - artifacts path: {artifacts_path}")
-    junit_result_exist, errors_found = print_test_playbooks_summary(artifacts_path, options.without_jira)
-    if not junit_result_exist:
-        errors_found = print_test_playbooks_summary_without_junit_report(artifacts_path)
-    if errors_found:
+
+    if print_test_playbooks_summary(artifacts_path, options.without_jira):
         logging.critical("Test playbook summary found errors")
         sys.exit(1)
diff --git a/Tests/scripts/common.py b/Tests/scripts/common.py
index 21cf8b9c4b5e..3df792f0a1ad 100644
--- a/Tests/scripts/common.py
+++ b/Tests/scripts/common.py
@@ -1,6 +1,7 @@
 from pathlib import Path
 from typing import Any
 
+import pandas as pd
 from jira import Issue
 from junitparser import TestSuite, JUnitXml
 
@@ -15,6 +16,8 @@
 TEST_NATIVE_CANDIDATE = 'Test Native Candidate'
 SECURITY_SCANS = 'Security Scans'
 BUILD_MACHINES_CLEANUP = 'Build Machines Cleanup'
+UNIT_TESTS_WORKFLOW_SUBSTRINGS = {'lint', 'unit', 'demisto sdk nightly', TEST_NATIVE_CANDIDATE.lower()}
+
 WORKFLOW_TYPES = {
     CONTENT_NIGHTLY,
     CONTENT_PR,
@@ -105,7 +108,7 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
                             without_jira: bool = False,
                             with_skipped: bool = False,
                             multiline_headers: bool = True,
-                            transpose: bool = False) -> tuple[list[str], list[str], list[list[Any]], JUnitXml, int]:
+                            transpose: bool = False) -> tuple[list[str], list[list[Any]], JUnitXml, int]:
     xml = JUnitXml()
     headers_multiline_char = "\n" if multiline_headers else " "
     headers = [h.replace("\n", headers_multiline_char) for h in base_headers]
@@ -119,7 +122,7 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
             server_version if transpose else f"{server_version}{headers_multiline_char}({TEST_SUITE_DATA_CELL_HEADER})"
         )
         column_align.append("center")
-    tabulate_data = []
+    tabulate_data = [headers]
     total_row: list[Any] = ([""] * fixed_headers_length
                             + [TestSuiteStatistics(no_color) for _ in range(len(server_versions_list))])
     total_errors = 0
@@ -180,7 +183,11 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
     total_row[0] = (green_text(TOTAL_HEADER) if total_errors == 0 else red_text(TOTAL_HEADER)) \
         if not no_color else TOTAL_HEADER
     tabulate_data.append(total_row)
-    return headers, column_align, tabulate_data, xml, total_errors
+
+    if transpose:
+        tabulate_data = pd.DataFrame(tabulate_data, index=None).transpose().to_numpy()
+
+    return column_align, tabulate_data, xml, total_errors
 
 
 def get_all_failed_results(results: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
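
With transpose=True the table, header row included, is flipped so each header starts its own row; this is the same pandas idiom the function now uses, shown here on toy data:

    import pandas as pd

    table = [["Instance Role", "XSIAM"],   # header row (toy values)
             ["Server Master", "2/0/0"]]
    # transpose(): rows become columns, so each header leads its own row.
    flipped = pd.DataFrame(table, index=None).transpose().to_numpy()
    # flipped now holds [["Instance Role", "Server Master"], ["XSIAM", "2/0/0"]],
    # so headers="firstrow" still works on the transposed data downstream.
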
column_align.append("center") - tabulate_data = [] + tabulate_data = [headers] total_row: list[Any] = ([""] * fixed_headers_length + [TestSuiteStatistics(no_color) for _ in range(len(server_versions_list))]) total_errors = 0 @@ -180,7 +183,11 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue], total_row[0] = (green_text(TOTAL_HEADER) if total_errors == 0 else red_text(TOTAL_HEADER)) \ if not no_color else TOTAL_HEADER tabulate_data.append(total_row) - return headers, column_align, tabulate_data, xml, total_errors + + if transpose: + tabulate_data = pd.DataFrame(tabulate_data, index=None).transpose().to_numpy() + + return column_align, tabulate_data, xml, total_errors def get_all_failed_results(results: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]: diff --git a/Tests/scripts/convert_test_modeling_result_to_jira_issues.py b/Tests/scripts/convert_test_modeling_result_to_jira_issues.py index 047354992bb2..410fcca8c380 100644 --- a/Tests/scripts/convert_test_modeling_result_to_jira_issues.py +++ b/Tests/scripts/convert_test_modeling_result_to_jira_issues.py @@ -11,13 +11,15 @@ from tabulate import tabulate from Tests.scripts.common import get_all_failed_results, calculate_results_table, TEST_MODELING_RULES_REPORT_FILE_NAME, \ - get_test_results_files, TEST_SUITE_CELL_EXPLANATION + get_test_results_files, TEST_SUITE_CELL_EXPLANATION, get_properties_for_test_suite from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_API_KEY, \ JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, JIRA_ISSUE_UNRESOLVED_TRANSITION_NAME, JIRA_LABELS, \ jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type from Tests.scripts.test_modeling_rule_report import (create_jira_issue_for_test_modeling_rule, TEST_MODELING_RULES_BASE_HEADERS, - calculate_test_modeling_rule_results) + calculate_test_modeling_rule_results, + write_test_modeling_rule_to_jira_mapping, get_summary_for_test_modeling_rule) +from Tests.scripts.test_playbooks_report import TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED from Tests.scripts.utils import logging_wrapper as logging from Tests.scripts.utils.log_util import install_logging @@ -47,8 +49,9 @@ def main(): install_logging('convert_test_modeling_result_to_jira_issues.log', logger=logging) now = datetime.now(tz=timezone.utc) options = options_handler() + artifacts_path = Path(options.artifacts_path) logging.info("Converting test modeling rule report to Jira issues with the following settings:") - logging.info(f"\tArtifacts path: {options.artifacts_path}") + logging.info(f"\tArtifacts path: {artifacts_path}") logging.info(f"\tJira server url: {JIRA_SERVER_URL}") logging.info(f"\tJira verify SSL: {JIRA_VERIFY_SSL}") logging.info(f"\tJira project id: {JIRA_PROJECT_ID}") @@ -60,9 +63,9 @@ def main(): jira_server = JIRA(JIRA_SERVER_URL, token_auth=JIRA_API_KEY, options={'verify': JIRA_VERIFY_SSL}) jira_server_information(jira_server) - if not (test_modeling_rules_results_files := get_test_results_files(Path(options.artifacts_path), + if not (test_modeling_rules_results_files := get_test_results_files(artifacts_path, TEST_MODELING_RULES_REPORT_FILE_NAME)): - logging.critical(f"Could not find any test modeling rules result files in {options.artifacts_path}") + logging.critical(f"Could not find any test modeling rules result files in {artifacts_path}") sys.exit(1) logging.info(f"Found {len(test_modeling_rules_results_files)} test modeling rules files") @@ -81,11 +84,11 @@ def main(): failed_test_modeling_rule = 
diff --git a/Tests/scripts/convert_test_playbook_result_to_jira_issues.py b/Tests/scripts/convert_test_playbook_result_to_jira_issues.py
index 301167723c01..440b4f137a6d 100644
--- a/Tests/scripts/convert_test_playbook_result_to_jira_issues.py
+++ b/Tests/scripts/convert_test_playbook_result_to_jira_issues.py
@@ -8,7 +8,6 @@
 from tempfile import NamedTemporaryFile
 from typing import Any
 
-import pandas as pd
 import urllib3
 from jira import Issue
 from jira.client import JIRA
@@ -20,9 +19,10 @@
 from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_API_KEY, \
     JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, JIRA_ISSUE_UNRESOLVED_TRANSITION_NAME, JIRA_LABELS, \
     find_existing_jira_ticket, JIRA_ADDITIONAL_FIELDS, generate_ticket_summary, generate_build_markdown_link, \
-    jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type
+    jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type, jira_ticket_to_json_data
 from Tests.scripts.test_playbooks_report import calculate_test_playbooks_results, \
-    TEST_PLAYBOOKS_BASE_HEADERS, get_jira_tickets_for_playbooks, TEST_PLAYBOOKS_JIRA_BASE_HEADERS
+    TEST_PLAYBOOKS_BASE_HEADERS, get_jira_tickets_for_playbooks, TEST_PLAYBOOKS_JIRA_BASE_HEADERS, \
+    write_test_playbook_to_jira_mapping, TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED
 from Tests.scripts.utils import logging_wrapper as logging
 from Tests.scripts.utils.log_util import install_logging
 
@@ -49,8 +49,7 @@ def options_handler() -> argparse.Namespace:
 
 def generate_description(playbook_id: str, build_number: str, junit_file_name: str, table_data: Any, failed: bool) -> str:
     build_markdown_link = generate_build_markdown_link(build_number)
-    transposed = pd.DataFrame(table_data, index=None).transpose().to_numpy()
-    table = tabulate(transposed, headers="firstrow", tablefmt="jira")
+    table = tabulate(table_data, headers="firstrow", tablefmt="jira")
     msg = "failed" if failed else "succeeded"
     description = f"""
     *{playbook_id}* {msg} in {build_markdown_link}
@@ -115,8 +114,9 @@ def main():
     install_logging('convert_test_playbook_result_to_jira_issues.log', logger=logging)
     now = datetime.now(tz=timezone.utc)
     options = options_handler()
+    artifacts_path = Path(options.artifacts_path)
     logging.info("Converting test playbook results to Jira issues with the following settings:")
-    logging.info(f"\tArtifacts path: {options.artifacts_path}")
+    logging.info(f"\tArtifacts path: {artifacts_path}")
     logging.info(f"\tJira server url: {JIRA_SERVER_URL}")
     logging.info(f"\tJira verify SSL: {JIRA_VERIFY_SSL}")
     logging.info(f"\tJira project id: {JIRA_PROJECT_ID}")
@@ -144,52 +144,56 @@ def main():
     jira_tickets_for_playbooks = get_jira_tickets_for_playbooks(list(playbooks_results.keys()), issues)
     logging.info(f"Found {len(jira_tickets_for_playbooks)} Jira tickets out of {len(playbooks_results)} playbooks")
 
+    write_test_playbook_to_jira_mapping(artifacts_path, jira_tickets_for_playbooks)
+    (artifacts_path / TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED).touch()
+
     # Search if we have too many test playbooks that failed beyond the max allowed limit to open, if so we print the
     # list and exit. This is to avoid opening too many Jira issues.
     failed_playbooks = get_all_failed_results(playbooks_results)
 
     if len(failed_playbooks) >= options.max_failures_to_handle:
-        headers, column_align, tabulate_data, _, _ = calculate_results_table(jira_tickets_for_playbooks,
-                                                                             failed_playbooks,
-                                                                             server_versions,
-                                                                             TEST_PLAYBOOKS_BASE_HEADERS)
-        table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
+        column_align, tabulate_data, _, _ = calculate_results_table(jira_tickets_for_playbooks,
+                                                                    failed_playbooks,
+                                                                    server_versions,
+                                                                    TEST_PLAYBOOKS_BASE_HEADERS)
+        table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
         logging.info(f"Test Playbook Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
         logging.critical(f"Found {len(failed_playbooks)} failed test playbooks, "
                          f"which is more than the max allowed limit of {options.max_failures_to_handle} to handle.")
         sys.exit(1)
-
+    playbook_to_jira_mapping = {}
     for playbook_id, test_suites in playbooks_results.items():
         # We create the table without Jira tickets columns, as we don't want to have them within the Jira issue.
         # We also add the skipped tests, as we want to have them within the Jira issue.
         # The table should be created without colors, as we don't want to have them within the Jira issue.
         # We also don't want to have the total row, as we don't want to have it within the Jira issue
         # since it's a single playbook.
-        headers, _, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
-                                                                               {
-                                                                                   playbook_id: test_suites
-                                                                               },
-                                                                               server_versions,
-                                                                               TEST_PLAYBOOKS_JIRA_BASE_HEADERS,
-                                                                               add_total_row=False,
-                                                                               no_color=True,
-                                                                               without_jira=True,
-                                                                               with_skipped=True,
-                                                                               multiline_headers=False,
-                                                                               transpose=True,
-                                                                               )
-
-        if (jira_ticket := jira_tickets_for_playbooks.get(playbook_id)) or total_errors:
+        _, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
+                                                                      {
+                                                                          playbook_id: test_suites
+                                                                      },
+                                                                      server_versions,
+                                                                      TEST_PLAYBOOKS_JIRA_BASE_HEADERS,
+                                                                      add_total_row=False,
+                                                                      no_color=True,
+                                                                      without_jira=True,
+                                                                      with_skipped=True,
+                                                                      multiline_headers=False,
+                                                                      transpose=True,
+                                                                      )
+
+        jira_ticket = jira_tickets_for_playbooks.get(playbook_id)
+        if jira_ticket or total_errors:
             # if the ticket isn't resolved, or we found new errors, we update it, otherwise we skip it.
             if jira_ticket and jira_ticket.get_field("resolution") and not total_errors:
+                playbook_to_jira_mapping[playbook_id] = jira_ticket_to_json_data(jira_ticket)
                 logging.debug(f"Skipped updating Jira issue for resolved test playbook:{playbook_id}")
                 continue
-            # We append the headers to the table data, as we want to have them within the Jira issue.
-            jira_table_data = [headers] + tabulate_data
             junit_file_name = get_attachment_file_name(playbook_id, options.build_number)
-            create_jira_issue(jira_server, jira_ticket, xml, playbook_id, options.build_number, jira_table_data,
-                              options.max_days_to_reopen, now, junit_file_name, total_errors > 0)
+            jira_ticket = create_jira_issue(jira_server, jira_ticket, xml, playbook_id, options.build_number, tabulate_data,
+                                            options.max_days_to_reopen, now, junit_file_name, total_errors > 0)
+            playbook_to_jira_mapping[playbook_id] = jira_ticket_to_json_data(jira_ticket)
         else:
             logging.debug(f"Skipped creating Jira issue for successful test playbook:{playbook_id}")
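
generate_description now receives data that calculate_results_table has already transposed with the header row embedded, so it only renders; tablefmt="jira" emits Jira wiki-markup tables. A toy example of that rendering step:

    from tabulate import tabulate

    table_data = [["Instance Role", "Server Master"],  # toy, already-transposed rows
                  ["Failures", "1"]]
    # Prints Jira markup: '|| Instance Role || Server Master ||' then '| Failures | 1 |'
    print(tabulate(table_data, headers="firstrow", tablefmt="jira"))
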
diff --git a/Tests/scripts/gitlab_slack_notifier.py b/Tests/scripts/gitlab_slack_notifier.py
index faed0520bf33..3661681a5690 100644
--- a/Tests/scripts/gitlab_slack_notifier.py
+++ b/Tests/scripts/gitlab_slack_notifier.py
@@ -11,18 +11,21 @@
 import gitlab
 import requests
+from dateutil.relativedelta import relativedelta
 from demisto_sdk.commands.coverage_analyze.tools import get_total_coverage
-from junitparser import TestSuite
+from gitlab.v4.objects import ProjectPipelineJob
 from slack_sdk import WebClient
 from slack_sdk.web import SlackResponse
 
 from Tests.Marketplace.marketplace_constants import BucketUploadFlow
 from Tests.Marketplace.marketplace_services import get_upload_data
-from Tests.scripts.common import CONTENT_NIGHTLY, CONTENT_PR, TEST_NATIVE_CANDIDATE, WORKFLOW_TYPES, get_instance_directories, \
+from Tests.scripts.common import CONTENT_NIGHTLY, CONTENT_PR, WORKFLOW_TYPES, get_instance_directories, \
     get_properties_for_test_suite, BUCKET_UPLOAD, BUCKET_UPLOAD_BRANCH_SUFFIX, TEST_MODELING_RULES_REPORT_FILE_NAME, \
-    get_test_results_files, CONTENT_MERGE
+    get_test_results_files, CONTENT_MERGE, UNIT_TESTS_WORKFLOW_SUBSTRINGS
 from Tests.scripts.github_client import GithubPullRequest
-from Tests.scripts.test_modeling_rule_report import calculate_test_modeling_rule_results
+from Tests.scripts.test_modeling_rule_report import calculate_test_modeling_rule_results, \
+    read_test_modeling_rule_to_jira_mapping, get_summary_for_test_modeling_rule, TEST_MODELING_RULES_TO_JIRA_TICKETS_CONVERTED
+from Tests.scripts.test_playbooks_report import read_test_playbook_to_jira_mapping, TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED
 from Tests.scripts.utils.log_util import install_logging
 
 ROOT_ARTIFACTS_FOLDER = Path(os.getenv('ARTIFACTS_FOLDER', './artifacts'))
@@ -41,7 +44,9 @@
 GITHUB_TOKEN = os.getenv('GITHUB_TOKEN', '')
 CI_COMMIT_BRANCH = os.getenv('CI_COMMIT_BRANCH', '')
 CI_COMMIT_SHA = os.getenv('CI_COMMIT_SHA', '')
+CI_SERVER_HOST = os.getenv('CI_SERVER_HOST', '')
 DEFAULT_BRANCH = 'master'
+ALL_FAILURES_WERE_CONVERTED_TO_JIRA_TICKETS = ' (All failures were converted to Jira tickets)'
 
 
 def options_handler() -> argparse.Namespace:
@@ -87,18 +92,12 @@ def get_artifact_data(artifact_folder: Path, artifact_relative_path: str) -> str
     return None
 
 
-def get_failed_modeling_rule_name_from_test_suite(test_suite: TestSuite) -> str:
-
-    properties = get_properties_for_test_suite(test_suite)
-
-    return f"{properties['modeling_rule_file_name']} ({properties['pack_id']})"
-
-
 def get_test_report_pipeline_url(pipeline_url: str) -> str:
     return f"{pipeline_url}/test_report"
 
 
-def test_modeling_rules_results(artifact_folder: Path, pipeline_url: str, title: str) -> list[dict[str, Any]]:
+def test_modeling_rules_results(artifact_folder: Path,
+                                pipeline_url: str, title: str) -> list[dict[str, Any]]:
     if not (test_modeling_rules_results_files := get_test_results_files(artifact_folder,
                                                                         TEST_MODELING_RULES_REPORT_FILE_NAME)):
         logging.error(f"Could not find any test modeling rule result files in {artifact_folder}")
@@ -109,6 +108,8 @@ def test_modeling_rules_results(artifact_folder: Path, pipeline_url: str, title:
             'title': title,
         }]
 
+    failed_test_to_jira_mapping = read_test_modeling_rule_to_jira_mapping(artifact_folder)
+
     modeling_rules_to_test_suite, _, _ = (
         calculate_test_modeling_rule_results(test_modeling_rules_results_files)
     )
@@ -128,19 +129,30 @@ def test_modeling_rules_results(artifact_folder: Path, pipeline_url: str, title:
         for test_suite in test_suites.values():
             total_test_suites += 1
             if test_suite.failures or test_suite.errors:
-                failed_test_suites.append(get_failed_modeling_rule_name_from_test_suite(test_suite))
+                properties = get_properties_for_test_suite(test_suite)
+                if modeling_rule := get_summary_for_test_modeling_rule(properties):
+                    failed_test_suites.append(failed_test_data_to_slack_link(modeling_rule,
+                                                                             failed_test_to_jira_mapping.get(modeling_rule)))
 
     if failed_test_suites:
+
+        if (artifact_folder / TEST_MODELING_RULES_TO_JIRA_TICKETS_CONVERTED).exists():
+            title_suffix = ALL_FAILURES_WERE_CONVERTED_TO_JIRA_TICKETS
+            color = 'warning'
+        else:
+            title_suffix = ''
+            color = 'danger'
+
         title = (f"{title} - Failed Tests Modeling rules - Passed:{total_test_suites - len(failed_test_suites)}, "
                  f"Failed:{len(failed_test_suites)}")
         return [{
             'fallback': title,
-            'color': 'danger',
+            'color': color,
             'title': title,
             'title_link': get_test_report_pipeline_url(pipeline_url),
             'fields': [
                 {
-                    "title": "Failed Tests Modeling rules",
+                    "title": f"Failed Tests Modeling rules{title_suffix}",
                     "value": ' ,'.join(failed_test_suites),
                     "short": False
                 }
@@ -156,24 +168,45 @@ def test_modeling_rules_results(artifact_folder: Path, pipeline_url: str, title:
     }]
 
 
+def failed_test_data_to_slack_link(failed_test: str, jira_ticket_data: dict[str, str] | None) -> str:
+    if jira_ticket_data:
+        return slack_link(jira_ticket_data['url'], f"{failed_test} [{jira_ticket_data['key']}]")
+    return failed_test
+
+
+def slack_link(url: str, text: str) -> str:
+    return f"<{url}|{text}>"
+
+
 def test_playbooks_results_to_slack_msg(instance_role: str, succeeded_tests: list[str], failed_tests: list[str],
                                         skipped_integrations: list[str], skipped_tests: list[str],
+                                        playbook_to_jira_mapping: dict[str, Any],
+                                        test_playbook_tickets_converted: bool,
                                         title: str, pipeline_url: str) -> list[dict[str, Any]]:
     if failed_tests:
         title = (f"{title} ({instance_role}) - Test Playbooks - Passed:{len(succeeded_tests)}, Failed:{len(failed_tests)}, "
                  f"Skipped - {len(skipped_tests)}, Skipped Integrations - {len(skipped_integrations)}")
+        if test_playbook_tickets_converted:
+            title_suffix = ALL_FAILURES_WERE_CONVERTED_TO_JIRA_TICKETS
+            color = 'warning'
+        else:
+            title_suffix = ''
+            color = 'danger'
         return [{
             'fallback': title,
-            'color': 'danger',
+            'color': color,
             'title': title,
             'title_link': get_test_report_pipeline_url(pipeline_url),
+            "mrkdwn_in": ["fields"],
             'fields': [{
-                "title": "Failed Test Playbooks",
-                "value": ', '.join(failed_tests),
+                "title": f"Failed Test Playbooks{title_suffix}",
+                "value": ', '.join(
+                    failed_test_data_to_slack_link(playbook_id,
+                                                   playbook_to_jira_mapping.get(playbook_id)) for playbook_id in failed_tests),
                 "short": False
             }]
         }]
@@ -193,6 +226,9 @@ def split_results_file(tests_data: str | None) -> list[str]:
 
 
 def test_playbooks_results(artifact_folder: Path, pipeline_url: str, title: str) -> list[dict[str, Any]]:
+    test_playbook_to_jira_mapping = read_test_playbook_to_jira_mapping(artifact_folder)
+    test_playbook_tickets_converted = (artifact_folder / TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED).exists()
+
     content_team_fields = []
     for instance_role, instance_directory in get_instance_directories(artifact_folder).items():
         succeeded_tests = split_results_file(get_artifact_data(instance_directory, 'succeeded_tests.txt'))
@@ -201,7 +237,9 @@ def test_playbooks_results(artifact_folder: Path, pipeline_url: str, title: str)
         skipped_integrations = split_results_file(get_artifact_data(instance_directory, 'skipped_integrations.txt'))
 
         content_team_fields += test_playbooks_results_to_slack_msg(instance_role, succeeded_tests, failed_tests,
-                                                                   skipped_integrations, skipped_tests, title, pipeline_url)
+                                                                   skipped_integrations, skipped_tests,
+                                                                   test_playbook_to_jira_mapping, test_playbook_tickets_converted,
+                                                                   title, pipeline_url)
 
     return content_team_fields
 
@@ -255,26 +293,34 @@ def bucket_upload_results(bucket_artifact_folder: Path,
 
 def construct_slack_msg(triggering_workflow: str,
                         pipeline_url: str,
-                        pipeline_failed_jobs: list,
+                        pipeline_failed_jobs: list[ProjectPipelineJob],
                         pull_request: GithubPullRequest | None) -> list[dict[str, Any]]:
     # report failing jobs
     content_fields = []
-    failed_jobs_names = {job.name for job in pipeline_failed_jobs}
+
+    failed_jobs_names = {job.name: job.web_url for job in pipeline_failed_jobs}
     if failed_jobs_names:
+        failed_jobs = [slack_link(url, name) for name, url in sorted(failed_jobs_names.items())]
         content_fields.append({
             "title": f'Failed Jobs - ({len(failed_jobs_names)})',
-            "value": '\n'.join(sorted(failed_jobs_names)),
+            "value": '\n'.join(failed_jobs),
+            "short": False
+        })
+
+    if pull_request:
+        content_fields.append({
+            "title": "Pull Request",
+            "value": slack_link(pull_request.data['html_url'], pull_request.data['title']),
             "short": False
         })
 
     # report failing unit-tests
     triggering_workflow_lower = triggering_workflow.lower()
-    check_unittests_substrings = {'lint', 'unit', 'demisto sdk nightly', TEST_NATIVE_CANDIDATE.lower()}
     failed_jobs_or_workflow_title = {job_name.lower() for job_name in failed_jobs_names}
     failed_jobs_or_workflow_title.add(triggering_workflow_lower)
 
     if any(substr in means_include_unittests_results
-           for substr in check_unittests_substrings
+           for substr in UNIT_TESTS_WORKFLOW_SUBSTRINGS
           for means_include_unittests_results in failed_jobs_or_workflow_title):
         content_fields += unit_tests_results()
@@ -309,6 +355,7 @@ def construct_slack_msg(triggering_workflow: str,
     else:
         title += ' - Success'
         color = 'good'
+    title += f' (@{CI_SERVER_HOST})' if CI_SERVER_HOST else ''
     slack_msg = [{
         'fallback': title,
         'color': color,
@@ -340,27 +387,28 @@ def missing_content_packs_test_conf(artifact_folder: Path) -> list[dict[str, Any
 
 def collect_pipeline_data(gitlab_client: gitlab.Gitlab,
                           project_id: str,
-                          pipeline_id: str) -> tuple[str, list]:
+                          pipeline_id: str) -> tuple[str, list[ProjectPipelineJob]]:
     project = gitlab_client.projects.get(int(project_id))
     pipeline = project.pipelines.get(int(pipeline_id))
 
-    failed_jobs = []
+    failed_jobs: list[ProjectPipelineJob] = []
     for job in pipeline.jobs.list(iterator=True):
         logging.info(f'status of gitlab job with id {job.id} and name {job.name} is {job.status}')
         if job.status == 'failed':
             logging.info(f'collecting failed job {job.name}')
             logging.info(f'pipeline associated with failed job is {job.pipeline.get("web_url")}')
-            failed_jobs.append(job)
+            failed_jobs.append(job)  # type: ignore[arg-type]
 
     return pipeline.web_url, failed_jobs
 
 
 def construct_coverage_slack_msg() -> list[dict[str, Any]]:
     coverage_today = get_total_coverage(filename=(ROOT_ARTIFACTS_FOLDER / "coverage_report" / "coverage-min.json").as_posix())
-    yesterday = datetime.now() - timedelta(days=1)
-    coverage_yesterday = get_total_coverage(date=yesterday)
+    coverage_yesterday = get_total_coverage(date=datetime.now() - timedelta(days=1))
+    coverage_last_month = get_total_coverage(date=datetime.now() - relativedelta(months=1))
     color = 'good' if coverage_today >= coverage_yesterday else 'danger'
-    title = f'Content code coverage: {coverage_today:.3f}%'
+    title = (f'Content code coverage: {coverage_today:.3f}% (Yesterday: {coverage_yesterday:.3f}%, '
+             f'Last month: {coverage_last_month:.3f}%)')
 
     return [{
         'fallback': title,
@@ -425,22 +473,22 @@ def main():
 
     with contextlib.suppress(Exception):
         output_file = ROOT_ARTIFACTS_FOLDER / 'slack_msg.json'
-        logging.info(f'Writing slack message to {output_file}')
+        logging.info(f'Writing Slack message to {output_file}')
         with open(output_file, 'w') as f:
             f.write(json.dumps(slack_msg_data, indent=4, sort_keys=True, default=str))
-        logging.info(f'Successfully wrote slack message to {output_file}')
+        logging.info(f'Successfully wrote Slack message to {output_file}')
 
     try:
         response = slack_client.chat_postMessage(
             channel=computed_slack_channel, attachments=slack_msg_data, username=SLACK_USERNAME
         )
         link = build_link_to_message(response)
-        logging.info(f'Successfully sent slack message to channel {computed_slack_channel} link: {link}')
+        logging.info(f'Successfully sent Slack message to channel {computed_slack_channel} link: {link}')
     except Exception:
         if strtobool(options.allow_failure):
-            logging.warning(f'Failed to send slack message to channel {computed_slack_channel} not failing build')
+            logging.warning(f'Failed to send Slack message to channel {computed_slack_channel}, not failing the build')
         else:
-            logging.exception(f'Failed to send slack message to channel {computed_slack_channel}')
+            logging.exception(f'Failed to send Slack message to channel {computed_slack_channel}')
             sys.exit(1)
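
A note on the coverage comparison above: relativedelta signs are easy to invert, since subtracting relativedelta(months=-1) lands one month in the future, while subtracting months=1 looks one month back as intended. A quick sanity check (dateutil assumed installed):

    from datetime import datetime
    from dateutil.relativedelta import relativedelta

    now = datetime(2024, 5, 15)
    assert now - relativedelta(months=1) == datetime(2024, 4, 15)   # one month back
    assert now - relativedelta(months=-1) == datetime(2024, 6, 15)  # double negative: one month ahead
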
diff --git a/Tests/scripts/jira_issues.py b/Tests/scripts/jira_issues.py
index ebc48d35193c..d3aae2a39a5f 100644
--- a/Tests/scripts/jira_issues.py
+++ b/Tests/scripts/jira_issues.py
@@ -3,6 +3,7 @@
 from collections import defaultdict
 from datetime import datetime, timedelta
 from distutils.util import strtobool
+from typing import Any
 
 from jira import JIRA, Issue
 from jira.client import ResultList
@@ -124,3 +125,10 @@ def jira_search_all_by_query(jira_server: JIRA,
             break
 
     return issues
+
+
+def jira_ticket_to_json_data(jira_ticket: Issue) -> dict[str, Any]:
+    return {
+        "url": jira_ticket.permalink(),
+        "key": jira_ticket.key,
+    }
diff --git a/Tests/scripts/test_modeling_rule_report.py b/Tests/scripts/test_modeling_rule_report.py
index 778de35fb893..6f31f83b72c3 100644
--- a/Tests/scripts/test_modeling_rule_report.py
+++ b/Tests/scripts/test_modeling_rule_report.py
@@ -1,3 +1,5 @@
+import contextlib
+import json
 from collections import defaultdict
 from datetime import datetime
 from pathlib import Path
@@ -11,10 +13,12 @@
 from Tests.scripts.common import get_properties_for_test_suite
 from Tests.scripts.jira_issues import generate_ticket_summary, generate_query_with_summary, \
     find_existing_jira_ticket, JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, JIRA_LABELS, JIRA_ADDITIONAL_FIELDS, \
-    generate_build_markdown_link, convert_jira_time_to_datetime
+    generate_build_markdown_link, convert_jira_time_to_datetime, jira_ticket_to_json_data
 from Tests.scripts.utils import logging_wrapper as logging
 
 TEST_MODELING_RULES_BASE_HEADERS = ["Test Modeling Rule"]
+TEST_MODELING_RULES_TO_JIRA_MAPPING = "test_modeling_rule_to_jira_mapping.json"
+TEST_MODELING_RULES_TO_JIRA_TICKETS_CONVERTED = "test_modeling_rule_to_jira_tickets_converted.txt"
 
 
 def get_summary_for_test_modeling_rule(properties: dict[str, str]) -> str | None:
@@ -121,3 +125,21 @@ def calculate_test_modeling_rule_results(test_modeling_rules_results_files: dict
             jira_tickets_for_modeling_rule[summary] = sorted_issues_matching_summary[0]
 
     return modeling_rules_to_test_suite, jira_tickets_for_modeling_rule, server_versions
+
+
+def write_test_modeling_rule_to_jira_mapping(artifacts_path: Path, jira_tickets_for_modeling_rule: dict[str, Issue]):
+    test_modeling_rule_to_jira_mapping_file = artifacts_path / TEST_MODELING_RULES_TO_JIRA_MAPPING
+    logging.info(f"Writing test_modeling_rules_to_jira_mapping to {test_modeling_rule_to_jira_mapping_file}")
+    with open(test_modeling_rule_to_jira_mapping_file, "w") as test_modeling_rule_to_jira_mapping_fp:
+        test_modeling_rule_to_jira_mapping = {modeling_rule: jira_ticket_to_json_data(jira_ticket)
+                                              for modeling_rule, jira_ticket in jira_tickets_for_modeling_rule.items()}
+        test_modeling_rule_to_jira_mapping_fp.write(json.dumps(test_modeling_rule_to_jira_mapping, indent=4, sort_keys=True,
+                                                               default=str))
+
+
+def read_test_modeling_rule_to_jira_mapping(artifacts_path: Path) -> dict[str, dict[str, str]]:
+    logging.debug(f"Reading test_modeling_rules_to_jira_mapping from {TEST_MODELING_RULES_TO_JIRA_MAPPING}")
+    with (contextlib.suppress(Exception),
+          open(artifacts_path / TEST_MODELING_RULES_TO_JIRA_MAPPING) as modeling_rule_to_jira_mapping_file):
+        return json.load(modeling_rule_to_jira_mapping_file)
+    return {}
diff --git a/Tests/scripts/test_playbooks_report.py b/Tests/scripts/test_playbooks_report.py
index 8e78da17dca1..0d564c5b0cfb 100644
--- a/Tests/scripts/test_playbooks_report.py
+++ b/Tests/scripts/test_playbooks_report.py
@@ -1,3 +1,5 @@
+import contextlib
+import json
 from pathlib import Path
 from typing import Any
 
@@ -5,11 +7,13 @@
 from junitparser import JUnitXml, TestSuite
 
 from Tests.scripts.common import get_properties_for_test_suite
-from Tests.scripts.jira_issues import generate_ticket_summary, convert_jira_time_to_datetime
+from Tests.scripts.jira_issues import generate_ticket_summary, convert_jira_time_to_datetime, jira_ticket_to_json_data
 from Tests.scripts.utils import logging_wrapper as logging
 
 TEST_PLAYBOOKS_BASE_HEADERS = ["Playbook ID"]
 TEST_PLAYBOOKS_JIRA_BASE_HEADERS = ["Instance Role"]
+TEST_PLAYBOOKS_TO_JIRA_MAPPING = "test_playbook_to_jira_mapping.json"
+TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED = "test_playbook_to_jira_tickets_converted.txt"
 
 
 def calculate_test_playbooks_results(test_playbooks_result_files_list: dict[str, Path]
@@ -41,3 +45,22 @@ def get_jira_tickets_for_playbooks(playbook_ids: list[str],
                                    reverse=True)
         playbook_ids_to_jira_tickets[playbook_id] = sorted_issues[0]
     return playbook_ids_to_jira_tickets
+
+
+def write_test_playbook_to_jira_mapping(artifacts_path: Path, jira_tickets_for_playbooks: dict[str, Issue]):
+    test_playbooks_to_jira_mapping = artifacts_path / TEST_PLAYBOOKS_TO_JIRA_MAPPING
+    logging.info(f"Writing test_playbooks_to_jira_mapping to {test_playbooks_to_jira_mapping}")
+    with open(test_playbooks_to_jira_mapping, "w") as playbook_to_jira_mapping_file:
+        playbook_to_jira_mapping = {playbook_id: jira_ticket_to_json_data(jira_ticket)
+                                    for playbook_id, jira_ticket in jira_tickets_for_playbooks.items()}
+        playbook_to_jira_mapping_file.write(json.dumps(playbook_to_jira_mapping, indent=4, sort_keys=True,
+                                                       default=str))
+
+
+def read_test_playbook_to_jira_mapping(artifacts_path: Path) -> dict[str, dict[str, str]]:
+    logging.debug(f"Reading test_playbooks_to_jira_mapping from {TEST_PLAYBOOKS_TO_JIRA_MAPPING}")
+    with (contextlib.suppress(Exception),
+          open(artifacts_path / TEST_PLAYBOOKS_TO_JIRA_MAPPING) as playbook_to_jira_mapping_file):
+        return json.load(playbook_to_jira_mapping_file)
+
+    return {}
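
End to end, the converters serialize each ticket through jira_ticket_to_json_data and the Slack notifier reads the mapping back to decorate failed tests with links; a self-contained sketch of that round trip on toy data (no Jira client required):

    import json
    from pathlib import Path

    TEST_PLAYBOOKS_TO_JIRA_MAPPING = "test_playbook_to_jira_mapping.json"
    artifacts_path = Path("/tmp/artifacts")  # hypothetical artifacts folder
    artifacts_path.mkdir(parents=True, exist_ok=True)

    # Converter side: the shape jira_ticket_to_json_data produces per playbook.
    mapping = {"SomePlaybook": {"url": "https://jira.example.com/browse/XSUP-1", "key": "XSUP-1"}}
    (artifacts_path / TEST_PLAYBOOKS_TO_JIRA_MAPPING).write_text(json.dumps(mapping, indent=4, sort_keys=True))

    # Notifier side: read it back and render a Slack mrkdwn link for a failed test.
    mapping = json.loads((artifacts_path / TEST_PLAYBOOKS_TO_JIRA_MAPPING).read_text())
    ticket = mapping.get("SomePlaybook")
    print(f"<{ticket['url']}|SomePlaybook [{ticket['key']}]>" if ticket else "SomePlaybook")
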