convert jira issues - removing backward compatible, adding links (#30714)

kobymeir authored Nov 8, 2023
1 parent f29ddb3 commit 125b5f5
Showing 11 changed files with 231 additions and 140 deletions.
4 changes: 2 additions & 2 deletions .gitlab/ci/.gitlab-ci.global.yml
@@ -95,7 +95,7 @@
- section_end "Stop contrib external build"

.create_artifacts_and_server_type_instance_folders: &create_artifacts_and_server_type_instance_folders
- section_start "Create Artifacts & Server Instance & Server Type folders" --collapsed
- section_start "Create Artifacts, Server Instance, Server Type folders" --collapsed
- |
if [[ -n "${ARTIFACTS_FOLDER}" ]] && [[ ! -d "${ARTIFACTS_FOLDER}" ]]; then
echo "Creating Artifacts folder: ${ARTIFACTS_FOLDER}"
@@ -113,7 +113,7 @@
mkdir -p -m 777 "${ARTIFACTS_FOLDER_SERVER_TYPE}/logs" # using the -p to create the logs folder as well.
echo "${SERVER_TYPE}" > "${ARTIFACTS_FOLDER_SERVER_TYPE}/server_type.txt"
fi
- section_end "Create Artifacts & Server Instance & Server Type folders"
- section_end "Create Artifacts, Server Instance, Server Type folders"

.clone_and_export_variables: &clone_and_export_variables
- section_start "Git - Job Start Actions" --collapsed
1 change: 1 addition & 0 deletions .gitlab/ci/.gitlab-ci.on-push.yml
@@ -291,6 +291,7 @@ xpanse-prepare-testing-bucket:
echo "Instance role:${INSTANCE_ROLE} Product type:${PRODUCT_TYPE}"
if [ -n "${NIGHTLY}" ] && [ "${INSTANCE_ROLE}" != "Server Master" ]; then
echo "Not running the instance flow, not a Server Master instance and we are in a nightly build."
rm -f "${ARTIFACTS_FOLDER_INSTANCE}/instance_role.txt" # delete the instance_role.txt file so the job will not be collected by slack notifier.
job-done
exit 0
fi
17 changes: 10 additions & 7 deletions Tests/Marketplace/print_test_modeling_rule_summary.py
@@ -11,7 +11,8 @@
TEST_SUITE_CELL_EXPLANATION
from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, \
JIRA_API_KEY, jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type, JIRA_LABELS
from Tests.scripts.test_modeling_rule_report import TEST_MODELING_RULES_BASE_HEADERS, calculate_test_modeling_rule_results
from Tests.scripts.test_modeling_rule_report import TEST_MODELING_RULES_BASE_HEADERS, calculate_test_modeling_rule_results, \
write_test_modeling_rule_to_jira_mapping
from Tests.scripts.utils import logging_wrapper as logging
from Tests.scripts.utils.log_util import install_logging

@@ -55,17 +56,19 @@ def print_test_modeling_rule_summary(artifacts_path: Path, without_jira: bool) -
calculate_test_modeling_rule_results(test_modeling_rules_results_files, issues)
)

write_test_modeling_rule_to_jira_mapping(artifacts_path, jira_tickets_for_modeling_rule)

if modeling_rules_to_test_suite:
logging.info(f"Found {len(jira_tickets_for_modeling_rule)} Jira tickets out of {len(modeling_rules_to_test_suite)} "
"Test modeling rules")

headers, column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_modeling_rule,
modeling_rules_to_test_suite,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS,
without_jira=without_jira)
column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_modeling_rule,
modeling_rules_to_test_suite,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS,
without_jira=without_jira)

table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
logging.info(f"Test Modeling rule Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
return total_errors != 0

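The change above stops returning a separate headers list from calculate_results_table: the header row now travels as the first element of tabulate_data, and tabulate is told to use it via headers="firstrow". A minimal sketch of that rendering pattern, with made-up row values, looks like this:

```python
from tabulate import tabulate

# Hypothetical data illustrating the headers="firstrow" pattern: the first
# element of the table data is the header row itself.
tabulate_data = [
    ["Test Modeling Rule", "Jira Ticket", "XSIAM Master"],  # header row
    ["MyPack Rule", "CIAC-0000", "1/0/0"],
    ["OtherPack Rule", "", "0/1/0"],
]
column_align = ("left", "left", "center")
table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
print(table)
```

Keeping the header row inside the data is also what allows the table to be transposed as a single unit later on (see the pandas change in Tests/scripts/common.py below).
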
63 changes: 14 additions & 49 deletions Tests/Marketplace/print_test_playbook_summary.py
@@ -13,7 +13,7 @@
from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, \
JIRA_API_KEY, jira_server_information, generate_query_by_component_and_issue_type, jira_search_all_by_query, JIRA_LABELS
from Tests.scripts.test_playbooks_report import calculate_test_playbooks_results, \
TEST_PLAYBOOKS_BASE_HEADERS, get_jira_tickets_for_playbooks
TEST_PLAYBOOKS_BASE_HEADERS, get_jira_tickets_for_playbooks, write_test_playbook_to_jira_mapping
from Tests.scripts.utils import logging_wrapper as logging
from Tests.scripts.utils.log_util import install_logging

@@ -39,41 +39,6 @@ def read_file_contents(file_path: Path) -> list | None:
return None


def print_test_playbooks_summary_without_junit_report(artifacts_path: Path) -> bool:
"""
Takes the information stored in the files and prints it in a human-readable way.
"""
instance_path = Path(artifacts_path) / "instance_Server Master"
failed_tests_path = instance_path / "failed_tests.txt"
succeeded_tests_path = instance_path / "succeeded_tests.txt"
succeeded_playbooks = read_file_contents(succeeded_tests_path)
failed_playbooks = read_file_contents(failed_tests_path)

# if one of the files isn't existing, we want to fail.
if succeeded_playbooks is None or failed_playbooks is None:
return True

succeeded_count = len(succeeded_playbooks)
failed_count = len(failed_playbooks)

logging.info("TEST RESULTS:")
logging.info(f"Number of playbooks tested - {succeeded_count + failed_count}")

if succeeded_count:
logging.success(f"Number of succeeded tests - {succeeded_count}")
logging.success("Successful Tests:")
for playbook_id in succeeded_playbooks:
logging.success(f"\t- {playbook_id}")

if failed_count:
logging.error(f"Number of failed tests - {failed_count}:")
logging.error("Failed Tests:")
for playbook_id in failed_playbooks:
logging.error(f"\t- {playbook_id}")
return True
return False


def filter_skipped_playbooks(playbooks_results: dict[str, dict[str, TestSuite]]) -> list[str]:
filtered_playbooks_ids = []
for playbook_id, playbook_results in playbooks_results.items():
@@ -90,15 +55,15 @@ def filter_skipped_playbooks(playbooks_results: dict[str, dict[str, TestSuite]])
return filtered_playbooks_ids


def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> tuple[bool, bool]:
def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> bool:
test_playbooks_report = artifacts_path / TEST_PLAYBOOKS_REPORT_FILE_NAME

# iterate over the artifacts path and find all the test playbook result files
if not (test_playbooks_result_files_list := get_test_results_files(artifacts_path, TEST_PLAYBOOKS_REPORT_FILE_NAME)):
# Write an empty report file to avoid failing the build artifacts collection.
JUnitXml().write(test_playbooks_report.as_posix(), pretty=True)
logging.error(f"Could not find any test playbook result files in {artifacts_path}")
return False, False
return True

logging.info(f"Found {len(test_playbooks_result_files_list)} test playbook result files")
playbooks_results, server_versions = calculate_test_playbooks_results(test_playbooks_result_files_list)
@@ -124,16 +89,18 @@ def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> tu
jira_tickets_for_playbooks = get_jira_tickets_for_playbooks(playbooks_ids, issues)
logging.info(f"Found {len(jira_tickets_for_playbooks)} Jira tickets out of {len(playbooks_ids)} filtered playbooks")

headers, column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
playbooks_results,
server_versions,
TEST_PLAYBOOKS_BASE_HEADERS,
without_jira=without_jira)
column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
playbooks_results,
server_versions,
TEST_PLAYBOOKS_BASE_HEADERS,
without_jira=without_jira)
logging.info(f"Writing test playbook report to {test_playbooks_report}")
xml.write(test_playbooks_report.as_posix(), pretty=True)
table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
write_test_playbook_to_jira_mapping(artifacts_path, jira_tickets_for_playbooks)

table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
logging.info(f"Test Playbook Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
return True, total_errors != 0
return total_errors != 0


def main():
@@ -142,10 +109,8 @@ def main():
options = options_handler()
artifacts_path = Path(options.artifacts_path)
logging.info(f"Printing test playbook summary - artifacts path: {artifacts_path}")
junit_result_exist, errors_found = print_test_playbooks_summary(artifacts_path, options.without_jira)
if not junit_result_exist:
errors_found = print_test_playbooks_summary_without_junit_report(artifacts_path)
if errors_found:

if print_test_playbooks_summary(artifacts_path, options.without_jira):
logging.critical("Test playbook summary found errors")
sys.exit(1)

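write_test_playbook_to_jira_mapping is a new helper imported from Tests/scripts/test_playbooks_report.py, and its body is not part of this diff. As a rough, hypothetical sketch only, a playbook-to-ticket mapping could be persisted along these lines (the file name, format, and signature are assumptions, not the repository's actual implementation):

```python
import json
from pathlib import Path

from jira import Issue


def write_test_playbook_to_jira_mapping(artifacts_path: Path, tickets: dict[str, Issue]) -> None:
    # Map each playbook ID to the key of the Jira ticket that tracks its failures,
    # so later pipeline steps can link test results back to tickets.
    mapping = {playbook_id: issue.key for playbook_id, issue in tickets.items()}
    (artifacts_path / "test_playbooks_to_jira_tickets.json").write_text(json.dumps(mapping, indent=4))
```
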
13 changes: 10 additions & 3 deletions Tests/scripts/common.py
@@ -1,6 +1,7 @@
from pathlib import Path
from typing import Any

import pandas as pd
from jira import Issue
from junitparser import TestSuite, JUnitXml

@@ -15,6 +16,8 @@
TEST_NATIVE_CANDIDATE = 'Test Native Candidate'
SECURITY_SCANS = 'Security Scans'
BUILD_MACHINES_CLEANUP = 'Build Machines Cleanup'
UNIT_TESTS_WORKFLOW_SUBSTRINGS = {'lint', 'unit', 'demisto sdk nightly', TEST_NATIVE_CANDIDATE.lower()}

WORKFLOW_TYPES = {
CONTENT_NIGHTLY,
CONTENT_PR,
@@ -105,7 +108,7 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
without_jira: bool = False,
with_skipped: bool = False,
multiline_headers: bool = True,
transpose: bool = False) -> tuple[list[str], list[str], list[list[Any]], JUnitXml, int]:
transpose: bool = False) -> tuple[list[str], list[list[Any]], JUnitXml, int]:
xml = JUnitXml()
headers_multiline_char = "\n" if multiline_headers else " "
headers = [h.replace("\n", headers_multiline_char) for h in base_headers]
@@ -119,7 +122,7 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
server_version if transpose else f"{server_version}{headers_multiline_char}({TEST_SUITE_DATA_CELL_HEADER})"
)
column_align.append("center")
tabulate_data = []
tabulate_data = [headers]
total_row: list[Any] = ([""] * fixed_headers_length + [TestSuiteStatistics(no_color)
for _ in range(len(server_versions_list))])
total_errors = 0
@@ -180,7 +183,11 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
total_row[0] = (green_text(TOTAL_HEADER) if total_errors == 0 else red_text(TOTAL_HEADER)) \
if not no_color else TOTAL_HEADER
tabulate_data.append(total_row)
return headers, column_align, tabulate_data, xml, total_errors

if transpose:
tabulate_data = pd.DataFrame(tabulate_data, index=None).transpose().to_numpy()

return column_align, tabulate_data, xml, total_errors


def get_all_failed_results(results: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
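With the header row embedded in tabulate_data, the new transpose branch can flip the entire table, headers included, in one step. A small illustration of what pd.DataFrame(...).transpose().to_numpy() does to such data (values are invented for the example):

```python
import pandas as pd

# Table with the header row as the first row, mirroring how
# calculate_results_table now builds tabulate_data.
tabulate_data = [
    ["Rule", "XSIAM Master", "XSIAM 8.4"],
    ["Rule A", "1/0/0", "1/0/0"],
    ["Rule B", "0/1/0", "1/0/0"],
]
transposed = pd.DataFrame(tabulate_data, index=None).transpose().to_numpy()
# Each original column becomes a row:
# [['Rule', 'Rule A', 'Rule B'],
#  ['XSIAM Master', '1/0/0', '0/1/0'],
#  ['XSIAM 8.4', '1/0/0', '1/0/0']]
print(transposed)
```
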
32 changes: 21 additions & 11 deletions Tests/scripts/convert_test_modeling_result_to_jira_issues.py
@@ -11,13 +11,15 @@
from tabulate import tabulate

from Tests.scripts.common import get_all_failed_results, calculate_results_table, TEST_MODELING_RULES_REPORT_FILE_NAME, \
get_test_results_files, TEST_SUITE_CELL_EXPLANATION
get_test_results_files, TEST_SUITE_CELL_EXPLANATION, get_properties_for_test_suite
from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_API_KEY, \
JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, JIRA_ISSUE_UNRESOLVED_TRANSITION_NAME, JIRA_LABELS, \
jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type
from Tests.scripts.test_modeling_rule_report import (create_jira_issue_for_test_modeling_rule,
TEST_MODELING_RULES_BASE_HEADERS,
calculate_test_modeling_rule_results)
calculate_test_modeling_rule_results,
write_test_modeling_rule_to_jira_mapping, get_summary_for_test_modeling_rule)
from Tests.scripts.test_playbooks_report import TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED
from Tests.scripts.utils import logging_wrapper as logging
from Tests.scripts.utils.log_util import install_logging

@@ -47,8 +49,9 @@ def main():
install_logging('convert_test_modeling_result_to_jira_issues.log', logger=logging)
now = datetime.now(tz=timezone.utc)
options = options_handler()
artifacts_path = Path(options.artifacts_path)
logging.info("Converting test modeling rule report to Jira issues with the following settings:")
logging.info(f"\tArtifacts path: {options.artifacts_path}")
logging.info(f"\tArtifacts path: {artifacts_path}")
logging.info(f"\tJira server url: {JIRA_SERVER_URL}")
logging.info(f"\tJira verify SSL: {JIRA_VERIFY_SSL}")
logging.info(f"\tJira project id: {JIRA_PROJECT_ID}")
@@ -60,9 +63,9 @@

jira_server = JIRA(JIRA_SERVER_URL, token_auth=JIRA_API_KEY, options={'verify': JIRA_VERIFY_SSL})
jira_server_information(jira_server)
if not (test_modeling_rules_results_files := get_test_results_files(Path(options.artifacts_path),
if not (test_modeling_rules_results_files := get_test_results_files(artifacts_path,
TEST_MODELING_RULES_REPORT_FILE_NAME)):
logging.critical(f"Could not find any test modeling rules result files in {options.artifacts_path}")
logging.critical(f"Could not find any test modeling rules result files in {artifacts_path}")
sys.exit(1)

logging.info(f"Found {len(test_modeling_rules_results_files)} test modeling rules files")
@@ -81,11 +84,11 @@ def main():
failed_test_modeling_rule = get_all_failed_results(modeling_rules_to_test_suite)

if len(failed_test_modeling_rule) >= options.max_failures_to_handle:
headers, column_align, tabulate_data, _, _ = calculate_results_table(jira_tickets_for_modeling_rule,
failed_test_modeling_rule,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS)
table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
column_align, tabulate_data, _, _ = calculate_results_table(jira_tickets_for_modeling_rule,
failed_test_modeling_rule,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS)
table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
logging.info(f"Test Modeling rule Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
logging.critical(f"Found {len(failed_test_modeling_rule)} failed test modeling rule, "
f"which is more than the max allowed limit of {options.max_failures_to_handle} to handle.")
@@ -95,7 +98,14 @@
for result_file in test_modeling_rules_results_files.values():
xml = JUnitXml.fromfile(result_file.as_posix())
for test_suite in xml.iterchildren(TestSuite):
create_jira_issue_for_test_modeling_rule(jira_server, test_suite, options.max_days_to_reopen, now)
if issue := create_jira_issue_for_test_modeling_rule(jira_server, test_suite, options.max_days_to_reopen, now):
# if the ticket was created/updated successfully, we add it to the mapping and override the previous ticket.
properties = get_properties_for_test_suite(test_suite)
if summary := get_summary_for_test_modeling_rule(properties):
jira_tickets_for_modeling_rule[summary] = issue

write_test_modeling_rule_to_jira_mapping(artifacts_path, jira_tickets_for_modeling_rule)
open(artifacts_path / TEST_PLAYBOOKS_TO_JIRA_TICKETS_CONVERTED, "w")

logging.info("Finished creating/updating Jira issues for test modeling rules")

Expand Down
(Diffs for the remaining 5 changed files are not shown on this page.)
