diff --git a/.circleci/config.yml b/.circleci/config.yml index f40db15741c8..69c30ab27089 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -348,10 +348,7 @@ references: exit 0 fi - # Run flake8 on all excluding Packs (Integraions and Scripts) - they will be handled in linting - ./Tests/scripts/pyflake.sh *.py - find . -maxdepth 1 -type d -not \( -path . -o -path ./Packs -o -path ./venv \) | xargs ./Tests/scripts/pyflake.sh - + ./Tests/scripts/linters_runner.sh ./Tests/scripts/validate.sh run_unit_testing_and_lint: &run_unit_testing_and_lint diff --git a/.gitlab/ci/global.yml b/.gitlab/ci/global.yml index 95c2f9e22983..9505d3568be8 100644 --- a/.gitlab/ci/global.yml +++ b/.gitlab/ci/global.yml @@ -244,14 +244,7 @@ if [[ -n $FORCE_BUCKET_UPLOAD || -n $BUCKET_UPLOAD ]] && [[ "$(echo "$GCS_MARKET_BUCKET" | tr '[:upper:]' '[:lower:]')" != "marketplace-dist" ]] && [[ $CI_COMMIT_BRANCH != "master" ]]; then echo "Skipping the -Validate Files and Yaml- step when uploading to a test bucket." else - echo "Run flake8 on all excluding Packs (Integrations and Scripts) - they will be handled in linting" - ./Tests/scripts/pyflake.sh *.py - # do not run pyflake on venv or content-test-conf awsinstancetool - find . -maxdepth 1 -type d -not \( -path . -o -path ./Packs -o -path ./venv -o -path ./Tests \) | xargs ./Tests/scripts/pyflake.sh - ./Tests/scripts/pyflake.sh ./Tests/*.py - find ./Tests -maxdepth 1 -type d -not \( -path ./Tests -o -path ./Tests/scripts \) | xargs ./Tests/scripts/pyflake.sh - ./Tests/scripts/pyflake.sh ./Tests/scripts/*.py - find ./Tests/scripts -maxdepth 1 -type d -not \( -path ./Tests/scripts -o -path ./Tests/scripts/awsinstancetool \) | xargs ./Tests/scripts/pyflake.sh + ./Tests/scripts/linters_runner.sh ./Tests/scripts/validate.sh fi - section_end "Validate Files and Yaml" diff --git a/.gitlab/ci/miscellaneous.yml b/.gitlab/ci/miscellaneous.yml index 651e4ecf1cdb..43855f876b84 100644 --- a/.gitlab/ci/miscellaneous.yml +++ b/.gitlab/ci/miscellaneous.yml @@ -8,7 +8,6 @@ delete-mismatched-branches: - if: '$DELETE_MISMATCHED_BRANCHES == "true"' when: always script: - - cd Utils - - python3 delete_mismatched_branches.py + - python3 Utils/delete_mismatched_branches.py retry: max: 2 diff --git a/Documentation/common_server_docs.py b/Documentation/common_server_docs.py index 2d006a0f22b9..84e0ef720d6c 100644 --- a/Documentation/common_server_docs.py +++ b/Documentation/common_server_docs.py @@ -144,9 +144,9 @@ def create_py_documentation(path, origin, language): code = compile(py_script, '', 'exec') ns = {'demisto': demistomock} - exec(code, ns) # guardrails-disable-line + exec(code, ns) # guardrails-disable-line # pylint: disable=W0122 - x = [] + x: list = [] for a in ns: a_object = ns.get(a) @@ -216,7 +216,7 @@ def create_ps_documentation(path, origin, language): for parameter in parameters[1:]: - split_param = list(filter(None, parameter.split('\n'))) + split_param: list = list(filter(None, parameter.split('\n'))) required = False param_name = split_param[0].strip() if 'required' in param_name: diff --git a/Tests/Marketplace/Tests/marketplace_services_test.py b/Tests/Marketplace/Tests/marketplace_services_test.py index 7c216dea20c8..578bef4f07d4 100644 --- a/Tests/Marketplace/Tests/marketplace_services_test.py +++ b/Tests/Marketplace/Tests/marketplace_services_test.py @@ -1,3 +1,6 @@ + +# type: ignore[attr-defined] + import shutil import pytest import json @@ -12,6 +15,9 @@ from datetime import datetime, timedelta from typing import List, Dict, Optional, Tuple, Any +# 
pylint: disable=no-member + + from Tests.Marketplace.marketplace_services import Pack, input_to_list, get_valid_bool, convert_price, \ get_updated_server_version, load_json, \ store_successful_and_failed_packs_in_ci_artifacts, is_ignored_pack_file, \ @@ -936,7 +942,7 @@ def mock_os_path_join(path, *paths): return TestChangelogCreation.dummy_pack_changelog(CHANGELOG_DATA_INITIAL_VERSION) if path == 'changelog_new_exist': return TestChangelogCreation.dummy_pack_changelog(CHANGELOG_DATA_MULTIPLE_VERSIONS) - if path == 'changelog_not_exist' or path == 'metadata_not_exist': + if path in ['changelog_not_exist', 'metadata_not_exist']: return path_to_non_existing_changelog @freeze_time("2020-11-04T13:34:14.75Z") diff --git a/Tests/Marketplace/Tests/test_pack_dependencies.py b/Tests/Marketplace/Tests/test_pack_dependencies.py index 7466c3ead873..141a16c75895 100644 --- a/Tests/Marketplace/Tests/test_pack_dependencies.py +++ b/Tests/Marketplace/Tests/test_pack_dependencies.py @@ -1,3 +1,4 @@ +# type: ignore[attr-defined] from unittest.mock import patch import networkx as nx diff --git a/Tests/Marketplace/Tests/upload_packs_test.py b/Tests/Marketplace/Tests/upload_packs_test.py index efc1868b11e6..479d00b1143a 100644 --- a/Tests/Marketplace/Tests/upload_packs_test.py +++ b/Tests/Marketplace/Tests/upload_packs_test.py @@ -1,3 +1,5 @@ +# type: ignore[attr-defined] +# pylint: disable=no-member import copy import json import os @@ -8,6 +10,7 @@ # disable-secrets-detection-start + class TestModifiedPacks: @pytest.mark.parametrize("packs_names_input, expected_result", [ ("pack1,pack2,pack1", {"pack1", "pack2"}), @@ -43,7 +46,7 @@ def is_dir(self): @staticmethod def isdir(path): - return True if path == 'mock_path' else False + return path == 'mock_path' def scan_dir(dirs=None): @@ -591,7 +594,7 @@ def test_is_private_packs_updated(self, mocker): assert not is_private_packs_updated(public_index_json, index_file_path) # private pack was deleted - del (private_index_json.get("packs")[0]) + del private_index_json.get("packs")[0] mocker.patch('Tests.Marketplace.upload_packs.load_json', return_value=private_index_json) assert is_private_packs_updated(public_index_json, index_file_path) diff --git a/Tests/Marketplace/Tests/zip_packs_test.py b/Tests/Marketplace/Tests/zip_packs_test.py index 8451d075d6b4..9ed35e940846 100644 --- a/Tests/Marketplace/Tests/zip_packs_test.py +++ b/Tests/Marketplace/Tests/zip_packs_test.py @@ -1,3 +1,6 @@ +# type: ignore[attr-defined] +# pylint: disable=no-member + import pytest from Tests.Marketplace.zip_packs import get_latest_pack_zip_from_pack_files, zip_packs,\ diff --git a/Tests/Marketplace/configure_and_install_packs.py b/Tests/Marketplace/configure_and_install_packs.py index e178b5256b1c..cb6fe7ff7f7d 100644 --- a/Tests/Marketplace/configure_and_install_packs.py +++ b/Tests/Marketplace/configure_and_install_packs.py @@ -1,5 +1,4 @@ import argparse -import logging import sys from Tests.configure_and_test_integration_instances import set_marketplace_url, MARKET_PLACE_CONFIGURATION, \ @@ -7,6 +6,7 @@ from Tests.test_content import get_json_file from Tests.Marketplace.search_and_install_packs import install_all_content_packs_from_build_bucket from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging from Tests.Marketplace.marketplace_constants import GCPConfig @@ -30,7 +30,7 @@ def options_handler(): def main(): - install_logging('Install_Packs.log') + install_logging('Install_Packs.log', logger=logging) options = 
options_handler() # Get the host by the ami env diff --git a/Tests/Marketplace/copy_and_upload_packs.py b/Tests/Marketplace/copy_and_upload_packs.py index 1a3efb6fea60..a6bd8905e73e 100644 --- a/Tests/Marketplace/copy_and_upload_packs.py +++ b/Tests/Marketplace/copy_and_upload_packs.py @@ -3,7 +3,6 @@ import sys import argparse import shutil -import logging import re from zipfile import ZipFile from google.cloud.storage import Blob, Bucket @@ -15,6 +14,7 @@ from Tests.Marketplace.marketplace_constants import PackStatus, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \ PACKS_FULL_PATH, IGNORED_FILES from Tests.Marketplace.upload_packs import extract_packs_artifacts, print_packs_summary, get_packs_summary +from Tests.scripts.utils import logging_wrapper as logging LATEST_ZIP_REGEX = re.compile(fr'^{GCPConfig.GCS_PUBLIC_URL}/[\w./-]+/content/packs/([A-Za-z0-9-_.]+/\d+\.\d+\.\d+/' r'[A-Za-z0-9-_.]+\.zip$)') @@ -314,7 +314,7 @@ def options_handler(): def main(): - install_logging('Copy_and_Upload_Packs.log') + install_logging('Copy_and_Upload_Packs.log', logger=logging) options = options_handler() packs_artifacts_path = options.artifacts_path extract_destination_path = options.extract_path diff --git a/Tests/Marketplace/download_private_id_set.py b/Tests/Marketplace/download_private_id_set.py index d3f83a63be4e..7245ab3480cd 100644 --- a/Tests/Marketplace/download_private_id_set.py +++ b/Tests/Marketplace/download_private_id_set.py @@ -5,7 +5,7 @@ def create_empty_id_set_in_artifacts(private_id_set_path): - empty_id_set = { + empty_id_set: dict = { "scripts": [], "playbooks": [], "integrations": [], diff --git a/Tests/Marketplace/marketplace_services.py b/Tests/Marketplace/marketplace_services.py index 256645334452..5a9a0a039505 100644 --- a/Tests/Marketplace/marketplace_services.py +++ b/Tests/Marketplace/marketplace_services.py @@ -2,7 +2,6 @@ import fnmatch import glob import json -import logging import os import re import shutil @@ -26,6 +25,7 @@ from Tests.Marketplace.marketplace_constants import PackFolders, Metadata, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \ PackTags, PackIgnored, Changelog from Utils.release_notes_generator import aggregate_release_notes_for_marketplace +from Tests.scripts.utils import logging_wrapper as logging class Pack(object): @@ -151,7 +151,7 @@ def is_feed(self, is_feed): """ self._is_feed = is_feed - @status.setter + @status.setter # type: ignore[attr-defined,no-redef] def status(self, status_value): """ setter of pack current status. """ @@ -229,7 +229,7 @@ def user_metadata(self): """ return self._user_metadata - @display_name.setter + @display_name.setter # type: ignore[attr-defined,no-redef] def display_name(self, display_name_value): """ setter of display name property of the pack. """ @@ -398,7 +398,7 @@ def _get_all_pack_images(pack_integration_images, display_dependencies_images, d list: collection of integration display name and it's path in gcs. 
""" - dependencies_integration_images_dict = {} + dependencies_integration_images_dict: dict = {} additional_dependencies_data = {k: v for k, v in dependencies_data.items() if k in display_dependencies_images} for dependency_data in additional_dependencies_data.values(): @@ -587,7 +587,7 @@ def _parse_pack_metadata(self, build_number, commit_hash): Metadata.VERSION_INFO: build_number, Metadata.COMMIT: commit_hash, Metadata.DOWNLOADS: self._downloads_count, - Metadata.TAGS: list(self._tags), + Metadata.TAGS: list(self._tags or []), Metadata.CATEGORIES: self._categories, Metadata.CONTENT_ITEMS: self._content_items, Metadata.SEARCH_RANK: self._search_rank, @@ -1195,7 +1195,8 @@ def get_same_block_versions(self, release_notes_dir: str, version: str, changelo """ lowest_version = [LooseVersion(Pack.PACK_INITIAL_VERSION)] - lower_versions, higher_versions = [], [] + lower_versions: list = [] + higher_versions: list = [] same_block_versions_dict: dict = dict() for item in changelog.keys(): # divide the versions into lists of lower and higher than given version (lower_versions if LooseVersion(item) < version else higher_versions).append(LooseVersion(item)) @@ -1275,7 +1276,7 @@ def assert_upload_bucket_version_matches_release_notes_version(self, changelog: The changelog from the production bucket. latest_release_notes: The latest release notes version string in the current branch """ - changelog_latest_release_notes = max(changelog, key=lambda k: LooseVersion(k)) + changelog_latest_release_notes = max(changelog, key=lambda k: LooseVersion(k)) # pylint: disable=W0108 assert LooseVersion(latest_release_notes) >= LooseVersion(changelog_latest_release_notes), \ f'{self._pack_name}: Version mismatch detected between upload bucket and current branch\n' \ f'Upload bucket version: {changelog_latest_release_notes}\n' \ @@ -1465,7 +1466,7 @@ def collect_content_items(self): . 
""" task_status = False - content_items_result = {} + content_items_result: dict = {} try: # the format is defined in issue #19786, may change in the future @@ -1696,7 +1697,7 @@ def load_user_metadata(self): self.current_version = user_metadata.get(Metadata.CURRENT_VERSION, '') self.hidden = user_metadata.get(Metadata.HIDDEN, False) self.description = user_metadata.get(Metadata.DESCRIPTION, False) - self.display_name = user_metadata.get(Metadata.NAME, '') + self.display_name = user_metadata.get(Metadata.NAME, '') # type: ignore[misc] self._user_metadata = user_metadata self.eula_link = user_metadata.get(Metadata.EULA_LINK, Metadata.EULA_URL) @@ -1826,7 +1827,7 @@ def format_metadata(self, index_folder_path, packs_dependencies_mapping, build_n try: self.set_pack_dependencies(packs_dependencies_mapping) - if Metadata.DISPLAYED_IMAGES not in self.user_metadata: + if Metadata.DISPLAYED_IMAGES not in self.user_metadata and self._user_metadata: self._user_metadata[Metadata.DISPLAYED_IMAGES] = packs_dependencies_mapping.get( self._pack_name, {}).get(Metadata.DISPLAYED_IMAGES, []) logging.info(f"Adding auto generated display images for {self._pack_name} pack") @@ -1882,7 +1883,7 @@ def _calculate_pack_creation_date(pack_name, index_folder_path): if metadata: if metadata.get(Metadata.CREATED): - created_time = metadata.get(Metadata.CREATED) + created_time = metadata.get(Metadata.CREATED, '') else: raise Exception(f'The metadata file of the {pack_name} pack does not contain "{Metadata.CREATED}" time') @@ -1908,7 +1909,7 @@ def _get_pack_update_date(self, index_folder_path, pack_was_modified): def set_pack_dependencies(self, packs_dependencies_mapping): pack_dependencies = packs_dependencies_mapping.get(self._pack_name, {}).get(Metadata.DEPENDENCIES, {}) - if Metadata.DEPENDENCIES not in self.user_metadata: + if Metadata.DEPENDENCIES not in self.user_metadata and self._user_metadata: self._user_metadata[Metadata.DEPENDENCIES] = {} # If it is a core pack, check that no new mandatory packs (that are not core packs) were added @@ -1923,7 +1924,8 @@ def set_pack_dependencies(self, packs_dependencies_mapping): f'found in the core pack {self._pack_name}') pack_dependencies.update(self.user_metadata[Metadata.DEPENDENCIES]) - self._user_metadata[Metadata.DEPENDENCIES] = pack_dependencies + if self._user_metadata: + self._user_metadata[Metadata.DEPENDENCIES] = pack_dependencies def prepare_for_index_upload(self): """ Removes and leaves only necessary files in pack folder. 
@@ -1970,7 +1972,7 @@ def _get_spitted_yml_image_data(root, target_folder_files): for pack_file in target_folder_files: if pack_file.startswith('.'): continue - elif pack_file.endswith('_image.png'): + if pack_file.endswith('_image.png'): image_data['repo_image_path'] = os.path.join(root, pack_file) elif pack_file.endswith('.yml'): with open(os.path.join(root, pack_file), 'r') as integration_file: @@ -2447,7 +2449,7 @@ def add_bc_entries_if_needed(self, release_notes_dir: str, changelog: Dict[str, if not os.path.exists(release_notes_dir): return bc_version_to_text: Dict[str, Optional[str]] = self._breaking_changes_versions_to_text(release_notes_dir) - loose_versions: List[LooseVersion] = [LooseVersion(bc_ver) for bc_ver in bc_version_to_text.keys()] + loose_versions: List[LooseVersion] = [LooseVersion(bc_ver) for bc_ver in bc_version_to_text] predecessor_version: LooseVersion = LooseVersion('0.0.0') for changelog_entry in sorted(changelog.keys(), key=LooseVersion): rn_loose_version: LooseVersion = LooseVersion(changelog_entry) @@ -2489,7 +2491,7 @@ def _calculate_bc_text(self, release_notes_dir: str, bc_version_to_text: Dict[st else: # Important: Currently, implementation of aggregating BCs was decided to concat between them # In the future this might be needed to re-thought. - return '\n'.join(bc_version_to_text.values()) + return '\n'.join(bc_version_to_text.values()) # type: ignore[arg-type] def _handle_many_bc_versions_some_with_text(self, release_notes_dir: str, text_of_bc_versions: List[str], bc_versions_without_text: List[str], ) -> str: @@ -2619,11 +2621,11 @@ def get_upload_data(packs_results_file_path: str, stage: str) -> Tuple[dict, dic """ if os.path.exists(packs_results_file_path): packs_results_file = load_json(packs_results_file_path) - stage = packs_results_file.get(stage, {}) - successful_packs_dict = stage.get(BucketUploadFlow.SUCCESSFUL_PACKS, {}) - failed_packs_dict = stage.get(BucketUploadFlow.FAILED_PACKS, {}) - successful_private_packs_dict = stage.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {}) - images_data_dict = stage.get(BucketUploadFlow.IMAGES, {}) + stage_data: dict = packs_results_file.get(stage, {}) + successful_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PACKS, {}) + failed_packs_dict = stage_data.get(BucketUploadFlow.FAILED_PACKS, {}) + successful_private_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {}) + images_data_dict = stage_data.get(BucketUploadFlow.IMAGES, {}) return successful_packs_dict, failed_packs_dict, successful_private_packs_dict, images_data_dict return {}, {}, {}, {} @@ -2674,7 +2676,7 @@ def store_successful_and_failed_packs_in_ci_artifacts(packs_results_file_path: s logging.debug(f"Successful packs {successful_packs_dict}") if updated_private_packs: - successful_private_packs_dict = { + successful_private_packs_dict: dict = { BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS: {pack_name: {} for pack_name in updated_private_packs} } packs_results[stage].update(successful_private_packs_dict) diff --git a/Tests/Marketplace/packs_dependencies.py b/Tests/Marketplace/packs_dependencies.py index c4f61d5c3729..aa584061bed0 100644 --- a/Tests/Marketplace/packs_dependencies.py +++ b/Tests/Marketplace/packs_dependencies.py @@ -1,7 +1,7 @@ import argparse import json -import logging import os +import sys from concurrent.futures import as_completed from contextlib import contextmanager from pprint import pformat @@ -9,9 +9,11 @@ from Tests.Marketplace.marketplace_constants import GCPConfig, PACKS_FOLDER, 
PACKS_FULL_PATH, IGNORED_FILES from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging from demisto_sdk.commands.find_dependencies.find_dependencies import PackDependencies, parse_for_pack_metadata from pebble import ProcessPool, ProcessFuture + PROCESS_FAILURE = False @@ -83,7 +85,7 @@ def calculate_single_pack_dependencies(pack: str, dependency_graph: object) -> T all_level_dependencies: A list with all dependencies names pack: The pack name """ - install_logging('Calculate_Packs_Dependencies.log', include_process_name=True) + install_logging('Calculate_Packs_Dependencies.log', include_process_name=True, logger=logging) first_level_dependencies = {} all_level_dependencies = [] try: @@ -117,7 +119,7 @@ def get_all_packs_dependency_graph(id_set: dict, packs: list) -> Iterable: return dependency_graph except Exception: logging.exception("Failed calculating dependencies graph") - exit(2) + sys.exit(2) def select_packs_for_calculation() -> list: @@ -197,13 +199,13 @@ def main(): packs dependencies. The logic of pack dependency is identical to sdk find-dependencies command. """ - install_logging('Calculate_Packs_Dependencies.log', include_process_name=True) + install_logging('Calculate_Packs_Dependencies.log', include_process_name=True, logger=logging) option = option_handler() output_path = option.output_path id_set_path = option.id_set_path id_set = get_id_set(id_set_path) - pack_dependencies_result = {} + pack_dependencies_result: dict = {} logging.info("Selecting packs for dependencies calculation") packs = select_packs_for_calculation() diff --git a/Tests/Marketplace/prepare_public_index_for_private_testing.py b/Tests/Marketplace/prepare_public_index_for_private_testing.py index 25c2d17023a2..d4c7b73306f8 100644 --- a/Tests/Marketplace/prepare_public_index_for_private_testing.py +++ b/Tests/Marketplace/prepare_public_index_for_private_testing.py @@ -4,7 +4,6 @@ import shutil import json import argparse -import logging from zipfile import ZipFile from contextlib import contextmanager from datetime import datetime @@ -12,6 +11,7 @@ extract_packs_artifacts from Tests.Marketplace.marketplace_services import init_storage_client from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging MAX_SECONDS_TO_WAIT_FOR_LOCK = 600 LOCK_FILE_PATH = 'lock.txt' @@ -115,7 +115,7 @@ def option_handler(): parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index", required=True) parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.", - required=False), + required=False) parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.") parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True) parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs", @@ -189,7 +189,7 @@ def add_private_packs_from_dummy_index(private_packs, dummy_index_blob): def main(): - install_logging('prepare_public_index_for_private_testing.log') + install_logging('prepare_public_index_for_private_testing.log', logger=logging) upload_config = option_handler() service_account = upload_config.service_account build_number = upload_config.ci_build_number @@ -222,7 +222,8 @@ def main(): private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket, extract_destination_path, 
public_index_folder_path, - changed_pack, True) + changed_pack, True, + storage_base_path) private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob) upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number, private_packs) diff --git a/Tests/Marketplace/search_and_install_packs.py b/Tests/Marketplace/search_and_install_packs.py index cc77fdc069df..81edf2331735 100644 --- a/Tests/Marketplace/search_and_install_packs.py +++ b/Tests/Marketplace/search_and_install_packs.py @@ -1,6 +1,5 @@ from __future__ import print_function -import logging import os import ast import json @@ -18,6 +17,7 @@ from Tests.Marketplace.upload_packs import download_and_extract_index from Tests.Marketplace.marketplace_constants import GCPConfig, PACKS_FULL_PATH, IGNORED_FILES, PACKS_FOLDER, Metadata from Tests.scripts.utils.content_packs_util import is_pack_deprecated +from Tests.scripts.utils import logging_wrapper as logging PACK_METADATA_FILE = 'pack_metadata.json' PACK_PATH_VERSION_REGEX = re.compile(fr'^{GCPConfig.PRODUCTION_STORAGE_BASE_PATH}/[A-Za-z0-9-_.]+/(\d+\.\d+\.\d+)/[A-Za-z0-9-_.]' @@ -110,7 +110,7 @@ def get_pack_dependencies(client: demisto_client, pack_data: dict, lock: Lock): ) if 200 <= status_code < 300: - dependencies_data = [] + dependencies_data: list = [] dependants_ids = [pack_id] reseponse_data = ast.literal_eval(response_data).get('dependencies', []) create_dependencies_data_structure(reseponse_data, dependants_ids, dependencies_data, dependants_ids) @@ -184,6 +184,7 @@ def search_pack(client: demisto_client, global SUCCESS_FLAG SUCCESS_FLAG = False lock.release() + return {} def find_malformed_pack_id(error_message: str) -> List: @@ -378,7 +379,7 @@ def search_pack_and_its_dependencies(client: demisto_client, installation_request_body (list): A list of packs to be installed, in the request format. lock (Lock): A lock object. 
""" - pack_data = [] + pack_data = {} if pack_id not in packs_to_install: pack_display_name = get_pack_display_name(pack_id) if pack_display_name: @@ -435,6 +436,7 @@ def get_latest_version_from_bucket(pack_id: str, production_bucket: Bucket) -> s return pack_latest_version else: logging.error(f'Could not find any versions for pack {pack_id} in bucket path {pack_bucket_path}') + return '' def get_pack_installation_request_data(pack_id: str, pack_version: str): @@ -597,8 +599,8 @@ def search_and_install_packs_and_their_dependencies(pack_ids: list, logging.info(f'Starting to search and install packs in server: {host}') - packs_to_install = [] # we save all the packs we want to install, to avoid duplications - installation_request_body = [] # the packs to install, in the request format + packs_to_install: list = [] # we save all the packs we want to install, to avoid duplications + installation_request_body: list = [] # the packs to install, in the request format threads_list = [] lock = Lock() diff --git a/Tests/Marketplace/upload_packs.py b/Tests/Marketplace/upload_packs.py index 238da735bbec..1bd0cb1360e3 100644 --- a/Tests/Marketplace/upload_packs.py +++ b/Tests/Marketplace/upload_packs.py @@ -7,12 +7,14 @@ import prettytable import glob import requests -import logging from datetime import datetime from google.cloud.storage import Bucket from zipfile import ZipFile from typing import Any, Tuple, Union, Optional + +from requests import Response + from Tests.Marketplace.marketplace_services import init_storage_client, Pack, \ load_json, get_content_git_client, get_recent_commits_data, store_successful_and_failed_packs_in_ci_artifacts, \ json_write @@ -22,6 +24,7 @@ from demisto_sdk.commands.common.tools import run_command, str2bool from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging def get_packs_names(target_packs: str, previous_commit_hash: str = "HEAD^") -> set: @@ -297,7 +300,7 @@ def upload_index_to_storage(index_folder_path: str, extract_destination_path: st 'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT), 'packs': private_packs, 'commit': commit, - 'landingPage': {'sections': landing_page_sections.get('sections', [])} + 'landingPage': {'sections': landing_page_sections.get('sections', [])} # type: ignore[union-attr] } json.dump(index, index_file, indent=4) @@ -333,7 +336,7 @@ def upload_index_to_storage(index_folder_path: str, extract_destination_path: st def create_corepacks_config(storage_bucket: Any, build_number: str, index_folder_path: str, - artifacts_dir: Optional[str], storage_base_path: str): + artifacts_dir: str, storage_base_path: str): """Create corepacks.json file and stores it in the artifacts dir. This files contains all of the server's core packs, under the key corepacks, and specifies which core packs should be upgraded upon XSOAR upgrade, under the key upgradeCorePacks. @@ -342,7 +345,7 @@ def create_corepacks_config(storage_bucket: Any, build_number: str, index_folder storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded. build_number (str): circleCI build number. index_folder_path (str): The index folder path. - artifacts_dir: The CI artifacts directory to upload the corepacks.json to. + artifacts_dir (str): The CI artifacts directory to upload the corepacks.json to. storage_base_path (str): the source path of the core packs in the target bucket. 
""" @@ -518,7 +521,7 @@ def get_updated_private_packs(private_packs, index_folder_path): return updated_private_packs -def get_private_packs(private_index_path: str, pack_names: set = set(), +def get_private_packs(private_index_path: str, pack_names: set = None, extract_destination_path: str = '') -> list: """ Gets a list of private packs. @@ -539,6 +542,7 @@ def get_private_packs(private_index_path: str, pack_names: set = set(), logging.warning(f'No metadata files found in [{private_index_path}]') private_packs = [] + pack_names = pack_names or set() logging.info(f'all metadata files found: {metadata_files}') for metadata_file_path in metadata_files: try: @@ -798,9 +802,9 @@ def add_pr_comment(comment: str): headers = {'Authorization': 'Bearer ' + token} try: res = requests.get(url + query, headers=headers, verify=False) - res = handle_github_response(res) - if res and res.get('total_count', 0) == 1: - issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None + res_json = handle_github_response(res) + if res_json and res_json.get('total_count', 0) == 1: + issue_url = res_json['items'][0].get('comments_url') if res_json.get('items', []) else None if issue_url: res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False) handle_github_response(res) @@ -811,7 +815,7 @@ def add_pr_comment(comment: str): logging.exception('Add pull request comment failed.') -def handle_github_response(response: json) -> dict: +def handle_github_response(response: Response) -> dict: """ Handles the response from the GitHub server after making a request. :param response: Response from the server. @@ -895,7 +899,7 @@ def get_images_data(packs_list: list): images_data = {} for pack in packs_list: - pack_image_data = {pack.name: {}} + pack_image_data: dict = {pack.name: {}} if pack.uploaded_author_image: pack_image_data[pack.name][BucketUploadFlow.AUTHOR] = True if pack.uploaded_integration_images: @@ -907,7 +911,7 @@ def get_images_data(packs_list: list): def main(): - install_logging('Prepare_Content_Packs_For_Testing.log') + install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging) option = option_handler() packs_artifacts_path = option.artifacts_path extract_destination_path = option.extract_path diff --git a/Tests/Marketplace/validate_landing_page_sections.py b/Tests/Marketplace/validate_landing_page_sections.py index 78800ac9ac52..5f29e7aef39b 100644 --- a/Tests/Marketplace/validate_landing_page_sections.py +++ b/Tests/Marketplace/validate_landing_page_sections.py @@ -1,11 +1,11 @@ import argparse import json -import logging import os import sys from glob import glob from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging LANDING_PAGE_SECTIONS_PAGE_PATH = 'Tests/Marketplace/landingPage_sections.json' @@ -67,5 +67,5 @@ def parse_landing_page_sections_to_json(): if __name__ in ("__main__", "__builtin__", "builtins"): - install_logging('ValidateLandingPageSections.log') + install_logging('ValidateLandingPageSections.log', logger=logging) main() diff --git a/Tests/configure_and_test_integration_instances.py b/Tests/configure_and_test_integration_instances.py index ff20058834c4..049d5a711616 100644 --- a/Tests/configure_and_test_integration_instances.py +++ b/Tests/configure_and_test_integration_instances.py @@ -3,7 +3,6 @@ import argparse import ast import json -import logging import os import subprocess import sys @@ -15,7 +14,8 @@ from pprint import pformat from threading import Thread 
from time import sleep -from typing import List, Tuple +from typing import List, Tuple, Union + from urllib.parse import quote_plus import demisto_client from demisto_sdk.commands.test_content.constants import SSH_USER @@ -24,6 +24,7 @@ from Tests.Marketplace.search_and_install_packs import search_and_install_packs_and_their_dependencies, \ upload_zipped_packs, install_all_content_packs_for_nightly from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging from Tests.test_content import extract_filtered_tests, get_server_numeric_version from Tests.test_integration import __get_integration_config, __test_integration_instance, disable_all_integrations from Tests.tools import run_with_proxy_configured @@ -107,7 +108,7 @@ def exec_command(self, command): stderr=subprocess.STDOUT) -def get_id_set(id_set_path) -> dict: +def get_id_set(id_set_path) -> Union[dict, None]: """ Used to collect the ID set so it can be passed to the Build class on init. @@ -115,6 +116,7 @@ def get_id_set(id_set_path) -> dict: """ if os.path.isfile(id_set_path): return get_json_file(id_set_path) + return None class Build: @@ -267,7 +269,7 @@ def check_test_version_compatible_with_server(test, server_version): test_to_version = format_version(test.get('toversion', '99.99.99')) server_version = format_version(server_version) - if not (LooseVersion(test_from_version) <= LooseVersion(server_version) <= LooseVersion(test_to_version)): + if not LooseVersion(test_from_version) <= LooseVersion(server_version) <= LooseVersion(test_to_version): playbook_id = test.get('playbookID') logging.debug( f'Test Playbook: {playbook_id} was ignored in the content installation test due to version mismatch ' @@ -575,7 +577,7 @@ def __set_server_keys(client, integration_params, integration_name): logging.info(f'Setting server keys for integration: {integration_name}') - data = { + data: dict = { 'data': {}, 'version': -1 } @@ -851,6 +853,7 @@ def get_env_conf(): "Role": "DEMISTO EVN" # e.g. 
'Server Master' }] # END CHANGE ON LOCAL RUN # + return None def map_server_to_port(env_results, instance_role): @@ -900,7 +903,7 @@ def configure_servers_and_restart(build): sleep(60) -def get_tests(build: Build) -> List[str]: +def get_tests(build: Build) -> List[dict]: """ Selects the tests from that should be run in this execution and filters those that cannot run in this server version Args: @@ -917,10 +920,9 @@ def get_tests(build: Build) -> List[str]: # skip test button testing logging.debug('Not running instance tests in nightly flow') tests_for_iteration = [] - elif filtered_tests: - tests_for_iteration = [test for test in tests if test.get('playbookID', '') in filtered_tests] else: - tests_for_iteration = tests + tests_for_iteration = [test for test in tests + if not filtered_tests or test.get('playbookID', '') in filtered_tests] tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version) return tests_for_iteration @@ -1401,7 +1403,7 @@ def install_packs_pre_update(build: Build) -> bool: def main(): - install_logging('Install_Content_And_Configure_Integrations_On_Server.log') + install_logging('Install_Content_And_Configure_Integrations_On_Server.log', logger=logging) build = Build(options_handler()) logging.info(f"Build Number: {build.ci_build_number}") diff --git a/Tests/demistomock/demistomock.py b/Tests/demistomock/demistomock.py index ebe7dda06804..530263c42a7f 100644 --- a/Tests/demistomock/demistomock.py +++ b/Tests/demistomock/demistomock.py @@ -600,7 +600,7 @@ def results(results): None: No data returned """ - if type(results) is dict and results.get("contents"): + if isinstance(results, dict) and results.get("contents"): results = results.get("contents") log("demisto results: {}".format(json.dumps(results, indent=4, sort_keys=True))) @@ -764,7 +764,7 @@ def incidents(incidents=None): """ if incidents is None: - return exampleIncidents[0]['Contents']['data'] + return exampleIncidents[0]['Contents']['data'] # type: ignore[index] else: return results( {"Type": 1, "Contents": json.dumps(incidents), "ContentsFormat": "json"} diff --git a/Tests/instance_notifier.py b/Tests/instance_notifier.py index a289e3461f8e..470c55fa0207 100644 --- a/Tests/instance_notifier.py +++ b/Tests/instance_notifier.py @@ -1,6 +1,5 @@ import argparse import json -import logging import os import demisto_client @@ -8,6 +7,7 @@ from Tests.configure_and_test_integration_instances import update_content_on_demisto_instance from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging from Tests.test_integration import __create_integration_instance, __delete_integrations_instances from demisto_sdk.commands.common.tools import str2bool @@ -154,7 +154,7 @@ def slack_notifier(slack_token, secret_conf_path, server, user, password, build_ if __name__ == "__main__": - install_logging('Instance-Test.log') + install_logging('Instance-Test.log', logger=logging) options = options_handler() if options.instance_tests: env_results_path = os.path.join(os.getenv('ARTIFACTS_FOLDER', './artifacts'), 'env_results.json') diff --git a/Tests/private_build/configure_and_test_integration_instances_private.py b/Tests/private_build/configure_and_test_integration_instances_private.py index 4a80635d30c5..361fcf5955b1 100644 --- a/Tests/private_build/configure_and_test_integration_instances_private.py +++ b/Tests/private_build/configure_and_test_integration_instances_private.py @@ -139,7 +139,7 @@ def main(): pre_update=False, 
use_mock=False) # Gather tests to add to test pack - test_playbooks_from_id_set = build.id_set.get('TestPlaybooks', []) + test_playbooks_from_id_set = build.id_set.get('TestPlaybooks', []) if build.id_set else None tests_to_add_to_test_pack = find_needed_test_playbook_paths(test_playbooks=test_playbooks_from_id_set, tests_to_run=build.tests_to_run, path_to_content=build.content_root) diff --git a/Tests/private_build/run_content_tests_private.py b/Tests/private_build/run_content_tests_private.py index bc29558cbfe2..9ddbffac1812 100644 --- a/Tests/private_build/run_content_tests_private.py +++ b/Tests/private_build/run_content_tests_private.py @@ -6,7 +6,7 @@ from distutils.version import LooseVersion from typing import Any -import logging +from Tests.scripts.utils import logging_wrapper as logging import urllib3 import demisto_client.demisto_api @@ -129,17 +129,15 @@ def run_test(tests_settings: SettingsTester, demisto_user: str, demisto_pass: st start_message = f'------ Test {test_message} start ------' client = demisto_client.configure(base_url=server_url, username=demisto_user, password=demisto_pass, verify_ssl=False) - logging.info(start_message + ' (Private Build Test)') + logging.info(f'{start_message} (Private Build Test)') run_test_logic(tests_settings, client, failed_playbooks, integrations, playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url, demisto_user, demisto_pass, build_name) logging.info(f'------ Test {test_message} end ------\n') - return - def run_private_test_scenario(tests_settings: SettingsTester, t: dict, default_test_timeout: int, - skipped_tests_conf: set, nightly_integrations: list, skipped_integrations_conf: set, + skipped_tests_conf: dict, nightly_integrations: list, skipped_integrations_conf: set, skipped_integration: set, filtered_tests: list, skipped_tests: set, secret_params: dict, failed_playbooks: list, playbook_skipped_integration: set, succeed_playbooks: list, slack: str, circle_ci: str, build_number: str, server: str, build_name: str, @@ -213,7 +211,7 @@ def run_private_test_scenario(tests_settings: SettingsTester, t: dict, default_t test_from_version = t.get('fromversion', '0.0.0') test_to_version = t.get('toversion', '99.99.99') - if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)): + if not LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version): warning_message = f'Test {test_message} ignored due to version mismatch ' \ f'(test versions: {test_from_version}-{test_to_version})' logging.warning(warning_message) @@ -246,7 +244,7 @@ def execute_testing(tests_settings: SettingsTester, server_ip: str, all_tests: s :return: No object is returned, just updates the tests_data_keep object. 
""" server = SERVER_URL.format(server_ip) - server_numeric_version = tests_settings.serverNumericVersion + server_numeric_version = tests_settings.serverNumericVersion or '' logging.info(f"Executing tests with the server {server} - and the server ip {server_ip}") slack = tests_settings.slack circle_ci = tests_settings.circleci @@ -278,11 +276,11 @@ def execute_testing(tests_settings: SettingsTester, server_ip: str, all_tests: s # turn off telemetry turn_off_telemetry(xsoar_client) - failed_playbooks = [] - succeed_playbooks = [] - skipped_tests = set([]) - skipped_integration = set([]) - playbook_skipped_integration = set([]) + failed_playbooks: list = [] + succeed_playbooks: list = [] + skipped_tests: set = set([]) + skipped_integration: set = set([]) + playbook_skipped_integration: set = set([]) # Private builds do not use mocking. Here we copy the mocked test list to the unmockable list. private_tests = get_test_records_of_given_test_names(tests_settings, all_tests) @@ -364,7 +362,7 @@ def manage_tests(tests_settings: SettingsTester): execute_testing(tests_settings, ami_instance_ip, all_tests, tests_data_keeper) sleep(8) - print_test_summary(tests_data_keeper, tests_settings.isAMI) + print_test_summary(tests_data_keeper, tests_settings.isAMI, logging_module=logging) create_result_files(tests_data_keeper) if tests_data_keeper.failed_playbooks: @@ -374,7 +372,7 @@ def manage_tests(tests_settings: SettingsTester): def main(): - install_logging('Run_Tests.log') + install_logging('Run_Tests.log', logger=logging) tests_settings = options_handler() logging.info(f"Build Name: {tests_settings.buildName}") logging.info(f" Build Number: {tests_settings.buildNumber}") diff --git a/Tests/private_build/tests/test_configure_and_test_integration_instances_private.py b/Tests/private_build/tests/test_configure_and_test_integration_instances_private.py index a5686e982fef..495284409ce8 100644 --- a/Tests/private_build/tests/test_configure_and_test_integration_instances_private.py +++ b/Tests/private_build/tests/test_configure_and_test_integration_instances_private.py @@ -53,7 +53,7 @@ def __init__(self): self.ci_build_number = '100' self.is_nightly = False self.ami_env = 'Server Master' - self.servers, self.server_numeric_version = ('8.8.8.8', '6.1.0') + self.server_numeric_version = '6.1.0' self.secret_conf = {} self.username = 'TestUser' self.password = 'TestPassword' diff --git a/Tests/private_build/tests/test_upload_packs_private.py b/Tests/private_build/tests/test_upload_packs_private.py index c097a75fe531..1e68a042631b 100644 --- a/Tests/private_build/tests/test_upload_packs_private.py +++ b/Tests/private_build/tests/test_upload_packs_private.py @@ -11,7 +11,7 @@ def is_dir(self): @staticmethod def isdir(path): - return True if path == 'mock_path' else False + return path == 'mock_path' def scan_dir(dirs=None): @@ -32,8 +32,8 @@ def test_add_private_packs_to_index(self, mocker): upload_packs_private.add_private_packs_to_index('test', 'private_test') - index_call_args = upload_packs_private.update_index_folder.call_args[0] - index_call_count = upload_packs_private.update_index_folder.call_count + index_call_args = upload_packs_private.update_index_folder.call_args[0] # pylint: disable=no-member + index_call_count = upload_packs_private.update_index_folder.call_count # pylint: disable=no-member assert index_call_count == 1 assert index_call_args[0] == 'test' diff --git a/Tests/private_build/upload_packs_private.py b/Tests/private_build/upload_packs_private.py index 96b9e5bedc62..05b10e17a3e2 100644 --- 
a/Tests/private_build/upload_packs_private.py +++ b/Tests/private_build/upload_packs_private.py @@ -125,7 +125,7 @@ def add_existing_private_packs_from_index(metadata_files, changed_pack_id): Returns: private_packs (list): The modified list of private packs, including the added pack. """ - private_packs = [] + private_packs: list = [] for metadata_file_path in metadata_files: # Adding all the existing private packs, already found in the index logging.info(f'Getting existing metadata files from the index, in path: {metadata_file_path}') @@ -158,7 +158,7 @@ def get_existing_private_packs_metadata_paths(private_index_path): return metadata_files -def get_private_packs(private_index_path: str, pack_names: set = set(), +def get_private_packs(private_index_path: str, pack_names: set = None, extract_destination_path: str = '') -> list: """Gets a list of private packs, that will later be added to index.json. @@ -170,7 +170,7 @@ def get_private_packs(private_index_path: str, pack_names: set = set(), private_metadata_paths = get_existing_private_packs_metadata_paths(private_index_path) # In the private build, there is always exactly one modified pack - changed_pack_id = list(pack_names)[0] if len(pack_names) > 0 else '' + changed_pack_id = list(pack_names)[0] if pack_names and len(pack_names) > 0 else '' private_packs = add_existing_private_packs_from_index(private_metadata_paths, changed_pack_id) private_packs = add_changed_private_pack(private_packs, extract_destination_path, changed_pack_id) @@ -240,6 +240,7 @@ def should_upload_core_packs(storage_bucket_name: str) -> bool: return not (is_private_storage_bucket or is_private_ci_bucket) +# pylint: disable=R0911 def create_and_upload_marketplace_pack(upload_config: Any, pack: Any, storage_bucket: Any, index_folder_path: str, packs_dependencies_mapping: dict, private_bucket_name: str, storage_base_path, private_storage_bucket: bool = None, diff --git a/Tests/scripts/.mypy_ignored_messages b/Tests/scripts/.mypy_ignored_messages new file mode 100644 index 000000000000..c4c8480d4ae0 --- /dev/null +++ b/Tests/scripts/.mypy_ignored_messages @@ -0,0 +1,2 @@ +Found [0-9]* errors* in [0-9]* files* +There are no \.py diff --git a/Tests/scripts/collect_tests_and_content_packs.py b/Tests/scripts/collect_tests_and_content_packs.py index 63c82b5b0cd4..30134bb2b79f 100755 --- a/Tests/scripts/collect_tests_and_content_packs.py +++ b/Tests/scripts/collect_tests_and_content_packs.py @@ -6,10 +6,10 @@ import argparse import glob import json -import logging +import re from copy import deepcopy from distutils.version import LooseVersion -from typing import Dict, Tuple, Union, Optional +from typing import Dict, Tuple, Optional import os import sys @@ -21,7 +21,8 @@ is_pack_xsoar_supported from Tests.scripts.utils.get_modified_files_for_testing import get_modified_files_for_testing from Tests.scripts.utils.log_util import install_logging -from demisto_sdk.commands.common.constants import * # noqa: E402 +from Tests.scripts.utils import logging_wrapper as logging +from demisto_sdk.commands.common import constants SANITY_TESTS = { 'Sanity Test - Playbook with integration', @@ -135,7 +136,7 @@ def get_test_playbooks_configured_with_integration(self, integration_id): # Global used to indicate if failed during any of the validation states _FAILED = False ID_SET = {} -CONF: Union[TestConf, dict] = {} +CONF: TestConf = None # type: ignore[assignment] if os.path.isfile('./artifacts/id_set.json'): with open('./artifacts/id_set.json', 'r') as conf_file: @@ -166,6 +167,8 @@ def 
get_name(file_path): if data_dictionary: return data_dictionary.get('name', '-') + return None + def get_tests(file_path): """Collect tests mentioned in file_path""" @@ -173,6 +176,7 @@ def get_tests(file_path): # inject no tests to whitelist so adding values to white list will not force all tests if data_dictionary: return data_dictionary.get('tests', []) + return [] def collect_tests_and_content_packs( @@ -333,12 +337,16 @@ def id_set__get_test_playbook(id_set, test_playbook_id): if test_playbook_id in test_playbook.keys(): return test_playbook[test_playbook_id] + return None + def id_set__get_integration_file_path(id_set, integration_id): for integration in id_set.get('integrations', []): if integration_id in integration.keys(): return integration[integration_id]['file_path'] + logging.critical(f'Could not find integration "{integration_id}" in the id_set') + return None def check_if_fetch_incidents_is_tested(missing_ids, integration_ids, id_set, conf, tests_set): @@ -369,9 +377,9 @@ def check_if_fetch_incidents_is_tested(missing_ids, integration_ids, id_set, con def find_tests_and_content_packs_for_modified_files(modified_files, conf=deepcopy(CONF), id_set=deepcopy(ID_SET)): - script_names = set([]) - playbook_names = set([]) - integration_ids = set([]) + script_names: set = set([]) + playbook_names: set = set([]) + integration_ids: set = set([]) tests_set, caught_scripts, caught_playbooks, packs_to_install = collect_changed_ids( integration_ids, playbook_names, script_names, modified_files, id_set) @@ -492,17 +500,18 @@ def get_api_module_integrations(changed_api_modules, integration_set): def collect_changed_ids(integration_ids, playbook_names, script_names, modified_files, id_set=deepcopy(ID_SET)): - tests_set = set([]) - updated_script_names = set([]) - updated_playbook_names = set([]) - catched_scripts, catched_playbooks = set([]), set([]) - changed_api_modules = set([]) + tests_set: set = set([]) + updated_script_names: set = set([]) + updated_playbook_names: set = set([]) + catched_scripts: set = set([]) + catched_playbooks: set = set([]) + changed_api_modules: set = set([]) script_to_version = {} playbook_to_version = {} integration_to_version = {} for file_path in modified_files: - if collect_helpers.checked_type(file_path, collect_helpers.SCRIPT_REGEXES + YML_SCRIPT_REGEXES): + if collect_helpers.checked_type(file_path, collect_helpers.SCRIPT_REGEXES + constants.YML_SCRIPT_REGEXES): name = get_name(file_path) script_names.add(name) script_to_version[name] = (tools.get_from_version(file_path), tools.get_to_version(file_path)) @@ -512,17 +521,17 @@ def collect_changed_ids(integration_ids, playbook_names, script_names, modified_ catched_scripts.add(name) tests_set.add('Found a unittest for the script {}'.format(package_name)) - elif collect_helpers.checked_type(file_path, YML_PLAYBOOKS_NO_TESTS_REGEXES): + elif collect_helpers.checked_type(file_path, constants.YML_PLAYBOOKS_NO_TESTS_REGEXES): name = get_name(file_path) playbook_names.add(name) playbook_to_version[name] = (tools.get_from_version(file_path), tools.get_to_version(file_path)) - elif collect_helpers.checked_type(file_path, collect_helpers.INTEGRATION_REGEXES + YML_INTEGRATION_REGEXES): + elif collect_helpers.checked_type(file_path, collect_helpers.INTEGRATION_REGEXES + constants.YML_INTEGRATION_REGEXES): _id = tools.get_script_or_integration_id(file_path) integration_ids.add(_id) integration_to_version[_id] = (tools.get_from_version(file_path), tools.get_to_version(file_path)) - if 
collect_helpers.checked_type(file_path, API_MODULE_REGEXES): + if collect_helpers.checked_type(file_path, constants.API_MODULE_REGEXES): api_module_name = tools.get_script_or_integration_id(file_path) changed_api_modules.add(api_module_name) @@ -960,7 +969,7 @@ def is_test_uses_active_integration(integration_ids, conf=deepcopy(CONF)): def get_tests_for_pack(pack_path): pack_yml_files = tools.get_files_in_dir(pack_path, ['yml']) pack_test_playbooks = [tools.collect_ids(file) for file in pack_yml_files if - collect_helpers.checked_type(file, YML_TEST_PLAYBOOKS_REGEXES)] + collect_helpers.checked_type(file, constants.YML_TEST_PLAYBOOKS_REGEXES)] return pack_test_playbooks @@ -1066,7 +1075,7 @@ def remove_tests_for_non_supported_packs(tests: set, id_set: dict) -> set: if tests_that_should_not_be_tested: logging.debug('The following test playbooks are not supported and will not be tested: \n{} '.format( '\n'.join(tests_that_should_not_be_tested))) - tests_names = set([test.split(':')[0] for test in tests_that_should_not_be_tested]) + tests_names = {test.split(':')[0] for test in tests_that_should_not_be_tested} tests.difference_update(tests_names) return tests @@ -1083,7 +1092,7 @@ def remove_private_tests(tests_without_private_packs): tests_without_private_packs.remove(private_test) -def filter_tests(tests: set, id_set: json, modified_packs: set, is_nightly=False) -> set: +def filter_tests(tests: set, id_set: dict, modified_packs: set, is_nightly=False) -> set: """ Filter tests out from the test set if they are: a. Ignored @@ -1149,10 +1158,7 @@ def is_documentation_changes_only(files_string: str) -> bool: files = [s for s in files_string.split('\n') if s] documentation_changes_only = \ all(map(lambda s: s.endswith('.md') or s.endswith('.png') or s.endswith('.jpg') or s.endswith('.mp4'), files)) - if documentation_changes_only: - return True - else: - return False + return documentation_changes_only def get_test_list_and_content_packs_to_install(files_string, @@ -1283,7 +1289,7 @@ def get_from_version_and_to_version_bounderies(all_modified_files_paths: set, logging.info("\n\n Tests list:") logging.info(modified_packs) for pack_name in modified_packs: - pack_metadata_path = os.path.join(tools.pack_name_to_path(pack_name), PACKS_PACK_META_FILE_NAME) + pack_metadata_path = os.path.join(tools.pack_name_to_path(pack_name), constants.PACKS_PACK_META_FILE_NAME) pack_metadata = tools.get_pack_metadata(pack_metadata_path) from_version = pack_metadata.get('serverMinVersion') to_version = pack_metadata.get('serverMaxVersion') @@ -1373,7 +1379,7 @@ def changed_files_to_string(changed_files): def create_test_file(is_nightly, skip_save=False, path_to_pack=''): """Create a file containing all the tests we need to run for the CI""" if is_nightly: - packs_to_install = filter_installed_packs(set(os.listdir(PACKS_DIR))) + packs_to_install = filter_installed_packs(set(os.listdir(constants.PACKS_DIR))) tests = filter_tests(set(CONF.get_test_playbook_ids()), id_set=deepcopy(ID_SET), is_nightly=True, modified_packs=set()) logging.info("Nightly - collected all tests that appear in conf.json and all packs from content repo that " @@ -1381,7 +1387,8 @@ def create_test_file(is_nightly, skip_save=False, path_to_pack=''): else: branches = tools.run_command("git branch") branch_name_reg = re.search(r"\* (.*)", branches) - branch_name = branch_name_reg.group(1) + if branch_name_reg: + branch_name = branch_name_reg.group(1) logging.info("Getting changed files from the branch: {0}".format(branch_name)) if path_to_pack: @@ 
-1423,18 +1430,20 @@ def create_test_file(is_nightly, skip_save=False, path_to_pack=''): else: if tests_string: - logging.success('Collected the following tests:\n{0}\n'.format(tests_string)) + success_msg = 'Collected the following tests:\n{0}\n'.format(tests_string) + logging.success(success_msg) else: logging.error('Did not find tests to run') if packs_to_install_string: - logging.success('Collected the following content packs to install:\n{0}\n'.format(packs_to_install_string)) + success_msg = 'Collected the following content packs to install:\n{0}\n'.format(packs_to_install_string) + logging.success(success_msg) else: logging.error('Did not find content packs to install') if __name__ == "__main__": - install_logging('Collect_Tests_And_Content_Packs.log') + install_logging('Collect_Tests_And_Content_Packs.log', logger=logging) logging.info("Starting creation of test filter file") parser = argparse.ArgumentParser(description='Utility CircleCI usage') diff --git a/Tests/scripts/gitlab_slack_notifier.py b/Tests/scripts/gitlab_slack_notifier.py index 69c673455222..0578e5829ad8 100644 --- a/Tests/scripts/gitlab_slack_notifier.py +++ b/Tests/scripts/gitlab_slack_notifier.py @@ -17,7 +17,7 @@ ENV_RESULTS_PATH = os.getenv('ENV_RESULTS_PATH', os.path.join(ARTIFACTS_FOLDER, 'env_results.json')) PACK_RESULTS_PATH = os.path.join(ARTIFACTS_FOLDER, BucketUploadFlow.PACKS_RESULTS_FILE) CONTENT_CHANNEL = 'dmst-content-team' -GITLAB_PROJECT_ID = os.getenv('CI_PROJECT_ID', 2596) # the default is the id of the content repo in code.pan.run +GITLAB_PROJECT_ID = os.getenv('CI_PROJECT_ID') or 2596 # the default is the id of the content repo in code.pan.run GITLAB_SERVER_URL = os.getenv('CI_SERVER_URL', 'https://code.pan.run') # disable-secrets-detection CONTENT_NIGHTLY = 'Content Nightly' BUCKET_UPLOAD = 'Upload Packs to Marketplace Storage' @@ -49,13 +49,13 @@ def options_handler(): def unit_tests_results(): - failing_unit_tests = get_artifact_data('failed_lint_report.txt') + failing_tests = get_artifact_data('failed_lint_report.txt') slack_results = [] - if failing_unit_tests: - failing_unit_tests = failing_unit_tests.split('\n') + if failing_tests: + failing_test_list = failing_tests.split('\n') slack_results.append({ - "title": f'{"Failed Unit Tests"} - ({len(failing_unit_tests)})', - "value": '\n'.join(failing_unit_tests), + "title": f'{"Failed Unit Tests"} - ({len(failing_test_list)})', + "value": '\n'.join(failing_test_list), "short": False }) return slack_results diff --git a/Tests/scripts/infrastructure_tests/release_notes_generator_test.py b/Tests/scripts/infrastructure_tests/release_notes_generator_test.py index 9f193443cdde..4acb6974f6f6 100644 --- a/Tests/scripts/infrastructure_tests/release_notes_generator_test.py +++ b/Tests/scripts/infrastructure_tests/release_notes_generator_test.py @@ -72,6 +72,7 @@ def test_ignored_entire_release_note(self): assert formatted_text == '' +# pylint: disable=W0201 class TestGenerateReleaseNotesSummary: def setup(self): self._version = VERSION diff --git a/Tests/scripts/infrastructure_tests/test_collect_tests_and_content_packs.py b/Tests/scripts/infrastructure_tests/test_collect_tests_and_content_packs.py index d09a5801c25f..2a79db103a6b 100644 --- a/Tests/scripts/infrastructure_tests/test_collect_tests_and_content_packs.py +++ b/Tests/scripts/infrastructure_tests/test_collect_tests_and_content_packs.py @@ -1,6 +1,7 @@ +# type: ignore[attr-defined] +# pylint: disable=no-member import copy import json -import logging import os import tempfile from pathlib import 
Path
@@ -10,10 +11,12 @@
 import demisto_sdk.commands.common.tools as demisto_sdk_tools
 import Tests
+from Tests.scripts.utils import logging_wrapper as logging
 from demisto_sdk.commands.common.constants import (PACK_METADATA_SUPPORT,
-                                                   PACKS_PACK_META_FILE_NAME)
+                                                   PACKS_PACK_META_FILE_NAME,
+                                                   PACKS_DIR)
 from Tests.scripts.collect_tests_and_content_packs import (
-    PACKS_DIR, SANITY_TESTS, TestConf, collect_content_packs_to_install,
+    SANITY_TESTS, TestConf, collect_content_packs_to_install,
     create_filter_envs_file, get_from_version_and_to_version_bounderies,
     get_test_list_and_content_packs_to_install, is_documentation_changes_only,
     remove_ignored_tests, remove_tests_for_non_supported_packs, check_if_test_should_not_be_missed)
@@ -1167,7 +1170,7 @@ def test_remove_ignored_tests(tests_to_filter, ignored_tests, expected_result, m
     """
     mocker.patch.object(Tests.scripts.collect_tests_and_content_packs.tools, 'get_ignore_pack_skipped_tests',
                         return_value=ignored_tests)
-    mocker.patch('logging.info')
+    mocker.patch.object(logging, 'info')
     res = remove_ignored_tests(tests_to_filter, MOCK_ID_SET, tests_to_filter)
     assert res == expected_result
     if ignored_tests:
@@ -1192,7 +1195,7 @@ def test_remove_tests_for_non_supported_packs(tests_to_filter, should_test_conte
     """
     mocker.patch.object(Tests.scripts.collect_tests_and_content_packs, 'should_test_content_pack',
                         return_value=should_test_content)
-    mocker.patch('logging.debug')
+    mocker.patch.object(logging, 'debug')
     filtered_tests = copy.deepcopy(tests_to_filter)
     res = remove_tests_for_non_supported_packs(tests_to_filter, MOCK_ID_SET)
     assert res == expected_result
@@ -1230,7 +1233,7 @@ def test_get_from_version_and_to_version_bounderies_modified_metadata():
     - Check that the minimum version is 6.1.0
     """
-    all_modified_files_paths = set([])
+    all_modified_files_paths: set = set([])
     pack_list = {'Pack1'}
 
     with tempfile.TemporaryDirectory() as temp_dir:
diff --git a/Tests/scripts/linters_runner.sh b/Tests/scripts/linters_runner.sh
new file mode 100755
index 000000000000..92ddffec833a
--- /dev/null
+++ b/Tests/scripts/linters_runner.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+
+# Run flake8, pylint and mypy on everything excluding Packs (Integrations and Scripts) - they will be handled in linting
+errors=0
+all_dirs=$(find . -type d -not \( -path "./.*" -o -path "./Templates*" -o -path "./TestPlaybooks*" -o -path "./node_modules*" -o -path "./venv*" -o -path "./Packs*" -o -path "*infrastructure_tests*" -o -path "*scripts/awsinstancetool*" \))
+all_1_depth_dirs=$(find . -maxdepth 1 -type d -not \( -path . -o -path ./Packs -o -path ./venv -o -path ./Templates -o -path ./TestPlaybooks -o -path ./node_modules -o -path "./.*" \))
+
+# run mypy
+./Tests/scripts/mypy.sh $all_dirs || errors=$?
+
+# run pylint
+./Tests/scripts/pylint.sh $all_dirs || errors=$?
+
+# run flake8
+./Tests/scripts/pyflake.sh *.py || errors=$?
+./Tests/scripts/pyflake.sh $all_1_depth_dirs || errors=$?
+
+
+echo 'Linter exit code:' $errors
+if [[ $errors -ne 0 ]]; then
+    exit 1
+fi
\ No newline at end of file
diff --git a/Tests/scripts/mypy.sh b/Tests/scripts/mypy.sh
new file mode 100755
index 000000000000..76334fbbe8cb
--- /dev/null
+++ b/Tests/scripts/mypy.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# ignored_messages_file contains patterns of mypy messages we want to ignore
+ignored_messages_file="./Tests/scripts/.mypy_ignored_messages"
+mypy_flags="--check-untyped-defs --ignore-missing-imports --follow-imports=silent --show-column-numbers --show-error-codes --allow-redefinition --show-absolute-path --cache-dir=/dev/null"
+
+errors=0
+
+echo "Starting mypy run"
+
+for dir in $*; do
+    mypy_out=$(python3 -m mypy $dir/*.py $mypy_flags 2>&1)
+    if [[ $? -ne 0 && $? -ne 2 ]]; then
+
+        echo -e "$mypy_out" | sort | uniq | grep -v -f $ignored_messages_file
+        if [[ $? -eq 0 ]]; then
+            errors=1 # some errors found by grep
+        fi
+
+    fi
+done
+
+if [[ $errors -ne 0 ]]; then
+    echo "*** Finished mypy run, please fix the above errors ***"
+    exit 1
+fi
+
+echo "Finished mypy run - no errors were found"
+
diff --git a/Tests/scripts/pyflake.sh b/Tests/scripts/pyflake.sh
index ea6c37303c4f..5ca5cffd79a9 100755
--- a/Tests/scripts/pyflake.sh
+++ b/Tests/scripts/pyflake.sh
@@ -16,7 +16,7 @@ if [[ $? -ne 0 ]]
       echo `echo "$flake8_out" | grep $f`
       echo "python3 output:"
       echo "$flake8_py3_out"
-      echo "*** Please fix the errors according to the python version you are using"
+      echo "*** Please fix the errors according to the python version you are using ***"
     fi
   done
 fi
@@ -25,4 +25,4 @@ if [[ $errors -ne 0 ]]; then
     exit 1
 fi
 
-echo "Finished flake8 run"
\ No newline at end of file
+echo "Finished flake8 run - no errors were found"
diff --git a/Tests/scripts/pylint.sh b/Tests/scripts/pylint.sh
new file mode 100755
index 000000000000..caf4071ca509
--- /dev/null
+++ b/Tests/scripts/pylint.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+pylint_disabled_errors=C0103,C0114,C0115,C0116,C0122,C0301,C0302,C0325,C0411,C0412,C0413,C0415,E0401,E0611,E1136,E1205,F0001,F0010,R0201,R0205,R0401,R0801,R0902,R0903,R0904,R0912,R0913,R0914,R0915,R1702,R1705,R1710,R1721,R1725,W0105,W0150,W0212,W0401,W0404,W0511,W0603,W0612,W0613,W0621,W0622,W0703,W1202,W1203
+echo "Starting pylint run"
+
+for dir in $*; do
+    pylint_out=$(python3 -m pylint --disable=$pylint_disabled_errors 2>&1 $dir/*.py)
+    if [[ $? -ne 0 ]]; then
+        echo -e "$pylint_out" | sort | uniq | grep ": [A-Z][0-9]*: "
+        if [[ $? -eq 0 ]]; then
+            errors=1 # some errors found by grep
+        fi
+    fi
+done
+
+if [[ $errors -ne 0 ]]; then
+    echo "*** Finished pylint run, please fix the above errors ***"
+    exit 1
+fi
+
+
+echo "Finished pylint run - no errors were found"
diff --git a/Tests/scripts/slack_notifier.py b/Tests/scripts/slack_notifier.py
index 7d32212a95c9..c76a2ff5f3d0 100644
--- a/Tests/scripts/slack_notifier.py
+++ b/Tests/scripts/slack_notifier.py
@@ -166,9 +166,9 @@ def get_failed_unit_tests_attachment(build_url: str, is_sdk_build: bool = False)
         (List[Dict]) Dict wrapped inside a list containing failed unit tests attachment.
""" if artifact_data := get_artifact_data('failed_lint_report.txt'): - artifact_data = artifact_data.split('\n') - unittests_fields: Optional[List[Dict]] = get_entities_fields(f'Failed Unittests - ({len(artifact_data)})', - artifact_data) + artifacts = artifact_data.split('\n') + unittests_fields: Optional[List[Dict]] = get_entities_fields(f'Failed Unittests - ({len(artifacts)})', + artifacts) else: unittests_fields = [] color: str = 'good' if not unittests_fields else 'danger' @@ -209,7 +209,7 @@ def get_coverage_attachment(build_number: str) -> Optional[Dict]: Returns: (Dict): Attachment of the coverage if coverage report exists. """ - xml_coverage_data: str = get_artifact_data('coverage_report/coverage.xml') + xml_coverage_data: Optional[str] = get_artifact_data('coverage_report/coverage.xml') if not xml_coverage_data: return None coverage_dict_data: OrderedDict = xmltodict.parse(xml_coverage_data) @@ -418,7 +418,7 @@ def slack_notifier(build_url, slack_token, test_type, build_number, env_results_ job_name="", slack_channel=CONTENT_CHANNEL, gitlab_server=None): branches = run_command("git branch") branch_name_reg = re.search(r'\* (.*)', branches) - branch_name = branch_name_reg.group(1) + branch_name = branch_name_reg.group(1) # type: ignore[union-attr] if branch_name == 'master' or slack_channel.lower() != CONTENT_CHANNEL: logging.info("Extracting build status") diff --git a/Tests/scripts/spell_checker.py b/Tests/scripts/spell_checker.py index dcfe8cd86cf5..0939e05aed94 100644 --- a/Tests/scripts/spell_checker.py +++ b/Tests/scripts/spell_checker.py @@ -42,7 +42,7 @@ def check_md_file(spellchecker, md_data, unknown_words): def spell_checker(path, is_md=False): - unknown_words = set([]) + unknown_words: set = set([]) spellchecker = SpellChecker() spellchecker.word_frequency.load_text_file('Tests/known_words.txt') diff --git a/Tests/scripts/utils/log_util.py b/Tests/scripts/utils/log_util.py index 6cc0978cd5aa..1de611f2d2fb 100644 --- a/Tests/scripts/utils/log_util.py +++ b/Tests/scripts/utils/log_util.py @@ -7,45 +7,46 @@ from demisto_sdk.commands.test_content.ParallelLoggingManager import LOGGING_FORMAT, LEVEL_STYLES, ARTIFACTS_PATH -def _add_logging_level(level_name: str, level_num: int, method_name: str = None) -> None: +def _add_logging_level(level_name: str, level_num: int, method_name: str = None, logger=logging) -> None: """ - Comprehensively adds a new logging level to the `logging` module and the + Comprehensively adds a new logging level to the passed `logger` and the currently configured logging class. - `level_name` becomes an attribute of the `logging` module with the value - `level_num`. `method_name` becomes a convenience method for both `logging` - itself and the class returned by `logging.getLoggerClass()` (usually just + `level_name` becomes an attribute of the `logger` with the value + `level_num`. `method_name` becomes a convenience method for both `logger` + itself and the class returned by `logger.getLoggerClass()` (usually just `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is used. 
To avoid accidental clobberings of existing attributes, this method will
     raise an `AttributeError` if the level name is already an attribute of the
-    `logging` module or if the method name is already present
+    `logger` or if the method name is already present
 
     Example
     -------
-    >>> _add_logging_level('TRACE', logging.DEBUG - 5)
-    >>> logging.getLogger(__name__).setLevel("TRACE")
-    >>> logging.getLogger(__name__).trace('that worked')
-    >>> logging.trace('so did this')
-    >>> logging.TRACE
+    >>> _add_logging_level('TRACE', logger.DEBUG - 5)
+    >>> logger.getLogger(__name__).setLevel("TRACE")
+    >>> logger.getLogger(__name__).trace('that worked')
+    >>> logger.trace('so did this')
+    >>> logger.TRACE
     5
 
     Args:
         level_name: The name of the level that will become an attribute of the `logging` module
         level_num: The logging value of the new level
         method_name: The method name with which the new level will be called
+        logger: The logger to add the level to, defaults to the `logging` module
     """
     if not method_name:
         method_name = level_name.lower()
 
-    if hasattr(logging, level_name):
-        raise AttributeError(f'{level_name} already defined in logging module')
-    if hasattr(logging, method_name):
-        raise AttributeError(f'{method_name} already defined in logging module')
-    if hasattr(logging.getLoggerClass(), method_name):
+    if hasattr(logger, level_name):
+        raise AttributeError(f'{level_name} already defined in `logging` module')
+    if hasattr(logger, method_name):
+        raise AttributeError(f'{method_name} already defined in `logging` module')
+    if hasattr(logger.getLoggerClass(), method_name):
         raise AttributeError(f'{method_name} already defined in logger class')
 
     # This method was inspired by the answers to Stack Overflow post
@@ -56,15 +57,15 @@ def logForLevel(self, message, *args, **kwargs):
         self._log(level_num, message, args, **kwargs)
 
     def logToRoot(message, *args, **kwargs):
-        logging.log(level_num, message, *args, **kwargs)
+        logger.log(level_num, message, *args, **kwargs)
 
-    logging.addLevelName(level_num, level_name)
-    setattr(logging, level_name, level_num)
-    setattr(logging.getLoggerClass(), method_name, logForLevel)
-    setattr(logging, method_name, logToRoot)
+    logger.addLevelName(level_num, level_name)
+    setattr(logger, level_name, level_num)
+    setattr(logger.getLoggerClass(), method_name, logForLevel)
+    setattr(logger, method_name, logToRoot)
 
 
-def install_logging(log_file_name: str, include_process_name=False) -> str:
+def install_logging(log_file_name: str, include_process_name=False, logger=logging) -> str:
     """
     This method install the logging mechanism so that info level logs will be sent to the console and debug level logs
     will be sent to the log_file_name only.
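
For reference, the observable effect of `_add_logging_level` with the default `logger=logging` can be sketched as follows. This is an illustrative, standalone snippet rather than part of the patch; it reuses the SUCCESS/25 convention that `install_logging` registers, and `basicConfig` is called here only so the records are actually emitted:

    import logging

    from Tests.scripts.utils.log_util import _add_logging_level

    logging.basicConfig(level=logging.DEBUG)

    # Register the custom level once; the hasattr guard mirrors install_logging().
    if not hasattr(logging, 'success'):
        _add_logging_level('SUCCESS', 25)

    # Module-level convenience function, added via setattr(logger, method_name, logToRoot).
    logging.success('collection finished')
    # Instance method, added on logging.getLoggerClass().
    logging.getLogger('collect_tests').success('pack collected')
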
@@ -72,49 +73,53 @@ def install_logging(log_file_name: str, include_process_name=False) -> str:
         include_process_name: Whether to include the process name in the logs format, Should be used when using
             multiprocessing
         log_file_name: The name of the file in which the debug logs will be saved
+        logger: The logger to be configured, defaults to the `logging` module
     """
-    if not hasattr(logging, 'success'):
-        _add_logging_level('SUCCESS', 25)
+    if not hasattr(logger, 'success'):
+        _add_logging_level('SUCCESS', 25, logger=logger)
     logging_format = LOGGING_FORMAT
     if include_process_name:
         logging_format = '[%(asctime)s] - [%(processName)s] - [%(threadName)s] - [%(levelname)s] - %(message)s'
     formatter = coloredlogs.ColoredFormatter(fmt=logging_format,
                                              level_styles=LEVEL_STYLES)
-    ch = logging.StreamHandler(sys.stdout)
+    ch = logger.StreamHandler(sys.stdout)
     ch.setFormatter(formatter)
     log_file_path = os.path.join(ARTIFACTS_PATH, 'logs', log_file_name) if os.path.exists(
         os.path.join(ARTIFACTS_PATH, 'logs')) else os.path.join(ARTIFACTS_PATH, log_file_name)
-    fh = logging.FileHandler(log_file_path)
+    fh = logger.FileHandler(log_file_path)
     fh.setFormatter(formatter)
-    ch.setLevel(logging.INFO)
-    fh.setLevel(logging.DEBUG)
-    configure_root_logger(ch, fh)
+    ch.setLevel(logger.INFO)
+    fh.setLevel(logger.DEBUG)
+    configure_root_logger(ch, fh, logger)
     return log_file_path
 
 
-def configure_root_logger(ch: logging.StreamHandler, fh: logging.FileHandler) -> None:
+def configure_root_logger(ch: logging.StreamHandler, fh: logging.FileHandler, logger=logging) -> None:
     """
     - Configures the root logger with DEBUG level
     - Removes existing handlers from the root logger and adds the console handler and the file handler.
     Args:
         ch: StreamHandler to add to the root logger
         fh: FileHandler to add to the root logger
+        logger: The logger to configure, defaults to the `logging` module
     """
-    logging.root.setLevel(logging.DEBUG)
-    for h in logging.root.handlers[:]:
-        logging.root.removeHandler(h)
+    logger.root.setLevel(logger.DEBUG)
+    for h in logger.root.handlers[:]:
+        logger.root.removeHandler(h)
         h.close()
-    logging.root.addHandler(ch)
-    logging.root.addHandler(fh)
+    logger.root.addHandler(ch)
+    logger.root.addHandler(fh)
 
 
-def install_simple_logging():
+def install_simple_logging(logger=logging):
     """
-    This method implements logging module to print the message only with colors
+    This method configures the passed `logger` to print only the message, with colors
     This function is implemented to support backward compatibility for functions that cannot yet support the full
     `install_logging` method capabilities
+    Args:
+        logger: The logger to configure, defaults to the `logging` module
     """
-    if not hasattr(logging, 'success'):
+    if not hasattr(logger, 'success'):
         _add_logging_level('SUCCESS', 25)
     coloredlogs.install(fmt='%(message)s',
                         level_styles=LEVEL_STYLES)
diff --git a/Tests/scripts/utils/logging_wrapper/__init__.py b/Tests/scripts/utils/logging_wrapper/__init__.py
new file mode 100644
index 000000000000..0e976010f329
--- /dev/null
+++ b/Tests/scripts/utils/logging_wrapper/__init__.py
@@ -0,0 +1,15 @@
+import logging
+from logging import *
+
+# add success level in addition to levels imported from logging
+SUCCESS = 25
+
+root = logging.root
+addLevelName(SUCCESS, 'SUCCESS')
+
+
+def success(msg, *args, **kwargs):
+    """
+    Log a message with severity 'SUCCESS' on the root logger.
+ """ + root.log(SUCCESS, msg, *args, **kwargs) diff --git a/Tests/scripts/validate_index.py b/Tests/scripts/validate_index.py index 9d3d0da8f935..533b4564240f 100644 --- a/Tests/scripts/validate_index.py +++ b/Tests/scripts/validate_index.py @@ -5,14 +5,15 @@ Validate commit hash is in master's history. """ import argparse -import logging import sys import os +from typing import Tuple from Tests.Marketplace.marketplace_services import init_storage_client, load_json, get_content_git_client from Tests.Marketplace.upload_packs import download_and_extract_index from Tests.Marketplace.marketplace_constants import GCPConfig, CONTENT_ROOT_PATH from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging from pprint import pformat MANDATORY_PREMIUM_PACKS_PATH = "Tests/Marketplace/mandatory_premium_packs.json" @@ -121,7 +122,7 @@ def check_commit_in_branch_history(index_commit_hash: str, circle_branch: str) - def get_index_json_data(service_account: str, production_bucket_name: str, extract_path: str, storage_base_path: str) \ - -> (dict, str): + -> Tuple[dict, str]: """Retrieve the index.json file from production bucket. Args: @@ -147,7 +148,7 @@ def get_index_json_data(service_account: str, production_bucket_name: str, extra def main(): - install_logging("Validate index.log") + install_logging("Validate index.log", logger=logging) options = options_handler() exit_code = 0 index_data, index_file_path = get_index_json_data( diff --git a/Tests/scripts/validate_premium_packs.py b/Tests/scripts/validate_premium_packs.py index 8bd6f512b717..7614404128ec 100644 --- a/Tests/scripts/validate_premium_packs.py +++ b/Tests/scripts/validate_premium_packs.py @@ -3,6 +3,8 @@ Check the server configured on master. Validate the pack id's in the index file are present on the server and the prices match. """ +from typing import Tuple + import demisto_client import argparse import logging @@ -42,7 +44,7 @@ def options_handler(): def get_paid_packs_page(client: demisto_client, page: int = 0, size: int = DEFAULT_PAGE_SIZE, - request_timeout: int = 999999) -> (dict, int): + request_timeout: int = 999999) -> Tuple[list, int]: """Get premium packs from client. Trigger an API request to demisto server. @@ -55,8 +57,8 @@ def get_paid_packs_page(client: demisto_client, request_timeout: Timeout of API request Returns: - (Dict: premium packs as found in the server, int: Total premium packs that exist) - (None, 0) if no premium packs were found. + (list: premium packs as found in the server, int: Total premium packs that exist) + ([], 0) if no premium packs were found. """ request_data = { 'page': page, @@ -80,7 +82,7 @@ def get_paid_packs_page(client: demisto_client, _request_timeout=request_timeout) except Exception as exception: logging.error(f"Error trying to communicate with demisto server: {exception}") - return None, 0 + return [], 0 logging.debug(f"Got response data {pformat(response_data)}") response = ast.literal_eval(response_data) @@ -90,10 +92,10 @@ def get_paid_packs_page(client: demisto_client, message = response.get('message', '') logging.error(f"Failed to retrieve premium packs - with status code {status_code}\n{message}\n") - return None, 0 + return [], 0 -def get_premium_packs(client: demisto_client, request_timeout: int = 999999) -> dict: +def get_premium_packs(client: demisto_client, request_timeout: int = 999999) -> list: """Get premium packs from client. Handle the pagination. 
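
Since the failure sentinel changes from (None, 0) to ([], 0) and `get_premium_packs` now aggregates the pages into a list, callers can treat "request failed" and "no premium packs" uniformly. A hypothetical caller sketch under these assumptions (the demisto_client construction is elided and the variable names are illustrative, not taken from the patch):

    from Tests.scripts.validate_premium_packs import get_premium_packs

    server_packs = get_premium_packs(client)  # always a list now, possibly empty
    if not server_packs:
        logging.error('No premium packs were retrieved from the server')
    else:
        # each entry is a pack dict as returned by the server, carrying its pack id
        server_pack_ids = {pack.get('id') for pack in server_packs}
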
@@ -103,7 +105,7 @@ def get_premium_packs(client: demisto_client, request_timeout: int = 999999) -> request_timeout: Timeout of each API request Returns: - Dict of premium packs as found in the server. + list of premium packs as found in the server. Return None if no premium packs were found. """ server_packs, total = get_paid_packs_page(client=client, @@ -122,7 +124,7 @@ def get_premium_packs(client: demisto_client, request_timeout: int = 999999) -> page=page, size=DEFAULT_PAGE_SIZE, request_timeout=request_timeout) - server_packs.update(next_server_packs) + server_packs.extend(next_server_packs) return server_packs @@ -203,7 +205,7 @@ def verify_server_paid_packs_by_index(server_paid_packs: list, index_data_packs: return all([all_index_packs_in_server, all_server_packs_in_index]) -def extract_credentials_from_secret(secret_path: str) -> (str, str): +def extract_credentials_from_secret(secret_path: str) -> Tuple[str, str]: """Extract Credentials from secret file. Args: diff --git a/Tests/scripts/wait_until_server_ready.py b/Tests/scripts/wait_until_server_ready.py index aae66e4fb3d4..0521157db2bb 100644 --- a/Tests/scripts/wait_until_server_ready.py +++ b/Tests/scripts/wait_until_server_ready.py @@ -55,7 +55,7 @@ def docker_login(ip: str) -> None: ip: The ip of the server that should be logged in """ docker_username = os.environ.get('DOCKER_READ_ONLY_USER') - docker_password = os.environ.get('DOCKER_READ_ONLY_PASSWORD') + docker_password = os.environ.get('DOCKER_READ_ONLY_PASSWORD') or '' container_engine_type = 'podman' if is_redhat_instance(ip) else 'docker' try: check_output( @@ -71,7 +71,7 @@ def main(): global SETUP_TIMEOUT instance_name_to_wait_on = sys.argv[1] - ready_ami_list = [] + ready_ami_list: list = [] env_results_path = os.path.join(ARTIFACTS_FOLDER, 'env_results.json') with open(env_results_path, 'r') as json_file: env_results = json.load(json_file) diff --git a/Tests/test_content.py b/Tests/test_content.py index 320e653ad346..f7a451b3f653 100644 --- a/Tests/test_content.py +++ b/Tests/test_content.py @@ -8,7 +8,7 @@ import sys from contextlib import contextmanager from queue import Queue -from typing import Union, Any +from typing import Union, Any, Generator import demisto_client.demisto_api import pytz @@ -397,8 +397,8 @@ def get_json_file(path): def initialize_queue_and_executed_tests_set(tests): - tests_queue = Queue() - already_executed_test_playbooks = set() + tests_queue: Queue = Queue() + already_executed_test_playbooks: set = set() for t in tests: tests_queue.put(t) return already_executed_test_playbooks, tests_queue @@ -440,10 +440,10 @@ def add_pr_comment(comment): headers = {'Authorization': 'Bearer ' + token} try: res = requests.get(url + query, headers=headers, verify=False) - res = handle_github_response(res) + res_dict = handle_github_response(res) - if res and res.get('total_count', 0) == 1: - issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None + if res_dict and res_dict.get('total_count', 0) == 1: + issue_url = res_dict['items'][0].get('comments_url') if res_dict.get('items', []) else None if issue_url: res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False) handle_github_response(res) @@ -464,7 +464,7 @@ def handle_github_response(response): @contextmanager def acquire_test_lock(integrations_details: list, test_timeout: int, - conf_json_path: str) -> None: + conf_json_path: str) -> Generator: """ This is a context manager that handles all the locking and unlocking of integrations. 
Execution is as following: diff --git a/Tests/test_dependencies.py b/Tests/test_dependencies.py index 090f56d2f11e..2d31e3ec95d9 100644 --- a/Tests/test_dependencies.py +++ b/Tests/test_dependencies.py @@ -46,7 +46,7 @@ def add_test_graph_neighbors(self, tests_data): integration_to_tests_mapping = get_integration_to_tests_mapping(tests_data) for integration_name in integration_to_tests_mapping: tests_using_integration = integration_to_tests_mapping[integration_name] - for i in range(len(tests_using_integration)): + for i in range(len(tests_using_integration)): # pylint: disable=C0200 first_test_name = tests_using_integration[i] first_test_vertex = self.test_vertices[first_test_name] @@ -81,7 +81,7 @@ def build_tests_graph_from_conf_json(self, tests_file_path, dependent_tests): def get_integration_to_tests_mapping(tests_data): - integration_to_tests_mapping = {} + integration_to_tests_mapping: dict = {} for test_playbook_record in tests_data: record_playbook_name = test_playbook_record.get("playbookID", None) record_integrations = get_used_integrations(test_playbook_record) @@ -108,7 +108,7 @@ def get_dependent_and_independent_integrations(tests_file_path): conf_json_obj = json.loads(conf_json_string) - integration_tests_count = {} + integration_tests_count: dict = {} for test_record in conf_json_obj["tests"]: integrations_used = get_used_integrations(test_record) for integration_name in integrations_used: @@ -157,7 +157,7 @@ def get_tests_allocation_for_threads(number_of_instances, tests_file_path): dependent_tests, independent_tests, all_tests = get_test_dependencies(tests_file_path) dependent_tests_clusters = get_dependent_integrations_clusters_data(tests_file_path, dependent_tests) dependent_tests_clusters.sort(key=len, reverse=True) # Sort the clusters from biggest to smallest - tests_allocation = [] + tests_allocation: list = [] number_of_tests_left = len(all_tests) while number_of_tests_left > 0: allocations_left = number_of_instances - len(tests_allocation) diff --git a/Tests/test_integration.py b/Tests/test_integration.py index e45304e9ac69..112f8c5f9144 100644 --- a/Tests/test_integration.py +++ b/Tests/test_integration.py @@ -116,7 +116,7 @@ def __set_server_keys(client, logging_manager, integration_params, integration_n logging_manager.debug(f'Setting server keys for integration: {integration_name}') - data = { + data: dict = { 'data': {}, 'version': -1 } @@ -308,10 +308,7 @@ def __create_incident_with_playbook(client: DefaultApi, try: inc_id = response.id - except: # noqa: E722 - inc_id = 'incCreateErr' - # inc_id = response_json.get('id', 'incCreateErr') - if inc_id == 'incCreateErr': + except AttributeError: integration_names = [integration['name'] for integration in integrations if 'name' in integration] error_message = f'Failed to create incident for integration names: {integration_names} ' \ @@ -363,11 +360,7 @@ def __get_investigation_playbook_state(client, inv_id, logging_manager): ) return PB_Status.FAILED - try: - state = investigation_playbook['state'] - return state - except: # noqa: E722 - return PB_Status.NOT_SUPPORTED_VERSION + return investigation_playbook.get('state', PB_Status.NOT_SUPPORTED_VERSION) # return True if delete-incident succeeded, False otherwise @@ -465,7 +458,7 @@ def check_integration(client, server_url, demisto_user, demisto_pass, integratio logging_module=logging, options=None, is_mock_run=False): options = options if options is not None else {} # create integrations instances - module_instances = [] + module_instances: list = [] for 
integration in integrations: integration_name = integration.get('name', None) diff --git a/Tests/update_content_data.py b/Tests/update_content_data.py index 718d35389439..ee5115fdcb9d 100644 --- a/Tests/update_content_data.py +++ b/Tests/update_content_data.py @@ -2,9 +2,9 @@ import os import ast import demisto_client -import logging from Tests.scripts.utils.log_util import install_simple_logging +from Tests.scripts.utils import logging_wrapper as logging def options_handler(): @@ -46,14 +46,14 @@ def update_content(content_zip_path, server=None, username=None, password=None, result_object = ast.literal_eval(response_data) message = result_object['message'] raise Exception(f"Upload has failed with status code {status_code}\n{message}") - else: - logging.success(f'"{content_zip_path}" successfully uploaded to server "{server}"') + success_msg = f'"{content_zip_path}" successfully uploaded to server "{server}"' + logging.success(success_msg) except Exception: logging.exception(f'Failed to upload {content_zip_path} to server {server}') def main(): - install_simple_logging() + install_simple_logging(logger=logging) options = options_handler() server_url = 'https://{}' server = options.server if options.server.startswith('http') else server_url.format(options.server) diff --git a/Utils/_script_docker_python_loop_example.py b/Utils/_script_docker_python_loop_example.py index 38e387887814..9c214af0cd1c 100644 --- a/Utils/_script_docker_python_loop_example.py +++ b/Utils/_script_docker_python_loop_example.py @@ -1,3 +1,4 @@ +# type: ignore[attr-defined] """ This is a simplified example script which demonstrates the concept of how the XSOAR Server executes python integrations/scripts. @@ -442,13 +443,13 @@ def do_ping_pong(): backup_env_vars = {} -for key in os.environ.keys(): +for key in os.environ: backup_env_vars[key] = os.environ[key] def rollback_system(): os.environ = {} - for key in backup_env_vars.keys(): + for key in backup_env_vars: os.environ[key] = backup_env_vars[key] @@ -479,7 +480,7 @@ def rollback_system(): 'win': win } - exec(code, sub_globals, sub_globals) # guardrails-disable-line + exec(code, sub_globals, sub_globals) # guardrails-disable-line # pylint: disable=W0122 except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() diff --git a/Utils/delete_mismatched_branches.py b/Utils/delete_mismatched_branches.py index 8d627e42d348..3048bbad70e5 100644 --- a/Utils/delete_mismatched_branches.py +++ b/Utils/delete_mismatched_branches.py @@ -2,7 +2,7 @@ import gitlab from github import Github -from github_workflow_scripts.utils import timestamped_print, get_env_var +from Utils.github_workflow_scripts.utils import timestamped_print, get_env_var # ANSI Colors diff --git a/Utils/get_private_build_status.py b/Utils/get_private_build_status.py index e8a5acb067e4..bbc24245881c 100644 --- a/Utils/get_private_build_status.py +++ b/Utils/get_private_build_status.py @@ -3,17 +3,19 @@ import json import time import argparse +from typing import Tuple + import requests -import logging from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging from Utils.trigger_private_build import GET_WORKFLOW_URL, PRIVATE_REPO_WORKFLOW_ID_FILE, \ GET_WORKFLOWS_TIMEOUT_THRESHOLD, WORKFLOW_HTML_URL # disable insecure warnings -requests.packages.urllib3.disable_warnings() +requests.packages.urllib3.disable_warnings() # pylint: disable=no-member -def get_workflow_status(github_token: str, workflow_id: str) -> (str, str, str): +def 
get_workflow_status(github_token: str, workflow_id: str) -> Tuple[str, str, str]: """ Returns a set with the workflow job status, job conclusion and current step that running now in the job for the given workflow id. @@ -69,7 +71,7 @@ def get_workflow_status(github_token: str, workflow_id: str) -> (str, str, str): def main(): - install_logging("GetPrivateBuildStatus.log") + install_logging("GetPrivateBuildStatus.log", logger=logging) if not os.path.isfile(PRIVATE_REPO_WORKFLOW_ID_FILE): logging.info('Build private repo skipped') @@ -90,7 +92,7 @@ def main(): # initialize timer start = time.time() - elapsed = 0 + elapsed: float = 0 # polling the workflow status while is in progress while status in ['queued', 'in_progress'] and elapsed < GET_WORKFLOWS_TIMEOUT_THRESHOLD: diff --git a/Utils/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py b/Utils/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py index 227c11137969..6ea8043266bb 100755 --- a/Utils/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py +++ b/Utils/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py @@ -64,8 +64,9 @@ def get_pack_support_type_from_pr_metadata_file(pr_metadata_filename: str, pr: P print(f'Branch name is: {branch_name}') contributor_repo: Repository = pr.head.repo branch: Branch = contributor_repo.get_branch(branch=branch_name) - metadata_file: ContentFile = contributor_repo.get_contents(path=pr_metadata_filename, ref=branch.commit.sha) - metadata_file_content: dict = json.loads(base64.b64decode(metadata_file.content)) + metadata_file: ContentFile = \ + contributor_repo.get_contents(path=pr_metadata_filename, ref=branch.commit.sha) # type: ignore[assignment] + metadata_file_content: dict = json.loads(base64.b64decode(metadata_file.content)) # type: ignore[arg-type] return metadata_file_content.get(SUPPORT) diff --git a/Utils/github_workflow_scripts/delete_stale_non_contrib_branches.py b/Utils/github_workflow_scripts/delete_stale_non_contrib_branches.py index 023efe803f5a..fb9415950751 100755 --- a/Utils/github_workflow_scripts/delete_stale_non_contrib_branches.py +++ b/Utils/github_workflow_scripts/delete_stale_non_contrib_branches.py @@ -42,7 +42,7 @@ def get_non_contributor_stale_branch_names(repo: Repository) -> List[str]: # no elapsed_days = (now - last_commit_datetime).days # print(f'{elapsed_days=}') if elapsed_days >= 60: - associated_open_prs = branch.commit.get_pulls() + associated_open_prs = branch.commit.get_pulls() # type: ignore[attr-defined] associated_open_prs = [pr for pr in associated_open_prs if pr.state == 'open'] if len(associated_open_prs) < 1: branch_names.append(branch.name) diff --git a/Utils/github_workflow_scripts/handle_external_pr.py b/Utils/github_workflow_scripts/handle_external_pr.py index 402e15a6c534..32c2a3fdabac 100755 --- a/Utils/github_workflow_scripts/handle_external_pr.py +++ b/Utils/github_workflow_scripts/handle_external_pr.py @@ -44,13 +44,13 @@ def determine_reviewer(potential_reviewers: List[str], repo: Repository) -> str: pr_labels = [label.name.casefold() for label in pull.labels] if label_to_consider not in pr_labels: continue - assignees = set([assignee.login for assignee in pull.assignees]) + assignees = {assignee.login for assignee in pull.assignees} requested_reviewers, _ = pull.get_review_requests() - requested_reviewers = set([requested_reviewer.login for requested_reviewer in requested_reviewers]) - combined_list = assignees.union(requested_reviewers) + reviewers_info = {requested_reviewer.login 
for requested_reviewer in requested_reviewers} + combined_list = assignees.union(reviewers_info) for reviewer in potential_reviewers: if reviewer in combined_list: - assigned_prs_per_potential_reviewer[reviewer] = assigned_prs_per_potential_reviewer.get(reviewer) + 1 + assigned_prs_per_potential_reviewer[reviewer] = assigned_prs_per_potential_reviewer.get(reviewer, 0) + 1 selected_reviewer = sorted(assigned_prs_per_potential_reviewer, key=assigned_prs_per_potential_reviewer.get)[0] return selected_reviewer diff --git a/Utils/github_workflow_scripts/run_secrets_detection.py b/Utils/github_workflow_scripts/run_secrets_detection.py index 971167b0a294..6f77fa432318 100755 --- a/Utils/github_workflow_scripts/run_secrets_detection.py +++ b/Utils/github_workflow_scripts/run_secrets_detection.py @@ -44,7 +44,7 @@ def trigger_generic_webhook(options): sys.exit(1) res_json = res.json() - if res_json and type(res_json) == list: + if res_json and isinstance(res_json, list): res_json_response_data = res.json()[0] if res_json_response_data: investigation_id = res_json_response_data.get("id") diff --git a/Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py b/Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py index f504dbe98a18..3ec37244c440 100755 --- a/Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py +++ b/Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py @@ -28,11 +28,7 @@ def get_playbook_state(client: demisto_client, inv_id: str): print('Failed to get investigation playbook state, error trying to communicate with demisto server') return PB_Status.FAILED - try: - state = investigation_playbook['state'] - return state - except: # noqa: E722 - return PB_Status.NOT_SUPPORTED_VERSION + return investigation_playbook.get('state', PB_Status.NOT_SUPPORTED_VERSION) def wait_for_playbook_to_complete(investigation_id, client): diff --git a/Utils/github_workflow_scripts/send_slack_message.py b/Utils/github_workflow_scripts/send_slack_message.py index 2f03548b0d4b..bba5c595b91e 100755 --- a/Utils/github_workflow_scripts/send_slack_message.py +++ b/Utils/github_workflow_scripts/send_slack_message.py @@ -7,7 +7,7 @@ from utils import get_env_var import json import requests -from github import Github, PaginatedList, File, PullRequest +from github import Github, File, PullRequest import urllib3 from pprint import pformat @@ -17,7 +17,7 @@ SLACK_CHANNEL_TO_SEND_PR_TO = 'contribution-reviews' -def get_metadata_file(file: File) -> dict: +def get_metadata_file(file: File.File) -> dict: """Perform a GET request to receive a given file content Args: @@ -30,7 +30,7 @@ def get_metadata_file(file: File) -> dict: try: response_json = requests.get(raw_url, verify=False).json() except ValueError: - raise Exception(f'{file.filename} is not a well-formatted metadata.json file') + raise Exception(f'{file.filename} is not a well-formatted metadata.json file') # pylint: disable=W0707 return response_json @@ -89,9 +89,9 @@ def create_individual_pack_segment(metadata_obj: dict) -> List[dict]: Returns: (List): List of slack blocks representing the pack information """ - pack_name = metadata_obj.get('name') - version = metadata_obj.get('currentVersion') - support = metadata_obj.get('support') + pack_name: str = metadata_obj.get('name', '') + version: str = metadata_obj.get('currentVersion', '') + support: str = metadata_obj.get('support', '') pack_details = [ create_slack_section('Pack Name', pack_name), @@ -104,11 +104,11 @@ def 
create_individual_pack_segment(metadata_obj: dict) -> List[dict]: return pack_details -def create_packs_segment(metadata_files: PaginatedList) -> List[dict]: +def create_packs_segment(metadata_files: list) -> List[dict]: """Aggregate the pack information segments of the message Args: - metadata_files (PaginatedList): List of File objects representing metadata files + metadata_files (List): List of File objects representing metadata files Returns: (List): List of slack blocks representing all packs information @@ -121,7 +121,7 @@ def create_packs_segment(metadata_files: PaginatedList) -> List[dict]: return all_packs -def create_pull_request_segment(pr: PullRequest) -> List[dict]: +def create_pull_request_segment(pr: PullRequest.PullRequest) -> List[dict]: """Create the pull request information segment of the message Args: @@ -143,7 +143,7 @@ def create_pull_request_segment(pr: PullRequest) -> List[dict]: return [pr_info_segment, {'text': create_slack_markdown(f'*URL:* `{pr.html_url}`'), 'type': 'section'}] -def create_pr_title(pr: PullRequest) -> List[dict]: +def create_pr_title(pr: PullRequest.PullRequest) -> List[dict]: """Create the message title Args: diff --git a/Utils/merge_content_new_zip.py b/Utils/merge_content_new_zip.py index a604901a29a4..4ed8399f8e5e 100755 --- a/Utils/merge_content_new_zip.py +++ b/Utils/merge_content_new_zip.py @@ -28,7 +28,8 @@ def download_zip_file_from_gcp(current_feature_branch_zip_file_path, zip_destina The new path of the zip file. """ file_path = os.environ.get('GCS_ARTIFACTS_KEY') - os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = file_path + if file_path: + os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = file_path storage_client = storage.Client() storage_bucket = storage_client.bucket(STORAGE_BUCKET_NAME) diff --git a/Utils/release_notes_generator.py b/Utils/release_notes_generator.py index 3dd2e4c289ee..551b36e0a16e 100644 --- a/Utils/release_notes_generator.py +++ b/Utils/release_notes_generator.py @@ -131,7 +131,7 @@ def construct_entities_block(entities_data: dict) -> str: def get_pack_entities(pack_path): logging.info(f'Processing "{pack_path}" files:') - pack_entities = sum([ + pack_entities: list = sum([ glob.glob(f'{pack_path}/*/*.json'), glob.glob(f'{pack_path}/*/*.yml'), glob.glob(f'{pack_path}/*/*/*.yml')], []) @@ -243,8 +243,8 @@ def get_release_notes_dict(release_notes_files): (dict) A mapping from pack names to dictionaries of pack versions to release notes. 
(dict) A mapping from pack name to the pack metadata object """ - release_notes_dict = {} - packs_metadata_dict = {} + release_notes_dict: dict = {} + packs_metadata_dict: dict = {} for file_path in release_notes_files: pack_path = get_pack_path_from_release_note(file_path) pack_metadata = get_pack_metadata(pack_path) @@ -311,7 +311,7 @@ def merge_version_blocks(pack_versions_dict: dict) -> Tuple[str, str]: """ latest_version = '1.0.0' - entities_data = {} + entities_data: dict = {} for pack_version, version_release_notes in sorted(pack_versions_dict.items(), key=lambda pack_item: LooseVersion(pack_item[0])): latest_version = pack_version diff --git a/Utils/request_contributor_review.py b/Utils/request_contributor_review.py index c6aa1afa926a..3a3f2547b684 100644 --- a/Utils/request_contributor_review.py +++ b/Utils/request_contributor_review.py @@ -7,7 +7,9 @@ import requests import sendgrid import sys -from sendgrid.helpers.mail import * + +import urllib3 +from sendgrid.helpers.mail import Email, Content, Mail REPO_OWNER = "demisto" REPO_NAME = "content" @@ -35,11 +37,7 @@ def check_if_user_exists(github_user, github_token=None, verify_ssl=True): sys.exit(1) github_user_info = response.json() - - if 'id' in github_user_info: - return True - else: - return False + return 'id' in github_user_info def get_pr_author(pr_number, github_token, verify_ssl): @@ -105,7 +103,7 @@ def get_pr_tagged_reviewers(pr_number, github_token, verify_ssl, pack): response = requests.get(comments_endpoint, headers=headers, verify=verify_ssl) - if response.status_code != requests.codes.ok: + if response.status_code != 200: print(f"Failed requesting PR {pr_number} comments:\n{response.text}") sys.exit(1) @@ -291,7 +289,7 @@ def main(): pr_number = args.pr_number github_token = args.github_token - verify_ssl = True if github_token else False + verify_ssl = bool(github_token) email_api_token = args.email_api_token if args.email_api_token else '' if not verify_ssl: diff --git a/Utils/tests/comment_on_pr_test.py b/Utils/tests/comment_on_pr_test.py index 67d830559513..143256a4a89d 100644 --- a/Utils/tests/comment_on_pr_test.py +++ b/Utils/tests/comment_on_pr_test.py @@ -23,7 +23,7 @@ } ] -github_comment_response_3 = [] +github_comment_response_3: list = [] def test_get_pr_comments_url_existing(requests_mock): diff --git a/Utils/tests/update_contribution_pack_in_base_branch_test.py b/Utils/tests/update_contribution_pack_in_base_branch_test.py index 608660333fe0..712b78c0226d 100755 --- a/Utils/tests/update_contribution_pack_in_base_branch_test.py +++ b/Utils/tests/update_contribution_pack_in_base_branch_test.py @@ -82,7 +82,7 @@ } ] -github_response_4 = [] +github_response_4: list = [] def pack_names(files): diff --git a/Utils/trigger_private_build.py b/Utils/trigger_private_build.py index 117169bb2621..1dfff0196d2c 100644 --- a/Utils/trigger_private_build.py +++ b/Utils/trigger_private_build.py @@ -5,13 +5,13 @@ import time import argparse import requests -import logging from typing import List import demisto_sdk.commands.common.tools as tools from Tests.scripts.utils.log_util import install_logging +from Tests.scripts.utils import logging_wrapper as logging # disable insecure warnings -requests.packages.urllib3.disable_warnings() +requests.packages.urllib3.disable_warnings() # pylint: disable=no-member PRIVATE_BUILD_INFRA_SCRIPTS = ['Tests/scripts/validate_premium_packs.sh', 'Tests/scripts/validate_premium_packs.py', 'Tests/scripts/validate_index.py'] @@ -107,7 +107,7 @@ def get_dispatch_workflows_ids(github_token: 
str, branch: str) -> List[int]: def main(): - install_logging("TriggerPrivateBuild.log") + install_logging("TriggerPrivateBuild.log", logger=logging) # get github token parameter arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--github-token', help='Github token') @@ -118,7 +118,8 @@ def main(): # get branch name branches = tools.run_command("git branch") branch_name_regex = re.search(r"\* (.*)", branches) - branch_name = branch_name_regex.group(1) + if branch_name_regex: + branch_name = branch_name_regex.group(1) if branch_has_private_build_infra_change(branch_name): # get the workflows ids before triggering the build diff --git a/Utils/update_branch_from_version.py b/Utils/update_branch_from_version.py index 9de13afffc6d..8c3432369910 100644 --- a/Utils/update_branch_from_version.py +++ b/Utils/update_branch_from_version.py @@ -9,8 +9,10 @@ import shutil ryaml = YAML() -ryaml.preserve_quotes = True -ryaml.width = 50000 # make sure long lines will not break (relevant for code section) +ryaml.preserve_quotes = True # type: ignore[assignment] + +# make sure long lines will not break (relevant for code section) +ryaml.width = 50000 # type: ignore[assignment] DOCKERIMAGE_45_TOP_VERSION = '4.5.9' diff --git a/Utils/update_playbook.py b/Utils/update_playbook.py index 2d4bedadecd2..6a01a62a104f 100644 --- a/Utils/update_playbook.py +++ b/Utils/update_playbook.py @@ -117,7 +117,7 @@ def update_playbook(source_path, destination_path): destination_path = "playbook-{}".format(destination_path) # Configure safe dumper (multiline for strings) - yaml.SafeDumper.org_represent_str = yaml.SafeDumper.represent_str + yaml.SafeDumper.org_represent_str = yaml.SafeDumper.represent_str # type: ignore[attr-defined] def repr_str(dumper, data): if '\n' in data: diff --git a/Utils/upload_code_coverage_report.py b/Utils/upload_code_coverage_report.py index 3bb9d7efe80f..c1eda9bc0c89 100644 --- a/Utils/upload_code_coverage_report.py +++ b/Utils/upload_code_coverage_report.py @@ -16,7 +16,7 @@ def create_minimal_report(source_file: str, destination_file: str) -> Tuple[bool, str]: if not os.path.isfile(source_file): print(f'File {source_file} does not exist.') - return False, {} + return False, '' with open(source_file, 'r') as cov_util_output: data = json.load(cov_util_output) @@ -24,7 +24,7 @@ def create_minimal_report(source_file: str, destination_file: str) -> Tuple[bool # Check that we were able to read the json report correctly if not data or 'files' not in data: print(f'Empty file, or unable to read contents of {source_file}.') - return False, {} + return False, '' minimal_coverage_contents_files: Dict[str, float] = {} files = data['files'] diff --git a/release_notes.py b/release_notes.py index d45c51c1544f..79be80a3e63a 100644 --- a/release_notes.py +++ b/release_notes.py @@ -646,7 +646,7 @@ def main(): 'these steps will merge your branch with content master as a base.') sys.exit(1) else: - modified_files, added_files, removed_files, _, _, _ = filter_changed_files(change_log) + modified_files, added_files, removed_files, _, _, _, _ = filter_changed_files(change_log) modified_files, added_files, removed_files = filter_packagify_changes(modified_files, added_files, removed_files, tag=tag) diff --git a/release_notes_clear.py b/release_notes_clear.py index 795567fa39fe..7e09f0065550 100755 --- a/release_notes_clear.py +++ b/release_notes_clear.py @@ -33,7 +33,7 @@ def get_file_data(file_path): load_function = FILE_TYPE_DICT[extension] with open(file_path, 'r') as file_obj: - data = 
load_function(file_obj) + data = load_function(file_obj) # type: ignore[operator] return data
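
Taken together, the recurring pattern in this diff (drop `import logging`, import `Tests/scripts/utils/logging_wrapper` under the same name, and hand it to `install_logging`) can be summarized by the sketch below. The step name and log file name are placeholders, and it assumes the artifacts folder used by `install_logging` is writable:

    from Tests.scripts.utils.log_util import install_logging
    from Tests.scripts.utils import logging_wrapper as logging


    def main():
        # Console handler at INFO, file handler at DEBUG, both attached to the wrapper's root logger.
        install_logging('Some_CI_Step.log', logger=logging)
        logging.info('step started')
        logging.success('step finished')  # SUCCESS (25) is provided by the wrapper itself


    if __name__ == '__main__':
        main()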