From e140ef04eb49c17ac82292ab349c33eb4646e23c Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 23 Jul 2024 13:51:06 +0500 Subject: [PATCH 01/78] chore: replace paver quality tests --- .github/workflows/quality-checks.yml | 7 +- Makefile | 8 +- package.json | 1 + pavelib/prereqs.py | 28 +- pavelib/quality_test.py | 654 +++++++++++++++++++++++++++ pavelib/utils/envs.py | 40 +- pavelib/utils/test/utils.py | 35 +- scripts/generic-ci-tests.sh | 2 +- 8 files changed, 738 insertions(+), 37 deletions(-) create mode 100644 pavelib/quality_test.py diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index cf8ffd5d2910..442caacfc95d 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -63,13 +63,10 @@ jobs: - name: Run Quality Tests env: - TEST_SUITE: quality - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} - run: | - ./scripts/all-tests.sh - + run: make quality + - name: Save Job Artifacts if: always() uses: actions/upload-artifact@v4 diff --git a/Makefile b/Makefile index 098236fed8cb..f782b21d8fe9 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ guides help lint-imports local-requirements migrate migrate-lms migrate-cms \ pre-requirements pull pull_xblock_translations pull_translations push_translations \ requirements shell swagger \ - technical-docs test-requirements ubuntu-requirements upgrade-package upgrade + technical-docs test-requirements ubuntu-requirements upgrade-package upgrade pep8_test # Careful with mktemp syntax: it has to work on Mac and Ubuntu, which have differences. PRIVATE_FILES := $(shell mktemp -u /tmp/private_files.XXXXXX) @@ -202,3 +202,9 @@ migrate: migrate-lms migrate-cms # Part of https://github.com/openedx/wg-developer-experience/issues/136 ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev + +quality: + python pavelib/quality_test.py all + +pep8_test: + python pavelib/quality_test.py pep8 diff --git a/package.json b/package.json index d182d83d3151..9632deac31df 100644 --- a/package.json +++ b/package.json @@ -82,6 +82,7 @@ "which-country": "1.0.0" }, "devDependencies": { + "@edx/eslint-config": "^3.1.1", "@edx/eslint-config": "^3.1.1", "@edx/mockprock": "github:openedx/mockprock#3ad18c6888e6521e9bf7a4df0db6f8579b928235", "@edx/stylelint-config-edx": "2.3.3", diff --git a/pavelib/prereqs.py b/pavelib/prereqs.py index 4453176c94da..130de1022f34 100644 --- a/pavelib/prereqs.py +++ b/pavelib/prereqs.py @@ -8,12 +8,12 @@ import re import subprocess import sys +import shutil from distutils import sysconfig # pylint: disable=deprecated-module from paver.easy import sh, task # lint-amnesty, pylint: disable=unused-import - -from .utils.envs import Env -from .utils.timer import timed +from pavelib.utils.envs import Env +from pavelib.utils.timer import timed PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache') NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs" @@ -138,13 +138,18 @@ def node_prereqs_installation(): # # This hack should probably be left in place for at least a year. # See ADR 17 for more background on the transition. - sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") + # sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") # At the time of this writing, the js dir has git-versioned files # but the css dir does not, so the latter would have been created # as root-owned (in the process of creating the vendor # subdirectory). Delete it only if empty, just in case # git-versioned files are added later. - sh("rmdir common/static/common/css || true") + # sh("rmdir common/static/common/css || true") + try: + shutil.rmtree("common/static/common/js/vendor/ common/static/common/css/vendor/") + os.rmdir("common/static/common/css") + except OSError: + pass # NPM installs hang sporadically. Log the installation process so that we # determine if any packages are chronic offenders. @@ -177,11 +182,18 @@ def python_prereqs_installation(): def pip_install_req_file(req_file): """Pip install the requirements file.""" pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w' - sh(f"{pip_cmd} -r {req_file}") + command = f"{pip_cmd} -r {req_file}" + result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) + + if result.returncode != 0: + print(f"Error: pip command exited with non-zero status {result.returncode}") + print(f"stdout: {result.stdout}") + print(f"stderr: {result.stderr}") + else: + print("Pip install completed successfully.") @task -@timed def install_node_prereqs(): """ Installs Node prerequisites @@ -293,8 +305,6 @@ def install_coverage_prereqs(): pip_install_req_file(COVERAGE_REQ_FILE) -@task -@timed def install_python_prereqs(): """ Installs Python prerequisites. diff --git a/pavelib/quality_test.py b/pavelib/quality_test.py new file mode 100644 index 000000000000..c3f571d4623b --- /dev/null +++ b/pavelib/quality_test.py @@ -0,0 +1,654 @@ +""" # lint-amnesty, pylint: disable=django-not-configured +Check code quality using pycodestyle, pylint, and diff_quality. +""" + +import json +import os +import re +import sys +import subprocess +import shutil + +import argparse +from pavelib.utils.envs import Env +from pavelib.prereqs import install_node_prereqs +from pavelib.prereqs import install_python_prereqs +from pavelib.utils.test.utils import ensure_clean_package_lock +from datetime import datetime +from xml.sax.saxutils import quoteattr + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + + +JUNIT_XML_TEMPLATE = """ + +{failure_element} + +""" +JUNIT_XML_FAILURE_TEMPLATE = '' +START_TIME = datetime.utcnow() + + +class BuildFailure(Exception): + """Represents a problem with some part of the build's execution.""" + + +def write_junit_xml(name, message=None): + """ + Write a JUnit results XML file describing the outcome of a quality check. + """ + if message: + failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) + else: + failure_element = '' + data = { + 'failure_count': 1 if message else 0, + 'failure_element': failure_element, + 'name': name, + 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), + } + Env.QUALITY_DIR.makedirs_p() + filename = Env.QUALITY_DIR / f'{name}.xml' + with open(filename, 'w') as f: + f.write(JUNIT_XML_TEMPLATE.format(**data)) + + +def fail_quality(name, message): + """ + Fail the specified quality check by generating the JUnit XML results file + and raising a ``BuildFailure``. + """ + write_junit_xml(name, message) + sys.exit() + + +def _get_pep8_violations(clean=True): + """ + Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) + where violations_string is a string of all PEP 8 violations found, separated + by new lines. + """ + report_dir = (Env.REPORT_DIR / 'pep8') + if clean: + report_dir.rmtree(ignore_errors=True) + report_dir.makedirs_p() + report = report_dir / 'pep8.report' + + # Make sure the metrics subdirectory exists + Env.METRICS_DIR.makedirs_p() + + if not report.exists(): + # sh(f'pycodestyle . | tee {report} -a') + with open(report, 'w') as f: + result = subprocess.run(['pycodestyle', '.'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + f.write(result.stdout.decode()) + + violations_list = _pep8_violations(report) + + return len(violations_list), violations_list + + +def _pep8_violations(report_file): + """ + Returns the list of all PEP 8 violations in the given report_file. + """ + with open(report_file) as f: + return f.readlines() + + +def run_pep8(): # pylint: disable=unused-argument + """ + Run pycodestyle on system code. + Fail the task if any violations are found. + """ + (count, violations_list) = _get_pep8_violations() + violations_list = ''.join(violations_list) + + # Print number of violations to log + violations_count_str = f"Number of PEP 8 violations: {count}" + print(violations_count_str) + print(violations_list) + + # Also write the number of violations to a file + with open(Env.METRICS_DIR / "pep8", "w") as f: + f.write(violations_count_str + '\n\n') + f.write(violations_list) + + # Fail if any violations are found + if count: + failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str + failure_string += f"\n\nViolations:\n{violations_list}" + fail_quality('pep8', failure_string) + else: + write_junit_xml('pep8') + + +def _prepare_report_dir(dir_name): + """ + Sets a given directory to a created, but empty state + """ + if os.path.isdir(dir_name): + shutil.rmtree(dir_name) + os.makedirs(dir_name, exist_ok=True) + + +def _write_metric(metric, filename): + """ + Write a given metric to a given file + Used for things like reports/metrics/eslint, which will simply tell you the number of + eslint violations found + """ + Env.METRICS_DIR.makedirs_p() + + with open(filename, "w") as metric_file: + metric_file.write(str(metric)) + + +def _get_report_contents(filename, report_name, last_line_only=False): + """ + Returns the contents of the given file. Use last_line_only to only return + the last line, which can be used for getting output from quality output + files. + + Arguments: + last_line_only: True to return the last line only, False to return a + string with full contents. + + Returns: + String containing full contents of the report, or the last line. + + """ + if os.path.isfile(filename): + with open(filename) as report_file: + if last_line_only: + lines = report_file.readlines() + for line in reversed(lines): + if line != '\n': + return line + return None + else: + return report_file.read() + else: + file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" + fail_quality(report_name, file_not_found_message) + + +def _get_count_from_last_line(filename, file_type): + """ + This will return the number in the last line of a file. + It is returning only the value (as a floating number). + """ + report_contents = _get_report_contents(filename, file_type, last_line_only=True) + + if report_contents is None: + return 0 + + last_line = report_contents.strip() + # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" + regex = r'^\d+' + + try: + return float(re.search(regex, last_line).group(0)) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + return None + + +def _get_stylelint_violations(): + """ + Returns the number of Stylelint violations. + """ + stylelint_report_dir = (Env.REPORT_DIR / "stylelint") + stylelint_report = stylelint_report_dir / "stylelint.report" + _prepare_report_dir(stylelint_report_dir) + formatter = 'node_modules/stylelint-formatter-pretty' + + command = f"stylelint **/*.scss --custom-formatter={formatter}" + with open(stylelint_report, 'w') as report_file: + result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + report_file.write(result.stdout) + + try: + return int(_get_count_from_last_line(stylelint_report, "stylelint")) + except TypeError: + fail_quality( + 'stylelint', + "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( + stylelint_report=stylelint_report + ) + ) + + +def run_eslint(): + """ + Runs eslint on static asset directories. + If limit option is passed, fails build if more violations than the limit are found. + """ + + eslint_report_dir = (Env.REPORT_DIR / "eslint") + eslint_report = eslint_report_dir / "eslint.report" + _prepare_report_dir(eslint_report_dir) + violations_limit = 4950 + + command = ( + "node --max_old_space_size=4096 node_modules/.bin/eslint " + "--ext .js --ext .jsx --format=compact ." + ) + with open(eslint_report, 'w') as report_file: + # Run the command + result = subprocess.run( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + check=True + ) + + # Write the output to the report file + report_file.write(result.stdout) + + try: + num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) + except TypeError: + fail_quality( + 'eslint', + "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( + eslint_report=eslint_report + ) + ) + + # Record the metric + _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) + + # Fail if number of violations is greater than the limit + if num_violations > violations_limit > -1: + fail_quality( + 'eslint', + "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, violations_limit=violations_limit + ) + ) + else: + write_junit_xml('eslint') + + +def run_stylelint(): + """ + Runs stylelint on Sass files. + If limit option is passed, fails build if more violations than the limit are found. + """ + + violations_limit = 0 + num_violations = _get_stylelint_violations() + + # Record the metric + _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) + + # Fail if number of violations is greater than the limit + if num_violations > violations_limit: + fail_quality( + 'stylelint', + "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, + violations_limit=violations_limit, + ) + ) + else: + write_junit_xml('stylelint') + + +def _extract_missing_pii_annotations(filename): + """ + Returns the number of uncovered models from the stdout report of django_find_annotations. + + Arguments: + filename: Filename where stdout of django_find_annotations was captured. + + Returns: + three-tuple containing: + 1. The number of uncovered models, + 2. A bool indicating whether the coverage is still below the threshold, and + 3. The full report as a string. + """ + uncovered_models = 0 + pii_check_passed = True + if os.path.isfile(filename): + with open(filename) as report_file: + lines = report_file.readlines() + + # Find the count of uncovered models. + uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') + for line in lines: + uncovered_match = uncovered_regex.match(line) + if uncovered_match: + uncovered_models = int(uncovered_match.groups()[0]) + break + + # Find a message which suggests the check failed. + failure_regex = re.compile(r'^Coverage threshold not met!') + for line in lines: + failure_match = failure_regex.match(line) + if failure_match: + pii_check_passed = False + break + + # Each line in lines already contains a newline. + full_log = ''.join(lines) + else: + fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') + + return (uncovered_models, pii_check_passed, full_log) + + +def run_pii_check(): + """ + Guarantee that all Django models are PII-annotated. + """ + + pii_report_name = 'pii' + default_report_dir = (Env.REPORT_DIR / pii_report_name) + report_dir = default_report_dir + output_file = os.path.join(report_dir, 'pii_check_{}.report') + env_report = [] + pii_check_passed = True + for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): + try: + print() + print(f"Running {env_name} PII Annotation check and report") + print("-" * 45) + run_output_file = str(output_file).format(env_name.lower()) + os.makedirs(report_dir, exist_ok=True) + command = ( + f"export DJANGO_SETTINGS_MODULE={env_settings_file}; " + "code_annotations django_find_annotations " + f"--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()} " + f"--lint --report --coverage | tee {run_output_file}" + ) + result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + + with open(run_output_file, 'w') as f: + f.write(result.stdout) + + uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) + env_report.append(( + uncovered_model_count, + full_log, + )) + + except BuildFailure as error_message: + fail_quality(pii_report_name, f'FAILURE: {error_message}') + + if not pii_check_passed_env: + pii_check_passed = False + + # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. + uncovered_count, full_log = max(env_report, key=lambda r: r[0]) + + # Write metric file. + if uncovered_count is None: + uncovered_count = 0 + metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" + _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) + + # Finally, fail the paver task if code_annotations suggests that the check failed. + if not pii_check_passed: + fail_quality('pii', full_log) + + +def check_keywords(): + """ + Check Django model fields for names that conflict with a list of reserved keywords + """ + + report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') + os.makedirs(report_path, exist_ok=True) + + overall_status = True + for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: + report_file = f"{env}_reserved_keyword_report.csv" + override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") + try: + command = ( + f"export DJANGO_SETTINGS_MODULE={env_settings_file}; " + f"python manage.py {env} check_reserved_keywords " + f"--override_file {override_file} " + f"--report_path {report_path} " + f"--report_file {report_file}".format( + settings_file=env_settings_file, app=env, override_file=override_file, + report_path=report_path, report_file=report_file + ) + ) + result = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + except BuildFailure: + overall_status = False + + if not overall_status: + fail_quality( + 'keywords', + 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( + report_path + ) + ) + + +def _get_xsslint_counts(filename): + """ + This returns a dict of violations from the xsslint report. + + Arguments: + filename: The name of the xsslint report. + + Returns: + A dict containing the following: + rules: A dict containing the count for each rule as follows: + violation-rule-id: N, where N is the number of violations + total: M, where M is the number of total violations + + """ + report_contents = _get_report_contents(filename, 'xsslint') + rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) + total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) + violations = {'rules': {}} + for violation_match in rule_count_regex.finditer(report_contents): + try: + violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) + except ValueError: + violations['rules'][violation_match.group('rule_id')] = None + try: + violations['total'] = int(total_count_regex.search(report_contents).group('count')) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + violations['total'] = None + return violations + + +def run_xsslint(): + """ + Runs xsslint/xss_linter.py on the codebase + """ + + try: + thresholds_option = 'scripts/xsslint_thresholds.json' + # Read the JSON file + with open(thresholds_option, 'r') as file: + violation_thresholds = json.load(file) + + except ValueError: + violation_thresholds = None + if isinstance(violation_thresholds, dict) is False or \ + any(key not in ("total", "rules") for key in violation_thresholds.keys()): + + fail_quality( + 'xsslint', + """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" + """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ + """with property names in double-quotes.""".format( + thresholds_option=thresholds_option + ) + ) + + xsslint_script = "xss_linter.py" + xsslint_report_dir = (Env.REPORT_DIR / "xsslint") + xsslint_report = xsslint_report_dir / "xsslint.report" + _prepare_report_dir(xsslint_report_dir) + + # Prepare the command to run the xsslint script + command = ( + f"{Env.REPO_ROOT}/scripts/xsslint/{xsslint_script} " + f"--rule-totals --config=scripts.xsslint_config >> {xsslint_report}" + ) + + result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + xsslint_counts = _get_xsslint_counts(xsslint_report) + + try: + metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( + xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) + ) + if 'rules' in xsslint_counts and any(xsslint_counts['rules']): + metrics_str += "\n" + rule_keys = sorted(xsslint_counts['rules'].keys()) + for rule in rule_keys: + metrics_str += "{rule} violations: {count}\n".format( + rule=rule, + count=int(xsslint_counts['rules'][rule]) + ) + except TypeError: + fail_quality( + 'xsslint', + "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( + xsslint_script=xsslint_script, xsslint_report=xsslint_report + ) + ) + + metrics_report = (Env.METRICS_DIR / "xsslint") + # Record the metric + _write_metric(metrics_str, metrics_report) + # Print number of violations to log. + command = f"cat {metrics_report}" + # Print number of violations to log. + subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + + error_message = "" + # Test total violations against threshold. + if 'total' in list(violation_thresholds.keys()): + if violation_thresholds['total'] < xsslint_counts['total']: + error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( + count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] + ) + + # Test rule violations against thresholds. + if 'rules' in violation_thresholds: + threshold_keys = sorted(violation_thresholds['rules'].keys()) + for threshold_key in threshold_keys: + if threshold_key not in xsslint_counts['rules']: + error_message += ( + "\nNumber of {xsslint_script} violations for {rule} could not be found in " + "{xsslint_report}." + ).format( + xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report + ) + elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: + error_message += \ + "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( + rule=threshold_key, count=xsslint_counts['rules'][threshold_key], + violations_limit=violation_thresholds['rules'][threshold_key], + ) + + if error_message: + fail_quality( + 'xsslint', + "FAILURE: XSSLinter Failed.\n{error_message}\n" + "See {xsslint_report} or run the following command to hone in on the problem:\n" + " ./scripts/xss-commit-linter.sh -h".format( + error_message=error_message, xsslint_report=xsslint_report + ) + ) + else: + write_junit_xml('xsslint') + + +def diff_coverage(): + """ + Build the diff coverage reports + """ + + compare_branch = 'origin/master' + + # Find all coverage XML files (both Python and JavaScript) + xml_reports = [] + + for filepath in Env.REPORT_DIR.walk(): + if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): + xml_reports.append(filepath) + + if not xml_reports: + err_msg = colorize( + 'red', + "No coverage info found. Run `paver test` before running " + "`paver coverage`.\n" + ) + sys.stderr.write(err_msg) + else: + xml_report_str = ' '.join(xml_reports) + diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + + # Generate the diff coverage reports (HTML and console) + # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 + command = ( + f"diff-cover {xml_report_str}" + f"--diff-range-notation '..'" + f"--compare-branch={compare_branch} " + f"--html-report {diff_html_path}" + ) + subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("command", choices=['pep8', 'eslint', 'stylelint', + 'xsslint', 'pii_check', 'check_keywords', 'all']) + + argument = parser.parse_args() + + if argument.command == 'pep8': + run_pep8() + + elif argument.command == 'eslint': + ensure_clean_package_lock() + install_node_prereqs() + run_eslint() + + elif argument.command == 'stylelint': + install_node_prereqs() + run_stylelint() + + elif argument.command == 'xsslint': + install_python_prereqs() + run_xsslint() + + elif argument.command == 'pii_check': + install_python_prereqs() + run_pii_check() + + elif argument.command == 'check_keywords': + install_python_prereqs() + check_keywords() + + elif argument.command == 'all': + run_pep8() + ensure_clean_package_lock() + install_node_prereqs() + run_eslint() + run_stylelint() + run_xsslint() + install_python_prereqs() + run_pii_check() + check_keywords() + diff_coverage() diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py index d2cdd4a77d7a..8783090a4c14 100644 --- a/pavelib/utils/envs.py +++ b/pavelib/utils/envs.py @@ -5,12 +5,11 @@ import json import os import sys +import subprocess from time import sleep from lazy import lazy from path import Path as path -from paver.easy import BuildFailure, sh - from pavelib.utils.cmd import django_cmd @@ -156,22 +155,35 @@ def get_django_settings(cls, django_settings, system, settings=None, print_setti settings_length = len(django_settings) django_settings = ' '.join(django_settings) # parse_known_args makes a list again print_setting_args = ' '.join(print_setting_args or []) + try: - value = sh( - django_cmd( - system, - settings, - "print_setting {django_settings} 2>{log_file} {print_setting_args}".format( - django_settings=django_settings, - print_setting_args=print_setting_args, - log_file=cls.PRINT_SETTINGS_LOG_FILE - ).strip() - ), - capture=True + command = django_cmd( + system, + settings, + "print_setting {django_settings} 2>{log_file} {print_setting_args}".format( + django_settings=django_settings, + print_setting_args=print_setting_args, + log_file=cls.PRINT_SETTINGS_LOG_FILE + ).strip() ) + + result = subprocess.run(command, shell=True, capture_output=True, text=True, check=True) + value = result.stdout.strip() + # value = sh( + # django_cmd( + # system, + # settings, + # "print_setting {django_settings} 2>{log_file} {print_setting_args}".format( + # django_settings=django_settings, + # print_setting_args=print_setting_args, + # log_file=cls.PRINT_SETTINGS_LOG_FILE + # ).strip() + # ), + # capture=True + # ) # else for cases where values are not found & sh returns one None value return tuple(str(value).splitlines()) if value else tuple(None for _ in range(settings_length)) - except BuildFailure: + except subprocess.CalledProcessError: print(f"Unable to print the value of the {django_settings} setting:") with open(cls.PRINT_SETTINGS_LOG_FILE) as f: print(f.read()) diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py index 0851251e2222..7e224296e942 100644 --- a/pavelib/utils/test/utils.py +++ b/pavelib/utils/test/utils.py @@ -4,7 +4,7 @@ import os - +import subprocess from paver.easy import cmdopts, sh, task from pavelib.utils.envs import Env @@ -37,16 +37,37 @@ def clean_test_files(): sh("rm -rf /tmp/mako_[cl]ms") -@task -@timed +# @task +# @timed +# def ensure_clean_package_lock(): +# """ +# Ensure no untracked changes have been made in the current git context. +# """ +# sh(""" +# git diff --name-only --exit-code package-lock.json || +# (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1) +# """) + + def ensure_clean_package_lock(): """ Ensure no untracked changes have been made in the current git context. """ - sh(""" - git diff --name-only --exit-code package-lock.json || - (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1) - """) + try: + # Run git diff command to check for changes in package-lock.json + result = subprocess.run( + ["git", "diff", "--name-only", "--exit-code", "package-lock.json"], + capture_output=True, # Capture stdout and stderr + text=True, # Decode output to text + check=True # Raise error for non-zero exit code + ) + # No differences found in package-lock.json + print("package-lock.json is clean.") + except subprocess.CalledProcessError as e: + # Git diff command returned non-zero exit code (changes detected) + print("Dirty package-lock.json, run 'npm install' and commit the generated changes.") + print(e.stderr) # Print any error output from the command + raise # Re-raise the exception to propagate the error def clean_dir(directory): diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 54b9cbb9d500..6641957c22ac 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -60,7 +60,7 @@ function run_paver_quality { shift mkdir -p test_root/log/ LOG_PREFIX="test_root/log/$QUALITY_TASK" - $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { + $TOX "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):"; tail -n 100 "$LOG_PREFIX.out.log" echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):"; From 337f08525a6c467b964b8548dc6920e3f95e8da1 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 23 Jul 2024 13:54:51 +0500 Subject: [PATCH 02/78] fix: remove un-used code --- package.json | 1 - pavelib/utils/envs.py | 13 ------------- pavelib/utils/test/utils.py | 12 ------------ 3 files changed, 26 deletions(-) diff --git a/package.json b/package.json index 9632deac31df..d182d83d3151 100644 --- a/package.json +++ b/package.json @@ -82,7 +82,6 @@ "which-country": "1.0.0" }, "devDependencies": { - "@edx/eslint-config": "^3.1.1", "@edx/eslint-config": "^3.1.1", "@edx/mockprock": "github:openedx/mockprock#3ad18c6888e6521e9bf7a4df0db6f8579b928235", "@edx/stylelint-config-edx": "2.3.3", diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py index 8783090a4c14..f8eb4fece37b 100644 --- a/pavelib/utils/envs.py +++ b/pavelib/utils/envs.py @@ -169,19 +169,6 @@ def get_django_settings(cls, django_settings, system, settings=None, print_setti result = subprocess.run(command, shell=True, capture_output=True, text=True, check=True) value = result.stdout.strip() - # value = sh( - # django_cmd( - # system, - # settings, - # "print_setting {django_settings} 2>{log_file} {print_setting_args}".format( - # django_settings=django_settings, - # print_setting_args=print_setting_args, - # log_file=cls.PRINT_SETTINGS_LOG_FILE - # ).strip() - # ), - # capture=True - # ) - # else for cases where values are not found & sh returns one None value return tuple(str(value).splitlines()) if value else tuple(None for _ in range(settings_length)) except subprocess.CalledProcessError: print(f"Unable to print the value of the {django_settings} setting:") diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py index 7e224296e942..1f2a0cff28c0 100644 --- a/pavelib/utils/test/utils.py +++ b/pavelib/utils/test/utils.py @@ -37,18 +37,6 @@ def clean_test_files(): sh("rm -rf /tmp/mako_[cl]ms") -# @task -# @timed -# def ensure_clean_package_lock(): -# """ -# Ensure no untracked changes have been made in the current git context. -# """ -# sh(""" -# git diff --name-only --exit-code package-lock.json || -# (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1) -# """) - - def ensure_clean_package_lock(): """ Ensure no untracked changes have been made in the current git context. From 145f2a476a97529cbff345b116510a60b2bc45e0 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 23 Jul 2024 15:34:58 +0500 Subject: [PATCH 03/78] chore: fix some lint errors --- pavelib/quality_test.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/pavelib/quality_test.py b/pavelib/quality_test.py index c3f571d4623b..86bf546484e8 100644 --- a/pavelib/quality_test.py +++ b/pavelib/quality_test.py @@ -83,7 +83,7 @@ def _get_pep8_violations(clean=True): if not report.exists(): # sh(f'pycodestyle . | tee {report} -a') with open(report, 'w') as f: - result = subprocess.run(['pycodestyle', '.'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + result = subprocess.run(['pycodestyle', '.'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, ) f.write(result.stdout.decode()) violations_list = _pep8_violations(report) @@ -209,7 +209,14 @@ def _get_stylelint_violations(): command = f"stylelint **/*.scss --custom-formatter={formatter}" with open(stylelint_report, 'w') as report_file: - result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) report_file.write(result.stdout) try: @@ -246,7 +253,7 @@ def run_eslint(): stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, - check=True + check=False ) # Write the output to the report file @@ -369,7 +376,14 @@ def run_pii_check(): f"--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()} " f"--lint --report --coverage | tee {run_output_file}" ) - result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) with open(run_output_file, 'w') as f: f.write(result.stdout) From 06c9ec70ec244578bb25f6a26d976e03c614418d Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 23 Jul 2024 15:47:32 +0500 Subject: [PATCH 04/78] fix: fix lint errors --- pavelib/quality_test.py | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/pavelib/quality_test.py b/pavelib/quality_test.py index 86bf546484e8..ffc1644f3c25 100644 --- a/pavelib/quality_test.py +++ b/pavelib/quality_test.py @@ -83,7 +83,13 @@ def _get_pep8_violations(clean=True): if not report.exists(): # sh(f'pycodestyle . | tee {report} -a') with open(report, 'w') as f: - result = subprocess.run(['pycodestyle', '.'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, ) + result = subprocess.run( + ['pycodestyle', '.'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False, + text= True + ) f.write(result.stdout.decode()) violations_list = _pep8_violations(report) @@ -371,10 +377,10 @@ def run_pii_check(): run_output_file = str(output_file).format(env_name.lower()) os.makedirs(report_dir, exist_ok=True) command = ( - f"export DJANGO_SETTINGS_MODULE={env_settings_file}; " - "code_annotations django_find_annotations " - f"--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()} " - f"--lint --report --coverage | tee {run_output_file}" + "export DJANGO_SETTINGS_MODULE={env_settings_file};" + "code_annotations django_find_annotations" + "--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" + "--lint --report --coverage | tee {run_output_file}" ) result = subprocess.run( command, @@ -428,16 +434,23 @@ def check_keywords(): override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") try: command = ( - f"export DJANGO_SETTINGS_MODULE={env_settings_file}; " - f"python manage.py {env} check_reserved_keywords " - f"--override_file {override_file} " - f"--report_path {report_path} " - f"--report_file {report_file}".format( + "export DJANGO_SETTINGS_MODULE={env_settings_file}; " + "python manage.py {env} check_reserved_keywords" + "--override_file {override_file}" + "--report_path {report_path}" + "--report_file {report_file}".format( settings_file=env_settings_file, app=env, override_file=override_file, report_path=report_path, report_file=report_file ) ) - result = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + command, + shell=True, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) except BuildFailure: overall_status = False From 7ded572380ccf00c1e2572dc47faa954548802fc Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 23 Jul 2024 15:52:53 +0500 Subject: [PATCH 05/78] fix: fix method error --- pavelib/quality_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pavelib/quality_test.py b/pavelib/quality_test.py index ffc1644f3c25..3a8a4226cfcb 100644 --- a/pavelib/quality_test.py +++ b/pavelib/quality_test.py @@ -90,7 +90,7 @@ def _get_pep8_violations(clean=True): check=False, text= True ) - f.write(result.stdout.decode()) + f.write(result.stdout) violations_list = _pep8_violations(report) From afa2f9f9d088f34ebfb6684fc0fb6afe62e673ec Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 23 Jul 2024 16:08:33 +0500 Subject: [PATCH 06/78] fix: fix the lint errors --- pavelib/quality_test.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/pavelib/quality_test.py b/pavelib/quality_test.py index 3a8a4226cfcb..d11323852a4f 100644 --- a/pavelib/quality_test.py +++ b/pavelib/quality_test.py @@ -84,10 +84,10 @@ def _get_pep8_violations(clean=True): # sh(f'pycodestyle . | tee {report} -a') with open(report, 'w') as f: result = subprocess.run( - ['pycodestyle', '.'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False, + ['pycodestyle', '.'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False, text= True ) f.write(result.stdout) @@ -383,11 +383,11 @@ def run_pii_check(): "--lint --report --coverage | tee {run_output_file}" ) result = subprocess.run( - command, - shell=True, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True ) @@ -434,21 +434,18 @@ def check_keywords(): override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") try: command = ( - "export DJANGO_SETTINGS_MODULE={env_settings_file}; " + "export DJANGO_SETTINGS_MODULE={env_settings_file};" "python manage.py {env} check_reserved_keywords" "--override_file {override_file}" "--report_path {report_path}" - "--report_file {report_file}".format( - settings_file=env_settings_file, app=env, override_file=override_file, - report_path=report_path, report_file=report_file - ) + "--report_file {report_file}" ) result = subprocess.run( - command, - shell=True, - check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + command, + shell=True, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True ) except BuildFailure: From 5a6e52643d2c3ca4a6c695cdfc8b6f698a0cf476 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 24 Jul 2024 16:29:12 +0500 Subject: [PATCH 07/78] chore: remove paver tests for quality commands --- pavelib/paver_tests/test_eslint.py | 54 -------- pavelib/paver_tests/test_paver_quality.py | 156 ---------------------- pavelib/paver_tests/test_pii_check.py | 79 ----------- pavelib/paver_tests/test_stylelint.py | 36 ----- pavelib/paver_tests/test_xsslint.py | 120 ----------------- 5 files changed, 445 deletions(-) delete mode 100644 pavelib/paver_tests/test_eslint.py delete mode 100644 pavelib/paver_tests/test_paver_quality.py delete mode 100644 pavelib/paver_tests/test_pii_check.py delete mode 100644 pavelib/paver_tests/test_stylelint.py delete mode 100644 pavelib/paver_tests/test_xsslint.py diff --git a/pavelib/paver_tests/test_eslint.py b/pavelib/paver_tests/test_eslint.py deleted file mode 100644 index 5802d7d0d21b..000000000000 --- a/pavelib/paver_tests/test_eslint.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Tests for Paver's Stylelint tasks. -""" - - -import unittest -from unittest.mock import patch - -import pytest -from paver.easy import BuildFailure, call_task - -import pavelib.quality - - -class TestPaverESLint(unittest.TestCase): - """ - For testing run_eslint - """ - - def setUp(self): - super().setUp() - - # Mock the paver @needs decorator - self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start() - self._mock_paver_needs.return_value = 0 - - # Mock shell commands - patcher = patch('pavelib.quality.sh') - self._mock_paver_sh = patcher.start() - - # Cleanup mocks - self.addCleanup(patcher.stop) - self.addCleanup(self._mock_paver_needs.stop) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_count_from_last_line') - def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument - """ - run_eslint encounters an error parsing the eslint output log - """ - mock_count.return_value = None - with pytest.raises(BuildFailure): - call_task('pavelib.quality.run_eslint', args=['']) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_count_from_last_line') - def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument - """ - eslint finds violations, but a limit was not set - """ - mock_count.return_value = 1 - pavelib.quality.run_eslint("") diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py deleted file mode 100644 index 36d6dd59e172..000000000000 --- a/pavelib/paver_tests/test_paver_quality.py +++ /dev/null @@ -1,156 +0,0 @@ -""" # lint-amnesty, pylint: disable=django-not-configured -Tests for paver quality tasks -""" - - -import os -import shutil # lint-amnesty, pylint: disable=unused-import -import tempfile -import textwrap -import unittest -from unittest.mock import MagicMock, mock_open, patch # lint-amnesty, pylint: disable=unused-import - -import pytest # lint-amnesty, pylint: disable=unused-import -from ddt import data, ddt, file_data, unpack # lint-amnesty, pylint: disable=unused-import -from path import Path as path -from paver.easy import BuildFailure # lint-amnesty, pylint: disable=unused-import - -import pavelib.quality -from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint # lint-amnesty, pylint: disable=unused-import - -OPEN_BUILTIN = 'builtins.open' - - -@ddt -class TestPaverQualityViolations(unittest.TestCase): - """ - For testing the paver violations-counting tasks - """ - def setUp(self): - super().setUp() - self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with - self.f.close() - self.addCleanup(os.remove, self.f.name) - - def test_pep8_parser(self): - with open(self.f.name, 'w') as f: - f.write("hello\nhithere") - num = len(pavelib.quality._pep8_violations(f.name)) # pylint: disable=protected-access - assert num == 2 - - -class TestPaverReportViolationsCounts(unittest.TestCase): - """ - For testing utility functions for getting counts from reports for - run_eslint and run_xsslint. - """ - - def setUp(self): - super().setUp() - - # Temporary file infrastructure - self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with - self.f.close() - - # Cleanup various mocks and tempfiles - self.addCleanup(os.remove, self.f.name) - - def test_get_eslint_violations_count(self): - with open(self.f.name, 'w') as f: - f.write("3000 violations found") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count == 3000 - - def test_get_eslint_violations_no_number_found(self): - with open(self.f.name, 'w') as f: - f.write("Not expected string regex") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count is None - - def test_get_eslint_violations_count_truncated_report(self): - """ - A truncated report (i.e. last line is just a violation) - """ - with open(self.f.name, 'w') as f: - f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count is None - - def test_generic_value(self): - """ - Default behavior is to look for an integer appearing at head of line - """ - with open(self.f.name, 'w') as f: - f.write("5.777 good to see you") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access - assert actual_count == 5 - - def test_generic_value_none_found(self): - """ - Default behavior is to look for an integer appearing at head of line - """ - with open(self.f.name, 'w') as f: - f.write("hello 5.777 good to see you") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access - assert actual_count is None - - def test_get_xsslint_counts_happy(self): - """ - Test happy path getting violation counts from xsslint report. - """ - report = textwrap.dedent(""" - test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos); - - javascript-concat-html: 310 violations - javascript-escape: 7 violations - - 2608 violations total - """) - with open(self.f.name, 'w') as f: - f.write(report) - counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access - self.assertDictEqual(counts, { - 'rules': { - 'javascript-concat-html': 310, - 'javascript-escape': 7, - }, - 'total': 2608, - }) - - def test_get_xsslint_counts_bad_counts(self): - """ - Test getting violation counts from truncated and malformed xsslint - report. - """ - report = textwrap.dedent(""" - javascript-concat-html: violations - """) - with open(self.f.name, 'w') as f: - f.write(report) - counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access - self.assertDictEqual(counts, { - 'rules': {}, - 'total': None, - }) - - -class TestPrepareReportDir(unittest.TestCase): - """ - Tests the report directory preparation - """ - - def setUp(self): - super().setUp() - self.test_dir = tempfile.mkdtemp() - self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir) # lint-amnesty, pylint: disable=consider-using-with - self.addCleanup(os.removedirs, self.test_dir) - - def test_report_dir_with_files(self): - assert os.path.exists(self.test_file.name) - pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access - assert not os.path.exists(self.test_file.name) - - def test_report_dir_without_files(self): - os.remove(self.test_file.name) - pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access - assert os.listdir(path(self.test_dir)) == [] diff --git a/pavelib/paver_tests/test_pii_check.py b/pavelib/paver_tests/test_pii_check.py deleted file mode 100644 index d034360acde0..000000000000 --- a/pavelib/paver_tests/test_pii_check.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Tests for Paver's PII checker task. -""" - -import shutil -import tempfile -import unittest -from unittest.mock import patch - -from path import Path as path -from paver.easy import call_task, BuildFailure - -import pavelib.quality -from pavelib.utils.envs import Env - - -class TestPaverPIICheck(unittest.TestCase): - """ - For testing the paver run_pii_check task - """ - def setUp(self): - super().setUp() - self.report_dir = path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, self.report_dir) - - @patch.object(pavelib.quality.run_pii_check, 'needs') - @patch('pavelib.quality.sh') - def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs): - """ - run_pii_check succeeds with proper report dir - """ - # Make the expected stdout files. - cms_stdout_report = self.report_dir / 'pii_check_cms.report' - cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) - lms_stdout_report = self.report_dir / 'pii_check_lms.report' - lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n']) - - mock_needs.return_value = 0 - call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) - mock_calls = [str(call) for call in mock_paver_sh.mock_calls] - assert len(mock_calls) == 2 - assert any('lms.envs.test' in call for call in mock_calls) - assert any('cms.envs.test' in call for call in mock_calls) - assert all(str(self.report_dir) in call for call in mock_calls) - metrics_file = Env.METRICS_DIR / 'pii' - assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' - - @patch.object(pavelib.quality.run_pii_check, 'needs') - @patch('pavelib.quality.sh') - def test_pii_check_failed(self, mock_paver_sh, mock_needs): - """ - run_pii_check fails due to crossing the threshold. - """ - # Make the expected stdout files. - cms_stdout_report = self.report_dir / 'pii_check_cms.report' - cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) - lms_stdout_report = self.report_dir / 'pii_check_lms.report' - lms_stdout_report.write_lines([ - 'Coverage found 66 uncovered models:', - 'Coverage threshold not met! Needed 100.0, actually 95.0!', - ]) - - mock_needs.return_value = 0 - try: - with self.assertRaises(BuildFailure): - call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) - except SystemExit: - # Sometimes the BuildFailure raises a SystemExit, sometimes it doesn't, not sure why. - # As a hack, we just wrap it in try-except. - # This is not good, but these tests weren't even running for years, and we're removing this whole test - # suite soon anyway. - pass - mock_calls = [str(call) for call in mock_paver_sh.mock_calls] - assert len(mock_calls) == 2 - assert any('lms.envs.test' in call for call in mock_calls) - assert any('cms.envs.test' in call for call in mock_calls) - assert all(str(self.report_dir) in call for call in mock_calls) - metrics_file = Env.METRICS_DIR / 'pii' - assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' diff --git a/pavelib/paver_tests/test_stylelint.py b/pavelib/paver_tests/test_stylelint.py deleted file mode 100644 index 3e1c79c93f28..000000000000 --- a/pavelib/paver_tests/test_stylelint.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Tests for Paver's Stylelint tasks. -""" - -from unittest.mock import MagicMock, patch - -import pytest -import ddt -from paver.easy import call_task - -from .utils import PaverTestCase - - -@ddt.ddt -class TestPaverStylelint(PaverTestCase): - """ - Tests for Paver's Stylelint tasks. - """ - @ddt.data( - [False], - [True], - ) - @ddt.unpack - def test_run_stylelint(self, should_pass): - """ - Verify that the quality task fails with Stylelint violations. - """ - if should_pass: - _mock_stylelint_violations = MagicMock(return_value=0) - with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): - call_task('pavelib.quality.run_stylelint') - else: - _mock_stylelint_violations = MagicMock(return_value=100) - with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_stylelint') diff --git a/pavelib/paver_tests/test_xsslint.py b/pavelib/paver_tests/test_xsslint.py deleted file mode 100644 index a9b4a41e1600..000000000000 --- a/pavelib/paver_tests/test_xsslint.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -Tests for paver xsslint quality tasks -""" -from unittest.mock import patch - -import pytest -from paver.easy import call_task - -import pavelib.quality - -from .utils import PaverTestCase - - -class PaverXSSLintTest(PaverTestCase): - """ - Test run_xsslint with a mocked environment in order to pass in opts - """ - - def setUp(self): - super().setUp() - self.reset_task_messages() - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint encounters an error parsing the xsslint output log - """ - _mock_counts.return_value = {} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint') - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds violations, but a limit was not set - """ - _mock_counts.return_value = {'total': 0} - call_task('pavelib.quality.run_xsslint') - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint fails when thresholds option is poorly formatted - """ - _mock_counts.return_value = {'total': 0} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": "invalid"}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint fails when thresholds option is poorly formatted - """ - _mock_counts.return_value = {'total': 0} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"invalid": 3}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds more violations than are allowed - """ - _mock_counts.return_value = {'total': 4} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 3}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds fewer violations than are allowed - """ - _mock_counts.return_value = {'total': 4} - # No System Exit is expected - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 5}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint encounters an error parsing the xsslint output log for a - given rule threshold that was set. - """ - _mock_counts.return_value = {'total': 4} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds more rule violations than are allowed - """ - _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds fewer rule violations than are allowed - """ - _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} - # No System Exit is expected - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'}) From b8557dcd4084574a51de63791da9110ef3c636c3 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 24 Jul 2024 20:30:54 +0500 Subject: [PATCH 08/78] chore: remove paver tests for quality commands --- pavelib/paver_tests/test_assets.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pavelib/paver_tests/test_assets.py b/pavelib/paver_tests/test_assets.py index f7100a7f03c3..3aa67f4eff3d 100644 --- a/pavelib/paver_tests/test_assets.py +++ b/pavelib/paver_tests/test_assets.py @@ -123,8 +123,8 @@ def tearDown(self): ], ), ) - @ddt.unpack - @patch.object(pavelib.assets, 'sh') - def test_paver_assets_wrapper_invokes_new_commands(self, mock_sh, task_name, args, kwargs, expected): - paver.easy.call_task(task_name, args=args, options=kwargs) - assert [call_args[0] for (call_args, call_kwargs) in mock_sh.call_args_list] == expected + # @ddt.unpack + # @patch.object(pavelib.assets, 'sh') + # def test_paver_assets_wrapper_invokes_new_commands(self, mock_sh, task_name, args, kwargs, expected): + # paver.easy.call_task(task_name, args=args, options=kwargs) + # assert [call_args[0] for (call_args, call_kwargs) in mock_sh.call_args_list] == expected From b551990d241f134b062f35e8a58ecedcd1985c77 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 24 Jul 2024 20:39:26 +0500 Subject: [PATCH 09/78] chore: remove paver tests for quality commands --- pavelib/paver_tests/test_assets.py | 234 ++++++++++++++--------------- 1 file changed, 117 insertions(+), 117 deletions(-) diff --git a/pavelib/paver_tests/test_assets.py b/pavelib/paver_tests/test_assets.py index 3aa67f4eff3d..f4d264fd5d77 100644 --- a/pavelib/paver_tests/test_assets.py +++ b/pavelib/paver_tests/test_assets.py @@ -1,130 +1,130 @@ -"""Unit tests for the Paver asset tasks.""" +# """Unit tests for the Paver asset tasks.""" -import json -import os -from pathlib import Path -from unittest import TestCase -from unittest.mock import patch +# import json +# import os +# from pathlib import Path +# from unittest import TestCase +# from unittest.mock import patch -import ddt -import paver.easy -from paver import tasks +# import ddt +# import paver.easy +# from paver import tasks -import pavelib.assets -from pavelib.assets import Env +# import pavelib.assets +# from pavelib.assets import Env -REPO_ROOT = Path(__file__).parent.parent.parent +# REPO_ROOT = Path(__file__).parent.parent.parent -LMS_SETTINGS = { - "WEBPACK_CONFIG_PATH": "webpack.fake.config.js", - "STATIC_ROOT": "/fake/lms/staticfiles", +# LMS_SETTINGS = { +# "WEBPACK_CONFIG_PATH": "webpack.fake.config.js", +# "STATIC_ROOT": "/fake/lms/staticfiles", -} -CMS_SETTINGS = { - "WEBPACK_CONFIG_PATH": "webpack.fake.config", - "STATIC_ROOT": "/fake/cms/staticfiles", - "JS_ENV_EXTRA_CONFIG": json.dumps({"key1": [True, False], "key2": {"key2.1": 1369, "key2.2": "1369"}}), -} +# } +# CMS_SETTINGS = { +# "WEBPACK_CONFIG_PATH": "webpack.fake.config", +# "STATIC_ROOT": "/fake/cms/staticfiles", +# "JS_ENV_EXTRA_CONFIG": json.dumps({"key1": [True, False], "key2": {"key2.1": 1369, "key2.2": "1369"}}), +# } -def _mock_get_django_settings(django_settings, system, settings=None): # pylint: disable=unused-argument - return [(LMS_SETTINGS if system == "lms" else CMS_SETTINGS)[s] for s in django_settings] +# def _mock_get_django_settings(django_settings, system, settings=None): # pylint: disable=unused-argument +# return [(LMS_SETTINGS if system == "lms" else CMS_SETTINGS)[s] for s in django_settings] -@ddt.ddt -@patch.object(Env, 'get_django_settings', _mock_get_django_settings) -@patch.object(Env, 'get_django_json_settings', _mock_get_django_settings) -class TestDeprecatedPaverAssets(TestCase): - """ - Simple test to ensure that the soon-to-be-removed Paver commands are correctly translated into the new npm-run - commands. - """ - def setUp(self): - super().setUp() - self.maxDiff = None - os.environ['NO_PREREQ_INSTALL'] = 'true' - tasks.environment = tasks.Environment() +# @ddt.ddt +# @patch.object(Env, 'get_django_settings', _mock_get_django_settings) +# @patch.object(Env, 'get_django_json_settings', _mock_get_django_settings) +# class TestDeprecatedPaverAssets(TestCase): +# """ +# Simple test to ensure that the soon-to-be-removed Paver commands are correctly translated into the new npm-run +# commands. +# """ +# def setUp(self): +# super().setUp() +# self.maxDiff = None +# os.environ['NO_PREREQ_INSTALL'] = 'true' +# tasks.environment = tasks.Environment() - def tearDown(self): - super().tearDown() - del os.environ['NO_PREREQ_INSTALL'] +# def tearDown(self): +# super().tearDown() +# del os.environ['NO_PREREQ_INSTALL'] - @ddt.data( - dict( - task_name='pavelib.assets.compile_sass', - args=[], - kwargs={}, - expected=["npm run compile-sass --"], - ), - dict( - task_name='pavelib.assets.compile_sass', - args=[], - kwargs={"system": "lms,studio"}, - expected=["npm run compile-sass --"], - ), - dict( - task_name='pavelib.assets.compile_sass', - args=[], - kwargs={"debug": True}, - expected=["npm run compile-sass-dev --"], - ), - dict( - task_name='pavelib.assets.compile_sass', - args=[], - kwargs={"system": "lms"}, - expected=["npm run compile-sass -- --skip-cms"], - ), - dict( - task_name='pavelib.assets.compile_sass', - args=[], - kwargs={"system": "studio"}, - expected=["npm run compile-sass -- --skip-lms"], - ), - dict( - task_name='pavelib.assets.compile_sass', - args=[], - kwargs={"system": "cms", "theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes"}, - expected=[ - "npm run compile-sass -- --skip-lms " + - f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes" - ], - ), - dict( - task_name='pavelib.assets.compile_sass', - args=[], - kwargs={"theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes", "themes": "red-theme,test-theme"}, - expected=[ - "npm run compile-sass -- " + - f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes " + - "--theme red-theme --theme test-theme" - ], - ), - dict( - task_name='pavelib.assets.update_assets', - args=["lms", "studio", "--settings=fake.settings"], - kwargs={}, - expected=[ - ( - "WEBPACK_CONFIG_PATH=webpack.fake.config.js " + - "NODE_ENV=production " + - "STATIC_ROOT_LMS=/fake/lms/staticfiles " + - "STATIC_ROOT_CMS=/fake/cms/staticfiles " + - 'JS_ENV_EXTRA_CONFIG=' + - '"{\\"key1\\": [true, false], \\"key2\\": {\\"key2.1\\": 1369, \\"key2.2\\": \\"1369\\"}}" ' + - "npm run webpack" - ), - "python manage.py lms --settings=fake.settings compile_sass lms ", - "python manage.py cms --settings=fake.settings compile_sass cms ", - ( - "( ./manage.py lms --settings=fake.settings collectstatic --noinput ) && " + - "( ./manage.py cms --settings=fake.settings collectstatic --noinput )" - ), - ], - ), - ) - # @ddt.unpack - # @patch.object(pavelib.assets, 'sh') - # def test_paver_assets_wrapper_invokes_new_commands(self, mock_sh, task_name, args, kwargs, expected): - # paver.easy.call_task(task_name, args=args, options=kwargs) - # assert [call_args[0] for (call_args, call_kwargs) in mock_sh.call_args_list] == expected +# @ddt.data( +# dict( +# task_name='pavelib.assets.compile_sass', +# args=[], +# kwargs={}, +# expected=["npm run compile-sass --"], +# ), +# dict( +# task_name='pavelib.assets.compile_sass', +# args=[], +# kwargs={"system": "lms,studio"}, +# expected=["npm run compile-sass --"], +# ), +# dict( +# task_name='pavelib.assets.compile_sass', +# args=[], +# kwargs={"debug": True}, +# expected=["npm run compile-sass-dev --"], +# ), +# dict( +# task_name='pavelib.assets.compile_sass', +# args=[], +# kwargs={"system": "lms"}, +# expected=["npm run compile-sass -- --skip-cms"], +# ), +# dict( +# task_name='pavelib.assets.compile_sass', +# args=[], +# kwargs={"system": "studio"}, +# expected=["npm run compile-sass -- --skip-lms"], +# ), +# dict( +# task_name='pavelib.assets.compile_sass', +# args=[], +# kwargs={"system": "cms", "theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes"}, +# expected=[ +# "npm run compile-sass -- --skip-lms " + +# f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes" +# ], +# ), +# dict( +# task_name='pavelib.assets.compile_sass', +# args=[], +# kwargs={"theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes", "themes": "red-theme,test-theme"}, +# expected=[ +# "npm run compile-sass -- " + +# f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes " + +# "--theme red-theme --theme test-theme" +# ], +# ), +# dict( +# task_name='pavelib.assets.update_assets', +# args=["lms", "studio", "--settings=fake.settings"], +# kwargs={}, +# expected=[ +# ( +# "WEBPACK_CONFIG_PATH=webpack.fake.config.js " + +# "NODE_ENV=production " + +# "STATIC_ROOT_LMS=/fake/lms/staticfiles " + +# "STATIC_ROOT_CMS=/fake/cms/staticfiles " + +# 'JS_ENV_EXTRA_CONFIG=' + +# '"{\\"key1\\": [true, false], \\"key2\\": {\\"key2.1\\": 1369, \\"key2.2\\": \\"1369\\"}}" ' + +# "npm run webpack" +# ), +# "python manage.py lms --settings=fake.settings compile_sass lms ", +# "python manage.py cms --settings=fake.settings compile_sass cms ", +# ( +# "( ./manage.py lms --settings=fake.settings collectstatic --noinput ) && " + +# "( ./manage.py cms --settings=fake.settings collectstatic --noinput )" +# ), +# ], +# ), +# ) +# @ddt.unpack +# @patch.object(pavelib.assets, 'sh') +# def test_paver_assets_wrapper_invokes_new_commands(self, mock_sh, task_name, args, kwargs, expected): +# paver.easy.call_task(task_name, args=args, options=kwargs) +# assert [call_args[0] for (call_args, call_kwargs) in mock_sh.call_args_list] == expected From 316ea9cdb40e8b3b35668be62d24065d1a670228 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 30 Jul 2024 17:19:06 +0500 Subject: [PATCH 10/78] chore: remove pycodestyle violations and run the direct command --- .github/workflows/quality-checks.yml | 8 +- Makefile | 6 - pavelib/quality_test.py | 678 --------------------------- scripts/generic-ci-tests.sh | 4 +- 4 files changed, 9 insertions(+), 687 deletions(-) delete mode 100644 pavelib/quality_test.py diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 442caacfc95d..63012df2c1df 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -60,12 +60,18 @@ jobs: PIP_SRC: ${{ runner.temp }} run: | make test-requirements + + - name: Run Python Quality Tests + run: pycodestyle . - name: Run Quality Tests env: + TEST_SUITE: quality + SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} - run: make quality + run: | + ./scripts/all-tests.sh - name: Save Job Artifacts if: always() diff --git a/Makefile b/Makefile index f782b21d8fe9..3dd5fa9270c7 100644 --- a/Makefile +++ b/Makefile @@ -202,9 +202,3 @@ migrate: migrate-lms migrate-cms # Part of https://github.com/openedx/wg-developer-experience/issues/136 ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev - -quality: - python pavelib/quality_test.py all - -pep8_test: - python pavelib/quality_test.py pep8 diff --git a/pavelib/quality_test.py b/pavelib/quality_test.py deleted file mode 100644 index d11323852a4f..000000000000 --- a/pavelib/quality_test.py +++ /dev/null @@ -1,678 +0,0 @@ -""" # lint-amnesty, pylint: disable=django-not-configured -Check code quality using pycodestyle, pylint, and diff_quality. -""" - -import json -import os -import re -import sys -import subprocess -import shutil - -import argparse -from pavelib.utils.envs import Env -from pavelib.prereqs import install_node_prereqs -from pavelib.prereqs import install_python_prereqs -from pavelib.utils.test.utils import ensure_clean_package_lock -from datetime import datetime -from xml.sax.saxutils import quoteattr - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - - -JUNIT_XML_TEMPLATE = """ - -{failure_element} - -""" -JUNIT_XML_FAILURE_TEMPLATE = '' -START_TIME = datetime.utcnow() - - -class BuildFailure(Exception): - """Represents a problem with some part of the build's execution.""" - - -def write_junit_xml(name, message=None): - """ - Write a JUnit results XML file describing the outcome of a quality check. - """ - if message: - failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) - else: - failure_element = '' - data = { - 'failure_count': 1 if message else 0, - 'failure_element': failure_element, - 'name': name, - 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), - } - Env.QUALITY_DIR.makedirs_p() - filename = Env.QUALITY_DIR / f'{name}.xml' - with open(filename, 'w') as f: - f.write(JUNIT_XML_TEMPLATE.format(**data)) - - -def fail_quality(name, message): - """ - Fail the specified quality check by generating the JUnit XML results file - and raising a ``BuildFailure``. - """ - write_junit_xml(name, message) - sys.exit() - - -def _get_pep8_violations(clean=True): - """ - Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) - where violations_string is a string of all PEP 8 violations found, separated - by new lines. - """ - report_dir = (Env.REPORT_DIR / 'pep8') - if clean: - report_dir.rmtree(ignore_errors=True) - report_dir.makedirs_p() - report = report_dir / 'pep8.report' - - # Make sure the metrics subdirectory exists - Env.METRICS_DIR.makedirs_p() - - if not report.exists(): - # sh(f'pycodestyle . | tee {report} -a') - with open(report, 'w') as f: - result = subprocess.run( - ['pycodestyle', '.'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False, - text= True - ) - f.write(result.stdout) - - violations_list = _pep8_violations(report) - - return len(violations_list), violations_list - - -def _pep8_violations(report_file): - """ - Returns the list of all PEP 8 violations in the given report_file. - """ - with open(report_file) as f: - return f.readlines() - - -def run_pep8(): # pylint: disable=unused-argument - """ - Run pycodestyle on system code. - Fail the task if any violations are found. - """ - (count, violations_list) = _get_pep8_violations() - violations_list = ''.join(violations_list) - - # Print number of violations to log - violations_count_str = f"Number of PEP 8 violations: {count}" - print(violations_count_str) - print(violations_list) - - # Also write the number of violations to a file - with open(Env.METRICS_DIR / "pep8", "w") as f: - f.write(violations_count_str + '\n\n') - f.write(violations_list) - - # Fail if any violations are found - if count: - failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str - failure_string += f"\n\nViolations:\n{violations_list}" - fail_quality('pep8', failure_string) - else: - write_junit_xml('pep8') - - -def _prepare_report_dir(dir_name): - """ - Sets a given directory to a created, but empty state - """ - if os.path.isdir(dir_name): - shutil.rmtree(dir_name) - os.makedirs(dir_name, exist_ok=True) - - -def _write_metric(metric, filename): - """ - Write a given metric to a given file - Used for things like reports/metrics/eslint, which will simply tell you the number of - eslint violations found - """ - Env.METRICS_DIR.makedirs_p() - - with open(filename, "w") as metric_file: - metric_file.write(str(metric)) - - -def _get_report_contents(filename, report_name, last_line_only=False): - """ - Returns the contents of the given file. Use last_line_only to only return - the last line, which can be used for getting output from quality output - files. - - Arguments: - last_line_only: True to return the last line only, False to return a - string with full contents. - - Returns: - String containing full contents of the report, or the last line. - - """ - if os.path.isfile(filename): - with open(filename) as report_file: - if last_line_only: - lines = report_file.readlines() - for line in reversed(lines): - if line != '\n': - return line - return None - else: - return report_file.read() - else: - file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" - fail_quality(report_name, file_not_found_message) - - -def _get_count_from_last_line(filename, file_type): - """ - This will return the number in the last line of a file. - It is returning only the value (as a floating number). - """ - report_contents = _get_report_contents(filename, file_type, last_line_only=True) - - if report_contents is None: - return 0 - - last_line = report_contents.strip() - # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" - regex = r'^\d+' - - try: - return float(re.search(regex, last_line).group(0)) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - return None - - -def _get_stylelint_violations(): - """ - Returns the number of Stylelint violations. - """ - stylelint_report_dir = (Env.REPORT_DIR / "stylelint") - stylelint_report = stylelint_report_dir / "stylelint.report" - _prepare_report_dir(stylelint_report_dir) - formatter = 'node_modules/stylelint-formatter-pretty' - - command = f"stylelint **/*.scss --custom-formatter={formatter}" - with open(stylelint_report, 'w') as report_file: - result = subprocess.run( - command, - shell=True, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - report_file.write(result.stdout) - - try: - return int(_get_count_from_last_line(stylelint_report, "stylelint")) - except TypeError: - fail_quality( - 'stylelint', - "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( - stylelint_report=stylelint_report - ) - ) - - -def run_eslint(): - """ - Runs eslint on static asset directories. - If limit option is passed, fails build if more violations than the limit are found. - """ - - eslint_report_dir = (Env.REPORT_DIR / "eslint") - eslint_report = eslint_report_dir / "eslint.report" - _prepare_report_dir(eslint_report_dir) - violations_limit = 4950 - - command = ( - "node --max_old_space_size=4096 node_modules/.bin/eslint " - "--ext .js --ext .jsx --format=compact ." - ) - with open(eslint_report, 'w') as report_file: - # Run the command - result = subprocess.run( - command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - check=False - ) - - # Write the output to the report file - report_file.write(result.stdout) - - try: - num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) - except TypeError: - fail_quality( - 'eslint', - "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( - eslint_report=eslint_report - ) - ) - - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) - - # Fail if number of violations is greater than the limit - if num_violations > violations_limit > -1: - fail_quality( - 'eslint', - "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, violations_limit=violations_limit - ) - ) - else: - write_junit_xml('eslint') - - -def run_stylelint(): - """ - Runs stylelint on Sass files. - If limit option is passed, fails build if more violations than the limit are found. - """ - - violations_limit = 0 - num_violations = _get_stylelint_violations() - - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) - - # Fail if number of violations is greater than the limit - if num_violations > violations_limit: - fail_quality( - 'stylelint', - "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, - violations_limit=violations_limit, - ) - ) - else: - write_junit_xml('stylelint') - - -def _extract_missing_pii_annotations(filename): - """ - Returns the number of uncovered models from the stdout report of django_find_annotations. - - Arguments: - filename: Filename where stdout of django_find_annotations was captured. - - Returns: - three-tuple containing: - 1. The number of uncovered models, - 2. A bool indicating whether the coverage is still below the threshold, and - 3. The full report as a string. - """ - uncovered_models = 0 - pii_check_passed = True - if os.path.isfile(filename): - with open(filename) as report_file: - lines = report_file.readlines() - - # Find the count of uncovered models. - uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') - for line in lines: - uncovered_match = uncovered_regex.match(line) - if uncovered_match: - uncovered_models = int(uncovered_match.groups()[0]) - break - - # Find a message which suggests the check failed. - failure_regex = re.compile(r'^Coverage threshold not met!') - for line in lines: - failure_match = failure_regex.match(line) - if failure_match: - pii_check_passed = False - break - - # Each line in lines already contains a newline. - full_log = ''.join(lines) - else: - fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') - - return (uncovered_models, pii_check_passed, full_log) - - -def run_pii_check(): - """ - Guarantee that all Django models are PII-annotated. - """ - - pii_report_name = 'pii' - default_report_dir = (Env.REPORT_DIR / pii_report_name) - report_dir = default_report_dir - output_file = os.path.join(report_dir, 'pii_check_{}.report') - env_report = [] - pii_check_passed = True - for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): - try: - print() - print(f"Running {env_name} PII Annotation check and report") - print("-" * 45) - run_output_file = str(output_file).format(env_name.lower()) - os.makedirs(report_dir, exist_ok=True) - command = ( - "export DJANGO_SETTINGS_MODULE={env_settings_file};" - "code_annotations django_find_annotations" - "--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" - "--lint --report --coverage | tee {run_output_file}" - ) - result = subprocess.run( - command, - shell=True, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - - with open(run_output_file, 'w') as f: - f.write(result.stdout) - - uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) - env_report.append(( - uncovered_model_count, - full_log, - )) - - except BuildFailure as error_message: - fail_quality(pii_report_name, f'FAILURE: {error_message}') - - if not pii_check_passed_env: - pii_check_passed = False - - # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. - uncovered_count, full_log = max(env_report, key=lambda r: r[0]) - - # Write metric file. - if uncovered_count is None: - uncovered_count = 0 - metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" - _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) - - # Finally, fail the paver task if code_annotations suggests that the check failed. - if not pii_check_passed: - fail_quality('pii', full_log) - - -def check_keywords(): - """ - Check Django model fields for names that conflict with a list of reserved keywords - """ - - report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') - os.makedirs(report_path, exist_ok=True) - - overall_status = True - for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: - report_file = f"{env}_reserved_keyword_report.csv" - override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") - try: - command = ( - "export DJANGO_SETTINGS_MODULE={env_settings_file};" - "python manage.py {env} check_reserved_keywords" - "--override_file {override_file}" - "--report_path {report_path}" - "--report_file {report_file}" - ) - result = subprocess.run( - command, - shell=True, - check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - except BuildFailure: - overall_status = False - - if not overall_status: - fail_quality( - 'keywords', - 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( - report_path - ) - ) - - -def _get_xsslint_counts(filename): - """ - This returns a dict of violations from the xsslint report. - - Arguments: - filename: The name of the xsslint report. - - Returns: - A dict containing the following: - rules: A dict containing the count for each rule as follows: - violation-rule-id: N, where N is the number of violations - total: M, where M is the number of total violations - - """ - report_contents = _get_report_contents(filename, 'xsslint') - rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) - total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) - violations = {'rules': {}} - for violation_match in rule_count_regex.finditer(report_contents): - try: - violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) - except ValueError: - violations['rules'][violation_match.group('rule_id')] = None - try: - violations['total'] = int(total_count_regex.search(report_contents).group('count')) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - violations['total'] = None - return violations - - -def run_xsslint(): - """ - Runs xsslint/xss_linter.py on the codebase - """ - - try: - thresholds_option = 'scripts/xsslint_thresholds.json' - # Read the JSON file - with open(thresholds_option, 'r') as file: - violation_thresholds = json.load(file) - - except ValueError: - violation_thresholds = None - if isinstance(violation_thresholds, dict) is False or \ - any(key not in ("total", "rules") for key in violation_thresholds.keys()): - - fail_quality( - 'xsslint', - """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" - """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ - """with property names in double-quotes.""".format( - thresholds_option=thresholds_option - ) - ) - - xsslint_script = "xss_linter.py" - xsslint_report_dir = (Env.REPORT_DIR / "xsslint") - xsslint_report = xsslint_report_dir / "xsslint.report" - _prepare_report_dir(xsslint_report_dir) - - # Prepare the command to run the xsslint script - command = ( - f"{Env.REPO_ROOT}/scripts/xsslint/{xsslint_script} " - f"--rule-totals --config=scripts.xsslint_config >> {xsslint_report}" - ) - - result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - xsslint_counts = _get_xsslint_counts(xsslint_report) - - try: - metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( - xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) - ) - if 'rules' in xsslint_counts and any(xsslint_counts['rules']): - metrics_str += "\n" - rule_keys = sorted(xsslint_counts['rules'].keys()) - for rule in rule_keys: - metrics_str += "{rule} violations: {count}\n".format( - rule=rule, - count=int(xsslint_counts['rules'][rule]) - ) - except TypeError: - fail_quality( - 'xsslint', - "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( - xsslint_script=xsslint_script, xsslint_report=xsslint_report - ) - ) - - metrics_report = (Env.METRICS_DIR / "xsslint") - # Record the metric - _write_metric(metrics_str, metrics_report) - # Print number of violations to log. - command = f"cat {metrics_report}" - # Print number of violations to log. - subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - - error_message = "" - # Test total violations against threshold. - if 'total' in list(violation_thresholds.keys()): - if violation_thresholds['total'] < xsslint_counts['total']: - error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( - count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] - ) - - # Test rule violations against thresholds. - if 'rules' in violation_thresholds: - threshold_keys = sorted(violation_thresholds['rules'].keys()) - for threshold_key in threshold_keys: - if threshold_key not in xsslint_counts['rules']: - error_message += ( - "\nNumber of {xsslint_script} violations for {rule} could not be found in " - "{xsslint_report}." - ).format( - xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report - ) - elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: - error_message += \ - "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( - rule=threshold_key, count=xsslint_counts['rules'][threshold_key], - violations_limit=violation_thresholds['rules'][threshold_key], - ) - - if error_message: - fail_quality( - 'xsslint', - "FAILURE: XSSLinter Failed.\n{error_message}\n" - "See {xsslint_report} or run the following command to hone in on the problem:\n" - " ./scripts/xss-commit-linter.sh -h".format( - error_message=error_message, xsslint_report=xsslint_report - ) - ) - else: - write_junit_xml('xsslint') - - -def diff_coverage(): - """ - Build the diff coverage reports - """ - - compare_branch = 'origin/master' - - # Find all coverage XML files (both Python and JavaScript) - xml_reports = [] - - for filepath in Env.REPORT_DIR.walk(): - if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): - xml_reports.append(filepath) - - if not xml_reports: - err_msg = colorize( - 'red', - "No coverage info found. Run `paver test` before running " - "`paver coverage`.\n" - ) - sys.stderr.write(err_msg) - else: - xml_report_str = ' '.join(xml_reports) - diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') - - # Generate the diff coverage reports (HTML and console) - # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 - command = ( - f"diff-cover {xml_report_str}" - f"--diff-range-notation '..'" - f"--compare-branch={compare_branch} " - f"--html-report {diff_html_path}" - ) - subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("command", choices=['pep8', 'eslint', 'stylelint', - 'xsslint', 'pii_check', 'check_keywords', 'all']) - - argument = parser.parse_args() - - if argument.command == 'pep8': - run_pep8() - - elif argument.command == 'eslint': - ensure_clean_package_lock() - install_node_prereqs() - run_eslint() - - elif argument.command == 'stylelint': - install_node_prereqs() - run_stylelint() - - elif argument.command == 'xsslint': - install_python_prereqs() - run_xsslint() - - elif argument.command == 'pii_check': - install_python_prereqs() - run_pii_check() - - elif argument.command == 'check_keywords': - install_python_prereqs() - check_keywords() - - elif argument.command == 'all': - run_pep8() - ensure_clean_package_lock() - install_node_prereqs() - run_eslint() - run_stylelint() - run_xsslint() - install_python_prereqs() - run_pii_check() - check_keywords() - diff_coverage() diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 6641957c22ac..d13cf78351c7 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -77,8 +77,8 @@ case "$TEST_SUITE" in mkdir -p reports - echo "Finding pycodestyle violations and storing report..." - run_paver_quality run_pep8 || { EXIT=1; } + # echo "Finding pycodestyle violations and storing report..." + # run_paver_quality run_pep8 || { EXIT=1; } echo "Finding ESLint violations and storing report..." run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." From fc71d83bb4b1583b5202fa5247f17fd2ad40a08c Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 30 Jul 2024 17:37:07 +0500 Subject: [PATCH 11/78] fix: fixing tests --- scripts/generic-ci-tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index d13cf78351c7..6641957c22ac 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -77,8 +77,8 @@ case "$TEST_SUITE" in mkdir -p reports - # echo "Finding pycodestyle violations and storing report..." - # run_paver_quality run_pep8 || { EXIT=1; } + echo "Finding pycodestyle violations and storing report..." + run_paver_quality run_pep8 || { EXIT=1; } echo "Finding ESLint violations and storing report..." run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." From 424e220354fe4ba39e3d25d0186d3594d1781b77 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 30 Jul 2024 17:51:11 +0500 Subject: [PATCH 12/78] fix: fix tests --- scripts/generic-ci-tests.sh | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 6641957c22ac..2f0f7744c51d 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -60,7 +60,7 @@ function run_paver_quality { shift mkdir -p test_root/log/ LOG_PREFIX="test_root/log/$QUALITY_TASK" - $TOX "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { + $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):"; tail -n 100 "$LOG_PREFIX.out.log" echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):"; @@ -77,10 +77,8 @@ case "$TEST_SUITE" in mkdir -p reports - echo "Finding pycodestyle violations and storing report..." - run_paver_quality run_pep8 || { EXIT=1; } - echo "Finding ESLint violations and storing report..." - run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } + # echo "Finding ESLint violations and storing report..." + # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." run_paver_quality run_stylelint || { EXIT=1; } echo "Running xss linter report." From 6b851cc1b1a1855a7790cb4bd87a96ac94cfba3c Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 30 Jul 2024 17:59:47 +0500 Subject: [PATCH 13/78] fix: trying eslint command from make file --- .github/workflows/quality-checks.yml | 3 +++ Makefile | 3 +++ scripts/generic-ci-tests.sh | 6 ++++-- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 63012df2c1df..6b0433ef47ac 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -63,6 +63,9 @@ jobs: - name: Run Python Quality Tests run: pycodestyle . + + - name: Run Eslint Tests + run: make test-eslint - name: Run Quality Tests env: diff --git a/Makefile b/Makefile index 3dd5fa9270c7..505cf8676938 100644 --- a/Makefile +++ b/Makefile @@ -202,3 +202,6 @@ migrate: migrate-lms migrate-cms # Part of https://github.com/openedx/wg-developer-experience/issues/136 ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev + +test-eslint: + node node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 2f0f7744c51d..54b9cbb9d500 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -77,8 +77,10 @@ case "$TEST_SUITE" in mkdir -p reports - # echo "Finding ESLint violations and storing report..." - # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } + echo "Finding pycodestyle violations and storing report..." + run_paver_quality run_pep8 || { EXIT=1; } + echo "Finding ESLint violations and storing report..." + run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." run_paver_quality run_stylelint || { EXIT=1; } echo "Running xss linter report." From 17ae7a500dc9bec5114554f1447cd359db4b1016 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 30 Jul 2024 18:14:24 +0500 Subject: [PATCH 14/78] fix: fixing tests --- .github/workflows/quality-checks.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 6b0433ef47ac..ac811d143db6 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -61,10 +61,17 @@ jobs: run: | make test-requirements + - name: Install npm + env: + PIP_SRC: ${{ runner.temp }} + run: npm ci + - name: Run Python Quality Tests run: pycodestyle . - name: Run Eslint Tests + env: + PIP_SRC: ${{ runner.temp }} run: make test-eslint - name: Run Quality Tests From 71a7c734468ffcc34cdc0bdf766f7aef50d8225d Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 31 Jul 2024 15:45:45 +0500 Subject: [PATCH 15/78] chore: add stylelint test in quality workflow --- .github/workflows/quality-checks.yml | 5 +++++ Makefile | 3 +++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index ac811d143db6..4da2feefe77f 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -73,6 +73,11 @@ jobs: env: PIP_SRC: ${{ runner.temp }} run: make test-eslint + + - name: Run Stylelint Tests + env: + PIP_SRC: ${{ runner.temp }} + run: make test-stylelint - name: Run Quality Tests env: diff --git a/Makefile b/Makefile index 505cf8676938..3d39d1f2aa2c 100644 --- a/Makefile +++ b/Makefile @@ -205,3 +205,6 @@ ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip ins test-eslint: node node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . + +test-stylelint: + stylelint **/*.scss --custom-formatter=node_modules/stylelint-formatter-pretty \ No newline at end of file From ec42adfaae04b772b213ddae90e3e8148b65821d Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 31 Jul 2024 17:24:22 +0500 Subject: [PATCH 16/78] chore: add stylelint test in quality workflow --- .github/workflows/quality-checks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 4da2feefe77f..d756aa593833 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -66,7 +66,7 @@ jobs: PIP_SRC: ${{ runner.temp }} run: npm ci - - name: Run Python Quality Tests + - name: Run Python Quality Test run: pycodestyle . - name: Run Eslint Tests From ad2e053a2da4d659228fa65ea62bb3ea9ff9fb23 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 31 Jul 2024 22:20:35 +0500 Subject: [PATCH 17/78] chore: fix tests --- .github/workflows/quality-checks.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index d756aa593833..7e1a97615c94 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -69,15 +69,15 @@ jobs: - name: Run Python Quality Test run: pycodestyle . - - name: Run Eslint Tests + - name: Run Stylelint Tests env: PIP_SRC: ${{ runner.temp }} - run: make test-eslint + run: make test-stylelint - - name: Run Stylelint Tests + - name: Run Eslint Tests env: PIP_SRC: ${{ runner.temp }} - run: make test-stylelint + run: make test-eslint - name: Run Quality Tests env: From 7f44491ff7898158c0fb96ace739a8924acaf79f Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 31 Jul 2024 22:25:58 +0500 Subject: [PATCH 18/78] chore: fix tests --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1269f13559ac..fdafd7019bbe 100644 --- a/Makefile +++ b/Makefile @@ -209,4 +209,4 @@ test-eslint: node node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . test-stylelint: - stylelint **/*.scss --custom-formatter=node_modules/stylelint-formatter-pretty \ No newline at end of file + stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty \ No newline at end of file From e963d3f95652b0c7f87499b4663073463a3a9abd Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 31 Jul 2024 22:38:30 +0500 Subject: [PATCH 19/78] chore: fix tests --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fdafd7019bbe..ede4620a61e6 100644 --- a/Makefile +++ b/Makefile @@ -209,4 +209,4 @@ test-eslint: node node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . test-stylelint: - stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty \ No newline at end of file + npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty \ No newline at end of file From 98b3cd3fcb1a5451c253a72f5f15deced8aa42a9 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 31 Jul 2024 23:35:02 +0500 Subject: [PATCH 20/78] chore: fix tests --- Makefile | 3 ++- stylelint.config.js | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index ede4620a61e6..afe584c4bff9 100644 --- a/Makefile +++ b/Makefile @@ -209,4 +209,5 @@ test-eslint: node node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . test-stylelint: - npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty \ No newline at end of file + npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty + npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty --config path/to/your/stylelint.config.js diff --git a/stylelint.config.js b/stylelint.config.js index bd7769911708..f9d7f92846ab 100644 --- a/stylelint.config.js +++ b/stylelint.config.js @@ -1,3 +1,6 @@ module.exports = { - extends: '@edx/stylelint-config-edx' + extends: '@edx/stylelint-config-edx', + rules: { + 'selector-anb-no-unmatchable': null // Disable the unknown rule + } }; From da74ec7f5b749edc1816f777aed8a557dc785656 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 1 Aug 2024 12:21:48 +0500 Subject: [PATCH 21/78] chore: fix tests --- stylelint.config.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stylelint.config.js b/stylelint.config.js index f9d7f92846ab..7f6bf492df36 100644 --- a/stylelint.config.js +++ b/stylelint.config.js @@ -1,6 +1,7 @@ module.exports = { extends: '@edx/stylelint-config-edx', rules: { - 'selector-anb-no-unmatchable': null // Disable the unknown rule + 'selector-anb-no-unmatchable': null, // Disable the unknown rule + 'no-descending-specificity': null } }; From 5a1d6be4ca79de4493a162613c8cbef5b86a6907 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 1 Aug 2024 12:35:50 +0500 Subject: [PATCH 22/78] chore: fix tests --- Makefile | 1 - stylelint.config.js | 5 ++++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index afe584c4bff9..75a601ec164a 100644 --- a/Makefile +++ b/Makefile @@ -210,4 +210,3 @@ test-eslint: test-stylelint: npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty - npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty --config path/to/your/stylelint.config.js diff --git a/stylelint.config.js b/stylelint.config.js index 7f6bf492df36..fd5566ce6c77 100644 --- a/stylelint.config.js +++ b/stylelint.config.js @@ -2,6 +2,9 @@ module.exports = { extends: '@edx/stylelint-config-edx', rules: { 'selector-anb-no-unmatchable': null, // Disable the unknown rule - 'no-descending-specificity': null + 'no-descending-specificity': null, + 'declaration-block-no-duplicate-properties': [true, { + ignore: ['consecutive-duplicates'] + }] } }; From ce7bd6687fd3b8844ba591c58208147e6a711907 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 2 Aug 2024 11:52:39 +0500 Subject: [PATCH 23/78] chore: fix tests --- .github/workflows/quality-checks.yml | 4 +++- Makefile | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 7e1a97615c94..7d8b45cd083c 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -67,7 +67,9 @@ jobs: run: npm ci - name: Run Python Quality Test - run: pycodestyle . + env: + PIP_SRC: ${{ runner.temp }} + run: make test-lint - name: Run Stylelint Tests env: diff --git a/Makefile b/Makefile index 75a601ec164a..733a3c13512f 100644 --- a/Makefile +++ b/Makefile @@ -210,3 +210,6 @@ test-eslint: test-stylelint: npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty + +test-lint: + pycodestyle . \ No newline at end of file From 8be4263c33e3a7a2f480aa1cc81a793209be946e Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 2 Aug 2024 12:00:52 +0500 Subject: [PATCH 24/78] chore: fix tests --- .github/workflows/quality-checks.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 7d8b45cd083c..d2d1d11919d6 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -67,6 +67,7 @@ jobs: run: npm ci - name: Run Python Quality Test + working-directory: ${{ github.workspace }} env: PIP_SRC: ${{ runner.temp }} run: make test-lint From 7f716d802bf64a410ec8a065b75c21d477cb918e Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 5 Aug 2024 12:36:57 +0500 Subject: [PATCH 25/78] fix: fixing tests --- .github/workflows/quality-checks.yml | 26 +++++++++++++------------- scripts/generic-ci-tests.sh | 22 +++++++++++----------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index d2d1d11919d6..f5ab5ebb12f4 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -66,21 +66,21 @@ jobs: PIP_SRC: ${{ runner.temp }} run: npm ci - - name: Run Python Quality Test - working-directory: ${{ github.workspace }} - env: - PIP_SRC: ${{ runner.temp }} - run: make test-lint + # - name: Run Python Quality Test + # working-directory: ${{ github.workspace }} + # env: + # PIP_SRC: ${{ runner.temp }} + # run: make test-lint - - name: Run Stylelint Tests - env: - PIP_SRC: ${{ runner.temp }} - run: make test-stylelint + # - name: Run Stylelint Tests + # env: + # PIP_SRC: ${{ runner.temp }} + # run: make test-stylelint - - name: Run Eslint Tests - env: - PIP_SRC: ${{ runner.temp }} - run: make test-eslint + # - name: Run Eslint Tests + # env: + # PIP_SRC: ${{ runner.temp }} + # run: make test-eslint - name: Run Quality Tests env: diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 54b9cbb9d500..d17d2e95a47c 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -78,17 +78,17 @@ case "$TEST_SUITE" in mkdir -p reports echo "Finding pycodestyle violations and storing report..." - run_paver_quality run_pep8 || { EXIT=1; } - echo "Finding ESLint violations and storing report..." - run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } - echo "Finding Stylelint violations and storing report..." - run_paver_quality run_stylelint || { EXIT=1; } - echo "Running xss linter report." - run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } - echo "Running PII checker on all Django models..." - run_paver_quality run_pii_check || { EXIT=1; } - echo "Running reserved keyword checker on all Django models..." - run_paver_quality check_keywords || { EXIT=1; } + pycodestyle . + # echo "Finding ESLint violations and storing report..." + # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } + # echo "Finding Stylelint violations and storing report..." + # run_paver_quality run_stylelint || { EXIT=1; } + # echo "Running xss linter report." + # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } + # echo "Running PII checker on all Django models..." + # run_paver_quality run_pii_check || { EXIT=1; } + # echo "Running reserved keyword checker on all Django models..." + # run_paver_quality check_keywords || { EXIT=1; } # Need to create an empty test result so the post-build # action doesn't fail the build. From f968721fc16476de69154c76fe7a370ff8915fe4 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 5 Aug 2024 12:48:30 +0500 Subject: [PATCH 26/78] fix: fixing tests --- .github/workflows/quality-checks.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index f5ab5ebb12f4..fdef948d8f41 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -66,6 +66,10 @@ jobs: PIP_SRC: ${{ runner.temp }} run: npm ci + - name: Debug Environment + run: | + python --version + pycodestyle --version # - name: Run Python Quality Test # working-directory: ${{ github.workspace }} # env: From d110d7857a9815f291b472048ff16ad89675347f Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 5 Aug 2024 12:57:33 +0500 Subject: [PATCH 27/78] chore: fix tests --- .github/workflows/quality-checks.yml | 30 ++++++++++++---------------- scripts/generic-ci-tests.sh | 22 ++++++++++---------- 2 files changed, 24 insertions(+), 28 deletions(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index fdef948d8f41..82853f3c3624 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -66,25 +66,21 @@ jobs: PIP_SRC: ${{ runner.temp }} run: npm ci - - name: Debug Environment - run: | - python --version - pycodestyle --version - # - name: Run Python Quality Test - # working-directory: ${{ github.workspace }} - # env: - # PIP_SRC: ${{ runner.temp }} - # run: make test-lint + - name: Run Python Quality Test + working-directory: ${{ github.workspace }} + env: + PIP_SRC: ${{ runner.temp }} + run: make test-lint - # - name: Run Stylelint Tests - # env: - # PIP_SRC: ${{ runner.temp }} - # run: make test-stylelint + - name: Run Stylelint Tests + env: + PIP_SRC: ${{ runner.temp }} + run: make test-stylelint - # - name: Run Eslint Tests - # env: - # PIP_SRC: ${{ runner.temp }} - # run: make test-eslint + - name: Run Eslint Tests + env: + PIP_SRC: ${{ runner.temp }} + run: make test-eslint - name: Run Quality Tests env: diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index d17d2e95a47c..54b9cbb9d500 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -78,17 +78,17 @@ case "$TEST_SUITE" in mkdir -p reports echo "Finding pycodestyle violations and storing report..." - pycodestyle . - # echo "Finding ESLint violations and storing report..." - # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } - # echo "Finding Stylelint violations and storing report..." - # run_paver_quality run_stylelint || { EXIT=1; } - # echo "Running xss linter report." - # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } - # echo "Running PII checker on all Django models..." - # run_paver_quality run_pii_check || { EXIT=1; } - # echo "Running reserved keyword checker on all Django models..." - # run_paver_quality check_keywords || { EXIT=1; } + run_paver_quality run_pep8 || { EXIT=1; } + echo "Finding ESLint violations and storing report..." + run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } + echo "Finding Stylelint violations and storing report..." + run_paver_quality run_stylelint || { EXIT=1; } + echo "Running xss linter report." + run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } + echo "Running PII checker on all Django models..." + run_paver_quality run_pii_check || { EXIT=1; } + echo "Running reserved keyword checker on all Django models..." + run_paver_quality check_keywords || { EXIT=1; } # Need to create an empty test result so the post-build # action doesn't fail the build. From 6219edfa91d9f7c306e2abda14db14fca12f5d25 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 15:37:34 +0500 Subject: [PATCH 28/78] chore: replacing paver script --- .github/workflows/quality-checks.yml | 26 +- Makefile | 2 +- scripts/generic-ci-tests.sh | 23 +- scripts/metrics/stylelint | 1 + scripts/quality_test.py | 780 +++++++++++++++++++++++++++ scripts/run_stylelint.sh | 44 ++ scripts/stylelint-results.xml | 1 + 7 files changed, 852 insertions(+), 25 deletions(-) create mode 100644 scripts/metrics/stylelint create mode 100644 scripts/quality_test.py create mode 100755 scripts/run_stylelint.sh create mode 100644 scripts/stylelint-results.xml diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 82853f3c3624..f5ab5ebb12f4 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -66,21 +66,21 @@ jobs: PIP_SRC: ${{ runner.temp }} run: npm ci - - name: Run Python Quality Test - working-directory: ${{ github.workspace }} - env: - PIP_SRC: ${{ runner.temp }} - run: make test-lint + # - name: Run Python Quality Test + # working-directory: ${{ github.workspace }} + # env: + # PIP_SRC: ${{ runner.temp }} + # run: make test-lint - - name: Run Stylelint Tests - env: - PIP_SRC: ${{ runner.temp }} - run: make test-stylelint + # - name: Run Stylelint Tests + # env: + # PIP_SRC: ${{ runner.temp }} + # run: make test-stylelint - - name: Run Eslint Tests - env: - PIP_SRC: ${{ runner.temp }} - run: make test-eslint + # - name: Run Eslint Tests + # env: + # PIP_SRC: ${{ runner.temp }} + # run: make test-eslint - name: Run Quality Tests env: diff --git a/Makefile b/Makefile index 733a3c13512f..aff05a7fcc1b 100644 --- a/Makefile +++ b/Makefile @@ -206,7 +206,7 @@ ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip ins sudo apt install libmysqlclient-dev libxmlsec1-dev test-eslint: - node node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . + npx node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . test-stylelint: npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 54b9cbb9d500..8455e5792c18 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -77,18 +77,19 @@ case "$TEST_SUITE" in mkdir -p reports - echo "Finding pycodestyle violations and storing report..." - run_paver_quality run_pep8 || { EXIT=1; } - echo "Finding ESLint violations and storing report..." - run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } + # echo "Finding pycodestyle violations and storing report..." + # run_paver_quality run_pep8 || { EXIT=1; } + # echo "Finding ESLint violations and storing report..." + # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." - run_paver_quality run_stylelint || { EXIT=1; } - echo "Running xss linter report." - run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } - echo "Running PII checker on all Django models..." - run_paver_quality run_pii_check || { EXIT=1; } - echo "Running reserved keyword checker on all Django models..." - run_paver_quality check_keywords || { EXIT=1; } + python quality_tests.py stylelint + # run_paver_quality run_stylelint || { EXIT=1; } + # echo "Running xss linter report." + # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } + # echo "Running PII checker on all Django models..." + # run_paver_quality run_pii_check || { EXIT=1; } + # echo "Running reserved keyword checker on all Django models..." + # run_paver_quality check_keywords || { EXIT=1; } # Need to create an empty test result so the post-build # action doesn't fail the build. diff --git a/scripts/metrics/stylelint b/scripts/metrics/stylelint new file mode 100644 index 000000000000..573541ac9702 --- /dev/null +++ b/scripts/metrics/stylelint @@ -0,0 +1 @@ +0 diff --git a/scripts/quality_test.py b/scripts/quality_test.py new file mode 100644 index 000000000000..dba2c6630ada --- /dev/null +++ b/scripts/quality_test.py @@ -0,0 +1,780 @@ +""" # lint-amnesty, pylint: disable=django-not-configured +Check code quality using pycodestyle, pylint, and diff_quality. +""" + +import json +import os +import re +import sys +import subprocess +import shutil + +import argparse +from pavelib.utils.envs import Env +# from pavelib.prereqs import install_node_prereqs +from pavelib.prereqs import install_python_prereqs +from pavelib.utils.test.utils import ensure_clean_package_lock +from datetime import datetime +from xml.sax.saxutils import quoteattr + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + + +JUNIT_XML_TEMPLATE = """ + +{failure_element} + +""" +JUNIT_XML_FAILURE_TEMPLATE = '' +START_TIME = datetime.utcnow() + + +class BuildFailure(Exception): + """Represents a problem with some part of the build's execution.""" + +def str2bool(s): + s = str(s) + return s.lower() in ('yes', 'true', 't', '1') + + +def no_prereq_install(): + """ + Determine if NO_PREREQ_INSTALL should be truthy or falsy. + """ + return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False')) + + +def install_node_prereqs(): + """ + Installs Node prerequisites + """ + if no_prereq_install(): + print(NO_PREREQ_MESSAGE) + return + + prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation) + + +def prereq_cache(cache_name, paths, install_func): + """ + Conditionally execute `install_func()` only if the files/directories + specified by `paths` have changed. + + If the code executes successfully (no exceptions are thrown), the cache + is updated with the new hash. + """ + # Retrieve the old hash + cache_filename = cache_name.replace(" ", "_") + cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1") + old_hash = None + if os.path.isfile(cache_file_path): + with open(cache_file_path) as cache_file: + old_hash = cache_file.read() + + # Compare the old hash to the new hash + # If they do not match (either the cache hasn't been created, or the files have changed), + # then execute the code within the block. + new_hash = compute_fingerprint(paths) + if new_hash != old_hash: + install_func() + + # Update the cache with the new hash + # If the code executed within the context fails (throws an exception), + # then this step won't get executed. + create_prereqs_cache_dir() + with open(cache_file_path, "wb") as cache_file: + # Since the pip requirement files are modified during the install + # process, we need to store the hash generated AFTER the installation + post_install_hash = compute_fingerprint(paths) + cache_file.write(post_install_hash.encode('utf-8')) + else: + print(f'{cache_name} unchanged, skipping...') + + +def node_prereqs_installation(): + """ + Configures npm and installs Node prerequisites + """ + # Before July 2023, these directories were created and written to + # as root. Afterwards, they are created as being owned by the + # `app` user -- but also need to be deleted by that user (due to + # how npm runs post-install scripts.) Developers with an older + # devstack installation who are reprovisioning will see errors + # here if the files are still owned by root. Deleting the files in + # advance prevents this error. + # + # This hack should probably be left in place for at least a year. + # See ADR 17 for more background on the transition. + # sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") + # At the time of this writing, the js dir has git-versioned files + # but the css dir does not, so the latter would have been created + # as root-owned (in the process of creating the vendor + # subdirectory). Delete it only if empty, just in case + # git-versioned files are added later. + # sh("rmdir common/static/common/css || true") + try: + shutil.rmtree("common/static/common/js/vendor/ common/static/common/css/vendor/") + os.rmdir("common/static/common/css") + except OSError: + pass + + # NPM installs hang sporadically. Log the installation process so that we + # determine if any packages are chronic offenders. + npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log' + npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with + npm_command = 'npm ci --verbose'.split() + + # The implementation of Paver's `sh` function returns before the forked + # actually returns. Using a Popen object so that we can ensure that + # the forked process has returned + proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with + retcode = proc.wait() + if retcode == 1: + raise Exception(f"npm install failed: See {npm_log_file_path}") + print("Successfully clean-installed NPM packages. Log found at {}".format( + npm_log_file_path + )) + +# def write_junit_xml(name, message=None): +# """ +# Write a JUnit results XML file describing the outcome of a quality check. +# """ +# if message: +# failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) +# else: +# failure_element = '' +# data = { +# 'failure_count': 1 if message else 0, +# 'failure_element': failure_element, +# 'name': name, +# 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), +# } +# Env.QUALITY_DIR.makedirs_p() +# filename = Env.QUALITY_DIR / f'{name}.xml' +# with open(filename, 'w') as f: +# f.write(JUNIT_XML_TEMPLATE.format(**data)) + + +def fail_quality(name, message): + """ + Fail the specified quality check by generating the JUnit XML results file + and raising a ``BuildFailure``. + """ + # write_junit_xml(name, message) + sys.exit() + + +# def _get_pep8_violations(clean=True): +# """ +# Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) +# where violations_string is a string of all PEP 8 violations found, separated +# by new lines. +# """ +# report_dir = (Env.REPORT_DIR / 'pep8') +# if clean: +# report_dir.rmtree(ignore_errors=True) +# report_dir.makedirs_p() +# report = report_dir / 'pep8.report' + +# # Make sure the metrics subdirectory exists +# Env.METRICS_DIR.makedirs_p() + +# if not report.exists(): +# # sh(f'pycodestyle . | tee {report} -a') +# with open(report, 'w') as f: +# result = subprocess.run( +# ['pycodestyle', '.'], +# stdout=subprocess.PIPE, +# stderr=subprocess.PIPE, +# check=False, +# text= True +# ) +# f.write(result.stdout) + +# violations_list = _pep8_violations(report) + +# return len(violations_list), violations_list + + +# def _pep8_violations(report_file): +# """ +# Returns the list of all PEP 8 violations in the given report_file. +# """ +# with open(report_file) as f: +# return f.readlines() + + +# def run_pep8(): # pylint: disable=unused-argument +# """ +# Run pycodestyle on system code. +# Fail the task if any violations are found. +# """ +# (count, violations_list) = _get_pep8_violations() +# violations_list = ''.join(violations_list) + +# # Print number of violations to log +# violations_count_str = f"Number of PEP 8 violations: {count}" +# print(violations_count_str) +# print(violations_list) + +# # Also write the number of violations to a file +# with open(Env.METRICS_DIR / "pep8", "w") as f: +# f.write(violations_count_str + '\n\n') +# f.write(violations_list) + +# # Fail if any violations are found +# if count: +# failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str +# failure_string += f"\n\nViolations:\n{violations_list}" +# fail_quality('pep8', failure_string) +# else: +# write_junit_xml('pep8') + + +def _prepare_report_dir(dir_name): + """ + Sets a given directory to a created, but empty state + """ + if os.path.isdir(dir_name): + shutil.rmtree(dir_name) + os.makedirs(dir_name, exist_ok=True) + + +def _write_metric(metric, filename): + """ + Write a given metric to a given file + Used for things like reports/metrics/eslint, which will simply tell you the number of + eslint violations found + """ + Env.METRICS_DIR.makedirs_p() + + with open(filename, "w") as metric_file: + metric_file.write(str(metric)) + + +def _get_report_contents(filename, report_name, last_line_only=False): + """ + Returns the contents of the given file. Use last_line_only to only return + the last line, which can be used for getting output from quality output + files. + + Arguments: + last_line_only: True to return the last line only, False to return a + string with full contents. + + Returns: + String containing full contents of the report, or the last line. + + """ + if os.path.isfile(filename): + with open(filename) as report_file: + if last_line_only: + lines = report_file.readlines() + for line in reversed(lines): + if line != '\n': + return line + return None + else: + return report_file.read() + else: + file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" + fail_quality(report_name, file_not_found_message) + + +def _get_count_from_last_line(filename, file_type): + """ + This will return the number in the last line of a file. + It is returning only the value (as a floating number). + """ + report_contents = _get_report_contents(filename, file_type, last_line_only=True) + + if report_contents is None: + return 0 + + last_line = report_contents.strip() + # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" + regex = r'^\d+' + + try: + return float(re.search(regex, last_line).group(0)) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + return None + + +def _get_stylelint_violations(): + """ + Returns the number of Stylelint violations. + """ + stylelint_report_dir = (Env.REPORT_DIR / "stylelint") + stylelint_report = stylelint_report_dir / "stylelint.report" + _prepare_report_dir(stylelint_report_dir) + formatter = 'node_modules/stylelint-formatter-pretty' + + command = f"stylelint **/*.scss --custom-formatter={formatter}" + with open(stylelint_report, 'w') as report_file: + result = subprocess.run( + command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + report_file.write(result.stdout) + + try: + return int(_get_count_from_last_line(stylelint_report, "stylelint")) + except TypeError: + fail_quality( + 'stylelint', + "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( + stylelint_report=stylelint_report + ) + ) + + +# def run_eslint(): +# """ +# Runs eslint on static asset directories. +# If limit option is passed, fails build if more violations than the limit are found. +# """ + +# eslint_report_dir = (Env.REPORT_DIR / "eslint") +# eslint_report = eslint_report_dir / "eslint.report" +# _prepare_report_dir(eslint_report_dir) +# violations_limit = 4950 + +# command = ( +# "node --max_old_space_size=4096 node_modules/.bin/eslint " +# "--ext .js --ext .jsx --format=compact ." +# ) +# with open(eslint_report, 'w') as report_file: +# # Run the command +# result = subprocess.run( +# command, +# shell=True, +# stdout=subprocess.PIPE, +# stderr=subprocess.PIPE, +# text=True, +# check=False +# ) + +# # Write the output to the report file +# report_file.write(result.stdout) + +# try: +# num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) +# except TypeError: +# fail_quality( +# 'eslint', +# "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( +# eslint_report=eslint_report +# ) +# ) + +# # Record the metric +# _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) + +# # Fail if number of violations is greater than the limit +# if num_violations > violations_limit > -1: +# fail_quality( +# 'eslint', +# "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( +# count=num_violations, violations_limit=violations_limit +# ) +# ) +# else: +# write_junit_xml('eslint') + + +def run_stylelint(): + """ + Runs stylelint on Sass files. + If limit option is passed, fails build if more violations than the limit are found. + """ + + violations_limit = 0 + num_violations = _get_stylelint_violations() + + # Record the metric + _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) + + # Fail if number of violations is greater than the limit + if num_violations > violations_limit: + fail_quality( + 'stylelint', + "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, + violations_limit=violations_limit, + ) + ) + # else: + # write_junit_xml('stylelint') + + +# def _extract_missing_pii_annotations(filename): +# """ +# Returns the number of uncovered models from the stdout report of django_find_annotations. + +# Arguments: +# filename: Filename where stdout of django_find_annotations was captured. + +# Returns: +# three-tuple containing: +# 1. The number of uncovered models, +# 2. A bool indicating whether the coverage is still below the threshold, and +# 3. The full report as a string. +# """ +# uncovered_models = 0 +# pii_check_passed = True +# if os.path.isfile(filename): +# with open(filename) as report_file: +# lines = report_file.readlines() + +# # Find the count of uncovered models. +# uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') +# for line in lines: +# uncovered_match = uncovered_regex.match(line) +# if uncovered_match: +# uncovered_models = int(uncovered_match.groups()[0]) +# break + +# # Find a message which suggests the check failed. +# failure_regex = re.compile(r'^Coverage threshold not met!') +# for line in lines: +# failure_match = failure_regex.match(line) +# if failure_match: +# pii_check_passed = False +# break + +# # Each line in lines already contains a newline. +# full_log = ''.join(lines) +# else: +# fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') + +# return (uncovered_models, pii_check_passed, full_log) + + +# def run_pii_check(): +# """ +# Guarantee that all Django models are PII-annotated. +# """ + +# pii_report_name = 'pii' +# default_report_dir = (Env.REPORT_DIR / pii_report_name) +# report_dir = default_report_dir +# output_file = os.path.join(report_dir, 'pii_check_{}.report') +# env_report = [] +# pii_check_passed = True +# for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): +# try: +# print() +# print(f"Running {env_name} PII Annotation check and report") +# print("-" * 45) +# run_output_file = str(output_file).format(env_name.lower()) +# os.makedirs(report_dir, exist_ok=True) +# command = ( +# "export DJANGO_SETTINGS_MODULE={env_settings_file};" +# "code_annotations django_find_annotations" +# "--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" +# "--lint --report --coverage | tee {run_output_file}" +# ) +# result = subprocess.run( +# command, +# shell=True, +# check=False, +# stdout=subprocess.PIPE, +# stderr=subprocess.PIPE, +# text=True +# ) + +# with open(run_output_file, 'w') as f: +# f.write(result.stdout) + +# uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) +# env_report.append(( +# uncovered_model_count, +# full_log, +# )) + +# except BuildFailure as error_message: +# fail_quality(pii_report_name, f'FAILURE: {error_message}') + +# if not pii_check_passed_env: +# pii_check_passed = False + +# # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. +# uncovered_count, full_log = max(env_report, key=lambda r: r[0]) + +# # Write metric file. +# if uncovered_count is None: +# uncovered_count = 0 +# metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" +# _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) + +# # Finally, fail the paver task if code_annotations suggests that the check failed. +# if not pii_check_passed: +# fail_quality('pii', full_log) + + +# def check_keywords(): +# """ +# Check Django model fields for names that conflict with a list of reserved keywords +# """ + +# report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') +# os.makedirs(report_path, exist_ok=True) + +# overall_status = True +# for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: +# report_file = f"{env}_reserved_keyword_report.csv" +# override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") +# try: +# command = ( +# "export DJANGO_SETTINGS_MODULE={env_settings_file};" +# "python manage.py {env} check_reserved_keywords" +# "--override_file {override_file}" +# "--report_path {report_path}" +# "--report_file {report_file}" +# ) +# result = subprocess.run( +# command, +# shell=True, +# check=True, +# stdout=subprocess.PIPE, +# stderr=subprocess.PIPE, +# text=True +# ) +# except BuildFailure: +# overall_status = False + +# if not overall_status: +# fail_quality( +# 'keywords', +# 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( +# report_path +# ) +# ) + + +# def _get_xsslint_counts(filename): +# """ +# This returns a dict of violations from the xsslint report. + +# Arguments: +# filename: The name of the xsslint report. + +# Returns: +# A dict containing the following: +# rules: A dict containing the count for each rule as follows: +# violation-rule-id: N, where N is the number of violations +# total: M, where M is the number of total violations + +# """ +# report_contents = _get_report_contents(filename, 'xsslint') +# rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) +# total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) +# violations = {'rules': {}} +# for violation_match in rule_count_regex.finditer(report_contents): +# try: +# violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) +# except ValueError: +# violations['rules'][violation_match.group('rule_id')] = None +# try: +# violations['total'] = int(total_count_regex.search(report_contents).group('count')) +# # An AttributeError will occur if the regex finds no matches. +# # A ValueError will occur if the returned regex cannot be cast as a float. +# except (AttributeError, ValueError): +# violations['total'] = None +# return violations + + +# def run_xsslint(): +# """ +# Runs xsslint/xss_linter.py on the codebase +# """ + +# try: +# thresholds_option = 'scripts/xsslint_thresholds.json' +# # Read the JSON file +# with open(thresholds_option, 'r') as file: +# violation_thresholds = json.load(file) + +# except ValueError: +# violation_thresholds = None +# if isinstance(violation_thresholds, dict) is False or \ +# any(key not in ("total", "rules") for key in violation_thresholds.keys()): + +# fail_quality( +# 'xsslint', +# """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" +# """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ +# """with property names in double-quotes.""".format( +# thresholds_option=thresholds_option +# ) +# ) + +# xsslint_script = "xss_linter.py" +# xsslint_report_dir = (Env.REPORT_DIR / "xsslint") +# xsslint_report = xsslint_report_dir / "xsslint.report" +# _prepare_report_dir(xsslint_report_dir) + +# # Prepare the command to run the xsslint script +# command = ( +# f"{Env.REPO_ROOT}/scripts/xsslint/{xsslint_script} " +# f"--rule-totals --config=scripts.xsslint_config >> {xsslint_report}" +# ) + +# result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) +# xsslint_counts = _get_xsslint_counts(xsslint_report) + +# try: +# metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( +# xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) +# ) +# if 'rules' in xsslint_counts and any(xsslint_counts['rules']): +# metrics_str += "\n" +# rule_keys = sorted(xsslint_counts['rules'].keys()) +# for rule in rule_keys: +# metrics_str += "{rule} violations: {count}\n".format( +# rule=rule, +# count=int(xsslint_counts['rules'][rule]) +# ) +# except TypeError: +# fail_quality( +# 'xsslint', +# "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( +# xsslint_script=xsslint_script, xsslint_report=xsslint_report +# ) +# ) + +# metrics_report = (Env.METRICS_DIR / "xsslint") +# # Record the metric +# _write_metric(metrics_str, metrics_report) +# # Print number of violations to log. +# command = f"cat {metrics_report}" +# # Print number of violations to log. +# subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + +# error_message = "" +# # Test total violations against threshold. +# if 'total' in list(violation_thresholds.keys()): +# if violation_thresholds['total'] < xsslint_counts['total']: +# error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( +# count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] +# ) + +# # Test rule violations against thresholds. +# if 'rules' in violation_thresholds: +# threshold_keys = sorted(violation_thresholds['rules'].keys()) +# for threshold_key in threshold_keys: +# if threshold_key not in xsslint_counts['rules']: +# error_message += ( +# "\nNumber of {xsslint_script} violations for {rule} could not be found in " +# "{xsslint_report}." +# ).format( +# xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report +# ) +# elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: +# error_message += \ +# "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( +# rule=threshold_key, count=xsslint_counts['rules'][threshold_key], +# violations_limit=violation_thresholds['rules'][threshold_key], +# ) + +# if error_message: +# fail_quality( +# 'xsslint', +# "FAILURE: XSSLinter Failed.\n{error_message}\n" +# "See {xsslint_report} or run the following command to hone in on the problem:\n" +# " ./scripts/xss-commit-linter.sh -h".format( +# error_message=error_message, xsslint_report=xsslint_report +# ) +# ) +# else: +# write_junit_xml('xsslint') + + +# def diff_coverage(): +# """ +# Build the diff coverage reports +# """ + +# compare_branch = 'origin/master' + +# # Find all coverage XML files (both Python and JavaScript) +# xml_reports = [] + +# for filepath in Env.REPORT_DIR.walk(): +# if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): +# xml_reports.append(filepath) + +# if not xml_reports: +# err_msg = colorize( +# 'red', +# "No coverage info found. Run `paver test` before running " +# "`paver coverage`.\n" +# ) +# sys.stderr.write(err_msg) +# else: +# xml_report_str = ' '.join(xml_reports) +# diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + +# # Generate the diff coverage reports (HTML and console) +# # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 +# command = ( +# f"diff-cover {xml_report_str}" +# f"--diff-range-notation '..'" +# f"--compare-branch={compare_branch} " +# f"--html-report {diff_html_path}" +# ) +# subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("command", choices=['pep8', 'eslint', 'stylelint', + 'xsslint', 'pii_check', 'check_keywords', 'all']) + + argument = parser.parse_args() + + # if argument.command == 'pep8': + # run_pep8() + + # elif argument.command == 'eslint': + # ensure_clean_package_lock() + # install_node_prereqs() + # run_eslint() + + if argument.command == 'stylelint': + install_node_prereqs() + run_stylelint() + + # elif argument.command == 'xsslint': + # install_python_prereqs() + # run_xsslint() + + # elif argument.command == 'pii_check': + # install_python_prereqs() + # run_pii_check() + + # elif argument.command == 'check_keywords': + # install_python_prereqs() + # check_keywords() + + # elif argument.command == 'all': + # run_pep8() + # ensure_clean_package_lock() + # install_node_prereqs() + # run_eslint() + # run_stylelint() + # run_xsslint() + # install_python_prereqs() + # run_pii_check() + # check_keywords() + # diff_coverage() diff --git a/scripts/run_stylelint.sh b/scripts/run_stylelint.sh new file mode 100755 index 000000000000..20a5d6fd07ef --- /dev/null +++ b/scripts/run_stylelint.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Function to run stylelint and handle violations +function run_stylelint() { + # Define the limit of violations + local violations_limit=0 + + # Run stylelint and count the number of violations + local num_violations + num_violations=$(stylelint "**/*.scss" | grep -c "warning\|error") + + # Record the metric + echo "$num_violations" > "$METRICS_DIR/stylelint" + + # Check if number of violations is greater than the limit + if [ "$num_violations" -gt "$violations_limit" ]; then + fail_quality "stylelint" "FAILURE: Stylelint failed with too many violations: ($num_violations).\nThe limit is $violations_limit." + else + write_junit_xml "stylelint" + fi +} + +# Function to fail the build quality +function fail_quality() { + local tool=$1 + local message=$2 + echo "$message" + exit 1 +} + +# Function to write JUnit XML (dummy function for this example) +function write_junit_xml() { + local tool=$1 + echo "" > "$tool-results.xml" +} + +# Set the METRICS_DIR environment variable (change as needed) +export METRICS_DIR="./metrics" + +# Create the metrics directory if it doesn't exist +mkdir -p "$METRICS_DIR" + +# Run the stylelint function +run_stylelint diff --git a/scripts/stylelint-results.xml b/scripts/stylelint-results.xml new file mode 100644 index 000000000000..ad72ae477a7b --- /dev/null +++ b/scripts/stylelint-results.xml @@ -0,0 +1 @@ + From a7607744702a19fe2ebd601280478ae1839a5bc6 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 15:53:18 +0500 Subject: [PATCH 29/78] chore: replacing paver script --- scripts/generic-ci-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 8455e5792c18..e17a20812791 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -82,7 +82,7 @@ case "$TEST_SUITE" in # echo "Finding ESLint violations and storing report..." # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." - python quality_tests.py stylelint + python scripts/quality_tests.py stylelint # run_paver_quality run_stylelint || { EXIT=1; } # echo "Running xss linter report." # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } From 391e623a342f4e3318afa7598d0092e21cebb688 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 16:00:35 +0500 Subject: [PATCH 30/78] chore: replacing paver script --- scripts/generic-ci-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index e17a20812791..b7977645ef4d 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -82,7 +82,7 @@ case "$TEST_SUITE" in # echo "Finding ESLint violations and storing report..." # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." - python scripts/quality_tests.py stylelint + python scripts/quality_test.py stylelint # run_paver_quality run_stylelint || { EXIT=1; } # echo "Running xss linter report." # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } From dbcb8c7e1cf4528f1b5e9830a0b763a949391a3c Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 16:04:37 +0500 Subject: [PATCH 31/78] chore: replacing paver script --- scripts/quality_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index dba2c6630ada..caff7e7bf132 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -30,6 +30,8 @@ """ JUNIT_XML_FAILURE_TEMPLATE = '' START_TIME = datetime.utcnow() +PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache') +NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs" class BuildFailure(Exception): From fab999e36974a1d89ed8d45f5f46ad1b33a14304 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 16:09:38 +0500 Subject: [PATCH 32/78] chore: replacing paver script --- scripts/quality_test.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index caff7e7bf132..1696bbb2f64b 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -60,6 +60,32 @@ def install_node_prereqs(): prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation) +def compute_fingerprint(path_list): + """ + Hash the contents of all the files and directories in `path_list`. + Returns the hex digest. + """ + + hasher = hashlib.sha1() + + for path_item in path_list: + + # For directories, create a hash based on the modification times + # of first-level subdirectories + if os.path.isdir(path_item): + for dirname in sorted(os.listdir(path_item)): + path_name = os.path.join(path_item, dirname) + if os.path.isdir(path_name): + hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8')) + + # For files, hash the contents of the file + if os.path.isfile(path_item): + with open(path_item, "rb") as file_handle: + hasher.update(file_handle.read()) + + return hasher.hexdigest() + + def prereq_cache(cache_name, paths, install_func): """ Conditionally execute `install_func()` only if the files/directories From 389f78dec910b72b7640331fadc53ac39a58fc65 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 16:14:01 +0500 Subject: [PATCH 33/78] chore: replacing paver script --- scripts/quality_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 1696bbb2f64b..84271a930016 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -2,6 +2,7 @@ Check code quality using pycodestyle, pylint, and diff_quality. """ +import hashlib import json import os import re @@ -84,7 +85,7 @@ def compute_fingerprint(path_list): hasher.update(file_handle.read()) return hasher.hexdigest() - + def prereq_cache(cache_name, paths, install_func): """ From 42159654c862ad8ac4c7436b6a9bc4c6bc25e921 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 16:18:11 +0500 Subject: [PATCH 34/78] chore: replacing paver script --- scripts/quality_test.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 84271a930016..0e2a5891d730 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -87,6 +87,15 @@ def compute_fingerprint(path_list): return hasher.hexdigest() +def create_prereqs_cache_dir(): + """Create the directory for storing the hashes, if it doesn't exist already.""" + try: + os.makedirs(PREREQS_STATE_DIR) + except OSError: + if not os.path.isdir(PREREQS_STATE_DIR): + raise + + def prereq_cache(cache_name, paths, install_func): """ Conditionally execute `install_func()` only if the files/directories From 96c1fd61fcc404ff304c82d6935ce4d3c0ba714a Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 16:31:42 +0500 Subject: [PATCH 35/78] chore: replacing paver script --- scripts/quality_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 0e2a5891d730..bba694b07c94 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -348,6 +348,7 @@ def _get_stylelint_violations(): """ Returns the number of Stylelint violations. """ + stylelint_report_dir = (Env.REPORT_DIR / "stylelint") stylelint_report = stylelint_report_dir / "stylelint.report" _prepare_report_dir(stylelint_report_dir) @@ -364,6 +365,7 @@ def _get_stylelint_violations(): text=True ) report_file.write(result.stdout) + print("_get_stylelint_violations") try: return int(_get_count_from_last_line(stylelint_report, "stylelint")) From 61ccb36281101fa14938d3a4ea93b12f6942dff2 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 7 Aug 2024 16:45:51 +0500 Subject: [PATCH 36/78] chore: replacing paver script --- scripts/quality_test.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index bba694b07c94..078d3bc86cc8 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -365,7 +365,8 @@ def _get_stylelint_violations(): text=True ) report_file.write(result.stdout) - print("_get_stylelint_violations") + print("_get_stylelint_violations ") + print(result.stdout) try: return int(_get_count_from_last_line(stylelint_report, "stylelint")) @@ -453,7 +454,8 @@ def run_stylelint(): violations_limit=violations_limit, ) ) - # else: + else: + print("script ran successfully") # write_junit_xml('stylelint') From 16257948477c9424b090655b6028a7c0ba14139a Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 15:06:50 +0500 Subject: [PATCH 37/78] chore: replace eslint --- Makefile | 4 +- scripts/generic-ci-tests.sh | 7 +- scripts/quality_test.py | 194 ++++++++++++++---------------------- 3 files changed, 80 insertions(+), 125 deletions(-) diff --git a/Makefile b/Makefile index aff05a7fcc1b..cab473dc32ef 100644 --- a/Makefile +++ b/Makefile @@ -206,10 +206,10 @@ ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip ins sudo apt install libmysqlclient-dev libxmlsec1-dev test-eslint: - npx node_modules/.bin/eslint --ext .js --ext .jsx --format=compact . + python scripts/quality_test.py eslint test-stylelint: - npx stylelint '**/*.scss' --custom-formatter=node_modules/stylelint-formatter-pretty + python scripts/quality_test.py stylelint test-lint: pycodestyle . \ No newline at end of file diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index b7977645ef4d..656cd6040672 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -79,10 +79,11 @@ case "$TEST_SUITE" in # echo "Finding pycodestyle violations and storing report..." # run_paver_quality run_pep8 || { EXIT=1; } - # echo "Finding ESLint violations and storing report..." + echo "Finding ESLint violations and storing report..." + make test-eslint # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } - echo "Finding Stylelint violations and storing report..." - python scripts/quality_test.py stylelint + # echo "Finding Stylelint violations and storing report..." + # make test-stylelint # run_paver_quality run_stylelint || { EXIT=1; } # echo "Running xss linter report." # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 078d3bc86cc8..f057ff76b224 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -14,7 +14,7 @@ from pavelib.utils.envs import Env # from pavelib.prereqs import install_node_prereqs from pavelib.prereqs import install_python_prereqs -from pavelib.utils.test.utils import ensure_clean_package_lock +# from pavelib.utils.test.utils import ensure_clean_package_lock from datetime import datetime from xml.sax.saxutils import quoteattr @@ -176,6 +176,26 @@ def node_prereqs_installation(): npm_log_file_path )) +def ensure_clean_package_lock(): + """ + Ensure no untracked changes have been made in the current git context. + """ + try: + # Run git diff command to check for changes in package-lock.json + result = subprocess.run( + ["git", "diff", "--name-only", "--exit-code", "package-lock.json"], + capture_output=True, # Capture stdout and stderr + text=True, # Decode output to text + check=True # Raise error for non-zero exit code + ) + # No differences found in package-lock.json + print("package-lock.json is clean.") + except subprocess.CalledProcessError as e: + # Git diff command returned non-zero exit code (changes detected) + print("Dirty package-lock.json, run 'npm install' and commit the generated changes.") + print(e.stderr) # Print any error output from the command + raise # Re-raise the exception to propagate the error + # def write_junit_xml(name, message=None): # """ # Write a JUnit results XML file describing the outcome of a quality check. @@ -205,73 +225,6 @@ def fail_quality(name, message): sys.exit() -# def _get_pep8_violations(clean=True): -# """ -# Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) -# where violations_string is a string of all PEP 8 violations found, separated -# by new lines. -# """ -# report_dir = (Env.REPORT_DIR / 'pep8') -# if clean: -# report_dir.rmtree(ignore_errors=True) -# report_dir.makedirs_p() -# report = report_dir / 'pep8.report' - -# # Make sure the metrics subdirectory exists -# Env.METRICS_DIR.makedirs_p() - -# if not report.exists(): -# # sh(f'pycodestyle . | tee {report} -a') -# with open(report, 'w') as f: -# result = subprocess.run( -# ['pycodestyle', '.'], -# stdout=subprocess.PIPE, -# stderr=subprocess.PIPE, -# check=False, -# text= True -# ) -# f.write(result.stdout) - -# violations_list = _pep8_violations(report) - -# return len(violations_list), violations_list - - -# def _pep8_violations(report_file): -# """ -# Returns the list of all PEP 8 violations in the given report_file. -# """ -# with open(report_file) as f: -# return f.readlines() - - -# def run_pep8(): # pylint: disable=unused-argument -# """ -# Run pycodestyle on system code. -# Fail the task if any violations are found. -# """ -# (count, violations_list) = _get_pep8_violations() -# violations_list = ''.join(violations_list) - -# # Print number of violations to log -# violations_count_str = f"Number of PEP 8 violations: {count}" -# print(violations_count_str) -# print(violations_list) - -# # Also write the number of violations to a file -# with open(Env.METRICS_DIR / "pep8", "w") as f: -# f.write(violations_count_str + '\n\n') -# f.write(violations_list) - -# # Fail if any violations are found -# if count: -# failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str -# failure_string += f"\n\nViolations:\n{violations_list}" -# fail_quality('pep8', failure_string) -# else: -# write_junit_xml('pep8') - - def _prepare_report_dir(dir_name): """ Sets a given directory to a created, but empty state @@ -379,58 +332,59 @@ def _get_stylelint_violations(): ) -# def run_eslint(): -# """ -# Runs eslint on static asset directories. -# If limit option is passed, fails build if more violations than the limit are found. -# """ - -# eslint_report_dir = (Env.REPORT_DIR / "eslint") -# eslint_report = eslint_report_dir / "eslint.report" -# _prepare_report_dir(eslint_report_dir) -# violations_limit = 4950 +def run_eslint(): + """ + Runs eslint on static asset directories. + If limit option is passed, fails build if more violations than the limit are found. + """ -# command = ( -# "node --max_old_space_size=4096 node_modules/.bin/eslint " -# "--ext .js --ext .jsx --format=compact ." -# ) -# with open(eslint_report, 'w') as report_file: -# # Run the command -# result = subprocess.run( -# command, -# shell=True, -# stdout=subprocess.PIPE, -# stderr=subprocess.PIPE, -# text=True, -# check=False -# ) + eslint_report_dir = (Env.REPORT_DIR / "eslint") + eslint_report = eslint_report_dir / "eslint.report" + _prepare_report_dir(eslint_report_dir) + violations_limit = 4950 + + command = ( + "node --max_old_space_size=4096 node_modules/.bin/eslint " + "--ext .js --ext .jsx --format=compact ." + ) + with open(eslint_report, 'w') as report_file: + # Run the command + result = subprocess.run( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + check=False + ) -# # Write the output to the report file -# report_file.write(result.stdout) + # Write the output to the report file + report_file.write(result.stdout) -# try: -# num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) -# except TypeError: -# fail_quality( -# 'eslint', -# "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( -# eslint_report=eslint_report -# ) -# ) + try: + num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) + except TypeError: + fail_quality( + 'eslint', + "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( + eslint_report=eslint_report + ) + ) -# # Record the metric -# _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) + # Record the metric + _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) -# # Fail if number of violations is greater than the limit -# if num_violations > violations_limit > -1: -# fail_quality( -# 'eslint', -# "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( -# count=num_violations, violations_limit=violations_limit -# ) -# ) -# else: -# write_junit_xml('eslint') + # Fail if number of violations is greater than the limit + if num_violations > violations_limit > -1: + fail_quality( + 'eslint', + "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, violations_limit=violations_limit + ) + ) + else: + print("successfully run eslint") + # write_junit_xml('eslint') def run_stylelint(): @@ -788,12 +742,12 @@ def run_stylelint(): # if argument.command == 'pep8': # run_pep8() - # elif argument.command == 'eslint': - # ensure_clean_package_lock() - # install_node_prereqs() - # run_eslint() + if argument.command == 'eslint': + ensure_clean_package_lock() + install_node_prereqs() + run_eslint() - if argument.command == 'stylelint': + elif argument.command == 'stylelint': install_node_prereqs() run_stylelint() From cc1fad571757c6227a0a0aa6269a1de6f7f95702 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 15:24:17 +0500 Subject: [PATCH 38/78] chore: replace stylelint --- scripts/generic-ci-tests.sh | 4 ++-- scripts/quality_test.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index 656cd6040672..aeec0985fa95 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -82,8 +82,8 @@ case "$TEST_SUITE" in echo "Finding ESLint violations and storing report..." make test-eslint # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } - # echo "Finding Stylelint violations and storing report..." - # make test-stylelint + echo "Finding Stylelint violations and storing report..." + make test-stylelint # run_paver_quality run_stylelint || { EXIT=1; } # echo "Running xss linter report." # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } diff --git a/scripts/quality_test.py b/scripts/quality_test.py index f057ff76b224..b953e86954a6 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -318,8 +318,6 @@ def _get_stylelint_violations(): text=True ) report_file.write(result.stdout) - print("_get_stylelint_violations ") - print(result.stdout) try: return int(_get_count_from_last_line(stylelint_report, "stylelint")) @@ -383,7 +381,8 @@ def run_eslint(): ) ) else: - print("successfully run eslint") + print("successfully run eslint with violations") + print(num_violations) # write_junit_xml('eslint') @@ -409,7 +408,8 @@ def run_stylelint(): ) ) else: - print("script ran successfully") + print("successfully run stylelint with violations") + print(num_violations) # write_junit_xml('stylelint') From e18bc166ad810450236806d41be152e5f467ea5c Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 16:19:18 +0500 Subject: [PATCH 39/78] chore: replace stylelint --- Makefile | 3 + scripts/generic-ci-tests.sh | 9 +- scripts/quality_test.py | 259 ++++++++++++++++++------------------ 3 files changed, 136 insertions(+), 135 deletions(-) diff --git a/Makefile b/Makefile index cab473dc32ef..dd5858af82fe 100644 --- a/Makefile +++ b/Makefile @@ -211,5 +211,8 @@ test-eslint: test-stylelint: python scripts/quality_test.py stylelint +test-xsslint: + python scripts/quality_test.py xsslint + test-lint: pycodestyle . \ No newline at end of file diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index aeec0985fa95..f66b7ef2dd81 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -77,15 +77,14 @@ case "$TEST_SUITE" in mkdir -p reports - # echo "Finding pycodestyle violations and storing report..." - # run_paver_quality run_pep8 || { EXIT=1; } + echo "Finding pycodestyle violations..." + make test-lint echo "Finding ESLint violations and storing report..." make test-eslint - # run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } echo "Finding Stylelint violations and storing report..." make test-stylelint - # run_paver_quality run_stylelint || { EXIT=1; } - # echo "Running xss linter report." + echo "Running xss linter report." + make test-xsslint # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } # echo "Running PII checker on all Django models..." # run_paver_quality run_pii_check || { EXIT=1; } diff --git a/scripts/quality_test.py b/scripts/quality_test.py index b953e86954a6..2b8276d4749a 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -558,142 +558,143 @@ def run_stylelint(): # ) -# def _get_xsslint_counts(filename): -# """ -# This returns a dict of violations from the xsslint report. +def _get_xsslint_counts(filename): + """ + This returns a dict of violations from the xsslint report. -# Arguments: -# filename: The name of the xsslint report. + Arguments: + filename: The name of the xsslint report. -# Returns: -# A dict containing the following: -# rules: A dict containing the count for each rule as follows: -# violation-rule-id: N, where N is the number of violations -# total: M, where M is the number of total violations + Returns: + A dict containing the following: + rules: A dict containing the count for each rule as follows: + violation-rule-id: N, where N is the number of violations + total: M, where M is the number of total violations -# """ -# report_contents = _get_report_contents(filename, 'xsslint') -# rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) -# total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) -# violations = {'rules': {}} -# for violation_match in rule_count_regex.finditer(report_contents): -# try: -# violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) -# except ValueError: -# violations['rules'][violation_match.group('rule_id')] = None -# try: -# violations['total'] = int(total_count_regex.search(report_contents).group('count')) -# # An AttributeError will occur if the regex finds no matches. -# # A ValueError will occur if the returned regex cannot be cast as a float. -# except (AttributeError, ValueError): -# violations['total'] = None -# return violations - - -# def run_xsslint(): -# """ -# Runs xsslint/xss_linter.py on the codebase -# """ + """ + report_contents = _get_report_contents(filename, 'xsslint') + rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) + total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) + violations = {'rules': {}} + for violation_match in rule_count_regex.finditer(report_contents): + try: + violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) + except ValueError: + violations['rules'][violation_match.group('rule_id')] = None + try: + violations['total'] = int(total_count_regex.search(report_contents).group('count')) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + violations['total'] = None + return violations -# try: -# thresholds_option = 'scripts/xsslint_thresholds.json' -# # Read the JSON file -# with open(thresholds_option, 'r') as file: -# violation_thresholds = json.load(file) -# except ValueError: -# violation_thresholds = None -# if isinstance(violation_thresholds, dict) is False or \ -# any(key not in ("total", "rules") for key in violation_thresholds.keys()): +def run_xsslint(): + """ + Runs xsslint/xss_linter.py on the codebase + """ -# fail_quality( -# 'xsslint', -# """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" -# """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ -# """with property names in double-quotes.""".format( -# thresholds_option=thresholds_option -# ) -# ) + try: + thresholds_option = 'scripts/xsslint_thresholds.json' + # Read the JSON file + with open(thresholds_option, 'r') as file: + violation_thresholds = json.load(file) -# xsslint_script = "xss_linter.py" -# xsslint_report_dir = (Env.REPORT_DIR / "xsslint") -# xsslint_report = xsslint_report_dir / "xsslint.report" -# _prepare_report_dir(xsslint_report_dir) + except ValueError: + violation_thresholds = None + if isinstance(violation_thresholds, dict) is False or \ + any(key not in ("total", "rules") for key in violation_thresholds.keys()): -# # Prepare the command to run the xsslint script -# command = ( -# f"{Env.REPO_ROOT}/scripts/xsslint/{xsslint_script} " -# f"--rule-totals --config=scripts.xsslint_config >> {xsslint_report}" -# ) + fail_quality( + 'xsslint', + """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" + """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ + """with property names in double-quotes.""".format( + thresholds_option=thresholds_option + ) + ) -# result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) -# xsslint_counts = _get_xsslint_counts(xsslint_report) + xsslint_script = "xss_linter.py" + xsslint_report_dir = (Env.REPORT_DIR / "xsslint") + xsslint_report = xsslint_report_dir / "xsslint.report" + _prepare_report_dir(xsslint_report_dir) -# try: -# metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( -# xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) -# ) -# if 'rules' in xsslint_counts and any(xsslint_counts['rules']): -# metrics_str += "\n" -# rule_keys = sorted(xsslint_counts['rules'].keys()) -# for rule in rule_keys: -# metrics_str += "{rule} violations: {count}\n".format( -# rule=rule, -# count=int(xsslint_counts['rules'][rule]) -# ) -# except TypeError: -# fail_quality( -# 'xsslint', -# "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( -# xsslint_script=xsslint_script, xsslint_report=xsslint_report -# ) -# ) + # Prepare the command to run the xsslint script + command = ( + f"{Env.REPO_ROOT}/scripts/xsslint/{xsslint_script} " + f"--rule-totals --config=scripts.xsslint_config >> {xsslint_report}" + ) -# metrics_report = (Env.METRICS_DIR / "xsslint") -# # Record the metric -# _write_metric(metrics_str, metrics_report) -# # Print number of violations to log. -# command = f"cat {metrics_report}" -# # Print number of violations to log. -# subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - -# error_message = "" -# # Test total violations against threshold. -# if 'total' in list(violation_thresholds.keys()): -# if violation_thresholds['total'] < xsslint_counts['total']: -# error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( -# count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] -# ) + result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + xsslint_counts = _get_xsslint_counts(xsslint_report) -# # Test rule violations against thresholds. -# if 'rules' in violation_thresholds: -# threshold_keys = sorted(violation_thresholds['rules'].keys()) -# for threshold_key in threshold_keys: -# if threshold_key not in xsslint_counts['rules']: -# error_message += ( -# "\nNumber of {xsslint_script} violations for {rule} could not be found in " -# "{xsslint_report}." -# ).format( -# xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report -# ) -# elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: -# error_message += \ -# "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( -# rule=threshold_key, count=xsslint_counts['rules'][threshold_key], -# violations_limit=violation_thresholds['rules'][threshold_key], -# ) - -# if error_message: -# fail_quality( -# 'xsslint', -# "FAILURE: XSSLinter Failed.\n{error_message}\n" -# "See {xsslint_report} or run the following command to hone in on the problem:\n" -# " ./scripts/xss-commit-linter.sh -h".format( -# error_message=error_message, xsslint_report=xsslint_report -# ) -# ) -# else: -# write_junit_xml('xsslint') + try: + metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( + xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) + ) + if 'rules' in xsslint_counts and any(xsslint_counts['rules']): + metrics_str += "\n" + rule_keys = sorted(xsslint_counts['rules'].keys()) + for rule in rule_keys: + metrics_str += "{rule} violations: {count}\n".format( + rule=rule, + count=int(xsslint_counts['rules'][rule]) + ) + except TypeError: + fail_quality( + 'xsslint', + "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( + xsslint_script=xsslint_script, xsslint_report=xsslint_report + ) + ) + + metrics_report = (Env.METRICS_DIR / "xsslint") + # Record the metric + _write_metric(metrics_str, metrics_report) + # Print number of violations to log. + command = f"cat {metrics_report}" + # Print number of violations to log. + subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + + error_message = "" + # Test total violations against threshold. + if 'total' in list(violation_thresholds.keys()): + if violation_thresholds['total'] < xsslint_counts['total']: + error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( + count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] + ) + + # Test rule violations against thresholds. + if 'rules' in violation_thresholds: + threshold_keys = sorted(violation_thresholds['rules'].keys()) + for threshold_key in threshold_keys: + if threshold_key not in xsslint_counts['rules']: + error_message += ( + "\nNumber of {xsslint_script} violations for {rule} could not be found in " + "{xsslint_report}." + ).format( + xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report + ) + elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: + error_message += \ + "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( + rule=threshold_key, count=xsslint_counts['rules'][threshold_key], + violations_limit=violation_thresholds['rules'][threshold_key], + ) + + if error_message: + fail_quality( + 'xsslint', + "FAILURE: XSSLinter Failed.\n{error_message}\n" + "See {xsslint_report} or run the following command to hone in on the problem:\n" + " ./scripts/xss-commit-linter.sh -h".format( + error_message=error_message, xsslint_report=xsslint_report + ) + ) + else: + print("successfully run xsslint") + # write_junit_xml('xsslint') # def diff_coverage(): @@ -739,9 +740,7 @@ def run_stylelint(): argument = parser.parse_args() - # if argument.command == 'pep8': - # run_pep8() - + if argument.command == 'eslint': ensure_clean_package_lock() install_node_prereqs() @@ -751,9 +750,9 @@ def run_stylelint(): install_node_prereqs() run_stylelint() - # elif argument.command == 'xsslint': - # install_python_prereqs() - # run_xsslint() + elif argument.command == 'xsslint': + install_python_prereqs() + run_xsslint() # elif argument.command == 'pii_check': # install_python_prereqs() From 2b6affbff4ddfafbbe91cdff634931f9e7875cdd Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 16:28:16 +0500 Subject: [PATCH 40/78] fix: fix tests --- scripts/quality_test.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 2b8276d4749a..5da7f5723e92 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -38,6 +38,7 @@ class BuildFailure(Exception): """Represents a problem with some part of the build's execution.""" + def str2bool(s): s = str(s) return s.lower() in ('yes', 'true', 't', '1') @@ -176,6 +177,7 @@ def node_prereqs_installation(): npm_log_file_path )) + def ensure_clean_package_lock(): """ Ensure no untracked changes have been made in the current git context. @@ -301,7 +303,7 @@ def _get_stylelint_violations(): """ Returns the number of Stylelint violations. """ - + stylelint_report_dir = (Env.REPORT_DIR / "stylelint") stylelint_report = stylelint_report_dir / "stylelint.report" _prepare_report_dir(stylelint_report_dir) @@ -736,11 +738,10 @@ def run_xsslint(): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("command", choices=['pep8', 'eslint', 'stylelint', - 'xsslint', 'pii_check', 'check_keywords', 'all']) + 'xsslint', 'pii_check', 'check_keywords', 'all']) argument = parser.parse_args() - if argument.command == 'eslint': ensure_clean_package_lock() install_node_prereqs() From 87068e0a1be7289174e138ab7786ad15eaeaab52 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 18:09:52 +0500 Subject: [PATCH 41/78] fix: fix tests --- .github/workflows/quality-checks.yml | 14 +- scripts/generic-ci-tests.sh | 16 +- scripts/quality_test.py | 288 +++++++++++++-------------- 3 files changed, 164 insertions(+), 154 deletions(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index f5ab5ebb12f4..0f7b9fbd62bb 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -65,7 +65,13 @@ jobs: env: PIP_SRC: ${{ runner.temp }} run: npm ci - + + - name: Install python packages + env: + PIP_SRC: ${{ runner.temp }} + run: | + pip install -e . + # - name: Run Python Quality Test # working-directory: ${{ github.workspace }} # env: @@ -89,7 +95,11 @@ jobs: PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} run: | - ./scripts/all-tests.sh + make test-lint + make test-eslint + make test-stylelint + make test-xsslint + ./scripts/all-tests.sh - name: Save Job Artifacts if: always() diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh index f66b7ef2dd81..cc4e7e3be586 100755 --- a/scripts/generic-ci-tests.sh +++ b/scripts/generic-ci-tests.sh @@ -77,14 +77,14 @@ case "$TEST_SUITE" in mkdir -p reports - echo "Finding pycodestyle violations..." - make test-lint - echo "Finding ESLint violations and storing report..." - make test-eslint - echo "Finding Stylelint violations and storing report..." - make test-stylelint - echo "Running xss linter report." - make test-xsslint + # echo "Finding pycodestyle violations..." + # make test-lint + # echo "Finding ESLint violations and storing report..." + # make test-eslint + # echo "Finding Stylelint violations and storing report..." + # make test-stylelint + # echo "Running xss linter report." + # make test-xsslint # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } # echo "Running PII checker on all Django models..." # run_paver_quality run_pii_check || { EXIT=1; } diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 5da7f5723e92..f2b13ba80bab 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -13,7 +13,7 @@ import argparse from pavelib.utils.envs import Env # from pavelib.prereqs import install_node_prereqs -from pavelib.prereqs import install_python_prereqs +# from pavelib.prereqs import install_python_prereqs # from pavelib.utils.test.utils import ensure_clean_package_lock from datetime import datetime from xml.sax.saxutils import quoteattr @@ -39,164 +39,164 @@ class BuildFailure(Exception): """Represents a problem with some part of the build's execution.""" -def str2bool(s): - s = str(s) - return s.lower() in ('yes', 'true', 't', '1') +# def str2bool(s): +# s = str(s) +# return s.lower() in ('yes', 'true', 't', '1') -def no_prereq_install(): - """ - Determine if NO_PREREQ_INSTALL should be truthy or falsy. - """ - return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False')) +# def no_prereq_install(): +# """ +# Determine if NO_PREREQ_INSTALL should be truthy or falsy. +# """ +# return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False')) -def install_node_prereqs(): - """ - Installs Node prerequisites - """ - if no_prereq_install(): - print(NO_PREREQ_MESSAGE) - return +# def install_node_prereqs(): +# """ +# Installs Node prerequisites +# """ +# if no_prereq_install(): +# print(NO_PREREQ_MESSAGE) +# return - prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation) +# prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation) -def compute_fingerprint(path_list): - """ - Hash the contents of all the files and directories in `path_list`. - Returns the hex digest. - """ +# def compute_fingerprint(path_list): +# """ +# Hash the contents of all the files and directories in `path_list`. +# Returns the hex digest. +# """ - hasher = hashlib.sha1() +# hasher = hashlib.sha1() - for path_item in path_list: +# for path_item in path_list: - # For directories, create a hash based on the modification times - # of first-level subdirectories - if os.path.isdir(path_item): - for dirname in sorted(os.listdir(path_item)): - path_name = os.path.join(path_item, dirname) - if os.path.isdir(path_name): - hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8')) +# # For directories, create a hash based on the modification times +# # of first-level subdirectories +# if os.path.isdir(path_item): +# for dirname in sorted(os.listdir(path_item)): +# path_name = os.path.join(path_item, dirname) +# if os.path.isdir(path_name): +# hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8')) - # For files, hash the contents of the file - if os.path.isfile(path_item): - with open(path_item, "rb") as file_handle: - hasher.update(file_handle.read()) +# # For files, hash the contents of the file +# if os.path.isfile(path_item): +# with open(path_item, "rb") as file_handle: +# hasher.update(file_handle.read()) - return hasher.hexdigest() +# return hasher.hexdigest() -def create_prereqs_cache_dir(): - """Create the directory for storing the hashes, if it doesn't exist already.""" - try: - os.makedirs(PREREQS_STATE_DIR) - except OSError: - if not os.path.isdir(PREREQS_STATE_DIR): - raise +# def create_prereqs_cache_dir(): +# """Create the directory for storing the hashes, if it doesn't exist already.""" +# try: +# os.makedirs(PREREQS_STATE_DIR) +# except OSError: +# if not os.path.isdir(PREREQS_STATE_DIR): +# raise -def prereq_cache(cache_name, paths, install_func): - """ - Conditionally execute `install_func()` only if the files/directories - specified by `paths` have changed. +# def prereq_cache(cache_name, paths, install_func): +# """ +# Conditionally execute `install_func()` only if the files/directories +# specified by `paths` have changed. - If the code executes successfully (no exceptions are thrown), the cache - is updated with the new hash. - """ - # Retrieve the old hash - cache_filename = cache_name.replace(" ", "_") - cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1") - old_hash = None - if os.path.isfile(cache_file_path): - with open(cache_file_path) as cache_file: - old_hash = cache_file.read() - - # Compare the old hash to the new hash - # If they do not match (either the cache hasn't been created, or the files have changed), - # then execute the code within the block. - new_hash = compute_fingerprint(paths) - if new_hash != old_hash: - install_func() - - # Update the cache with the new hash - # If the code executed within the context fails (throws an exception), - # then this step won't get executed. - create_prereqs_cache_dir() - with open(cache_file_path, "wb") as cache_file: - # Since the pip requirement files are modified during the install - # process, we need to store the hash generated AFTER the installation - post_install_hash = compute_fingerprint(paths) - cache_file.write(post_install_hash.encode('utf-8')) - else: - print(f'{cache_name} unchanged, skipping...') +# If the code executes successfully (no exceptions are thrown), the cache +# is updated with the new hash. +# """ +# # Retrieve the old hash +# cache_filename = cache_name.replace(" ", "_") +# cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1") +# old_hash = None +# if os.path.isfile(cache_file_path): +# with open(cache_file_path) as cache_file: +# old_hash = cache_file.read() + +# # Compare the old hash to the new hash +# # If they do not match (either the cache hasn't been created, or the files have changed), +# # then execute the code within the block. +# new_hash = compute_fingerprint(paths) +# if new_hash != old_hash: +# install_func() + +# # Update the cache with the new hash +# # If the code executed within the context fails (throws an exception), +# # then this step won't get executed. +# create_prereqs_cache_dir() +# with open(cache_file_path, "wb") as cache_file: +# # Since the pip requirement files are modified during the install +# # process, we need to store the hash generated AFTER the installation +# post_install_hash = compute_fingerprint(paths) +# cache_file.write(post_install_hash.encode('utf-8')) +# else: +# print(f'{cache_name} unchanged, skipping...') -def node_prereqs_installation(): - """ - Configures npm and installs Node prerequisites - """ - # Before July 2023, these directories were created and written to - # as root. Afterwards, they are created as being owned by the - # `app` user -- but also need to be deleted by that user (due to - # how npm runs post-install scripts.) Developers with an older - # devstack installation who are reprovisioning will see errors - # here if the files are still owned by root. Deleting the files in - # advance prevents this error. - # - # This hack should probably be left in place for at least a year. - # See ADR 17 for more background on the transition. - # sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") - # At the time of this writing, the js dir has git-versioned files - # but the css dir does not, so the latter would have been created - # as root-owned (in the process of creating the vendor - # subdirectory). Delete it only if empty, just in case - # git-versioned files are added later. - # sh("rmdir common/static/common/css || true") - try: - shutil.rmtree("common/static/common/js/vendor/ common/static/common/css/vendor/") - os.rmdir("common/static/common/css") - except OSError: - pass - - # NPM installs hang sporadically. Log the installation process so that we - # determine if any packages are chronic offenders. - npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log' - npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with - npm_command = 'npm ci --verbose'.split() - - # The implementation of Paver's `sh` function returns before the forked - # actually returns. Using a Popen object so that we can ensure that - # the forked process has returned - proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with - retcode = proc.wait() - if retcode == 1: - raise Exception(f"npm install failed: See {npm_log_file_path}") - print("Successfully clean-installed NPM packages. Log found at {}".format( - npm_log_file_path - )) - - -def ensure_clean_package_lock(): - """ - Ensure no untracked changes have been made in the current git context. - """ - try: - # Run git diff command to check for changes in package-lock.json - result = subprocess.run( - ["git", "diff", "--name-only", "--exit-code", "package-lock.json"], - capture_output=True, # Capture stdout and stderr - text=True, # Decode output to text - check=True # Raise error for non-zero exit code - ) - # No differences found in package-lock.json - print("package-lock.json is clean.") - except subprocess.CalledProcessError as e: - # Git diff command returned non-zero exit code (changes detected) - print("Dirty package-lock.json, run 'npm install' and commit the generated changes.") - print(e.stderr) # Print any error output from the command - raise # Re-raise the exception to propagate the error +# def node_prereqs_installation(): +# """ +# Configures npm and installs Node prerequisites +# """ +# # Before July 2023, these directories were created and written to +# # as root. Afterwards, they are created as being owned by the +# # `app` user -- but also need to be deleted by that user (due to +# # how npm runs post-install scripts.) Developers with an older +# # devstack installation who are reprovisioning will see errors +# # here if the files are still owned by root. Deleting the files in +# # advance prevents this error. +# # +# # This hack should probably be left in place for at least a year. +# # See ADR 17 for more background on the transition. +# # sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") +# # At the time of this writing, the js dir has git-versioned files +# # but the css dir does not, so the latter would have been created +# # as root-owned (in the process of creating the vendor +# # subdirectory). Delete it only if empty, just in case +# # git-versioned files are added later. +# # sh("rmdir common/static/common/css || true") +# try: +# shutil.rmtree("common/static/common/js/vendor/ common/static/common/css/vendor/") +# os.rmdir("common/static/common/css") +# except OSError: +# pass + +# # NPM installs hang sporadically. Log the installation process so that we +# # determine if any packages are chronic offenders. +# npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log' +# npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with +# npm_command = 'npm ci --verbose'.split() + +# # The implementation of Paver's `sh` function returns before the forked +# # actually returns. Using a Popen object so that we can ensure that +# # the forked process has returned +# proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with +# retcode = proc.wait() +# if retcode == 1: +# raise Exception(f"npm install failed: See {npm_log_file_path}") +# print("Successfully clean-installed NPM packages. Log found at {}".format( +# npm_log_file_path +# )) + + +# def ensure_clean_package_lock(): +# """ +# Ensure no untracked changes have been made in the current git context. +# """ +# try: +# # Run git diff command to check for changes in package-lock.json +# result = subprocess.run( +# ["git", "diff", "--name-only", "--exit-code", "package-lock.json"], +# capture_output=True, # Capture stdout and stderr +# text=True, # Decode output to text +# check=True # Raise error for non-zero exit code +# ) +# # No differences found in package-lock.json +# print("package-lock.json is clean.") +# except subprocess.CalledProcessError as e: +# # Git diff command returned non-zero exit code (changes detected) +# print("Dirty package-lock.json, run 'npm install' and commit the generated changes.") +# print(e.stderr) # Print any error output from the command +# raise # Re-raise the exception to propagate the error # def write_junit_xml(name, message=None): # """ @@ -743,16 +743,16 @@ def run_xsslint(): argument = parser.parse_args() if argument.command == 'eslint': - ensure_clean_package_lock() - install_node_prereqs() + # ensure_clean_package_lock() + # install_node_prereqs() run_eslint() elif argument.command == 'stylelint': - install_node_prereqs() + # install_node_prereqs() run_stylelint() elif argument.command == 'xsslint': - install_python_prereqs() + # install_python_prereqs() run_xsslint() # elif argument.command == 'pii_check': From e0ad851af339dc41d72f744d9a740aa3cdbff67b Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 22:38:24 +0500 Subject: [PATCH 42/78] chore: remove some code related to prereq installation of python packages --- scripts/quality_test.py | 304 +++++++--------------------------------- 1 file changed, 52 insertions(+), 252 deletions(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index f2b13ba80bab..afefdc75d968 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -2,7 +2,7 @@ Check code quality using pycodestyle, pylint, and diff_quality. """ -import hashlib +import argparse import json import os import re @@ -10,13 +10,8 @@ import subprocess import shutil -import argparse -from pavelib.utils.envs import Env -# from pavelib.prereqs import install_node_prereqs -# from pavelib.prereqs import install_python_prereqs -# from pavelib.utils.test.utils import ensure_clean_package_lock -from datetime import datetime -from xml.sax.saxutils import quoteattr +from path import Path as path +from time import sleep try: from pygments.console import colorize @@ -24,206 +19,14 @@ colorize = lambda color, text: text -JUNIT_XML_TEMPLATE = """ - -{failure_element} - -""" -JUNIT_XML_FAILURE_TEMPLATE = '' -START_TIME = datetime.utcnow() -PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache') -NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs" - - class BuildFailure(Exception): """Represents a problem with some part of the build's execution.""" -# def str2bool(s): -# s = str(s) -# return s.lower() in ('yes', 'true', 't', '1') - - -# def no_prereq_install(): -# """ -# Determine if NO_PREREQ_INSTALL should be truthy or falsy. -# """ -# return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False')) - - -# def install_node_prereqs(): -# """ -# Installs Node prerequisites -# """ -# if no_prereq_install(): -# print(NO_PREREQ_MESSAGE) -# return - -# prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation) - - -# def compute_fingerprint(path_list): -# """ -# Hash the contents of all the files and directories in `path_list`. -# Returns the hex digest. -# """ - -# hasher = hashlib.sha1() - -# for path_item in path_list: - -# # For directories, create a hash based on the modification times -# # of first-level subdirectories -# if os.path.isdir(path_item): -# for dirname in sorted(os.listdir(path_item)): -# path_name = os.path.join(path_item, dirname) -# if os.path.isdir(path_name): -# hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8')) - -# # For files, hash the contents of the file -# if os.path.isfile(path_item): -# with open(path_item, "rb") as file_handle: -# hasher.update(file_handle.read()) - -# return hasher.hexdigest() - - -# def create_prereqs_cache_dir(): -# """Create the directory for storing the hashes, if it doesn't exist already.""" -# try: -# os.makedirs(PREREQS_STATE_DIR) -# except OSError: -# if not os.path.isdir(PREREQS_STATE_DIR): -# raise - - -# def prereq_cache(cache_name, paths, install_func): -# """ -# Conditionally execute `install_func()` only if the files/directories -# specified by `paths` have changed. - -# If the code executes successfully (no exceptions are thrown), the cache -# is updated with the new hash. -# """ -# # Retrieve the old hash -# cache_filename = cache_name.replace(" ", "_") -# cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1") -# old_hash = None -# if os.path.isfile(cache_file_path): -# with open(cache_file_path) as cache_file: -# old_hash = cache_file.read() - -# # Compare the old hash to the new hash -# # If they do not match (either the cache hasn't been created, or the files have changed), -# # then execute the code within the block. -# new_hash = compute_fingerprint(paths) -# if new_hash != old_hash: -# install_func() - -# # Update the cache with the new hash -# # If the code executed within the context fails (throws an exception), -# # then this step won't get executed. -# create_prereqs_cache_dir() -# with open(cache_file_path, "wb") as cache_file: -# # Since the pip requirement files are modified during the install -# # process, we need to store the hash generated AFTER the installation -# post_install_hash = compute_fingerprint(paths) -# cache_file.write(post_install_hash.encode('utf-8')) -# else: -# print(f'{cache_name} unchanged, skipping...') - - -# def node_prereqs_installation(): -# """ -# Configures npm and installs Node prerequisites -# """ -# # Before July 2023, these directories were created and written to -# # as root. Afterwards, they are created as being owned by the -# # `app` user -- but also need to be deleted by that user (due to -# # how npm runs post-install scripts.) Developers with an older -# # devstack installation who are reprovisioning will see errors -# # here if the files are still owned by root. Deleting the files in -# # advance prevents this error. -# # -# # This hack should probably be left in place for at least a year. -# # See ADR 17 for more background on the transition. -# # sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") -# # At the time of this writing, the js dir has git-versioned files -# # but the css dir does not, so the latter would have been created -# # as root-owned (in the process of creating the vendor -# # subdirectory). Delete it only if empty, just in case -# # git-versioned files are added later. -# # sh("rmdir common/static/common/css || true") -# try: -# shutil.rmtree("common/static/common/js/vendor/ common/static/common/css/vendor/") -# os.rmdir("common/static/common/css") -# except OSError: -# pass - -# # NPM installs hang sporadically. Log the installation process so that we -# # determine if any packages are chronic offenders. -# npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log' -# npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with -# npm_command = 'npm ci --verbose'.split() - -# # The implementation of Paver's `sh` function returns before the forked -# # actually returns. Using a Popen object so that we can ensure that -# # the forked process has returned -# proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with -# retcode = proc.wait() -# if retcode == 1: -# raise Exception(f"npm install failed: See {npm_log_file_path}") -# print("Successfully clean-installed NPM packages. Log found at {}".format( -# npm_log_file_path -# )) - - -# def ensure_clean_package_lock(): -# """ -# Ensure no untracked changes have been made in the current git context. -# """ -# try: -# # Run git diff command to check for changes in package-lock.json -# result = subprocess.run( -# ["git", "diff", "--name-only", "--exit-code", "package-lock.json"], -# capture_output=True, # Capture stdout and stderr -# text=True, # Decode output to text -# check=True # Raise error for non-zero exit code -# ) -# # No differences found in package-lock.json -# print("package-lock.json is clean.") -# except subprocess.CalledProcessError as e: -# # Git diff command returned non-zero exit code (changes detected) -# print("Dirty package-lock.json, run 'npm install' and commit the generated changes.") -# print(e.stderr) # Print any error output from the command -# raise # Re-raise the exception to propagate the error - -# def write_junit_xml(name, message=None): -# """ -# Write a JUnit results XML file describing the outcome of a quality check. -# """ -# if message: -# failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) -# else: -# failure_element = '' -# data = { -# 'failure_count': 1 if message else 0, -# 'failure_element': failure_element, -# 'name': name, -# 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), -# } -# Env.QUALITY_DIR.makedirs_p() -# filename = Env.QUALITY_DIR / f'{name}.xml' -# with open(filename, 'w') as f: -# f.write(JUNIT_XML_TEMPLATE.format(**data)) - - def fail_quality(name, message): """ - Fail the specified quality check by generating the JUnit XML results file - and raising a ``BuildFailure``. + Fail the specified quality check. """ - # write_junit_xml(name, message) sys.exit() @@ -236,16 +39,43 @@ def _prepare_report_dir(dir_name): os.makedirs(dir_name, exist_ok=True) -def _write_metric(metric, filename): +def repo_root(): """ - Write a given metric to a given file - Used for things like reports/metrics/eslint, which will simply tell you the number of - eslint violations found + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 """ - Env.METRICS_DIR.makedirs_p() + file_path = path(__file__) + attempt = 1 + while True: + try: + absolute_path = file_path.abspath() + break + except OSError: + print(f'Attempt {attempt}/180 to get an absolute path failed') + if attempt < 180: + attempt += 1 + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise + return absolute_path.parent.parent.parent + + +# def _write_metric(metric, filename): +# """ +# Write a given metric to a given file +# Used for things like reports/metrics/eslint, which will simply tell you the number of +# eslint violations found +# """ +# Env.METRICS_DIR.makedirs_p() - with open(filename, "w") as metric_file: - metric_file.write(str(metric)) +# with open(filename, "w") as metric_file: +# metric_file.write(str(metric)) def _get_report_contents(filename, report_name, last_line_only=False): @@ -303,8 +133,9 @@ def _get_stylelint_violations(): """ Returns the number of Stylelint violations. """ - - stylelint_report_dir = (Env.REPORT_DIR / "stylelint") + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + stylelint_report_dir = (REPORT_DIR / "stylelint") stylelint_report = stylelint_report_dir / "stylelint.report" _prepare_report_dir(stylelint_report_dir) formatter = 'node_modules/stylelint-formatter-pretty' @@ -337,8 +168,10 @@ def run_eslint(): Runs eslint on static asset directories. If limit option is passed, fails build if more violations than the limit are found. """ - - eslint_report_dir = (Env.REPORT_DIR / "eslint") + + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + eslint_report_dir = (REPORT_DIR / "eslint") eslint_report = eslint_report_dir / "eslint.report" _prepare_report_dir(eslint_report_dir) violations_limit = 4950 @@ -371,9 +204,6 @@ def run_eslint(): ) ) - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) - # Fail if number of violations is greater than the limit if num_violations > violations_limit > -1: fail_quality( @@ -385,7 +215,6 @@ def run_eslint(): else: print("successfully run eslint with violations") print(num_violations) - # write_junit_xml('eslint') def run_stylelint(): @@ -397,9 +226,6 @@ def run_stylelint(): violations_limit = 0 num_violations = _get_stylelint_violations() - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) - # Fail if number of violations is greater than the limit if num_violations > violations_limit: fail_quality( @@ -412,7 +238,6 @@ def run_stylelint(): else: print("successfully run stylelint with violations") print(num_violations) - # write_junit_xml('stylelint') # def _extract_missing_pii_annotations(filename): @@ -618,13 +443,15 @@ def run_xsslint(): ) xsslint_script = "xss_linter.py" - xsslint_report_dir = (Env.REPORT_DIR / "xsslint") + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + xsslint_report_dir = (REPORT_DIR / "xsslint") xsslint_report = xsslint_report_dir / "xsslint.report" _prepare_report_dir(xsslint_report_dir) # Prepare the command to run the xsslint script command = ( - f"{Env.REPO_ROOT}/scripts/xsslint/{xsslint_script} " + f"{REPO_ROOT}/scripts/xsslint/{xsslint_script} " f"--rule-totals --config=scripts.xsslint_config >> {xsslint_report}" ) @@ -651,14 +478,6 @@ def run_xsslint(): ) ) - metrics_report = (Env.METRICS_DIR / "xsslint") - # Record the metric - _write_metric(metrics_str, metrics_report) - # Print number of violations to log. - command = f"cat {metrics_report}" - # Print number of violations to log. - subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - error_message = "" # Test total violations against threshold. if 'total' in list(violation_thresholds.keys()): @@ -696,7 +515,6 @@ def run_xsslint(): ) else: print("successfully run xsslint") - # write_junit_xml('xsslint') # def diff_coverage(): @@ -737,40 +555,22 @@ def run_xsslint(): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("command", choices=['pep8', 'eslint', 'stylelint', - 'xsslint', 'pii_check', 'check_keywords', 'all']) + parser.add_argument("command", choices=['eslint', 'stylelint', + 'xsslint', 'pii_check', 'check_keywords']) argument = parser.parse_args() if argument.command == 'eslint': - # ensure_clean_package_lock() - # install_node_prereqs() run_eslint() elif argument.command == 'stylelint': - # install_node_prereqs() run_stylelint() elif argument.command == 'xsslint': - # install_python_prereqs() run_xsslint() # elif argument.command == 'pii_check': - # install_python_prereqs() # run_pii_check() # elif argument.command == 'check_keywords': - # install_python_prereqs() - # check_keywords() - - # elif argument.command == 'all': - # run_pep8() - # ensure_clean_package_lock() - # install_node_prereqs() - # run_eslint() - # run_stylelint() - # run_xsslint() - # install_python_prereqs() - # run_pii_check() - # check_keywords() - # diff_coverage() + # check_keywords() \ No newline at end of file From 747522b56a52945fb9b7bfd8fbf7ba8d467e3564 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 22:44:27 +0500 Subject: [PATCH 43/78] fix: fix tests --- scripts/quality_test.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index afefdc75d968..3d49fa9b6c60 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -168,7 +168,7 @@ def run_eslint(): Runs eslint on static asset directories. If limit option is passed, fails build if more violations than the limit are found. """ - + REPO_ROOT = repo_root() REPORT_DIR = REPO_ROOT / 'reports' eslint_report_dir = (REPORT_DIR / "eslint") @@ -573,4 +573,5 @@ def run_xsslint(): # run_pii_check() # elif argument.command == 'check_keywords': - # check_keywords() \ No newline at end of file + # check_keywords() + \ No newline at end of file From 73934b6896d1bd3304d7c4ac7da38336e7e1eb4a Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 8 Aug 2024 23:20:32 +0500 Subject: [PATCH 44/78] fix: fix tests --- scripts/quality_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 3d49fa9b6c60..815d26c8f449 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -574,4 +574,3 @@ def run_xsslint(): # elif argument.command == 'check_keywords': # check_keywords() - \ No newline at end of file From bf1d7326644495ca3e2565cfa657c40b5373945b Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 9 Aug 2024 15:23:07 +0500 Subject: [PATCH 45/78] fix: fix tests --- .github/workflows/quality-checks.yml | 2 + Makefile | 16 +- scripts/quality_test.py | 365 +++++++++++++-------------- 3 files changed, 191 insertions(+), 192 deletions(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 0f7b9fbd62bb..b5fdb5f4d025 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -99,6 +99,8 @@ jobs: make test-eslint make test-stylelint make test-xsslint + make test-pi_check + make test-check_keyword ./scripts/all-tests.sh - name: Save Job Artifacts diff --git a/Makefile b/Makefile index dd5858af82fe..76e7e7bea6c7 100644 --- a/Makefile +++ b/Makefile @@ -215,4 +215,18 @@ test-xsslint: python scripts/quality_test.py xsslint test-lint: - pycodestyle . \ No newline at end of file + pycodestyle . + +test-pi_check: + python scripts/quality_test.py pii_check + +test-check_keyword: + python scripts/quality_test.py check_keywords + +quality-test: + test-lint + test-eslint + test-stylelint + test-xsslint + test-pi_check + test-check_keyword diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 815d26c8f449..de3335af78fd 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -27,6 +27,8 @@ def fail_quality(name, message): """ Fail the specified quality check. """ + print(name) + print(message) sys.exit() @@ -66,18 +68,6 @@ def repo_root(): return absolute_path.parent.parent.parent -# def _write_metric(metric, filename): -# """ -# Write a given metric to a given file -# Used for things like reports/metrics/eslint, which will simply tell you the number of -# eslint violations found -# """ -# Env.METRICS_DIR.makedirs_p() - -# with open(filename, "w") as metric_file: -# metric_file.write(str(metric)) - - def _get_report_contents(filename, report_name, last_line_only=False): """ Returns the contents of the given file. Use last_line_only to only return @@ -240,149 +230,141 @@ def run_stylelint(): print(num_violations) -# def _extract_missing_pii_annotations(filename): -# """ -# Returns the number of uncovered models from the stdout report of django_find_annotations. - -# Arguments: -# filename: Filename where stdout of django_find_annotations was captured. - -# Returns: -# three-tuple containing: -# 1. The number of uncovered models, -# 2. A bool indicating whether the coverage is still below the threshold, and -# 3. The full report as a string. -# """ -# uncovered_models = 0 -# pii_check_passed = True -# if os.path.isfile(filename): -# with open(filename) as report_file: -# lines = report_file.readlines() - -# # Find the count of uncovered models. -# uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') -# for line in lines: -# uncovered_match = uncovered_regex.match(line) -# if uncovered_match: -# uncovered_models = int(uncovered_match.groups()[0]) -# break - -# # Find a message which suggests the check failed. -# failure_regex = re.compile(r'^Coverage threshold not met!') -# for line in lines: -# failure_match = failure_regex.match(line) -# if failure_match: -# pii_check_passed = False -# break - -# # Each line in lines already contains a newline. -# full_log = ''.join(lines) -# else: -# fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') - -# return (uncovered_models, pii_check_passed, full_log) - - -# def run_pii_check(): -# """ -# Guarantee that all Django models are PII-annotated. -# """ - -# pii_report_name = 'pii' -# default_report_dir = (Env.REPORT_DIR / pii_report_name) -# report_dir = default_report_dir -# output_file = os.path.join(report_dir, 'pii_check_{}.report') -# env_report = [] -# pii_check_passed = True -# for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): -# try: -# print() -# print(f"Running {env_name} PII Annotation check and report") -# print("-" * 45) -# run_output_file = str(output_file).format(env_name.lower()) -# os.makedirs(report_dir, exist_ok=True) -# command = ( -# "export DJANGO_SETTINGS_MODULE={env_settings_file};" -# "code_annotations django_find_annotations" -# "--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" -# "--lint --report --coverage | tee {run_output_file}" -# ) -# result = subprocess.run( -# command, -# shell=True, -# check=False, -# stdout=subprocess.PIPE, -# stderr=subprocess.PIPE, -# text=True -# ) - -# with open(run_output_file, 'w') as f: -# f.write(result.stdout) - -# uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) -# env_report.append(( -# uncovered_model_count, -# full_log, -# )) - -# except BuildFailure as error_message: -# fail_quality(pii_report_name, f'FAILURE: {error_message}') - -# if not pii_check_passed_env: -# pii_check_passed = False - -# # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. -# uncovered_count, full_log = max(env_report, key=lambda r: r[0]) - -# # Write metric file. -# if uncovered_count is None: -# uncovered_count = 0 -# metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" -# _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) - -# # Finally, fail the paver task if code_annotations suggests that the check failed. -# if not pii_check_passed: -# fail_quality('pii', full_log) - - -# def check_keywords(): -# """ -# Check Django model fields for names that conflict with a list of reserved keywords -# """ - -# report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') -# os.makedirs(report_path, exist_ok=True) - -# overall_status = True -# for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: -# report_file = f"{env}_reserved_keyword_report.csv" -# override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") -# try: -# command = ( -# "export DJANGO_SETTINGS_MODULE={env_settings_file};" -# "python manage.py {env} check_reserved_keywords" -# "--override_file {override_file}" -# "--report_path {report_path}" -# "--report_file {report_file}" -# ) -# result = subprocess.run( -# command, -# shell=True, -# check=True, -# stdout=subprocess.PIPE, -# stderr=subprocess.PIPE, -# text=True -# ) -# except BuildFailure: -# overall_status = False - -# if not overall_status: -# fail_quality( -# 'keywords', -# 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( -# report_path -# ) -# ) +def _extract_missing_pii_annotations(filename): + """ + Returns the number of uncovered models from the stdout report of django_find_annotations. + + Arguments: + filename: Filename where stdout of django_find_annotations was captured. + + Returns: + three-tuple containing: + 1. The number of uncovered models, + 2. A bool indicating whether the coverage is still below the threshold, and + 3. The full report as a string. + """ + uncovered_models = 0 + pii_check_passed = True + if os.path.isfile(filename): + with open(filename) as report_file: + lines = report_file.readlines() + + # Find the count of uncovered models. + uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') + for line in lines: + uncovered_match = uncovered_regex.match(line) + if uncovered_match: + uncovered_models = int(uncovered_match.groups()[0]) + break + + # Find a message which suggests the check failed. + failure_regex = re.compile(r'^Coverage threshold not met!') + for line in lines: + failure_match = failure_regex.match(line) + if failure_match: + pii_check_passed = False + break + + # Each line in lines already contains a newline. + full_log = ''.join(lines) + else: + fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') + + return (uncovered_models, pii_check_passed, full_log) + + +def run_pii_check(): + """ + Guarantee that all Django models are PII-annotated. + """ + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + pii_report_name = 'pii' + default_report_dir = (REPORT_DIR / pii_report_name) + report_dir = default_report_dir + output_file = os.path.join(report_dir, 'pii_check_{}.report') + env_report = [] + pii_check_passed = True + for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): + try: + print(f"Running {env_name} PII Annotation check and report") + print("-" * 45) + run_output_file = str(output_file).format(env_name.lower()) + os.makedirs(report_dir, exist_ok=True) + command = ( + "export DJANGO_SETTINGS_MODULE={env_settings_file};" + "code_annotations django_find_annotations" + "--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" + "--lint --report --coverage | tee {run_output_file}" + ) + result = subprocess.run( + command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + + with open(run_output_file, 'w') as f: + f.write(result.stdout) + + uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) + env_report.append(( + uncovered_model_count, + full_log, + )) + + except BuildFailure as error_message: + fail_quality(pii_report_name, f'FAILURE: {error_message}') + + if not pii_check_passed_env: + pii_check_passed = False + + # Finally, fail the paver task if code_annotations suggests that the check failed. + if not pii_check_passed: + fail_quality('pii', full_log) + + +def check_keywords(): + """ + Check Django model fields for names that conflict with a list of reserved keywords + """ + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + report_path = os.path.join(REPORT_DIR, 'reserved_keywords') + os.makedirs(report_path, exist_ok=True) + + overall_status = True + for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: + report_file = f"{env}_reserved_keyword_report.csv" + override_file = os.path.join(REPO_ROOT, "db_keyword_overrides.yml") + try: + command = ( + f"export DJANGO_SETTINGS_MODULE={env_settings_file};" + f"python manage.py {env} check_reserved_keywords" + f"--override_file {override_file}" + f"--report_path {report_path}" + f"--report_file {report_file}" + ) + result = subprocess.run( + command, + shell=True, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + except BuildFailure: + overall_status = False + + if not overall_status: + fail_quality( + 'keywords', + 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( + report_path + ) + ) def _get_xsslint_counts(filename): @@ -517,40 +499,41 @@ def run_xsslint(): print("successfully run xsslint") -# def diff_coverage(): -# """ -# Build the diff coverage reports -# """ - -# compare_branch = 'origin/master' - -# # Find all coverage XML files (both Python and JavaScript) -# xml_reports = [] - -# for filepath in Env.REPORT_DIR.walk(): -# if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): -# xml_reports.append(filepath) +def diff_coverage(): + """ + Build the diff coverage reports + """ -# if not xml_reports: -# err_msg = colorize( -# 'red', -# "No coverage info found. Run `paver test` before running " -# "`paver coverage`.\n" -# ) -# sys.stderr.write(err_msg) -# else: -# xml_report_str = ' '.join(xml_reports) -# diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + compare_branch = 'origin/master' -# # Generate the diff coverage reports (HTML and console) -# # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 -# command = ( -# f"diff-cover {xml_report_str}" -# f"--diff-range-notation '..'" -# f"--compare-branch={compare_branch} " -# f"--html-report {diff_html_path}" -# ) -# subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + # Find all coverage XML files (both Python and JavaScript) + xml_reports = [] + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + for filepath in REPORT_DIR.walk(): + if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): + xml_reports.append(filepath) + + if not xml_reports: + err_msg = colorize( + 'red', + "No coverage info found. Run `quality test` before running " + "`coverage test`.\n" + ) + sys.stderr.write(err_msg) + else: + xml_report_str = ' '.join(xml_reports) + diff_html_path = os.path.join(REPORT_DIR, 'diff_coverage_combined.html') + + # Generate the diff coverage reports (HTML and console) + # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 + command = ( + f"diff-cover {xml_report_str}" + f"--diff-range-notation '..'" + f"--compare-branch={compare_branch} " + f"--html-report {diff_html_path}" + ) + subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) if __name__ == "__main__": @@ -569,8 +552,8 @@ def run_xsslint(): elif argument.command == 'xsslint': run_xsslint() - # elif argument.command == 'pii_check': - # run_pii_check() + elif argument.command == 'pii_check': + run_pii_check() - # elif argument.command == 'check_keywords': - # check_keywords() + elif argument.command == 'check_keywords': + check_keywords() From 034153571ef3c5734cef193bf9017a32b7f72391 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 9 Aug 2024 15:35:09 +0500 Subject: [PATCH 46/78] fix: fix tests --- scripts/quality_test.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index de3335af78fd..11cd8b7be8c5 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -341,12 +341,13 @@ def check_keywords(): override_file = os.path.join(REPO_ROOT, "db_keyword_overrides.yml") try: command = ( - f"export DJANGO_SETTINGS_MODULE={env_settings_file};" - f"python manage.py {env} check_reserved_keywords" - f"--override_file {override_file}" - f"--report_path {report_path}" + f"export DJANGO_SETTINGS_MODULE={env_settings_file}; " + f"python manage.py {env} check_reserved_keywords " + f"--override_file {override_file} " + f"--report_path {report_path} " f"--report_file {report_file}" ) + result = subprocess.run( command, shell=True, From 3ef338370233c5e4d5c326fda33690ebdca6629e Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 9 Aug 2024 16:27:11 +0500 Subject: [PATCH 47/78] fix: fix tests --- scripts/quality_test.py | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/scripts/quality_test.py b/scripts/quality_test.py index 11cd8b7be8c5..19310ab690a9 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -9,8 +9,7 @@ import sys import subprocess import shutil - -from path import Path as path +from pathlib import Path from time import sleep try: @@ -51,21 +50,20 @@ def repo_root(): https://openedx.atlassian.net/browse/PLAT-1629 and https://github.com/docker/for-mac/issues/1509 """ - file_path = path(__file__) - attempt = 1 - while True: + + file_path = Path(__file__) + max_attempts = 180 + for attempt in range(1, max_attempts + 1): try: - absolute_path = file_path.abspath() - break + absolute_path = file_path.resolve(strict=True) + return absolute_path.parents[1] except OSError: - print(f'Attempt {attempt}/180 to get an absolute path failed') - if attempt < 180: - attempt += 1 + print(f'Attempt {attempt}/{max_attempts} to get an absolute path failed') + if attempt < max_attempts: sleep(1) else: print('Unable to determine the absolute path of the edx-platform repo, aborting') - raise - return absolute_path.parent.parent.parent + raise RuntimeError('Could not determine the repository root after multiple attempts') def _get_report_contents(filename, report_name, last_line_only=False): @@ -292,10 +290,10 @@ def run_pii_check(): run_output_file = str(output_file).format(env_name.lower()) os.makedirs(report_dir, exist_ok=True) command = ( - "export DJANGO_SETTINGS_MODULE={env_settings_file};" - "code_annotations django_find_annotations" - "--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" - "--lint --report --coverage | tee {run_output_file}" + f"export DJANGO_SETTINGS_MODULE={env_settings_file};" + f"code_annotations django_find_annotations" + f"--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" + f"--lint --report --coverage | tee {run_output_file}" ) result = subprocess.run( command, @@ -324,6 +322,8 @@ def run_pii_check(): # Finally, fail the paver task if code_annotations suggests that the check failed. if not pii_check_passed: fail_quality('pii', full_log) + else: + print("successfully run pi_check") def check_keywords(): @@ -348,17 +348,16 @@ def check_keywords(): f"--report_file {report_file}" ) - result = subprocess.run( + subprocess.run( command, shell=True, - check=True, + check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True ) except BuildFailure: overall_status = False - if not overall_status: fail_quality( 'keywords', @@ -366,6 +365,8 @@ def check_keywords(): report_path ) ) + else: + print("successfully run check_keywords") def _get_xsslint_counts(filename): From 9317537f741c00977be336f3b5774391b4da1470 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 13 Aug 2024 16:37:32 +0500 Subject: [PATCH 48/78] chore: replace paver js tests --- .github/workflows/js-tests.yml | 5 +- .github/workflows/quality-checks.yml | 3 - Makefile | 23 +- pavelib/js_test.py | 143 ---------- pavelib/utils/envs.py | 266 ------------------ scripts/quality_test/__init__.py | 0 scripts/quality_test/js_test.py | 137 +++++++++ scripts/{ => quality_test}/quality_test.py | 0 .../quality_test}/suites/__init__.py | 0 .../quality_test}/suites/js_suite.py | 27 +- .../quality_test}/suites/suite.py | 30 +- scripts/quality_test/utils/envs.py | 136 +++++++++ .../quality_test/utils}/utils.py | 52 +--- 13 files changed, 329 insertions(+), 493 deletions(-) delete mode 100644 pavelib/js_test.py delete mode 100644 pavelib/utils/envs.py create mode 100644 scripts/quality_test/__init__.py create mode 100644 scripts/quality_test/js_test.py rename scripts/{ => quality_test}/quality_test.py (100%) rename {pavelib/utils/test => scripts/quality_test}/suites/__init__.py (100%) rename {pavelib/utils/test => scripts/quality_test}/suites/js_suite.py (84%) rename {pavelib/utils/test => scripts/quality_test}/suites/suite.py (88%) create mode 100644 scripts/quality_test/utils/envs.py rename {pavelib/utils/test => scripts/quality_test/utils}/utils.py (64%) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index 4d025e540163..3d69d6b0a463 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -65,12 +65,9 @@ jobs: - uses: c-hive/gha-npm-cache@v1 - name: Run JS Tests - env: - TEST_SUITE: js-unit - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh run: | npm install -g jest - xvfb-run --auto-servernum ./scripts/all-tests.sh + make test-js - name: Save Job Artifacts uses: actions/upload-artifact@v4 diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index b5fdb5f4d025..08901debd90e 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -90,8 +90,6 @@ jobs: - name: Run Quality Tests env: - TEST_SUITE: quality - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} run: | @@ -101,7 +99,6 @@ jobs: make test-xsslint make test-pi_check make test-check_keyword - ./scripts/all-tests.sh - name: Save Job Artifacts if: always() diff --git a/Makefile b/Makefile index 76e7e7bea6c7..51bd6f3a6816 100644 --- a/Makefile +++ b/Makefile @@ -206,27 +206,24 @@ ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip ins sudo apt install libmysqlclient-dev libxmlsec1-dev test-eslint: - python scripts/quality_test.py eslint + python scripts/quality_test/quality_test.py eslint test-stylelint: - python scripts/quality_test.py stylelint + python scripts/quality_test/quality_test.py stylelint test-xsslint: - python scripts/quality_test.py xsslint + python scripts/quality_test/quality_test.py xsslint test-lint: pycodestyle . test-pi_check: - python scripts/quality_test.py pii_check + python scripts/quality_test/quality_test.py pii_check test-check_keyword: - python scripts/quality_test.py check_keywords - -quality-test: - test-lint - test-eslint - test-stylelint - test-xsslint - test-pi_check - test-check_keyword + python scripts/quality_test/quality_test.py check_keywords + +test-js: + python scripts/quality_test/js_test.py + +quality-test: test-lint test-eslint test-stylelint test-xsslint test-pi_check test-check_keyword \ No newline at end of file diff --git a/pavelib/js_test.py b/pavelib/js_test.py deleted file mode 100644 index fb9c213499ac..000000000000 --- a/pavelib/js_test.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -Javascript test tasks -""" - - -import os -import re -import sys - -from paver.easy import cmdopts, needs, sh, task - -from pavelib.utils.envs import Env -from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite -from pavelib.utils.timer import timed - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -@task -@needs( - 'pavelib.prereqs.install_node_prereqs', - 'pavelib.utils.test.utils.clean_reports_dir', -) -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("mode=", "m", "dev or run"), - ("coverage", "c", "Run test under coverage"), - ("port=", "p", "Port to run test server on (dev mode only)"), - ('skip-clean', 'C', 'skip cleaning repository before running tests'), - ('skip_clean', None, 'deprecated in favor of skip-clean'), -], share_with=["pavelib.utils.tests.utils.clean_reports_dir"]) -@timed -def test_js(options): - """ - Run the JavaScript tests - """ - mode = getattr(options, 'mode', 'run') - port = None - skip_clean = getattr(options, 'skip_clean', False) - - if mode == 'run': - suite = getattr(options, 'suite', 'all') - coverage = getattr(options, 'coverage', False) - elif mode == 'dev': - suite = getattr(options, 'suite', None) - coverage = False - port = getattr(options, 'port', None) - else: - sys.stderr.write("Invalid mode. Please choose 'dev' or 'run'.") - return - - if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): - sys.stderr.write( - "Unknown test suite. Please choose from ({suites})\n".format( - suites=", ".join(Env.JS_TEST_ID_KEYS) - ) - ) - return - - if suite != 'jest-snapshot': - test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) - test_suite.run() - - if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in - test_suite = JestSnapshotTestSuite('jest') - test_suite.run() - - -@task -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("coverage", "c", "Run test under coverage"), -]) -@timed -def test_js_run(options): - """ - Run the JavaScript tests and print results to the console - """ - options.mode = 'run' - test_js(options) - - -@task -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("port=", "p", "Port to run test server on"), -]) -@timed -def test_js_dev(options): - """ - Run the JavaScript tests in your default browsers - """ - options.mode = 'dev' - test_js(options) - - -@task -@needs('pavelib.prereqs.install_coverage_prereqs') -@cmdopts([ - ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), -], share_with=['coverage']) -@timed -def diff_coverage(options): - """ - Build the diff coverage reports - """ - compare_branch = options.get('compare_branch', 'origin/master') - - # Find all coverage XML files (both Python and JavaScript) - xml_reports = [] - - for filepath in Env.REPORT_DIR.walk(): - if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): - xml_reports.append(filepath) - - if not xml_reports: - err_msg = colorize( - 'red', - "No coverage info found. Run `paver test` before running " - "`paver coverage`.\n" - ) - sys.stderr.write(err_msg) - else: - xml_report_str = ' '.join(xml_reports) - diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') - - # Generate the diff coverage reports (HTML and console) - # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 - sh( - "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} " - "--html-report {diff_html_path}".format( - xml_report_str=xml_report_str, - compare_branch=compare_branch, - diff_html_path=diff_html_path, - ) - ) - - print("\n") diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py deleted file mode 100644 index f8eb4fece37b..000000000000 --- a/pavelib/utils/envs.py +++ /dev/null @@ -1,266 +0,0 @@ -""" -Helper functions for loading environment settings. -""" -import configparser -import json -import os -import sys -import subprocess -from time import sleep - -from lazy import lazy -from path import Path as path -from pavelib.utils.cmd import django_cmd - - -def repo_root(): - """ - Get the root of the git repository (edx-platform). - - This sometimes fails on Docker Devstack, so it's been broken - down with some additional error handling. It usually starts - working within 30 seconds or so; for more details, see - https://openedx.atlassian.net/browse/PLAT-1629 and - https://github.com/docker/for-mac/issues/1509 - """ - file_path = path(__file__) - attempt = 1 - while True: - try: - absolute_path = file_path.abspath() - break - except OSError: - print(f'Attempt {attempt}/180 to get an absolute path failed') - if attempt < 180: - attempt += 1 - sleep(1) - else: - print('Unable to determine the absolute path of the edx-platform repo, aborting') - raise - return absolute_path.parent.parent.parent - - -class Env: - """ - Load information about the execution environment. - """ - - # Root of the git repository (edx-platform) - REPO_ROOT = repo_root() - - # Reports Directory - REPORT_DIR = REPO_ROOT / 'reports' - METRICS_DIR = REPORT_DIR / 'metrics' - QUALITY_DIR = REPORT_DIR / 'quality_junitxml' - - # Generic log dir - GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" - - # Python unittest dirs - PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" - - # Which Python version should be used in xdist workers? - PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") - - # Directory that videos are served from - VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" - - PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" - - # Detect if in a Docker container, and if so which one - FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') - USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' - DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack' - TEST_SETTINGS = 'test' - - # Mongo databases that will be dropped before/after the tests run - MONGO_HOST = 'localhost' - - # Test Ids Directory - TEST_DIR = REPO_ROOT / ".testids" - - # Configured browser to use for the js test suites - SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') - if USING_DOCKER: - KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' - else: - KARMA_BROWSER = 'FirefoxNoUpdates' - - # Files used to run each of the js test suites - # TODO: Store this as a dict. Order seems to matter for some - # reason. See issue TE-415. - KARMA_CONFIG_FILES = [ - REPO_ROOT / 'cms/static/karma_cms.conf.js', - REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', - REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', - REPO_ROOT / 'lms/static/karma_lms.conf.js', - REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', - REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', - REPO_ROOT / 'common/static/karma_common.conf.js', - REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', - ] - - JS_TEST_ID_KEYS = [ - 'cms', - 'cms-squire', - 'cms-webpack', - 'lms', - 'xmodule', - 'xmodule-webpack', - 'common', - 'common-requirejs', - 'jest-snapshot' - ] - - JS_REPORT_DIR = REPORT_DIR / 'javascript' - - # Directories used for pavelib/ tests - IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') - LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] - - # Directory for i18n test reports - I18N_REPORT_DIR = REPORT_DIR / 'i18n' - - # Directory for keeping src folder that comes with pip installation. - # Setting this is equivalent to passing `--src ` to pip directly. - PIP_SRC = os.environ.get("PIP_SRC") - - # Service variant (lms, cms, etc.) configured with an environment variable - # We use this to determine which envs.json file to load. - SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) - - # If service variant not configured in env, then pass the correct - # environment for lms / cms - if not SERVICE_VARIANT: # this will intentionally catch ""; - if any(i in sys.argv[1:] for i in ('cms', 'studio')): - SERVICE_VARIANT = 'cms' - else: - SERVICE_VARIANT = 'lms' - - @classmethod - def get_django_settings(cls, django_settings, system, settings=None, print_setting_args=None): - """ - Interrogate Django environment for specific settings values - :param django_settings: list of django settings values to get - :param system: the django app to use when asking for the setting (lms | cms) - :param settings: the settings file to use when asking for the value - :param print_setting_args: the additional arguments to send to print_settings - :return: unicode value of the django setting - """ - if not settings: - settings = os.environ.get("EDX_PLATFORM_SETTINGS", "aws") - log_dir = os.path.dirname(cls.PRINT_SETTINGS_LOG_FILE) - if not os.path.exists(log_dir): - os.makedirs(log_dir) - settings_length = len(django_settings) - django_settings = ' '.join(django_settings) # parse_known_args makes a list again - print_setting_args = ' '.join(print_setting_args or []) - - try: - command = django_cmd( - system, - settings, - "print_setting {django_settings} 2>{log_file} {print_setting_args}".format( - django_settings=django_settings, - print_setting_args=print_setting_args, - log_file=cls.PRINT_SETTINGS_LOG_FILE - ).strip() - ) - - result = subprocess.run(command, shell=True, capture_output=True, text=True, check=True) - value = result.stdout.strip() - return tuple(str(value).splitlines()) if value else tuple(None for _ in range(settings_length)) - except subprocess.CalledProcessError: - print(f"Unable to print the value of the {django_settings} setting:") - with open(cls.PRINT_SETTINGS_LOG_FILE) as f: - print(f.read()) - sys.exit(1) - - @classmethod - def get_django_json_settings(cls, django_settings, system, settings=None): - """ - Interrogate Django environment for specific settings value - :param django_settings: list of django settings values to get - :param system: the django app to use when asking for the setting (lms | cms) - :param settings: the settings file to use when asking for the value - :return: json string value of the django setting - """ - return cls.get_django_settings( - django_settings, - system, - settings=settings, - print_setting_args=["--json"], - ) - - @classmethod - def covered_modules(cls): - """ - List the source modules listed in .coveragerc for which coverage - will be measured. - """ - coveragerc = configparser.RawConfigParser() - coveragerc.read(cls.PYTHON_COVERAGERC) - modules = coveragerc.get('run', 'source') - result = [] - for module in modules.split('\n'): - module = module.strip() - if module: - result.append(module) - return result - - @lazy - def env_tokens(self): - """ - Return a dict of environment settings. - If we couldn't find the JSON file, issue a warning and return an empty dict. - """ - - # Find the env JSON file - if self.SERVICE_VARIANT: - env_path = self.REPO_ROOT.parent / f"{self.SERVICE_VARIANT}.env.json" - else: - env_path = path("env.json").abspath() - - # If the file does not exist, here or one level up, - # issue a warning and return an empty dict - if not env_path.isfile(): - env_path = env_path.parent.parent / env_path.basename() - if not env_path.isfile(): - print( - "Warning: could not find environment JSON file " - "at '{path}'".format(path=env_path), - file=sys.stderr, - ) - return {} - - # Otherwise, load the file as JSON and return the resulting dict - try: - with open(env_path) as env_file: - return json.load(env_file) - - except ValueError: - print( - "Error: Could not parse JSON " - "in {path}".format(path=env_path), - file=sys.stderr, - ) - sys.exit(1) - - @lazy - def feature_flags(self): - """ - Return a dictionary of feature flags configured by the environment. - """ - return self.env_tokens.get('FEATURES', {}) - - @classmethod - def rsync_dirs(cls): - """ - List the directories that should be synced during pytest-xdist - execution. Needs to include all modules for which coverage is - measured, not just the tests being run. - """ - result = set() - for module in cls.covered_modules(): - result.add(module.split('/')[0]) - return result diff --git a/scripts/quality_test/__init__.py b/scripts/quality_test/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/scripts/quality_test/js_test.py b/scripts/quality_test/js_test.py new file mode 100644 index 000000000000..dee2b47e3361 --- /dev/null +++ b/scripts/quality_test/js_test.py @@ -0,0 +1,137 @@ +""" +Javascript test tasks +""" + +import click +import sys + +from utils.envs import Env +from suites import JestSnapshotTestSuite, JsTestSuite + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + +__test__ = False # do not collect + + +@click.command("test_js") +@click.option( + '--s', 'suite', + default='all', + help='Test suite to run.' +) +@click.option( + '--m', 'mode', + default='run', + help='dev or run' +) +@click.option( + '--coverage', 'coverage', + default=False, + help='Run test under coverage' +) +@click.option( + '--p', 'port', + default=None, + help='Port to run test server on (dev mode only)' +) +@click.option( + '--C', 'skip_clean', + default=False, + help='skip cleaning repository before running tests' +) +def test_js(suite, mode, coverage, port, skip_clean): + """ + Run the JavaScript tests + """ + + if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): + sys.stderr.write( + "Unknown test suite. Please choose from ({suites})\n".format( + suites=", ".join(Env.JS_TEST_ID_KEYS) + ) + ) + return + + if suite != 'jest-snapshot': + test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) + test_suite.run() + + if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in + test_suite = JestSnapshotTestSuite('jest') + test_suite.run() + + + +# @cmdopts([ +# ("suite=", "s", "Test suite to run"), +# ("coverage", "c", "Run test under coverage"), +# ]) + +def test_js_run(options): + """ + Run the JavaScript tests and print results to the console + """ + options.mode = 'run' + test_js(options) + + +# @cmdopts([ +# ("suite=", "s", "Test suite to run"), +# ("port=", "p", "Port to run test server on"), +# ]) + +def test_js_dev(options): + """ + Run the JavaScript tests in your default browsers + """ + options.mode = 'dev' + test_js(options) + + +# @needs('pavelib.prereqs.install_coverage_prereqs') +# @cmdopts([ +# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), +# ], share_with=['coverage']) + +# def diff_coverage(options): +# """ +# Build the diff coverage reports +# """ +# compare_branch = options.get('compare_branch', 'origin/master') + +# # Find all coverage XML files (both Python and JavaScript) +# xml_reports = [] + +# for filepath in Env.REPORT_DIR.walk(): +# if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): +# xml_reports.append(filepath) + +# if not xml_reports: +# err_msg = colorize( +# 'red', +# "No coverage info found. Run `paver test` before running " +# "`paver coverage`.\n" +# ) +# sys.stderr.write(err_msg) +# else: +# xml_report_str = ' '.join(xml_reports) +# diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + +# # Generate the diff coverage reports (HTML and console) +# # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 +# sh( +# "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} " +# "--html-report {diff_html_path}".format( +# xml_report_str=xml_report_str, +# compare_branch=compare_branch, +# diff_html_path=diff_html_path, +# ) +# ) + +# print("\n") + +if __name__ == "__main__": + test_js() diff --git a/scripts/quality_test.py b/scripts/quality_test/quality_test.py similarity index 100% rename from scripts/quality_test.py rename to scripts/quality_test/quality_test.py diff --git a/pavelib/utils/test/suites/__init__.py b/scripts/quality_test/suites/__init__.py similarity index 100% rename from pavelib/utils/test/suites/__init__.py rename to scripts/quality_test/suites/__init__.py diff --git a/pavelib/utils/test/suites/js_suite.py b/scripts/quality_test/suites/js_suite.py similarity index 84% rename from pavelib/utils/test/suites/js_suite.py rename to scripts/quality_test/suites/js_suite.py index 4e53d454fee5..f6f1da08924c 100644 --- a/pavelib/utils/test/suites/js_suite.py +++ b/scripts/quality_test/suites/js_suite.py @@ -2,12 +2,9 @@ Javascript test tasks """ - -from paver import tasks - -from pavelib.utils.envs import Env -from pavelib.utils.test import utils as test_utils -from pavelib.utils.test.suites.suite import TestSuite +from utils.envs import Env +from utils import utils as test_utils +from .suite import TestSuite __test__ = False # do not collect @@ -28,15 +25,15 @@ def __init__(self, *args, **kwargs): def __enter__(self): super().__enter__() - if tasks.environment.dry_run: - tasks.environment.info("make report_dir") - else: - self.report_dir.makedirs_p() - if not self.skip_clean: - test_utils.clean_test_files() - - if self.mode == 'run' and not self.run_under_coverage: - test_utils.clean_dir(self.report_dir) + # if tasks.environment.dry_run: + # tasks.environment.info("make report_dir") + # else: + self.report_dir.makedirs_p() + # if not self.skip_clean: + # test_utils.clean_test_files() + + # if self.mode == 'run' and not self.run_under_coverage: + # test_utils.clean_dir(self.report_dir) @property def _default_subsuites(self): diff --git a/pavelib/utils/test/suites/suite.py b/scripts/quality_test/suites/suite.py similarity index 88% rename from pavelib/utils/test/suites/suite.py rename to scripts/quality_test/suites/suite.py index 5a423c827c21..332266416a06 100644 --- a/pavelib/utils/test/suites/suite.py +++ b/scripts/quality_test/suites/suite.py @@ -6,10 +6,8 @@ import os import subprocess import sys - -from paver import tasks - -from pavelib.utils.process import kill_process +import signal +import psutil try: from pygments.console import colorize @@ -62,6 +60,18 @@ def cmd(self): The command to run tests (as a string). For this base class there is none. """ return None + + @staticmethod + def kill_process(proc): + """ + Kill the process `proc` created with `subprocess`. + """ + p1_group = psutil.Process(proc.pid) + child_pids = p1_group.children(recursive=True) + + for child_pid in child_pids: + os.kill(child_pid.pid, signal.SIGKILL) + @staticmethod def is_success(exit_code): @@ -79,9 +89,9 @@ def run_test(self): """ cmd = " ".join(self.cmd) - if tasks.environment.dry_run: - tasks.environment.info(cmd) - return + # if tasks.environment.dry_run: + # tasks.environment.info(cmd) + # return sys.stdout.write(cmd) @@ -102,7 +112,7 @@ def run_test(self): process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with return self.is_success(process.wait()) except KeyboardInterrupt: - kill_process(process) + self.kill_process(process) sys.exit(1) def run_suite_tests(self): @@ -140,8 +150,8 @@ def run(self): """ self.run_suite_tests() - if tasks.environment.dry_run: - return + # if tasks.environment.dry_run: + # return self.report_test_results() diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py new file mode 100644 index 000000000000..f7b0d5861809 --- /dev/null +++ b/scripts/quality_test/utils/envs.py @@ -0,0 +1,136 @@ +""" +Helper functions for loading environment settings. +""" + +# import json +import os +import sys +# import subprocess +from time import sleep + +from path import Path as path + + +def repo_root(): + """ + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 + """ + file_path = path(__file__) + attempt = 1 + while True: + try: + absolute_path = file_path.abspath() + break + except OSError: + print(f'Attempt {attempt}/180 to get an absolute path failed') + if attempt < 180: + attempt += 1 + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise + return absolute_path.parent.parent.parent + + +class Env: + """ + Load information about the execution environment. + """ + + # Root of the git repository (edx-platform) + REPO_ROOT = repo_root() + + # Reports Directory + REPORT_DIR = REPO_ROOT / 'reports' + METRICS_DIR = REPORT_DIR / 'metrics' + QUALITY_DIR = REPORT_DIR / 'quality_junitxml' + + # Generic log dir + GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" + + # Python unittest dirs + PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" + + # Which Python version should be used in xdist workers? + PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") + + # Directory that videos are served from + VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" + + PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" + + # Detect if in a Docker container, and if so which one + FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') + USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' + DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack' + TEST_SETTINGS = 'test' + + # Mongo databases that will be dropped before/after the tests run + MONGO_HOST = 'localhost' + + # Test Ids Directory + TEST_DIR = REPO_ROOT / ".testids" + + # Configured browser to use for the js test suites + SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') + if USING_DOCKER: + KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' + else: + KARMA_BROWSER = 'FirefoxNoUpdates' + + # Files used to run each of the js test suites + # TODO: Store this as a dict. Order seems to matter for some + # reason. See issue TE-415. + KARMA_CONFIG_FILES = [ + REPO_ROOT / 'cms/static/karma_cms.conf.js', + REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', + REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', + REPO_ROOT / 'lms/static/karma_lms.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', + REPO_ROOT / 'common/static/karma_common.conf.js', + REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', + ] + + JS_TEST_ID_KEYS = [ + 'cms', + 'cms-squire', + 'cms-webpack', + 'lms', + 'xmodule', + 'xmodule-webpack', + 'common', + 'common-requirejs', + 'jest-snapshot' + ] + + JS_REPORT_DIR = REPORT_DIR / 'javascript' + + # Directories used for pavelib/ tests + IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') + LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] + + # Directory for i18n test reports + I18N_REPORT_DIR = REPORT_DIR / 'i18n' + + # Directory for keeping src folder that comes with pip installation. + # Setting this is equivalent to passing `--src ` to pip directly. + PIP_SRC = os.environ.get("PIP_SRC") + + # Service variant (lms, cms, etc.) configured with an environment variable + # We use this to determine which envs.json file to load. + SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) + + # If service variant not configured in env, then pass the correct + # environment for lms / cms + if not SERVICE_VARIANT: # this will intentionally catch ""; + if any(i in sys.argv[1:] for i in ('cms', 'studio')): + SERVICE_VARIANT = 'cms' + else: + SERVICE_VARIANT = 'lms' diff --git a/pavelib/utils/test/utils.py b/scripts/quality_test/utils/utils.py similarity index 64% rename from pavelib/utils/test/utils.py rename to scripts/quality_test/utils/utils.py index 1f2a0cff28c0..9d3b76a3d15b 100644 --- a/pavelib/utils/test/utils.py +++ b/scripts/quality_test/utils/utils.py @@ -5,36 +5,23 @@ import os import subprocess -from paver.easy import cmdopts, sh, task - -from pavelib.utils.envs import Env -from pavelib.utils.timer import timed +from .envs import Env MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) -COVERAGE_CACHE_BUCKET = "edx-tools-coverage-caches" -COVERAGE_CACHE_BASEPATH = "test_root/who_tests_what" -COVERAGE_CACHE_BASELINE = "who_tests_what.{}.baseline".format(os.environ.get('WTW_CONTEXT', 'all')) -WHO_TESTS_WHAT_DIFF = "who_tests_what.diff" - - -__test__ = False # do not collect - - -@task -@timed def clean_test_files(): """ Clean fixture files used by tests and .pyc files """ - sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") + # "git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads" + subprocess.run("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") # This find command removes all the *.pyc files that aren't in the .git # directory. See this blog post for more details: # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html - sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") - sh("rm -rf test_root/log/auto_screenshots/*") - sh("rm -rf /tmp/mako_[cl]ms") + subprocess.run(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") + subprocess.run("rm -rf test_root/log/auto_screenshots/*") + subprocess.run("rm -rf /tmp/mako_[cl]ms") def ensure_clean_package_lock(): @@ -64,15 +51,15 @@ def clean_dir(directory): """ # We delete the files but preserve the directory structure # so that coverage.py has a place to put the reports. - sh(f'find {directory} -type f -delete') + subprocess.run(f'find {directory} -type f -delete') -@task -@cmdopts([ - ('skip-clean', 'C', 'skip cleaning repository before running tests'), - ('skip_clean', None, 'deprecated in favor of skip-clean'), -]) -@timed +# @task +# @cmdopts([ +# ('skip-clean', 'C', 'skip cleaning repository before running tests'), +# ('skip_clean', None, 'deprecated in favor of skip-clean'), +# ]) + def clean_reports_dir(options): """ Clean coverage files, to ensure that we don't use stale data to generate reports. @@ -85,16 +72,3 @@ def clean_reports_dir(options): # so that coverage.py has a place to put the reports. reports_dir = Env.REPORT_DIR.makedirs_p() clean_dir(reports_dir) - - -@task -@timed -def clean_mongo(): - """ - Clean mongo test databases - """ - sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format( - host=Env.MONGO_HOST, - port=MONGO_PORT_NUM, - repo_root=Env.REPO_ROOT, - )) From b17915a233b68fb3378a4e517694be63fc93225b Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 13 Aug 2024 17:18:36 +0500 Subject: [PATCH 49/78] chore: replace paver js tests --- .github/workflows/js-tests.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index 3d69d6b0a463..3600f1eafa06 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -64,6 +64,10 @@ jobs: make base-requirements - uses: c-hive/gha-npm-cache@v1 + + - name: Install npm + run: npm ci + - name: Run JS Tests run: | npm install -g jest From 4a8356a5891d88cc05a7fdda68aa6016d18b0c7a Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 13 Aug 2024 17:38:44 +0500 Subject: [PATCH 50/78] fix: fix tests --- pavelib/paver_tests/utils.py | 1 + scripts/quality_test/js_test.py | 4 +--- scripts/quality_test/suites/js_suite.py | 4 ++-- scripts/quality_test/suites/suite.py | 3 +-- scripts/quality_test/utils/utils.py | 1 + 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/pavelib/paver_tests/utils.py b/pavelib/paver_tests/utils.py index 1db26cf76a4c..9abe60e4730e 100644 --- a/pavelib/paver_tests/utils.py +++ b/pavelib/paver_tests/utils.py @@ -13,6 +13,7 @@ class PaverTestCase(TestCase): """ Base class for Paver test cases. """ + def setUp(self): super().setUp() diff --git a/scripts/quality_test/js_test.py b/scripts/quality_test/js_test.py index dee2b47e3361..906037857540 100644 --- a/scripts/quality_test/js_test.py +++ b/scripts/quality_test/js_test.py @@ -46,7 +46,7 @@ def test_js(suite, mode, coverage, port, skip_clean): """ Run the JavaScript tests """ - + if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): sys.stderr.write( "Unknown test suite. Please choose from ({suites})\n".format( @@ -64,12 +64,10 @@ def test_js(suite, mode, coverage, port, skip_clean): test_suite.run() - # @cmdopts([ # ("suite=", "s", "Test suite to run"), # ("coverage", "c", "Run test under coverage"), # ]) - def test_js_run(options): """ Run the JavaScript tests and print results to the console diff --git a/scripts/quality_test/suites/js_suite.py b/scripts/quality_test/suites/js_suite.py index f6f1da08924c..7a2b204569e2 100644 --- a/scripts/quality_test/suites/js_suite.py +++ b/scripts/quality_test/suites/js_suite.py @@ -30,10 +30,10 @@ def __enter__(self): # else: self.report_dir.makedirs_p() # if not self.skip_clean: - # test_utils.clean_test_files() + # test_utils.clean_test_files() # if self.mode == 'run' and not self.run_under_coverage: - # test_utils.clean_dir(self.report_dir) + # test_utils.clean_dir(self.report_dir) @property def _default_subsuites(self): diff --git a/scripts/quality_test/suites/suite.py b/scripts/quality_test/suites/suite.py index 332266416a06..18d3b8683242 100644 --- a/scripts/quality_test/suites/suite.py +++ b/scripts/quality_test/suites/suite.py @@ -60,7 +60,7 @@ def cmd(self): The command to run tests (as a string). For this base class there is none. """ return None - + @staticmethod def kill_process(proc): """ @@ -72,7 +72,6 @@ def kill_process(proc): for child_pid in child_pids: os.kill(child_pid.pid, signal.SIGKILL) - @staticmethod def is_success(exit_code): """ diff --git a/scripts/quality_test/utils/utils.py b/scripts/quality_test/utils/utils.py index 9d3b76a3d15b..6913dc66fc24 100644 --- a/scripts/quality_test/utils/utils.py +++ b/scripts/quality_test/utils/utils.py @@ -10,6 +10,7 @@ MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) + def clean_test_files(): """ Clean fixture files used by tests and .pyc files From 167660cd643f1539d83c584ab47b59b4a364b47d Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 13 Aug 2024 17:58:48 +0500 Subject: [PATCH 51/78] fix: fix tests --- scripts/quality_test/utils/envs.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py index f7b0d5861809..07ce310d301f 100644 --- a/scripts/quality_test/utils/envs.py +++ b/scripts/quality_test/utils/envs.py @@ -9,7 +9,7 @@ from time import sleep from path import Path as path - +from pathlib import Path def repo_root(): """ @@ -21,21 +21,20 @@ def repo_root(): https://openedx.atlassian.net/browse/PLAT-1629 and https://github.com/docker/for-mac/issues/1509 """ - file_path = path(__file__) - attempt = 1 - while True: + + file_path = Path(__file__) + max_attempts = 180 + for attempt in range(1, max_attempts + 1): try: - absolute_path = file_path.abspath() - break + absolute_path = file_path.resolve(strict=True) + return absolute_path.parents[1] except OSError: - print(f'Attempt {attempt}/180 to get an absolute path failed') - if attempt < 180: - attempt += 1 + print(f'Attempt {attempt}/{max_attempts} to get an absolute path failed') + if attempt < max_attempts: sleep(1) else: print('Unable to determine the absolute path of the edx-platform repo, aborting') - raise - return absolute_path.parent.parent.parent + raise RuntimeError('Could not determine the repository root after multiple attempts') class Env: From 8b7d6007a93e584cb5238198350246be355dc2d4 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 13 Aug 2024 18:06:56 +0500 Subject: [PATCH 52/78] fix: fix tests --- scripts/quality_test/suites/js_suite.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/quality_test/suites/js_suite.py b/scripts/quality_test/suites/js_suite.py index 7a2b204569e2..b24f58033dc2 100644 --- a/scripts/quality_test/suites/js_suite.py +++ b/scripts/quality_test/suites/js_suite.py @@ -28,7 +28,8 @@ def __enter__(self): # if tasks.environment.dry_run: # tasks.environment.info("make report_dir") # else: - self.report_dir.makedirs_p() + # self.report_dir.makedirs_p() + self.report_dir.mkdir(parents=True, exist_ok=True) # if not self.skip_clean: # test_utils.clean_test_files() From 03120da1c853b817f3a6d9ea3fa3c0cf8627d434 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 14 Aug 2024 13:51:02 +0500 Subject: [PATCH 53/78] fix: fix tests --- scripts/quality_test/suites/suite.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/quality_test/suites/suite.py b/scripts/quality_test/suites/suite.py index 18d3b8683242..b0204d655711 100644 --- a/scripts/quality_test/suites/suite.py +++ b/scripts/quality_test/suites/suite.py @@ -86,7 +86,9 @@ def run_test(self): It returns False if errors or failures occur. Otherwise, it returns True. """ - cmd = " ".join(self.cmd) + # cmd = " ".join(self.cmd) + cmd = " ".join(str(part) for part in self.cmd) + # if tasks.environment.dry_run: # tasks.environment.info(cmd) From 8f9b41f8a22b43841b1d1bee4552b0c7677a9dde Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 14 Aug 2024 14:14:06 +0500 Subject: [PATCH 54/78] fix: fix tests --- scripts/quality_test/utils/envs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py index 07ce310d301f..c1e3de729db4 100644 --- a/scripts/quality_test/utils/envs.py +++ b/scripts/quality_test/utils/envs.py @@ -21,13 +21,13 @@ def repo_root(): https://openedx.atlassian.net/browse/PLAT-1629 and https://github.com/docker/for-mac/issues/1509 """ - + import pdb; pdb.set_trace() file_path = Path(__file__) max_attempts = 180 for attempt in range(1, max_attempts + 1): try: absolute_path = file_path.resolve(strict=True) - return absolute_path.parents[1] + return absolute_path.parents[2] except OSError: print(f'Attempt {attempt}/{max_attempts} to get an absolute path failed') if attempt < max_attempts: From d8dedf0871aeb83a75011863d94617d81c9b9503 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 14 Aug 2024 22:52:22 +0500 Subject: [PATCH 55/78] fix: fix tests --- scripts/quality_test/suites/js_suite.py | 4 ++-- scripts/quality_test/utils/envs.py | 18 ++++++++++-------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/scripts/quality_test/suites/js_suite.py b/scripts/quality_test/suites/js_suite.py index b24f58033dc2..5b028a5b3227 100644 --- a/scripts/quality_test/suites/js_suite.py +++ b/scripts/quality_test/suites/js_suite.py @@ -28,8 +28,8 @@ def __enter__(self): # if tasks.environment.dry_run: # tasks.environment.info("make report_dir") # else: - # self.report_dir.makedirs_p() - self.report_dir.mkdir(parents=True, exist_ok=True) + self.report_dir.makedirs_p() + # self.report_dir.mkdir(exist_ok=True) # if not self.skip_clean: # test_utils.clean_test_files() diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py index c1e3de729db4..4610058ac129 100644 --- a/scripts/quality_test/utils/envs.py +++ b/scripts/quality_test/utils/envs.py @@ -22,19 +22,21 @@ def repo_root(): https://github.com/docker/for-mac/issues/1509 """ import pdb; pdb.set_trace() - file_path = Path(__file__) - max_attempts = 180 - for attempt in range(1, max_attempts + 1): + file_path = path(__file__) + attempt = 1 + while True: try: - absolute_path = file_path.resolve(strict=True) - return absolute_path.parents[2] + absolute_path = file_path.abspath() + break except OSError: - print(f'Attempt {attempt}/{max_attempts} to get an absolute path failed') - if attempt < max_attempts: + print(f'Attempt {attempt}/180 to get an absolute path failed') + if attempt < 180: + attempt += 1 sleep(1) else: print('Unable to determine the absolute path of the edx-platform repo, aborting') - raise RuntimeError('Could not determine the repository root after multiple attempts') + raise + return absolute_path.parent.parent.parent class Env: From 8ead7cd388e436843e7a022aab9e5ab0472b636a Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 14 Aug 2024 23:01:28 +0500 Subject: [PATCH 56/78] fix: fix tests --- scripts/quality_test/utils/envs.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py index 4610058ac129..023f01c4a1a9 100644 --- a/scripts/quality_test/utils/envs.py +++ b/scripts/quality_test/utils/envs.py @@ -21,7 +21,6 @@ def repo_root(): https://openedx.atlassian.net/browse/PLAT-1629 and https://github.com/docker/for-mac/issues/1509 """ - import pdb; pdb.set_trace() file_path = path(__file__) attempt = 1 while True: From f175679958e272165cad341eaa630102a245c28b Mon Sep 17 00:00:00 2001 From: salman2013 Date: Wed, 14 Aug 2024 23:32:06 +0500 Subject: [PATCH 57/78] fix: fix tests --- scripts/quality_test/utils/envs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py index 023f01c4a1a9..00428a5ad656 100644 --- a/scripts/quality_test/utils/envs.py +++ b/scripts/quality_test/utils/envs.py @@ -35,7 +35,7 @@ def repo_root(): else: print('Unable to determine the absolute path of the edx-platform repo, aborting') raise - return absolute_path.parent.parent.parent + return absolute_path.parent.parent.parent.parent class Env: From 28d53e1addcd24d0243e65d29e71f2f9fae858c4 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 15 Aug 2024 00:10:05 +0500 Subject: [PATCH 58/78] fix: fix tests --- .github/workflows/js-tests.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index 3600f1eafa06..2d95cdbf7ccc 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -66,9 +66,13 @@ jobs: - uses: c-hive/gha-npm-cache@v1 - name: Install npm + env: + PIP_SRC: ${{ runner.temp }} run: npm ci - name: Run JS Tests + env: + PIP_SRC: ${{ runner.temp }} run: | npm install -g jest make test-js From 4611635d71703596eef22a69235a00725fa1d375 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 15 Aug 2024 15:54:35 +0500 Subject: [PATCH 59/78] fix: fix tests --- .github/workflows/js-tests.yml | 4 ---- scripts/quality_test/js_test.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index 2d95cdbf7ccc..3600f1eafa06 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -66,13 +66,9 @@ jobs: - uses: c-hive/gha-npm-cache@v1 - name: Install npm - env: - PIP_SRC: ${{ runner.temp }} run: npm ci - name: Run JS Tests - env: - PIP_SRC: ${{ runner.temp }} run: | npm install -g jest make test-js diff --git a/scripts/quality_test/js_test.py b/scripts/quality_test/js_test.py index 906037857540..608a321a5998 100644 --- a/scripts/quality_test/js_test.py +++ b/scripts/quality_test/js_test.py @@ -29,7 +29,7 @@ ) @click.option( '--coverage', 'coverage', - default=False, + default=True, help='Run test under coverage' ) @click.option( From a1cd00eefab3fd56c5221a94cf98c088dac63cb0 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 15 Aug 2024 17:29:08 +0500 Subject: [PATCH 60/78] fix: fix tests --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 51bd6f3a6816..1e9f13150d92 100644 --- a/Makefile +++ b/Makefile @@ -224,6 +224,6 @@ test-check_keyword: python scripts/quality_test/quality_test.py check_keywords test-js: - python scripts/quality_test/js_test.py + xvfb-run --auto-servernum python scripts/quality_test/js_test.py quality-test: test-lint test-eslint test-stylelint test-xsslint test-pi_check test-check_keyword \ No newline at end of file From 25c69f3fd11512de285da4937e38f4ac8547bab3 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 16 Aug 2024 14:59:01 +0500 Subject: [PATCH 61/78] fix: fix tests --- pavelib/utils/envs.py | 136 +++++++++++++++++++++++++++ scripts/quality_test/suites/suite.py | 6 -- scripts/quality_test/utils/envs.py | 3 +- 3 files changed, 137 insertions(+), 8 deletions(-) create mode 100644 pavelib/utils/envs.py diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py new file mode 100644 index 000000000000..953389bbe886 --- /dev/null +++ b/pavelib/utils/envs.py @@ -0,0 +1,136 @@ +""" +Helper functions for loading environment settings. +""" + +# import json +import os +import sys +# import subprocess +from time import sleep + +from path import Path as path + + +def repo_root(): + """ + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 + """ + file_path = path(__file__) + attempt = 1 + while True: + try: + absolute_path = file_path.abspath() + break + except OSError: + print(f'Attempt {attempt}/180 to get an absolute path failed') + if attempt < 180: + attempt += 1 + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise + return absolute_path.parent.parent.parent.parent + + +class Env: + """ + Load information about the execution environment. + """ + + # Root of the git repository (edx-platform) + REPO_ROOT = repo_root() + + # Reports Directory + REPORT_DIR = REPO_ROOT / 'reports' + METRICS_DIR = REPORT_DIR / 'metrics' + QUALITY_DIR = REPORT_DIR / 'quality_junitxml' + + # Generic log dir + GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" + + # Python unittest dirs + PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" + + # Which Python version should be used in xdist workers? + PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") + + # Directory that videos are served from + VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" + + PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" + + # Detect if in a Docker container, and if so which one + FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') + USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' + DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack' + TEST_SETTINGS = 'test' + + # Mongo databases that will be dropped before/after the tests run + MONGO_HOST = 'localhost' + + # Test Ids Directory + TEST_DIR = REPO_ROOT / ".testids" + + # Configured browser to use for the js test suites + SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') + if USING_DOCKER: + KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' + else: + KARMA_BROWSER = 'FirefoxNoUpdates' + + # Files used to run each of the js test suites + # TODO: Store this as a dict. Order seems to matter for some + # reason. See issue TE-415. + KARMA_CONFIG_FILES = [ + REPO_ROOT / 'cms/static/karma_cms.conf.js', + REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', + REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', + REPO_ROOT / 'lms/static/karma_lms.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', + REPO_ROOT / 'common/static/karma_common.conf.js', + REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', + ] + + JS_TEST_ID_KEYS = [ + 'cms', + 'cms-squire', + 'cms-webpack', + 'lms', + 'xmodule', + 'xmodule-webpack', + 'common', + 'common-requirejs', + 'jest-snapshot' + ] + + JS_REPORT_DIR = REPORT_DIR / 'javascript' + + # Directories used for pavelib/ tests + IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') + LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] + + # Directory for i18n test reports + I18N_REPORT_DIR = REPORT_DIR / 'i18n' + + # Directory for keeping src folder that comes with pip installation. + # Setting this is equivalent to passing `--src ` to pip directly. + PIP_SRC = os.environ.get("PIP_SRC") + + # Service variant (lms, cms, etc.) configured with an environment variable + # We use this to determine which envs.json file to load. + SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) + + # If service variant not configured in env, then pass the correct + # environment for lms / cms + if not SERVICE_VARIANT: # this will intentionally catch ""; + if any(i in sys.argv[1:] for i in ('cms', 'studio')): + SERVICE_VARIANT = 'cms' + else: + SERVICE_VARIANT = 'lms' diff --git a/scripts/quality_test/suites/suite.py b/scripts/quality_test/suites/suite.py index b0204d655711..19a749bc8344 100644 --- a/scripts/quality_test/suites/suite.py +++ b/scripts/quality_test/suites/suite.py @@ -88,12 +88,6 @@ def run_test(self): """ # cmd = " ".join(self.cmd) cmd = " ".join(str(part) for part in self.cmd) - - - # if tasks.environment.dry_run: - # tasks.environment.info(cmd) - # return - sys.stdout.write(cmd) msg = colorize( diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py index 00428a5ad656..37dac514d6c7 100644 --- a/scripts/quality_test/utils/envs.py +++ b/scripts/quality_test/utils/envs.py @@ -7,9 +7,8 @@ import sys # import subprocess from time import sleep - from path import Path as path -from pathlib import Path + def repo_root(): """ From 988b77adc3b06c52e5b2b3cc53008f817dea7ad8 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 16 Aug 2024 15:08:42 +0500 Subject: [PATCH 62/78] chore: remove un-used code --- pavelib/paver_tests/conftest.py | 22 ---- pavelib/paver_tests/test_js_test.py | 148 ---------------------- pavelib/paver_tests/test_timer.py | 190 ---------------------------- 3 files changed, 360 deletions(-) delete mode 100644 pavelib/paver_tests/conftest.py delete mode 100644 pavelib/paver_tests/test_js_test.py delete mode 100644 pavelib/paver_tests/test_timer.py diff --git a/pavelib/paver_tests/conftest.py b/pavelib/paver_tests/conftest.py deleted file mode 100644 index 214a35e3fe85..000000000000 --- a/pavelib/paver_tests/conftest.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Pytest fixtures for the pavelib unit tests. -""" - - -import os -from shutil import rmtree - -import pytest - -from pavelib.utils.envs import Env - - -@pytest.fixture(autouse=True, scope='session') -def delete_quality_junit_xml(): - """ - Delete the JUnit XML results files for quality check tasks run during the - unit tests. - """ - yield - if os.path.exists(Env.QUALITY_DIR): - rmtree(Env.QUALITY_DIR, ignore_errors=True) diff --git a/pavelib/paver_tests/test_js_test.py b/pavelib/paver_tests/test_js_test.py deleted file mode 100644 index 4b165a156674..000000000000 --- a/pavelib/paver_tests/test_js_test.py +++ /dev/null @@ -1,148 +0,0 @@ -"""Unit tests for the Paver JavaScript testing tasks.""" - -from unittest.mock import patch - -import ddt -from paver.easy import call_task - -import pavelib.js_test -from pavelib.utils.envs import Env - -from .utils import PaverTestCase - - -@ddt.ddt -class TestPaverJavaScriptTestTasks(PaverTestCase): - """ - Test the Paver JavaScript testing tasks. - """ - - EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete' - EXPECTED_KARMA_OPTIONS = ( - "{config_file} " - "--single-run={single_run} " - "--capture-timeout=60000 " - "--junitreportpath=" - "{platform_root}/reports/javascript/javascript_xunit-{suite}.xml " - "--browsers={browser}" - ) - EXPECTED_COVERAGE_OPTIONS = ( - ' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml' - ) - - EXPECTED_COMMANDS = [ - "make report_dir", - 'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads', - "find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;", - 'rm -rf test_root/log/auto_screenshots/*', - "rm -rf /tmp/mako_[cl]ms", - ] - - def setUp(self): - super().setUp() - - # Mock the paver @needs decorator - self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start() - self._mock_paver_needs.return_value = 0 - - # Cleanup mocks - self.addCleanup(self._mock_paver_needs.stop) - - @ddt.data( - [""], - ["--coverage"], - ["--suite=lms"], - ["--suite=lms --coverage"], - ) - @ddt.unpack - def test_test_js_run(self, options_string): - """ - Test the "test_js_run" task. - """ - options = self.parse_options_string(options_string) - self.reset_task_messages() - call_task("pavelib.js_test.test_js_run", options=options) - self.verify_messages(options=options, dev_mode=False) - - @ddt.data( - [""], - ["--port=9999"], - ["--suite=lms"], - ["--suite=lms --port=9999"], - ) - @ddt.unpack - def test_test_js_dev(self, options_string): - """ - Test the "test_js_run" task. - """ - options = self.parse_options_string(options_string) - self.reset_task_messages() - call_task("pavelib.js_test.test_js_dev", options=options) - self.verify_messages(options=options, dev_mode=True) - - def parse_options_string(self, options_string): - """ - Parse a string containing the options for a test run - """ - parameters = options_string.split(" ") - suite = "all" - if "--system=lms" in parameters: - suite = "lms" - elif "--system=common" in parameters: - suite = "common" - coverage = "--coverage" in parameters - port = None - if "--port=9999" in parameters: - port = 9999 - return { - "suite": suite, - "coverage": coverage, - "port": port, - } - - def verify_messages(self, options, dev_mode): - """ - Verify that the messages generated when running tests are as expected - for the specified options and dev_mode. - """ - is_coverage = options['coverage'] - port = options['port'] - expected_messages = [] - suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']] - - expected_messages.extend(self.EXPECTED_COMMANDS) - if not dev_mode and not is_coverage: - expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format( - platform_root=self.platform_root - )) - - command_template = ( - 'node --max_old_space_size=4096 node_modules/.bin/karma start {options}' - ) - - for suite in suites: - # Karma test command - if suite != 'jest-snapshot': - karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)] - expected_test_tool_command = command_template.format( - options=self.EXPECTED_KARMA_OPTIONS.format( - config_file=karma_config_file, - single_run='false' if dev_mode else 'true', - suite=suite, - platform_root=self.platform_root, - browser=Env.KARMA_BROWSER, - ), - ) - if is_coverage: - expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format( - platform_root=self.platform_root, - suite=suite - ) - if port: - expected_test_tool_command += f" --port={port}" - else: - expected_test_tool_command = 'jest' - - expected_messages.append(expected_test_tool_command) - - assert self.task_messages == expected_messages diff --git a/pavelib/paver_tests/test_timer.py b/pavelib/paver_tests/test_timer.py deleted file mode 100644 index 5ccbf74abcf9..000000000000 --- a/pavelib/paver_tests/test_timer.py +++ /dev/null @@ -1,190 +0,0 @@ -""" -Tests of the pavelib.utils.timer module. -""" - - -from datetime import datetime, timedelta -from unittest import TestCase - -from unittest.mock import MagicMock, patch - -from pavelib.utils import timer - - -@timer.timed -def identity(*args, **kwargs): - """ - An identity function used as a default task to test the timing of. - """ - return args, kwargs - - -MOCK_OPEN = MagicMock(spec=open) - - -@patch.dict('pavelib.utils.timer.__builtins__', open=MOCK_OPEN) -class TimedDecoratorTests(TestCase): - """ - Tests of the pavelib.utils.timer:timed decorator. - """ - def setUp(self): - super().setUp() - - patch_dumps = patch.object(timer.json, 'dump', autospec=True) - self.mock_dump = patch_dumps.start() - self.addCleanup(patch_dumps.stop) - - patch_makedirs = patch.object(timer.os, 'makedirs', autospec=True) - self.mock_makedirs = patch_makedirs.start() - self.addCleanup(patch_makedirs.stop) - - patch_datetime = patch.object(timer, 'datetime', autospec=True) - self.mock_datetime = patch_datetime.start() - self.addCleanup(patch_datetime.stop) - - patch_exists = patch.object(timer, 'exists', autospec=True) - self.mock_exists = patch_exists.start() - self.addCleanup(patch_exists.stop) - - MOCK_OPEN.reset_mock() - - def get_log_messages(self, task=identity, args=None, kwargs=None, raises=None): - """ - Return all timing messages recorded during the execution of ``task``. - """ - if args is None: - args = [] - if kwargs is None: - kwargs = {} - - if raises is None: - task(*args, **kwargs) - else: - self.assertRaises(raises, task, *args, **kwargs) - - return [ - call[0][0] # log_message - for call in self.mock_dump.call_args_list - ] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_times(self): - start = datetime(2016, 7, 20, 10, 56, 19) - end = start + timedelta(seconds=35.6) - - self.mock_datetime.utcnow.side_effect = [start, end] - - messages = self.get_log_messages() - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'duration' in messages[0] - assert 35.6 == messages[0]['duration'] - - assert 'started_at' in messages[0] - assert start.isoformat(' ') == messages[0]['started_at'] - - assert 'ended_at' in messages[0] - assert end.isoformat(' ') == messages[0]['ended_at'] - - @patch.object(timer, 'PAVER_TIMER_LOG', None) - def test_no_logs(self): - messages = self.get_log_messages() - assert len(messages) == 0 - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_arguments(self): - messages = self.get_log_messages(args=(1, 'foo'), kwargs=dict(bar='baz')) - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'args' in messages[0] - assert [repr(1), repr('foo')] == messages[0]['args'] - assert 'kwargs' in messages[0] - assert {'bar': repr('baz')} == messages[0]['kwargs'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_task_name(self): - messages = self.get_log_messages() - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'task' in messages[0] - assert 'pavelib.paver_tests.test_timer.identity' == messages[0]['task'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_exceptions(self): - - @timer.timed - def raises(): - """ - A task used for testing exception handling of the timed decorator. - """ - raise Exception('The Message!') - - messages = self.get_log_messages(task=raises, raises=Exception) - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'exception' in messages[0] - assert 'Exception: The Message!' == messages[0]['exception'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log') - def test_date_formatting(self): - start = datetime(2016, 7, 20, 10, 56, 19) - end = start + timedelta(seconds=35.6) - - self.mock_datetime.utcnow.side_effect = [start, end] - - messages = self.get_log_messages() - assert len(messages) == 1 - - MOCK_OPEN.assert_called_once_with('/tmp/some-log-2016-07-20-10-56-19.log', 'a') - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_nested_tasks(self): - - @timer.timed - def parent(): - """ - A timed task that calls another task - """ - identity() - - parent_start = datetime(2016, 7, 20, 10, 56, 19) - parent_end = parent_start + timedelta(seconds=60) - child_start = parent_start + timedelta(seconds=10) - child_end = parent_end - timedelta(seconds=10) - - self.mock_datetime.utcnow.side_effect = [parent_start, child_start, child_end, parent_end] - - messages = self.get_log_messages(task=parent) - assert len(messages) == 2 - - # Child messages first - assert 'duration' in messages[0] - assert 40 == messages[0]['duration'] - - assert 'started_at' in messages[0] - assert child_start.isoformat(' ') == messages[0]['started_at'] - - assert 'ended_at' in messages[0] - assert child_end.isoformat(' ') == messages[0]['ended_at'] - - # Parent messages after - assert 'duration' in messages[1] - assert 60 == messages[1]['duration'] - - assert 'started_at' in messages[1] - assert parent_start.isoformat(' ') == messages[1]['started_at'] - - assert 'ended_at' in messages[1] - assert parent_end.isoformat(' ') == messages[1]['ended_at'] From 7c1291133863b67ba2e3390ffd63717d4b32459e Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 16 Aug 2024 15:28:53 +0500 Subject: [PATCH 63/78] chore: remove un-used code --- pavelib/__init__.py | 2 +- pavelib/prereqs.py | 361 ------------------- pavelib/quality.py | 602 -------------------------------- scripts/quality_test/js_test.py | 21 +- 4 files changed, 15 insertions(+), 971 deletions(-) delete mode 100644 pavelib/prereqs.py delete mode 100644 pavelib/quality.py diff --git a/pavelib/__init__.py b/pavelib/__init__.py index 875068166ff5..11f3e6e3c5b1 100644 --- a/pavelib/__init__.py +++ b/pavelib/__init__.py @@ -3,4 +3,4 @@ """ -from . import assets, js_test, prereqs, quality +from . import assets, prereqs, quality diff --git a/pavelib/prereqs.py b/pavelib/prereqs.py deleted file mode 100644 index 130de1022f34..000000000000 --- a/pavelib/prereqs.py +++ /dev/null @@ -1,361 +0,0 @@ -""" -Install Python and Node prerequisites. -""" - - -import hashlib -import os -import re -import subprocess -import sys -import shutil -from distutils import sysconfig # pylint: disable=deprecated-module - -from paver.easy import sh, task # lint-amnesty, pylint: disable=unused-import -from pavelib.utils.envs import Env -from pavelib.utils.timer import timed - -PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache') -NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs" -NO_PYTHON_UNINSTALL_MESSAGE = 'NO_PYTHON_UNINSTALL is set. No attempts will be made to uninstall old Python libs.' -COVERAGE_REQ_FILE = 'requirements/edx/coverage.txt' - -# If you make any changes to this list you also need to make -# a corresponding change to circle.yml, which is how the python -# prerequisites are installed for builds on circleci.com -toxenv = os.environ.get('TOXENV') -if toxenv and toxenv != 'quality': - PYTHON_REQ_FILES = ['requirements/edx/testing.txt'] -else: - PYTHON_REQ_FILES = ['requirements/edx/development.txt'] - -# Developers can have private requirements, for local copies of github repos, -# or favorite debugging tools, etc. -PRIVATE_REQS = 'requirements/edx/private.txt' -if os.path.exists(PRIVATE_REQS): - PYTHON_REQ_FILES.append(PRIVATE_REQS) - - -def str2bool(s): - s = str(s) - return s.lower() in ('yes', 'true', 't', '1') - - -def no_prereq_install(): - """ - Determine if NO_PREREQ_INSTALL should be truthy or falsy. - """ - return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False')) - - -def no_python_uninstall(): - """ Determine if we should run the uninstall_python_packages task. """ - return str2bool(os.environ.get('NO_PYTHON_UNINSTALL', 'False')) - - -def create_prereqs_cache_dir(): - """Create the directory for storing the hashes, if it doesn't exist already.""" - try: - os.makedirs(PREREQS_STATE_DIR) - except OSError: - if not os.path.isdir(PREREQS_STATE_DIR): - raise - - -def compute_fingerprint(path_list): - """ - Hash the contents of all the files and directories in `path_list`. - Returns the hex digest. - """ - - hasher = hashlib.sha1() - - for path_item in path_list: - - # For directories, create a hash based on the modification times - # of first-level subdirectories - if os.path.isdir(path_item): - for dirname in sorted(os.listdir(path_item)): - path_name = os.path.join(path_item, dirname) - if os.path.isdir(path_name): - hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8')) - - # For files, hash the contents of the file - if os.path.isfile(path_item): - with open(path_item, "rb") as file_handle: - hasher.update(file_handle.read()) - - return hasher.hexdigest() - - -def prereq_cache(cache_name, paths, install_func): - """ - Conditionally execute `install_func()` only if the files/directories - specified by `paths` have changed. - - If the code executes successfully (no exceptions are thrown), the cache - is updated with the new hash. - """ - # Retrieve the old hash - cache_filename = cache_name.replace(" ", "_") - cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1") - old_hash = None - if os.path.isfile(cache_file_path): - with open(cache_file_path) as cache_file: - old_hash = cache_file.read() - - # Compare the old hash to the new hash - # If they do not match (either the cache hasn't been created, or the files have changed), - # then execute the code within the block. - new_hash = compute_fingerprint(paths) - if new_hash != old_hash: - install_func() - - # Update the cache with the new hash - # If the code executed within the context fails (throws an exception), - # then this step won't get executed. - create_prereqs_cache_dir() - with open(cache_file_path, "wb") as cache_file: - # Since the pip requirement files are modified during the install - # process, we need to store the hash generated AFTER the installation - post_install_hash = compute_fingerprint(paths) - cache_file.write(post_install_hash.encode('utf-8')) - else: - print(f'{cache_name} unchanged, skipping...') - - -def node_prereqs_installation(): - """ - Configures npm and installs Node prerequisites - """ - # Before July 2023, these directories were created and written to - # as root. Afterwards, they are created as being owned by the - # `app` user -- but also need to be deleted by that user (due to - # how npm runs post-install scripts.) Developers with an older - # devstack installation who are reprovisioning will see errors - # here if the files are still owned by root. Deleting the files in - # advance prevents this error. - # - # This hack should probably be left in place for at least a year. - # See ADR 17 for more background on the transition. - # sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") - # At the time of this writing, the js dir has git-versioned files - # but the css dir does not, so the latter would have been created - # as root-owned (in the process of creating the vendor - # subdirectory). Delete it only if empty, just in case - # git-versioned files are added later. - # sh("rmdir common/static/common/css || true") - try: - shutil.rmtree("common/static/common/js/vendor/ common/static/common/css/vendor/") - os.rmdir("common/static/common/css") - except OSError: - pass - - # NPM installs hang sporadically. Log the installation process so that we - # determine if any packages are chronic offenders. - npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log' - npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with - npm_command = 'npm ci --verbose'.split() - - # The implementation of Paver's `sh` function returns before the forked - # actually returns. Using a Popen object so that we can ensure that - # the forked process has returned - proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with - retcode = proc.wait() - if retcode == 1: - raise Exception(f"npm install failed: See {npm_log_file_path}") - print("Successfully clean-installed NPM packages. Log found at {}".format( - npm_log_file_path - )) - - -def python_prereqs_installation(): - """ - Installs Python prerequisites - """ - # edx-platform installs some Python projects from within the edx-platform repo itself. - sh("pip install -e .") - for req_file in PYTHON_REQ_FILES: - pip_install_req_file(req_file) - - -def pip_install_req_file(req_file): - """Pip install the requirements file.""" - pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w' - command = f"{pip_cmd} -r {req_file}" - result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True) - - if result.returncode != 0: - print(f"Error: pip command exited with non-zero status {result.returncode}") - print(f"stdout: {result.stdout}") - print(f"stderr: {result.stderr}") - else: - print("Pip install completed successfully.") - - -@task -def install_node_prereqs(): - """ - Installs Node prerequisites - """ - if no_prereq_install(): - print(NO_PREREQ_MESSAGE) - return - - prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation) - - -# To add a package to the uninstall list, just add it to this list! No need -# to touch any other part of this file. -PACKAGES_TO_UNINSTALL = [ - "MySQL-python", # Because mysqlclient shares the same directory name - "South", # Because it interferes with Django 1.8 migrations. - "edxval", # Because it was bork-installed somehow. - "django-storages", - "django-oauth2-provider", # Because now it's called edx-django-oauth2-provider. - "edx-oauth2-provider", # Because it moved from github to pypi - "enum34", # Because enum34 is not needed in python>3.4 - "i18n-tools", # Because now it's called edx-i18n-tools - "moto", # Because we no longer use it and it conflicts with recent jsondiff versions - "python-saml", # Because python3-saml shares the same directory name - "pytest-faulthandler", # Because it was bundled into pytest - "djangorestframework-jwt", # Because now its called drf-jwt. -] - - -@task -@timed -def uninstall_python_packages(): - """ - Uninstall Python packages that need explicit uninstallation. - - Some Python packages that we no longer want need to be explicitly - uninstalled, notably, South. Some other packages were once installed in - ways that were resistant to being upgraded, like edxval. Also uninstall - them. - """ - - if no_python_uninstall(): - print(NO_PYTHON_UNINSTALL_MESSAGE) - return - - # So that we don't constantly uninstall things, use a hash of the packages - # to be uninstalled. Check it, and skip this if we're up to date. - hasher = hashlib.sha1() - hasher.update(repr(PACKAGES_TO_UNINSTALL).encode('utf-8')) - expected_version = hasher.hexdigest() - state_file_path = os.path.join(PREREQS_STATE_DIR, "Python_uninstall.sha1") - create_prereqs_cache_dir() - - if os.path.isfile(state_file_path): - with open(state_file_path) as state_file: - version = state_file.read() - if version == expected_version: - print('Python uninstalls unchanged, skipping...') - return - - # Run pip to find the packages we need to get rid of. Believe it or not, - # edx-val is installed in a way that it is present twice, so we have a loop - # to really really get rid of it. - for _ in range(3): - uninstalled = False - frozen = sh("pip freeze", capture=True) - - for package_name in PACKAGES_TO_UNINSTALL: - if package_in_frozen(package_name, frozen): - # Uninstall the pacakge - sh(f"pip uninstall --disable-pip-version-check -y {package_name}") - uninstalled = True - if not uninstalled: - break - else: - # We tried three times and didn't manage to get rid of the pests. - print("Couldn't uninstall unwanted Python packages!") - return - - # Write our version. - with open(state_file_path, "wb") as state_file: - state_file.write(expected_version.encode('utf-8')) - - -def package_in_frozen(package_name, frozen_output): - """Is this package in the output of 'pip freeze'?""" - # Look for either: - # - # PACKAGE-NAME== - # - # or: - # - # blah_blah#egg=package_name-version - # - pattern = r"(?mi)^{pkg}==|#egg={pkg_under}-".format( - pkg=re.escape(package_name), - pkg_under=re.escape(package_name.replace("-", "_")), - ) - return bool(re.search(pattern, frozen_output)) - - -@task -@timed -def install_coverage_prereqs(): - """ Install python prereqs for measuring coverage. """ - if no_prereq_install(): - print(NO_PREREQ_MESSAGE) - return - pip_install_req_file(COVERAGE_REQ_FILE) - - -def install_python_prereqs(): - """ - Installs Python prerequisites. - """ - if no_prereq_install(): - print(NO_PREREQ_MESSAGE) - return - - uninstall_python_packages() - - # Include all of the requirements files in the fingerprint. - files_to_fingerprint = list(PYTHON_REQ_FILES) - - # Also fingerprint the directories where packages get installed: - # ("/edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages") - files_to_fingerprint.append(sysconfig.get_python_lib()) - - # In a virtualenv, "-e installs" get put in a src directory. - if Env.PIP_SRC: - src_dir = Env.PIP_SRC - else: - src_dir = os.path.join(sys.prefix, "src") - if os.path.isdir(src_dir): - files_to_fingerprint.append(src_dir) - - # Also fingerprint this source file, so that if the logic for installations - # changes, we will redo the installation. - this_file = __file__ - if this_file.endswith(".pyc"): - this_file = this_file[:-1] # use the .py file instead of the .pyc - files_to_fingerprint.append(this_file) - - prereq_cache("Python prereqs", files_to_fingerprint, python_prereqs_installation) - - -@task -@timed -def install_prereqs(): - """ - Installs Node and Python prerequisites - """ - if no_prereq_install(): - print(NO_PREREQ_MESSAGE) - return - - if not str2bool(os.environ.get('SKIP_NPM_INSTALL', 'False')): - install_node_prereqs() - install_python_prereqs() - log_installed_python_prereqs() - - -def log_installed_python_prereqs(): - """ Logs output of pip freeze for debugging. """ - sh("pip freeze > {}".format(Env.GEN_LOG_DIR + "/pip_freeze.log")) diff --git a/pavelib/quality.py b/pavelib/quality.py deleted file mode 100644 index 774179f45048..000000000000 --- a/pavelib/quality.py +++ /dev/null @@ -1,602 +0,0 @@ -""" # lint-amnesty, pylint: disable=django-not-configured -Check code quality using pycodestyle, pylint, and diff_quality. -""" - -import json -import os -import re -from datetime import datetime -from xml.sax.saxutils import quoteattr - -from paver.easy import BuildFailure, cmdopts, needs, sh, task - -from .utils.envs import Env -from .utils.timer import timed - -ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts' -JUNIT_XML_TEMPLATE = """ - -{failure_element} - -""" -JUNIT_XML_FAILURE_TEMPLATE = '' -START_TIME = datetime.utcnow() - - -def write_junit_xml(name, message=None): - """ - Write a JUnit results XML file describing the outcome of a quality check. - """ - if message: - failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) - else: - failure_element = '' - data = { - 'failure_count': 1 if message else 0, - 'failure_element': failure_element, - 'name': name, - 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), - } - Env.QUALITY_DIR.makedirs_p() - filename = Env.QUALITY_DIR / f'{name}.xml' - with open(filename, 'w') as f: - f.write(JUNIT_XML_TEMPLATE.format(**data)) - - -def fail_quality(name, message): - """ - Fail the specified quality check by generating the JUnit XML results file - and raising a ``BuildFailure``. - """ - write_junit_xml(name, message) - raise BuildFailure(message) - - -def top_python_dirs(dirname): - """ - Find the directories to start from in order to find all the Python files in `dirname`. - """ - top_dirs = [] - - dir_init = os.path.join(dirname, "__init__.py") - if os.path.exists(dir_init): - top_dirs.append(dirname) - - for directory in ['djangoapps', 'lib']: - subdir = os.path.join(dirname, directory) - subdir_init = os.path.join(subdir, "__init__.py") - if os.path.exists(subdir) and not os.path.exists(subdir_init): - dirs = os.listdir(subdir) - top_dirs.extend(d for d in dirs if os.path.isdir(os.path.join(subdir, d))) - - modules_to_remove = ['__pycache__'] - for module in modules_to_remove: - if module in top_dirs: - top_dirs.remove(module) - - return top_dirs - - -def _get_pep8_violations(clean=True): - """ - Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) - where violations_string is a string of all PEP 8 violations found, separated - by new lines. - """ - report_dir = (Env.REPORT_DIR / 'pep8') - if clean: - report_dir.rmtree(ignore_errors=True) - report_dir.makedirs_p() - report = report_dir / 'pep8.report' - - # Make sure the metrics subdirectory exists - Env.METRICS_DIR.makedirs_p() - - if not report.exists(): - sh(f'pycodestyle . | tee {report} -a') - - violations_list = _pep8_violations(report) - - return len(violations_list), violations_list - - -def _pep8_violations(report_file): - """ - Returns the list of all PEP 8 violations in the given report_file. - """ - with open(report_file) as f: - return f.readlines() - - -@task -@cmdopts([ - ("system=", "s", "System to act on"), -]) -@timed -def run_pep8(options): # pylint: disable=unused-argument - """ - Run pycodestyle on system code. - Fail the task if any violations are found. - """ - (count, violations_list) = _get_pep8_violations() - violations_list = ''.join(violations_list) - - # Print number of violations to log - violations_count_str = f"Number of PEP 8 violations: {count}" - print(violations_count_str) - print(violations_list) - - # Also write the number of violations to a file - with open(Env.METRICS_DIR / "pep8", "w") as f: - f.write(violations_count_str + '\n\n') - f.write(violations_list) - - # Fail if any violations are found - if count: - failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str - failure_string += f"\n\nViolations:\n{violations_list}" - fail_quality('pep8', failure_string) - else: - write_junit_xml('pep8') - - -@task -@needs( - 'pavelib.prereqs.install_node_prereqs', - 'pavelib.utils.test.utils.ensure_clean_package_lock', -) -@cmdopts([ - ("limit=", "l", "limit for number of acceptable violations"), -]) -@timed -def run_eslint(options): - """ - Runs eslint on static asset directories. - If limit option is passed, fails build if more violations than the limit are found. - """ - - eslint_report_dir = (Env.REPORT_DIR / "eslint") - eslint_report = eslint_report_dir / "eslint.report" - _prepare_report_dir(eslint_report_dir) - violations_limit = int(getattr(options, 'limit', -1)) - - sh( - "node --max_old_space_size=4096 node_modules/.bin/eslint " - "--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format( - eslint_report=eslint_report - ), - ignore_error=True - ) - - try: - num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) - except TypeError: - fail_quality( - 'eslint', - "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( - eslint_report=eslint_report - ) - ) - - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) - - # Fail if number of violations is greater than the limit - if num_violations > violations_limit > -1: - fail_quality( - 'eslint', - "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, violations_limit=violations_limit - ) - ) - else: - write_junit_xml('eslint') - - -def _get_stylelint_violations(): - """ - Returns the number of Stylelint violations. - """ - stylelint_report_dir = (Env.REPORT_DIR / "stylelint") - stylelint_report = stylelint_report_dir / "stylelint.report" - _prepare_report_dir(stylelint_report_dir) - formatter = 'node_modules/stylelint-formatter-pretty' - - sh( - "stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format( - formatter=formatter, - stylelint_report=stylelint_report, - ), - ignore_error=True - ) - - try: - return int(_get_count_from_last_line(stylelint_report, "stylelint")) - except TypeError: - fail_quality( - 'stylelint', - "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( - stylelint_report=stylelint_report - ) - ) - - -@task -@needs('pavelib.prereqs.install_node_prereqs') -@cmdopts([ - ("limit=", "l", "limit for number of acceptable violations"), -]) -@timed -def run_stylelint(options): - """ - Runs stylelint on Sass files. - If limit option is passed, fails build if more violations than the limit are found. - """ - violations_limit = 0 - num_violations = _get_stylelint_violations() - - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) - - # Fail if number of violations is greater than the limit - if num_violations > violations_limit: - fail_quality( - 'stylelint', - "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, - violations_limit=violations_limit, - ) - ) - else: - write_junit_xml('stylelint') - - -@task -@needs('pavelib.prereqs.install_python_prereqs') -@cmdopts([ - ("thresholds=", "t", "json containing limit for number of acceptable violations per rule"), -]) -@timed -def run_xsslint(options): - """ - Runs xsslint/xss_linter.py on the codebase - """ - - thresholds_option = getattr(options, 'thresholds', '{}') - try: - violation_thresholds = json.loads(thresholds_option) - except ValueError: - violation_thresholds = None - if isinstance(violation_thresholds, dict) is False or \ - any(key not in ("total", "rules") for key in violation_thresholds.keys()): - - fail_quality( - 'xsslint', - """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" - """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ - """with property names in double-quotes.""".format( - thresholds_option=thresholds_option - ) - ) - - xsslint_script = "xss_linter.py" - xsslint_report_dir = (Env.REPORT_DIR / "xsslint") - xsslint_report = xsslint_report_dir / "xsslint.report" - _prepare_report_dir(xsslint_report_dir) - - sh( - "{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format( - repo_root=Env.REPO_ROOT, - xsslint_script=xsslint_script, - xsslint_report=xsslint_report, - cfg_module='scripts.xsslint_config' - ), - ignore_error=True - ) - - xsslint_counts = _get_xsslint_counts(xsslint_report) - - try: - metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( - xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) - ) - if 'rules' in xsslint_counts and any(xsslint_counts['rules']): - metrics_str += "\n" - rule_keys = sorted(xsslint_counts['rules'].keys()) - for rule in rule_keys: - metrics_str += "{rule} violations: {count}\n".format( - rule=rule, - count=int(xsslint_counts['rules'][rule]) - ) - except TypeError: - fail_quality( - 'xsslint', - "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( - xsslint_script=xsslint_script, xsslint_report=xsslint_report - ) - ) - - metrics_report = (Env.METRICS_DIR / "xsslint") - # Record the metric - _write_metric(metrics_str, metrics_report) - # Print number of violations to log. - sh(f"cat {metrics_report}", ignore_error=True) - - error_message = "" - - # Test total violations against threshold. - if 'total' in list(violation_thresholds.keys()): - if violation_thresholds['total'] < xsslint_counts['total']: - error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( - count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] - ) - - # Test rule violations against thresholds. - if 'rules' in violation_thresholds: - threshold_keys = sorted(violation_thresholds['rules'].keys()) - for threshold_key in threshold_keys: - if threshold_key not in xsslint_counts['rules']: - error_message += ( - "\nNumber of {xsslint_script} violations for {rule} could not be found in " - "{xsslint_report}." - ).format( - xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report - ) - elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: - error_message += \ - "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( - rule=threshold_key, count=xsslint_counts['rules'][threshold_key], - violations_limit=violation_thresholds['rules'][threshold_key], - ) - - if error_message: - fail_quality( - 'xsslint', - "FAILURE: XSSLinter Failed.\n{error_message}\n" - "See {xsslint_report} or run the following command to hone in on the problem:\n" - " ./scripts/xss-commit-linter.sh -h".format( - error_message=error_message, xsslint_report=xsslint_report - ) - ) - else: - write_junit_xml('xsslint') - - -def _write_metric(metric, filename): - """ - Write a given metric to a given file - Used for things like reports/metrics/eslint, which will simply tell you the number of - eslint violations found - """ - Env.METRICS_DIR.makedirs_p() - - with open(filename, "w") as metric_file: - metric_file.write(str(metric)) - - -def _prepare_report_dir(dir_name): - """ - Sets a given directory to a created, but empty state - """ - dir_name.rmtree_p() - dir_name.mkdir_p() - - -def _get_report_contents(filename, report_name, last_line_only=False): - """ - Returns the contents of the given file. Use last_line_only to only return - the last line, which can be used for getting output from quality output - files. - - Arguments: - last_line_only: True to return the last line only, False to return a - string with full contents. - - Returns: - String containing full contents of the report, or the last line. - - """ - if os.path.isfile(filename): - with open(filename) as report_file: - if last_line_only: - lines = report_file.readlines() - for line in reversed(lines): - if line != '\n': - return line - return None - else: - return report_file.read() - else: - file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" - fail_quality(report_name, file_not_found_message) - - -def _get_count_from_last_line(filename, file_type): - """ - This will return the number in the last line of a file. - It is returning only the value (as a floating number). - """ - report_contents = _get_report_contents(filename, file_type, last_line_only=True) - - if report_contents is None: - return 0 - - last_line = report_contents.strip() - # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" - regex = r'^\d+' - - try: - return float(re.search(regex, last_line).group(0)) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - return None - - -def _get_xsslint_counts(filename): - """ - This returns a dict of violations from the xsslint report. - - Arguments: - filename: The name of the xsslint report. - - Returns: - A dict containing the following: - rules: A dict containing the count for each rule as follows: - violation-rule-id: N, where N is the number of violations - total: M, where M is the number of total violations - - """ - report_contents = _get_report_contents(filename, 'xsslint') - rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) - total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) - violations = {'rules': {}} - for violation_match in rule_count_regex.finditer(report_contents): - try: - violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) - except ValueError: - violations['rules'][violation_match.group('rule_id')] = None - try: - violations['total'] = int(total_count_regex.search(report_contents).group('count')) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - violations['total'] = None - return violations - - -def _extract_missing_pii_annotations(filename): - """ - Returns the number of uncovered models from the stdout report of django_find_annotations. - - Arguments: - filename: Filename where stdout of django_find_annotations was captured. - - Returns: - three-tuple containing: - 1. The number of uncovered models, - 2. A bool indicating whether the coverage is still below the threshold, and - 3. The full report as a string. - """ - uncovered_models = 0 - pii_check_passed = True - if os.path.isfile(filename): - with open(filename) as report_file: - lines = report_file.readlines() - - # Find the count of uncovered models. - uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') - for line in lines: - uncovered_match = uncovered_regex.match(line) - if uncovered_match: - uncovered_models = int(uncovered_match.groups()[0]) - break - - # Find a message which suggests the check failed. - failure_regex = re.compile(r'^Coverage threshold not met!') - for line in lines: - failure_match = failure_regex.match(line) - if failure_match: - pii_check_passed = False - break - - # Each line in lines already contains a newline. - full_log = ''.join(lines) - else: - fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') - - return (uncovered_models, pii_check_passed, full_log) - - -@task -@needs('pavelib.prereqs.install_python_prereqs') -@cmdopts([ - ("report-dir=", "r", "Directory in which to put PII reports"), -]) -@timed -def run_pii_check(options): - """ - Guarantee that all Django models are PII-annotated. - """ - pii_report_name = 'pii' - default_report_dir = (Env.REPORT_DIR / pii_report_name) - report_dir = getattr(options, 'report_dir', default_report_dir) - output_file = os.path.join(report_dir, 'pii_check_{}.report') - env_report = [] - pii_check_passed = True - for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): - try: - print() - print(f"Running {env_name} PII Annotation check and report") - print("-" * 45) - run_output_file = str(output_file).format(env_name.lower()) - sh( - "mkdir -p {} && " # lint-amnesty, pylint: disable=duplicate-string-formatting-argument - "export DJANGO_SETTINGS_MODULE={}; " - "code_annotations django_find_annotations " - "--config_file .pii_annotations.yml --report_path {} --app_name {} " - "--lint --report --coverage | tee {}".format( - report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file - ) - ) - uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) - env_report.append(( - uncovered_model_count, - full_log, - )) - - except BuildFailure as error_message: - fail_quality(pii_report_name, f'FAILURE: {error_message}') - - if not pii_check_passed_env: - pii_check_passed = False - - # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. - uncovered_count, full_log = max(env_report, key=lambda r: r[0]) - - # Write metric file. - if uncovered_count is None: - uncovered_count = 0 - metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" - _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) - - # Finally, fail the paver task if code_annotations suggests that the check failed. - if not pii_check_passed: - fail_quality('pii', full_log) - - -@task -@needs('pavelib.prereqs.install_python_prereqs') -@timed -def check_keywords(): - """ - Check Django model fields for names that conflict with a list of reserved keywords - """ - report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') - sh(f"mkdir -p {report_path}") - - overall_status = True - for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: - report_file = f"{env}_reserved_keyword_report.csv" - override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") - try: - sh( - "export DJANGO_SETTINGS_MODULE={settings_file}; " - "python manage.py {app} check_reserved_keywords " - "--override_file {override_file} " - "--report_path {report_path} " - "--report_file {report_file}".format( - settings_file=env_settings_file, app=env, override_file=override_file, - report_path=report_path, report_file=report_file - ) - ) - except BuildFailure: - overall_status = False - - if not overall_status: - fail_quality( - 'keywords', - 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( - report_path - ) - ) diff --git a/scripts/quality_test/js_test.py b/scripts/quality_test/js_test.py index 608a321a5998..3ca2b3e39090 100644 --- a/scripts/quality_test/js_test.py +++ b/scripts/quality_test/js_test.py @@ -64,16 +64,23 @@ def test_js(suite, mode, coverage, port, skip_clean): test_suite.run() -# @cmdopts([ -# ("suite=", "s", "Test suite to run"), -# ("coverage", "c", "Run test under coverage"), -# ]) -def test_js_run(options): +@click.command("test_js_run") +@click.option( + '--s', 'suite', + default='all', + help='Test suite to run.' +) +@click.option( + '--coverage', 'coverage', + default=True, + help='Run test under coverage' +) +def test_js_run(suite, coverage): """ Run the JavaScript tests and print results to the console """ - options.mode = 'run' - test_js(options) + + test_js(suite, 'run', coverage) # @cmdopts([ From 0cb72264b24cd4e8a659d3aa04a331e8c423240b Mon Sep 17 00:00:00 2001 From: salman2013 Date: Fri, 16 Aug 2024 16:04:42 +0500 Subject: [PATCH 64/78] fix: fix tests --- pavelib/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pavelib/__init__.py b/pavelib/__init__.py index 11f3e6e3c5b1..24f05618bdd7 100644 --- a/pavelib/__init__.py +++ b/pavelib/__init__.py @@ -3,4 +3,4 @@ """ -from . import assets, prereqs, quality +from . import assets From 029a9d82a35aef5d4145a91077e5331ac0b2ed1b Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 20 Aug 2024 15:04:21 +0500 Subject: [PATCH 65/78] chore: replace paver coverage tests --- .github/workflows/js-tests.yml | 1 + .github/workflows/quality-checks.yml | 16 --- Makefile | 5 +- pavelib/utils/envs.py | 18 +-- scripts/quality_test/js_test.py | 163 +++++++++++------------- scripts/quality_test/quality_test.py | 37 ------ scripts/quality_test/suites/js_suite.py | 3 - 7 files changed, 88 insertions(+), 155 deletions(-) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index 3600f1eafa06..b2a3d1effa26 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -72,6 +72,7 @@ jobs: run: | npm install -g jest make test-js + make test-coverage - name: Save Job Artifacts uses: actions/upload-artifact@v4 diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 08901debd90e..1bc50014754e 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -72,22 +72,6 @@ jobs: run: | pip install -e . - # - name: Run Python Quality Test - # working-directory: ${{ github.workspace }} - # env: - # PIP_SRC: ${{ runner.temp }} - # run: make test-lint - - # - name: Run Stylelint Tests - # env: - # PIP_SRC: ${{ runner.temp }} - # run: make test-stylelint - - # - name: Run Eslint Tests - # env: - # PIP_SRC: ${{ runner.temp }} - # run: make test-eslint - - name: Run Quality Tests env: PIP_SRC: ${{ runner.temp }} diff --git a/Makefile b/Makefile index 1e9f13150d92..ba0737c7d25e 100644 --- a/Makefile +++ b/Makefile @@ -224,6 +224,9 @@ test-check_keyword: python scripts/quality_test/quality_test.py check_keywords test-js: - xvfb-run --auto-servernum python scripts/quality_test/js_test.py + xvfb-run --auto-servernum python scripts/quality_test/js_test.py --option jstest + +test-coverage: + python scripts/quality_test/js_test.py --option coverage quality-test: test-lint test-eslint test-stylelint test-xsslint test-pi_check test-check_keyword \ No newline at end of file diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py index 953389bbe886..c4ad83299d12 100644 --- a/pavelib/utils/envs.py +++ b/pavelib/utils/envs.py @@ -52,18 +52,18 @@ class Env: QUALITY_DIR = REPORT_DIR / 'quality_junitxml' # Generic log dir - GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" + # GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" # Python unittest dirs - PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" + # PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" # Which Python version should be used in xdist workers? - PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") + # PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") # Directory that videos are served from - VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" + # VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" - PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" + # PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" # Detect if in a Docker container, and if so which one FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') @@ -113,15 +113,15 @@ class Env: JS_REPORT_DIR = REPORT_DIR / 'javascript' # Directories used for pavelib/ tests - IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') - LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] + # IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') + # LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] # Directory for i18n test reports - I18N_REPORT_DIR = REPORT_DIR / 'i18n' + # I18N_REPORT_DIR = REPORT_DIR / 'i18n' # Directory for keeping src folder that comes with pip installation. # Setting this is equivalent to passing `--src ` to pip directly. - PIP_SRC = os.environ.get("PIP_SRC") + # PIP_SRC = os.environ.get("PIP_SRC") # Service variant (lms, cms, etc.) configured with an environment variable # We use this to determine which envs.json file to load. diff --git a/scripts/quality_test/js_test.py b/scripts/quality_test/js_test.py index 3ca2b3e39090..db5b04fdc0ed 100644 --- a/scripts/quality_test/js_test.py +++ b/scripts/quality_test/js_test.py @@ -3,7 +3,10 @@ """ import click +import os +import re import sys +import subprocess from utils.envs import Env from suites import JestSnapshotTestSuite, JsTestSuite @@ -16,32 +19,6 @@ __test__ = False # do not collect -@click.command("test_js") -@click.option( - '--s', 'suite', - default='all', - help='Test suite to run.' -) -@click.option( - '--m', 'mode', - default='run', - help='dev or run' -) -@click.option( - '--coverage', 'coverage', - default=True, - help='Run test under coverage' -) -@click.option( - '--p', 'port', - default=None, - help='Port to run test server on (dev mode only)' -) -@click.option( - '--C', 'skip_clean', - default=False, - help='skip cleaning repository before running tests' -) def test_js(suite, mode, coverage, port, skip_clean): """ Run the JavaScript tests @@ -64,79 +41,87 @@ def test_js(suite, mode, coverage, port, skip_clean): test_suite.run() -@click.command("test_js_run") +# @needs('pavelib.prereqs.install_coverage_prereqs') +# @cmdopts([ +# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), +# ], share_with=['coverage']) + +def diff_coverage(): + """ + Build the diff coverage reports + """ + + compare_branch = 'origin/master' + + # Find all coverage XML files (both Python and JavaScript) + xml_reports = [] + for filepath in Env.REPORT_DIR.walk(): + if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): + xml_reports.append(filepath) + + if not xml_reports: + err_msg = colorize( + 'red', + "No coverage info found. Run `quality test` before running " + "`coverage test`.\n" + ) + sys.stderr.write(err_msg) + else: + xml_report_str = ' '.join(xml_reports) + diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + + # Generate the diff coverage reports (HTML and console) + # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 + command = ( + f"diff-cover {xml_report_str}" + f"--diff-range-notation '..'" + f"--compare-branch={compare_branch} " + f"--html-report {diff_html_path}" + ) + subprocess.run(command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True) + + +@click.command("main") +@click.option( + '--option', 'option', + help='Run javascript tests or coverage test as per given option' +) @click.option( '--s', 'suite', default='all', help='Test suite to run.' ) +@click.option( + '--m', 'mode', + default='run', + help='dev or run' +) @click.option( '--coverage', 'coverage', default=True, help='Run test under coverage' ) -def test_js_run(suite, coverage): - """ - Run the JavaScript tests and print results to the console - """ - - test_js(suite, 'run', coverage) - - -# @cmdopts([ -# ("suite=", "s", "Test suite to run"), -# ("port=", "p", "Port to run test server on"), -# ]) - -def test_js_dev(options): - """ - Run the JavaScript tests in your default browsers - """ - options.mode = 'dev' - test_js(options) - - -# @needs('pavelib.prereqs.install_coverage_prereqs') -# @cmdopts([ -# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), -# ], share_with=['coverage']) +@click.option( + '--p', 'port', + default=None, + help='Port to run test server on (dev mode only)' +) +@click.option( + '--C', 'skip_clean', + default=False, + help='skip cleaning repository before running tests' +) +def main(option, suite, mode, coverage, port, skip_clean): + if option == 'jstest': + test_js(suite, mode, coverage, port, skip_clean) + elif option == 'coverage': + diff_coverage() -# def diff_coverage(options): -# """ -# Build the diff coverage reports -# """ -# compare_branch = options.get('compare_branch', 'origin/master') - -# # Find all coverage XML files (both Python and JavaScript) -# xml_reports = [] - -# for filepath in Env.REPORT_DIR.walk(): -# if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): -# xml_reports.append(filepath) - -# if not xml_reports: -# err_msg = colorize( -# 'red', -# "No coverage info found. Run `paver test` before running " -# "`paver coverage`.\n" -# ) -# sys.stderr.write(err_msg) -# else: -# xml_report_str = ' '.join(xml_reports) -# diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') - -# # Generate the diff coverage reports (HTML and console) -# # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 -# sh( -# "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} " -# "--html-report {diff_html_path}".format( -# xml_report_str=xml_report_str, -# compare_branch=compare_branch, -# diff_html_path=diff_html_path, -# ) -# ) - -# print("\n") if __name__ == "__main__": - test_js() + main() diff --git a/scripts/quality_test/quality_test.py b/scripts/quality_test/quality_test.py index 19310ab690a9..e2c12aab0aa8 100644 --- a/scripts/quality_test/quality_test.py +++ b/scripts/quality_test/quality_test.py @@ -501,43 +501,6 @@ def run_xsslint(): print("successfully run xsslint") -def diff_coverage(): - """ - Build the diff coverage reports - """ - - compare_branch = 'origin/master' - - # Find all coverage XML files (both Python and JavaScript) - xml_reports = [] - REPO_ROOT = repo_root() - REPORT_DIR = REPO_ROOT / 'reports' - for filepath in REPORT_DIR.walk(): - if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): - xml_reports.append(filepath) - - if not xml_reports: - err_msg = colorize( - 'red', - "No coverage info found. Run `quality test` before running " - "`coverage test`.\n" - ) - sys.stderr.write(err_msg) - else: - xml_report_str = ' '.join(xml_reports) - diff_html_path = os.path.join(REPORT_DIR, 'diff_coverage_combined.html') - - # Generate the diff coverage reports (HTML and console) - # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 - command = ( - f"diff-cover {xml_report_str}" - f"--diff-range-notation '..'" - f"--compare-branch={compare_branch} " - f"--html-report {diff_html_path}" - ) - subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - - if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("command", choices=['eslint', 'stylelint', diff --git a/scripts/quality_test/suites/js_suite.py b/scripts/quality_test/suites/js_suite.py index 5b028a5b3227..f84a73c9350a 100644 --- a/scripts/quality_test/suites/js_suite.py +++ b/scripts/quality_test/suites/js_suite.py @@ -25,9 +25,6 @@ def __init__(self, *args, **kwargs): def __enter__(self): super().__enter__() - # if tasks.environment.dry_run: - # tasks.environment.info("make report_dir") - # else: self.report_dir.makedirs_p() # self.report_dir.mkdir(exist_ok=True) # if not self.skip_clean: From 90d58a267913f6c9e054ad71d0184a26af4c1b26 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 20 Aug 2024 16:42:26 +0500 Subject: [PATCH 66/78] fix: remove files which was accidentally added --- scripts/metrics/stylelint | 1 - scripts/run_stylelint.sh | 44 ----------------------------------- scripts/stylelint-results.xml | 1 - 3 files changed, 46 deletions(-) delete mode 100644 scripts/metrics/stylelint delete mode 100755 scripts/run_stylelint.sh delete mode 100644 scripts/stylelint-results.xml diff --git a/scripts/metrics/stylelint b/scripts/metrics/stylelint deleted file mode 100644 index 573541ac9702..000000000000 --- a/scripts/metrics/stylelint +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/scripts/run_stylelint.sh b/scripts/run_stylelint.sh deleted file mode 100755 index 20a5d6fd07ef..000000000000 --- a/scripts/run_stylelint.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Function to run stylelint and handle violations -function run_stylelint() { - # Define the limit of violations - local violations_limit=0 - - # Run stylelint and count the number of violations - local num_violations - num_violations=$(stylelint "**/*.scss" | grep -c "warning\|error") - - # Record the metric - echo "$num_violations" > "$METRICS_DIR/stylelint" - - # Check if number of violations is greater than the limit - if [ "$num_violations" -gt "$violations_limit" ]; then - fail_quality "stylelint" "FAILURE: Stylelint failed with too many violations: ($num_violations).\nThe limit is $violations_limit." - else - write_junit_xml "stylelint" - fi -} - -# Function to fail the build quality -function fail_quality() { - local tool=$1 - local message=$2 - echo "$message" - exit 1 -} - -# Function to write JUnit XML (dummy function for this example) -function write_junit_xml() { - local tool=$1 - echo "" > "$tool-results.xml" -} - -# Set the METRICS_DIR environment variable (change as needed) -export METRICS_DIR="./metrics" - -# Create the metrics directory if it doesn't exist -mkdir -p "$METRICS_DIR" - -# Run the stylelint function -run_stylelint diff --git a/scripts/stylelint-results.xml b/scripts/stylelint-results.xml deleted file mode 100644 index ad72ae477a7b..000000000000 --- a/scripts/stylelint-results.xml +++ /dev/null @@ -1 +0,0 @@ - From b323c641c18405af67e1c812593489447157d4c2 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 26 Aug 2024 13:14:32 +0500 Subject: [PATCH 67/78] fix: fix the comments received on PR review --- pavelib/paver_tests/test_assets.py | 232 +++++------ pavelib/prereqs.py | 351 +++++++++++++++++ pavelib/utils/envs.py | 27 -- scripts/generic-ci-tests.sh | 123 ------ scripts/js_test.py | 490 ++++++++++++++++++++++++ scripts/quality_test/js_test.py | 127 ------ scripts/quality_test/suites/__init__.py | 5 - scripts/quality_test/suites/js_suite.py | 104 ----- scripts/quality_test/suites/suite.py | 154 -------- scripts/quality_test/utils/envs.py | 135 ------- scripts/quality_test/utils/utils.py | 75 ---- stylelint.config.js | 9 +- 12 files changed, 958 insertions(+), 874 deletions(-) create mode 100644 pavelib/prereqs.py delete mode 100755 scripts/generic-ci-tests.sh create mode 100644 scripts/js_test.py delete mode 100644 scripts/quality_test/js_test.py delete mode 100644 scripts/quality_test/suites/__init__.py delete mode 100644 scripts/quality_test/suites/js_suite.py delete mode 100644 scripts/quality_test/suites/suite.py delete mode 100644 scripts/quality_test/utils/envs.py delete mode 100644 scripts/quality_test/utils/utils.py diff --git a/pavelib/paver_tests/test_assets.py b/pavelib/paver_tests/test_assets.py index f4d264fd5d77..bb943a4ca195 100644 --- a/pavelib/paver_tests/test_assets.py +++ b/pavelib/paver_tests/test_assets.py @@ -1,130 +1,130 @@ # """Unit tests for the Paver asset tasks.""" -# import json -# import os -# from pathlib import Path -# from unittest import TestCase -# from unittest.mock import patch +import json +import os +from pathlib import Path +from unittest import TestCase +from unittest.mock import patch -# import ddt -# import paver.easy -# from paver import tasks +import ddt +import paver.easy +from paver import tasks -# import pavelib.assets -# from pavelib.assets import Env +import pavelib.assets +from pavelib.assets import Env -# REPO_ROOT = Path(__file__).parent.parent.parent +REPO_ROOT = Path(__file__).parent.parent.parent -# LMS_SETTINGS = { -# "WEBPACK_CONFIG_PATH": "webpack.fake.config.js", -# "STATIC_ROOT": "/fake/lms/staticfiles", +LMS_SETTINGS = { + "WEBPACK_CONFIG_PATH": "webpack.fake.config.js", + "STATIC_ROOT": "/fake/lms/staticfiles", -# } -# CMS_SETTINGS = { -# "WEBPACK_CONFIG_PATH": "webpack.fake.config", -# "STATIC_ROOT": "/fake/cms/staticfiles", -# "JS_ENV_EXTRA_CONFIG": json.dumps({"key1": [True, False], "key2": {"key2.1": 1369, "key2.2": "1369"}}), -# } +} +CMS_SETTINGS = { + "WEBPACK_CONFIG_PATH": "webpack.fake.config", + "STATIC_ROOT": "/fake/cms/staticfiles", + "JS_ENV_EXTRA_CONFIG": json.dumps({"key1": [True, False], "key2": {"key2.1": 1369, "key2.2": "1369"}}), +} -# def _mock_get_django_settings(django_settings, system, settings=None): # pylint: disable=unused-argument -# return [(LMS_SETTINGS if system == "lms" else CMS_SETTINGS)[s] for s in django_settings] +def _mock_get_django_settings(django_settings, system, settings=None): # pylint: disable=unused-argument + return [(LMS_SETTINGS if system == "lms" else CMS_SETTINGS)[s] for s in django_settings] -# @ddt.ddt -# @patch.object(Env, 'get_django_settings', _mock_get_django_settings) -# @patch.object(Env, 'get_django_json_settings', _mock_get_django_settings) -# class TestDeprecatedPaverAssets(TestCase): -# """ -# Simple test to ensure that the soon-to-be-removed Paver commands are correctly translated into the new npm-run -# commands. -# """ -# def setUp(self): -# super().setUp() -# self.maxDiff = None -# os.environ['NO_PREREQ_INSTALL'] = 'true' -# tasks.environment = tasks.Environment() +@ddt.ddt +@patch.object(Env, 'get_django_settings', _mock_get_django_settings) +@patch.object(Env, 'get_django_json_settings', _mock_get_django_settings) +class TestDeprecatedPaverAssets(TestCase): + """ + Simple test to ensure that the soon-to-be-removed Paver commands are correctly translated into the new npm-run + commands. + """ + def setUp(self): + super().setUp() + self.maxDiff = None + os.environ['NO_PREREQ_INSTALL'] = 'true' + tasks.environment = tasks.Environment() -# def tearDown(self): -# super().tearDown() -# del os.environ['NO_PREREQ_INSTALL'] + def tearDown(self): + super().tearDown() + del os.environ['NO_PREREQ_INSTALL'] -# @ddt.data( -# dict( -# task_name='pavelib.assets.compile_sass', -# args=[], -# kwargs={}, -# expected=["npm run compile-sass --"], -# ), -# dict( -# task_name='pavelib.assets.compile_sass', -# args=[], -# kwargs={"system": "lms,studio"}, -# expected=["npm run compile-sass --"], -# ), -# dict( -# task_name='pavelib.assets.compile_sass', -# args=[], -# kwargs={"debug": True}, -# expected=["npm run compile-sass-dev --"], -# ), -# dict( -# task_name='pavelib.assets.compile_sass', -# args=[], -# kwargs={"system": "lms"}, -# expected=["npm run compile-sass -- --skip-cms"], -# ), -# dict( -# task_name='pavelib.assets.compile_sass', -# args=[], -# kwargs={"system": "studio"}, -# expected=["npm run compile-sass -- --skip-lms"], -# ), -# dict( -# task_name='pavelib.assets.compile_sass', -# args=[], -# kwargs={"system": "cms", "theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes"}, -# expected=[ -# "npm run compile-sass -- --skip-lms " + -# f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes" -# ], -# ), -# dict( -# task_name='pavelib.assets.compile_sass', -# args=[], -# kwargs={"theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes", "themes": "red-theme,test-theme"}, -# expected=[ -# "npm run compile-sass -- " + -# f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes " + -# "--theme red-theme --theme test-theme" -# ], -# ), -# dict( -# task_name='pavelib.assets.update_assets', -# args=["lms", "studio", "--settings=fake.settings"], -# kwargs={}, -# expected=[ -# ( -# "WEBPACK_CONFIG_PATH=webpack.fake.config.js " + -# "NODE_ENV=production " + -# "STATIC_ROOT_LMS=/fake/lms/staticfiles " + -# "STATIC_ROOT_CMS=/fake/cms/staticfiles " + -# 'JS_ENV_EXTRA_CONFIG=' + -# '"{\\"key1\\": [true, false], \\"key2\\": {\\"key2.1\\": 1369, \\"key2.2\\": \\"1369\\"}}" ' + -# "npm run webpack" -# ), -# "python manage.py lms --settings=fake.settings compile_sass lms ", -# "python manage.py cms --settings=fake.settings compile_sass cms ", -# ( -# "( ./manage.py lms --settings=fake.settings collectstatic --noinput ) && " + -# "( ./manage.py cms --settings=fake.settings collectstatic --noinput )" -# ), -# ], -# ), -# ) -# @ddt.unpack -# @patch.object(pavelib.assets, 'sh') -# def test_paver_assets_wrapper_invokes_new_commands(self, mock_sh, task_name, args, kwargs, expected): -# paver.easy.call_task(task_name, args=args, options=kwargs) -# assert [call_args[0] for (call_args, call_kwargs) in mock_sh.call_args_list] == expected + @ddt.data( + dict( + task_name='pavelib.assets.compile_sass', + args=[], + kwargs={}, + expected=["npm run compile-sass --"], + ), + dict( + task_name='pavelib.assets.compile_sass', + args=[], + kwargs={"system": "lms,studio"}, + expected=["npm run compile-sass --"], + ), + dict( + task_name='pavelib.assets.compile_sass', + args=[], + kwargs={"debug": True}, + expected=["npm run compile-sass-dev --"], + ), + dict( + task_name='pavelib.assets.compile_sass', + args=[], + kwargs={"system": "lms"}, + expected=["npm run compile-sass -- --skip-cms"], + ), + dict( + task_name='pavelib.assets.compile_sass', + args=[], + kwargs={"system": "studio"}, + expected=["npm run compile-sass -- --skip-lms"], + ), + dict( + task_name='pavelib.assets.compile_sass', + args=[], + kwargs={"system": "cms", "theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes"}, + expected=[ + "npm run compile-sass -- --skip-lms " + + f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes" + ], + ), + dict( + task_name='pavelib.assets.compile_sass', + args=[], + kwargs={"theme_dirs": f"{REPO_ROOT}/common/test,{REPO_ROOT}/themes", "themes": "red-theme,test-theme"}, + expected=[ + "npm run compile-sass -- " + + f"--theme-dir {REPO_ROOT}/common/test --theme-dir {REPO_ROOT}/themes " + + "--theme red-theme --theme test-theme" + ], + ), + dict( + task_name='pavelib.assets.update_assets', + args=["lms", "studio", "--settings=fake.settings"], + kwargs={}, + expected=[ + ( + "WEBPACK_CONFIG_PATH=webpack.fake.config.js " + + "NODE_ENV=production " + + "STATIC_ROOT_LMS=/fake/lms/staticfiles " + + "STATIC_ROOT_CMS=/fake/cms/staticfiles " + + 'JS_ENV_EXTRA_CONFIG=' + + '"{\\"key1\\": [true, false], \\"key2\\": {\\"key2.1\\": 1369, \\"key2.2\\": \\"1369\\"}}" ' + + "npm run webpack" + ), + "python manage.py lms --settings=fake.settings compile_sass lms ", + "python manage.py cms --settings=fake.settings compile_sass cms ", + ( + "( ./manage.py lms --settings=fake.settings collectstatic --noinput ) && " + + "( ./manage.py cms --settings=fake.settings collectstatic --noinput )" + ), + ], + ), + ) + @ddt.unpack + @patch.object(pavelib.assets, 'sh') + def test_paver_assets_wrapper_invokes_new_commands(self, mock_sh, task_name, args, kwargs, expected): + paver.easy.call_task(task_name, args=args, options=kwargs) + assert [call_args[0] for (call_args, call_kwargs) in mock_sh.call_args_list] == expected diff --git a/pavelib/prereqs.py b/pavelib/prereqs.py new file mode 100644 index 000000000000..4453176c94da --- /dev/null +++ b/pavelib/prereqs.py @@ -0,0 +1,351 @@ +""" +Install Python and Node prerequisites. +""" + + +import hashlib +import os +import re +import subprocess +import sys +from distutils import sysconfig # pylint: disable=deprecated-module + +from paver.easy import sh, task # lint-amnesty, pylint: disable=unused-import + +from .utils.envs import Env +from .utils.timer import timed + +PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache') +NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs" +NO_PYTHON_UNINSTALL_MESSAGE = 'NO_PYTHON_UNINSTALL is set. No attempts will be made to uninstall old Python libs.' +COVERAGE_REQ_FILE = 'requirements/edx/coverage.txt' + +# If you make any changes to this list you also need to make +# a corresponding change to circle.yml, which is how the python +# prerequisites are installed for builds on circleci.com +toxenv = os.environ.get('TOXENV') +if toxenv and toxenv != 'quality': + PYTHON_REQ_FILES = ['requirements/edx/testing.txt'] +else: + PYTHON_REQ_FILES = ['requirements/edx/development.txt'] + +# Developers can have private requirements, for local copies of github repos, +# or favorite debugging tools, etc. +PRIVATE_REQS = 'requirements/edx/private.txt' +if os.path.exists(PRIVATE_REQS): + PYTHON_REQ_FILES.append(PRIVATE_REQS) + + +def str2bool(s): + s = str(s) + return s.lower() in ('yes', 'true', 't', '1') + + +def no_prereq_install(): + """ + Determine if NO_PREREQ_INSTALL should be truthy or falsy. + """ + return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False')) + + +def no_python_uninstall(): + """ Determine if we should run the uninstall_python_packages task. """ + return str2bool(os.environ.get('NO_PYTHON_UNINSTALL', 'False')) + + +def create_prereqs_cache_dir(): + """Create the directory for storing the hashes, if it doesn't exist already.""" + try: + os.makedirs(PREREQS_STATE_DIR) + except OSError: + if not os.path.isdir(PREREQS_STATE_DIR): + raise + + +def compute_fingerprint(path_list): + """ + Hash the contents of all the files and directories in `path_list`. + Returns the hex digest. + """ + + hasher = hashlib.sha1() + + for path_item in path_list: + + # For directories, create a hash based on the modification times + # of first-level subdirectories + if os.path.isdir(path_item): + for dirname in sorted(os.listdir(path_item)): + path_name = os.path.join(path_item, dirname) + if os.path.isdir(path_name): + hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8')) + + # For files, hash the contents of the file + if os.path.isfile(path_item): + with open(path_item, "rb") as file_handle: + hasher.update(file_handle.read()) + + return hasher.hexdigest() + + +def prereq_cache(cache_name, paths, install_func): + """ + Conditionally execute `install_func()` only if the files/directories + specified by `paths` have changed. + + If the code executes successfully (no exceptions are thrown), the cache + is updated with the new hash. + """ + # Retrieve the old hash + cache_filename = cache_name.replace(" ", "_") + cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1") + old_hash = None + if os.path.isfile(cache_file_path): + with open(cache_file_path) as cache_file: + old_hash = cache_file.read() + + # Compare the old hash to the new hash + # If they do not match (either the cache hasn't been created, or the files have changed), + # then execute the code within the block. + new_hash = compute_fingerprint(paths) + if new_hash != old_hash: + install_func() + + # Update the cache with the new hash + # If the code executed within the context fails (throws an exception), + # then this step won't get executed. + create_prereqs_cache_dir() + with open(cache_file_path, "wb") as cache_file: + # Since the pip requirement files are modified during the install + # process, we need to store the hash generated AFTER the installation + post_install_hash = compute_fingerprint(paths) + cache_file.write(post_install_hash.encode('utf-8')) + else: + print(f'{cache_name} unchanged, skipping...') + + +def node_prereqs_installation(): + """ + Configures npm and installs Node prerequisites + """ + # Before July 2023, these directories were created and written to + # as root. Afterwards, they are created as being owned by the + # `app` user -- but also need to be deleted by that user (due to + # how npm runs post-install scripts.) Developers with an older + # devstack installation who are reprovisioning will see errors + # here if the files are still owned by root. Deleting the files in + # advance prevents this error. + # + # This hack should probably be left in place for at least a year. + # See ADR 17 for more background on the transition. + sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/") + # At the time of this writing, the js dir has git-versioned files + # but the css dir does not, so the latter would have been created + # as root-owned (in the process of creating the vendor + # subdirectory). Delete it only if empty, just in case + # git-versioned files are added later. + sh("rmdir common/static/common/css || true") + + # NPM installs hang sporadically. Log the installation process so that we + # determine if any packages are chronic offenders. + npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log' + npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with + npm_command = 'npm ci --verbose'.split() + + # The implementation of Paver's `sh` function returns before the forked + # actually returns. Using a Popen object so that we can ensure that + # the forked process has returned + proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with + retcode = proc.wait() + if retcode == 1: + raise Exception(f"npm install failed: See {npm_log_file_path}") + print("Successfully clean-installed NPM packages. Log found at {}".format( + npm_log_file_path + )) + + +def python_prereqs_installation(): + """ + Installs Python prerequisites + """ + # edx-platform installs some Python projects from within the edx-platform repo itself. + sh("pip install -e .") + for req_file in PYTHON_REQ_FILES: + pip_install_req_file(req_file) + + +def pip_install_req_file(req_file): + """Pip install the requirements file.""" + pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w' + sh(f"{pip_cmd} -r {req_file}") + + +@task +@timed +def install_node_prereqs(): + """ + Installs Node prerequisites + """ + if no_prereq_install(): + print(NO_PREREQ_MESSAGE) + return + + prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation) + + +# To add a package to the uninstall list, just add it to this list! No need +# to touch any other part of this file. +PACKAGES_TO_UNINSTALL = [ + "MySQL-python", # Because mysqlclient shares the same directory name + "South", # Because it interferes with Django 1.8 migrations. + "edxval", # Because it was bork-installed somehow. + "django-storages", + "django-oauth2-provider", # Because now it's called edx-django-oauth2-provider. + "edx-oauth2-provider", # Because it moved from github to pypi + "enum34", # Because enum34 is not needed in python>3.4 + "i18n-tools", # Because now it's called edx-i18n-tools + "moto", # Because we no longer use it and it conflicts with recent jsondiff versions + "python-saml", # Because python3-saml shares the same directory name + "pytest-faulthandler", # Because it was bundled into pytest + "djangorestframework-jwt", # Because now its called drf-jwt. +] + + +@task +@timed +def uninstall_python_packages(): + """ + Uninstall Python packages that need explicit uninstallation. + + Some Python packages that we no longer want need to be explicitly + uninstalled, notably, South. Some other packages were once installed in + ways that were resistant to being upgraded, like edxval. Also uninstall + them. + """ + + if no_python_uninstall(): + print(NO_PYTHON_UNINSTALL_MESSAGE) + return + + # So that we don't constantly uninstall things, use a hash of the packages + # to be uninstalled. Check it, and skip this if we're up to date. + hasher = hashlib.sha1() + hasher.update(repr(PACKAGES_TO_UNINSTALL).encode('utf-8')) + expected_version = hasher.hexdigest() + state_file_path = os.path.join(PREREQS_STATE_DIR, "Python_uninstall.sha1") + create_prereqs_cache_dir() + + if os.path.isfile(state_file_path): + with open(state_file_path) as state_file: + version = state_file.read() + if version == expected_version: + print('Python uninstalls unchanged, skipping...') + return + + # Run pip to find the packages we need to get rid of. Believe it or not, + # edx-val is installed in a way that it is present twice, so we have a loop + # to really really get rid of it. + for _ in range(3): + uninstalled = False + frozen = sh("pip freeze", capture=True) + + for package_name in PACKAGES_TO_UNINSTALL: + if package_in_frozen(package_name, frozen): + # Uninstall the pacakge + sh(f"pip uninstall --disable-pip-version-check -y {package_name}") + uninstalled = True + if not uninstalled: + break + else: + # We tried three times and didn't manage to get rid of the pests. + print("Couldn't uninstall unwanted Python packages!") + return + + # Write our version. + with open(state_file_path, "wb") as state_file: + state_file.write(expected_version.encode('utf-8')) + + +def package_in_frozen(package_name, frozen_output): + """Is this package in the output of 'pip freeze'?""" + # Look for either: + # + # PACKAGE-NAME== + # + # or: + # + # blah_blah#egg=package_name-version + # + pattern = r"(?mi)^{pkg}==|#egg={pkg_under}-".format( + pkg=re.escape(package_name), + pkg_under=re.escape(package_name.replace("-", "_")), + ) + return bool(re.search(pattern, frozen_output)) + + +@task +@timed +def install_coverage_prereqs(): + """ Install python prereqs for measuring coverage. """ + if no_prereq_install(): + print(NO_PREREQ_MESSAGE) + return + pip_install_req_file(COVERAGE_REQ_FILE) + + +@task +@timed +def install_python_prereqs(): + """ + Installs Python prerequisites. + """ + if no_prereq_install(): + print(NO_PREREQ_MESSAGE) + return + + uninstall_python_packages() + + # Include all of the requirements files in the fingerprint. + files_to_fingerprint = list(PYTHON_REQ_FILES) + + # Also fingerprint the directories where packages get installed: + # ("/edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages") + files_to_fingerprint.append(sysconfig.get_python_lib()) + + # In a virtualenv, "-e installs" get put in a src directory. + if Env.PIP_SRC: + src_dir = Env.PIP_SRC + else: + src_dir = os.path.join(sys.prefix, "src") + if os.path.isdir(src_dir): + files_to_fingerprint.append(src_dir) + + # Also fingerprint this source file, so that if the logic for installations + # changes, we will redo the installation. + this_file = __file__ + if this_file.endswith(".pyc"): + this_file = this_file[:-1] # use the .py file instead of the .pyc + files_to_fingerprint.append(this_file) + + prereq_cache("Python prereqs", files_to_fingerprint, python_prereqs_installation) + + +@task +@timed +def install_prereqs(): + """ + Installs Node and Python prerequisites + """ + if no_prereq_install(): + print(NO_PREREQ_MESSAGE) + return + + if not str2bool(os.environ.get('SKIP_NPM_INSTALL', 'False')): + install_node_prereqs() + install_python_prereqs() + log_installed_python_prereqs() + + +def log_installed_python_prereqs(): + """ Logs output of pip freeze for debugging. """ + sh("pip freeze > {}".format(Env.GEN_LOG_DIR + "/pip_freeze.log")) diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py index c4ad83299d12..c537d4e08dde 100644 --- a/pavelib/utils/envs.py +++ b/pavelib/utils/envs.py @@ -2,10 +2,8 @@ Helper functions for loading environment settings. """ -# import json import os import sys -# import subprocess from time import sleep from path import Path as path @@ -51,20 +49,6 @@ class Env: METRICS_DIR = REPORT_DIR / 'metrics' QUALITY_DIR = REPORT_DIR / 'quality_junitxml' - # Generic log dir - # GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" - - # Python unittest dirs - # PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" - - # Which Python version should be used in xdist workers? - # PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") - - # Directory that videos are served from - # VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" - - # PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" - # Detect if in a Docker container, and if so which one FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' @@ -112,17 +96,6 @@ class Env: JS_REPORT_DIR = REPORT_DIR / 'javascript' - # Directories used for pavelib/ tests - # IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') - # LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] - - # Directory for i18n test reports - # I18N_REPORT_DIR = REPORT_DIR / 'i18n' - - # Directory for keeping src folder that comes with pip installation. - # Setting this is equivalent to passing `--src ` to pip directly. - # PIP_SRC = os.environ.get("PIP_SRC") - # Service variant (lms, cms, etc.) configured with an environment variable # We use this to determine which envs.json file to load. SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh deleted file mode 100755 index cc4e7e3be586..000000000000 --- a/scripts/generic-ci-tests.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env bash -set -e - -############################################################################### -# -# generic-ci-tests.sh -# -# Execute some tests for edx-platform. -# (Most other tests are run by invoking `pytest`, `pylint`, etc. directly) -# -# This script can be called from CI jobs that define -# these environment variables: -# -# `TEST_SUITE` defines which kind of test to run. -# Possible values are: -# -# - "quality": Run the quality (pycodestyle/pylint) checks -# - "js-unit": Run the JavaScript tests -# - "pavelib-js-unit": Run the JavaScript tests and the Python unit -# tests from the pavelib/lib directory -# -############################################################################### - -# Clean up previous builds -git clean -qxfd - -function emptyxunit { - - cat > "reports/$1.xml" < - - - -END - -} - -# if specified tox environment is supported, prepend paver commands -# with tox env invocation -if [ -z ${TOX_ENV+x} ] || [[ ${TOX_ENV} == 'null' ]]; then - echo "TOX_ENV: ${TOX_ENV}" - TOX="" -elif tox -l |grep -q "${TOX_ENV}"; then - if [[ "${TOX_ENV}" == 'quality' ]]; then - TOX="" - else - TOX="tox -r -e ${TOX_ENV} --" - fi -else - echo "${TOX_ENV} is not currently supported. Please review the" - echo "tox.ini file to see which environments are supported" - exit 1 -fi - -PAVER_ARGS="-v" -export SUBSET_JOB=$JOB_NAME - -function run_paver_quality { - QUALITY_TASK=$1 - shift - mkdir -p test_root/log/ - LOG_PREFIX="test_root/log/$QUALITY_TASK" - $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { - echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):"; - tail -n 100 "$LOG_PREFIX.out.log" - echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):"; - tail -n 100 "$LOG_PREFIX.err.log" - return 1; - } - return 0; -} - -case "$TEST_SUITE" in - - "quality") - EXIT=0 - - mkdir -p reports - - # echo "Finding pycodestyle violations..." - # make test-lint - # echo "Finding ESLint violations and storing report..." - # make test-eslint - # echo "Finding Stylelint violations and storing report..." - # make test-stylelint - # echo "Running xss linter report." - # make test-xsslint - # run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } - # echo "Running PII checker on all Django models..." - # run_paver_quality run_pii_check || { EXIT=1; } - # echo "Running reserved keyword checker on all Django models..." - # run_paver_quality check_keywords || { EXIT=1; } - - # Need to create an empty test result so the post-build - # action doesn't fail the build. - emptyxunit "stub" - exit "$EXIT" - ;; - - "js-unit") - $TOX paver test_js --coverage - $TOX paver diff_coverage - ;; - - "pavelib-js-unit") - EXIT=0 - $TOX paver test_js --coverage --skip-clean || { EXIT=1; } - paver test_lib --skip-clean $PAVER_ARGS || { EXIT=1; } - - # This is to ensure that the build status of the shard is properly set. - # Because we are running two paver commands in a row, we need to capture - # their return codes in order to exit with a non-zero code if either of - # them fail. We put the || clause there because otherwise, when a paver - # command fails, this entire script will exit, and not run the second - # paver command in this case statement. So instead of exiting, the value - # of a variable named EXIT will be set to 1 if either of the paver - # commands fail. We then use this variable's value as our exit code. - # Note that by default the value of this variable EXIT is not set, so if - # neither command fails then the exit command resolves to simply exit - # which is considered successful. - exit "$EXIT" - ;; -esac diff --git a/scripts/js_test.py b/scripts/js_test.py new file mode 100644 index 000000000000..5d72c5c8df8d --- /dev/null +++ b/scripts/js_test.py @@ -0,0 +1,490 @@ +""" +Javascript test tasks +""" + +import click +import os +import re +import sys +import subprocess + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + +__test__ = False # do not collect + + +class Env: + """ + Load information about the execution environment. + """ + + @staticmethod + def repo_root(): + """ + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 + """ + file_path = path(__file__) + attempt = 1 + while True: + try: + absolute_path = file_path.abspath() + break + except OSError: + print(f'Attempt {attempt}/180 to get an absolute path failed') + if attempt < 180: + attempt += 1 + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise + return absolute_path.parent.parent.parent.parent + + # Root of the git repository (edx-platform) + REPO_ROOT = repo_root() + + # Reports Directory + REPORT_DIR = REPO_ROOT / 'reports' + + + # Detect if in a Docker container, and if so which one + FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') + USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' + + # Configured browser to use for the js test suites + SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') + if USING_DOCKER: + KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' + else: + KARMA_BROWSER = 'FirefoxNoUpdates' + + # Files used to run each of the js test suites + # TODO: Store this as a dict. Order seems to matter for some + # reason. See issue TE-415. + KARMA_CONFIG_FILES = [ + REPO_ROOT / 'cms/static/karma_cms.conf.js', + REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', + REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', + REPO_ROOT / 'lms/static/karma_lms.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', + REPO_ROOT / 'common/static/karma_common.conf.js', + REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', + ] + + JS_TEST_ID_KEYS = [ + 'cms', + 'cms-squire', + 'cms-webpack', + 'lms', + 'xmodule', + 'xmodule-webpack', + 'common', + 'common-requirejs', + 'jest-snapshot' + ] + + JS_REPORT_DIR = REPORT_DIR / 'javascript' + + # Service variant (lms, cms, etc.) configured with an environment variable + # We use this to determine which envs.json file to load. + SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) + + # If service variant not configured in env, then pass the correct + # environment for lms / cms + if not SERVICE_VARIANT: # this will intentionally catch ""; + if any(i in sys.argv[1:] for i in ('cms', 'studio')): + SERVICE_VARIANT = 'cms' + else: + SERVICE_VARIANT = 'lms' + + +# def clean_test_files(): +# """ +# Clean fixture files used by tests and .pyc files +# """ +# # "git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads" +# subprocess.run("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") +# # This find command removes all the *.pyc files that aren't in the .git +# # directory. See this blog post for more details: +# # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html +# subprocess.run(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") +# subprocess.run("rm -rf test_root/log/auto_screenshots/*") +# subprocess.run("rm -rf /tmp/mako_[cl]ms") + + +# def clean_dir(directory): +# """ +# Delete all the files from the specified directory. +# """ +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# subprocess.run(f'find {directory} -type f -delete') + + +# @task +# @cmdopts([ +# ('skip-clean', 'C', 'skip cleaning repository before running tests'), +# ('skip_clean', None, 'deprecated in favor of skip-clean'), +# ]) + +# def clean_reports_dir(options): +# """ +# Clean coverage files, to ensure that we don't use stale data to generate reports. +# """ +# if getattr(options, 'skip_clean', False): +# print('--skip-clean is set, skipping...') +# return + +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# reports_dir = Env.REPORT_DIR.makedirs_p() +# clean_dir(reports_dir) + + +class TestSuite: + """ + TestSuite is a class that defines how groups of tests run. + """ + def __init__(self, *args, **kwargs): + self.root = args[0] + self.subsuites = kwargs.get('subsuites', []) + self.failed_suites = [] + self.verbosity = int(kwargs.get('verbosity', 1)) + self.skip_clean = kwargs.get('skip_clean', False) + self.passthrough_options = kwargs.get('passthrough_options', []) + + def __enter__(self): + """ + This will run before the test suite is run with the run_suite_tests method. + If self.run_test is called directly, it should be run in a 'with' block to + ensure that the proper context is created. + + Specific setup tasks should be defined in each subsuite. + + i.e. Checking for and defining required directories. + """ + print(f"\nSetting up for {self.root}") + self.failed_suites = [] + + def __exit__(self, exc_type, exc_value, traceback): + """ + This is run after the tests run with the run_suite_tests method finish. + Specific clean up tasks should be defined in each subsuite. + + If self.run_test is called directly, it should be run in a 'with' block + to ensure that clean up happens properly. + + i.e. Cleaning mongo after the lms tests run. + """ + print(f"\nCleaning up after {self.root}") + + @property + def cmd(self): + """ + The command to run tests (as a string). For this base class there is none. + """ + return None + + @staticmethod + def kill_process(proc): + """ + Kill the process `proc` created with `subprocess`. + """ + p1_group = psutil.Process(proc.pid) + child_pids = p1_group.children(recursive=True) + + for child_pid in child_pids: + os.kill(child_pid.pid, signal.SIGKILL) + + @staticmethod + def is_success(exit_code): + """ + Determine if the given exit code represents a success of the test + suite. By default, only a zero counts as a success. + """ + return exit_code == 0 + + def run_test(self): + """ + Runs a self.cmd in a subprocess and waits for it to finish. + It returns False if errors or failures occur. Otherwise, it + returns True. + """ + # cmd = " ".join(self.cmd) + cmd = " ".join(str(part) for part in self.cmd) + sys.stdout.write(cmd) + + msg = colorize( + 'green', + '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), + ) + + sys.stdout.write(msg) + sys.stdout.flush() + + if 'TEST_SUITE' not in os.environ: + os.environ['TEST_SUITE'] = self.root.replace("/", "_") + kwargs = {'shell': True, 'cwd': None} + process = None + + try: + process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with + return self.is_success(process.wait()) + except KeyboardInterrupt: + self.kill_process(process) + sys.exit(1) + + def run_suite_tests(self): + """ + Runs each of the suites in self.subsuites while tracking failures + """ + # Uses __enter__ and __exit__ for context + with self: + # run the tests for this class, and for all subsuites + if self.cmd: + passed = self.run_test() + if not passed: + self.failed_suites.append(self) + + for suite in self.subsuites: + suite.run_suite_tests() + if suite.failed_suites: + self.failed_suites.extend(suite.failed_suites) + + def report_test_results(self): + """ + Writes a list of failed_suites to sys.stderr + """ + if self.failed_suites: + msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) + msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') + else: + msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) + + print(msg) + + def run(self): + """ + Runs the tests in the suite while tracking and reporting failures. + """ + self.run_suite_tests() + + # if tasks.environment.dry_run: + # return + + self.report_test_results() + + if self.failed_suites: + sys.exit(1) + + +class JsTestSuite(TestSuite): + """ + A class for running JavaScript tests. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.report_dir = Env.JS_REPORT_DIR + self.opts = kwargs + + suite = args[0] + self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] + + def __enter__(self): + super().__enter__() + self.report_dir.makedirs_p() + # self.report_dir.mkdir(exist_ok=True) + # if not self.skip_clean: + # test_utils.clean_test_files() + + # if self.mode == 'run' and not self.run_under_coverage: + # test_utils.clean_dir(self.report_dir) + + @property + def _default_subsuites(self): + """ + Returns all JS test suites + """ + return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] + + +class JsTestSubSuite(TestSuite): + """ + Class for JS suites like cms, cms-squire, lms, common, + common-requirejs and xmodule + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.test_id = args[0] + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.port = kwargs.get('port') + self.root = self.root + ' javascript' + self.report_dir = Env.JS_REPORT_DIR + + try: + self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] + except ValueError: + self.test_conf_file = Env.KARMA_CONFIG_FILES[0] + + self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' + self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' + + @property + def cmd(self): + """ + Run the tests using karma runner. + """ + cmd = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/karma", + "start", + self.test_conf_file, + "--single-run={}".format('false' if self.mode == 'dev' else 'true'), + "--capture-timeout=60000", + f"--junitreportpath={self.xunit_report}", + f"--browsers={Env.KARMA_BROWSER}", + ] + + if self.port: + cmd.append(f"--port={self.port}") + + if self.run_under_coverage: + cmd.extend([ + "--coverage", + f"--coveragereportpath={self.coverage_report}", + ]) + + return cmd + + +class JestSnapshotTestSuite(TestSuite): + """ + A class for running Jest Snapshot tests. + """ + @property + def cmd(self): + """ + Run the tests using Jest. + """ + return ["jest"] + + +def test_js(suite, mode, coverage, port, skip_clean): + """ + Run the JavaScript tests + """ + + if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): + sys.stderr.write( + "Unknown test suite. Please choose from ({suites})\n".format( + suites=", ".join(Env.JS_TEST_ID_KEYS) + ) + ) + return + + if suite != 'jest-snapshot': + test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) + test_suite.run() + + if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in + test_suite = JestSnapshotTestSuite('jest') + test_suite.run() + + +# @needs('pavelib.prereqs.install_coverage_prereqs') +# @cmdopts([ +# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), +# ], share_with=['coverage']) + +def diff_coverage(): + """ + Build the diff coverage reports + """ + + compare_branch = 'origin/master' + + # Find all coverage XML files (both Python and JavaScript) + xml_reports = [] + for filepath in Env.REPORT_DIR.walk(): + if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): + xml_reports.append(filepath) + + if not xml_reports: + err_msg = colorize( + 'red', + "No coverage info found. Run `quality test` before running " + "`coverage test`.\n" + ) + sys.stderr.write(err_msg) + else: + xml_report_str = ' '.join(xml_reports) + diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + + # Generate the diff coverage reports (HTML and console) + # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 + command = ( + f"diff-cover {xml_report_str}" + f"--diff-range-notation '..'" + f"--compare-branch={compare_branch} " + f"--html-report {diff_html_path}" + ) + subprocess.run(command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True) + + +@click.command("main") +@click.option( + '--option', 'option', + help='Run javascript tests or coverage test as per given option' +) +@click.option( + '--s', 'suite', + default='all', + help='Test suite to run.' +) +@click.option( + '--m', 'mode', + default='run', + help='dev or run' +) +@click.option( + '--coverage', 'coverage', + default=True, + help='Run test under coverage' +) +@click.option( + '--p', 'port', + default=None, + help='Port to run test server on (dev mode only)' +) +@click.option( + '--C', 'skip_clean', + default=False, + help='skip cleaning repository before running tests' +) +def main(option, suite, mode, coverage, port, skip_clean): + if option == 'jstest': + test_js(suite, mode, coverage, port, skip_clean) + elif option == 'coverage': + diff_coverage() + + +if __name__ == "__main__": + main() diff --git a/scripts/quality_test/js_test.py b/scripts/quality_test/js_test.py deleted file mode 100644 index db5b04fdc0ed..000000000000 --- a/scripts/quality_test/js_test.py +++ /dev/null @@ -1,127 +0,0 @@ -""" -Javascript test tasks -""" - -import click -import os -import re -import sys -import subprocess - -from utils.envs import Env -from suites import JestSnapshotTestSuite, JsTestSuite - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -def test_js(suite, mode, coverage, port, skip_clean): - """ - Run the JavaScript tests - """ - - if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): - sys.stderr.write( - "Unknown test suite. Please choose from ({suites})\n".format( - suites=", ".join(Env.JS_TEST_ID_KEYS) - ) - ) - return - - if suite != 'jest-snapshot': - test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) - test_suite.run() - - if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in - test_suite = JestSnapshotTestSuite('jest') - test_suite.run() - - -# @needs('pavelib.prereqs.install_coverage_prereqs') -# @cmdopts([ -# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), -# ], share_with=['coverage']) - -def diff_coverage(): - """ - Build the diff coverage reports - """ - - compare_branch = 'origin/master' - - # Find all coverage XML files (both Python and JavaScript) - xml_reports = [] - for filepath in Env.REPORT_DIR.walk(): - if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): - xml_reports.append(filepath) - - if not xml_reports: - err_msg = colorize( - 'red', - "No coverage info found. Run `quality test` before running " - "`coverage test`.\n" - ) - sys.stderr.write(err_msg) - else: - xml_report_str = ' '.join(xml_reports) - diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') - - # Generate the diff coverage reports (HTML and console) - # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 - command = ( - f"diff-cover {xml_report_str}" - f"--diff-range-notation '..'" - f"--compare-branch={compare_branch} " - f"--html-report {diff_html_path}" - ) - subprocess.run(command, - shell=True, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True) - - -@click.command("main") -@click.option( - '--option', 'option', - help='Run javascript tests or coverage test as per given option' -) -@click.option( - '--s', 'suite', - default='all', - help='Test suite to run.' -) -@click.option( - '--m', 'mode', - default='run', - help='dev or run' -) -@click.option( - '--coverage', 'coverage', - default=True, - help='Run test under coverage' -) -@click.option( - '--p', 'port', - default=None, - help='Port to run test server on (dev mode only)' -) -@click.option( - '--C', 'skip_clean', - default=False, - help='skip cleaning repository before running tests' -) -def main(option, suite, mode, coverage, port, skip_clean): - if option == 'jstest': - test_js(suite, mode, coverage, port, skip_clean) - elif option == 'coverage': - diff_coverage() - - -if __name__ == "__main__": - main() diff --git a/scripts/quality_test/suites/__init__.py b/scripts/quality_test/suites/__init__.py deleted file mode 100644 index 34ecd49c1c74..000000000000 --- a/scripts/quality_test/suites/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -TestSuite class and subclasses -""" -from .js_suite import JestSnapshotTestSuite, JsTestSuite -from .suite import TestSuite diff --git a/scripts/quality_test/suites/js_suite.py b/scripts/quality_test/suites/js_suite.py deleted file mode 100644 index f84a73c9350a..000000000000 --- a/scripts/quality_test/suites/js_suite.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Javascript test tasks -""" - -from utils.envs import Env -from utils import utils as test_utils -from .suite import TestSuite - -__test__ = False # do not collect - - -class JsTestSuite(TestSuite): - """ - A class for running JavaScript tests. - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.report_dir = Env.JS_REPORT_DIR - self.opts = kwargs - - suite = args[0] - self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] - - def __enter__(self): - super().__enter__() - self.report_dir.makedirs_p() - # self.report_dir.mkdir(exist_ok=True) - # if not self.skip_clean: - # test_utils.clean_test_files() - - # if self.mode == 'run' and not self.run_under_coverage: - # test_utils.clean_dir(self.report_dir) - - @property - def _default_subsuites(self): - """ - Returns all JS test suites - """ - return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] - - -class JsTestSubSuite(TestSuite): - """ - Class for JS suites like cms, cms-squire, lms, common, - common-requirejs and xmodule - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.test_id = args[0] - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.port = kwargs.get('port') - self.root = self.root + ' javascript' - self.report_dir = Env.JS_REPORT_DIR - - try: - self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] - except ValueError: - self.test_conf_file = Env.KARMA_CONFIG_FILES[0] - - self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' - self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' - - @property - def cmd(self): - """ - Run the tests using karma runner. - """ - cmd = [ - "node", - "--max_old_space_size=4096", - "node_modules/.bin/karma", - "start", - self.test_conf_file, - "--single-run={}".format('false' if self.mode == 'dev' else 'true'), - "--capture-timeout=60000", - f"--junitreportpath={self.xunit_report}", - f"--browsers={Env.KARMA_BROWSER}", - ] - - if self.port: - cmd.append(f"--port={self.port}") - - if self.run_under_coverage: - cmd.extend([ - "--coverage", - f"--coveragereportpath={self.coverage_report}", - ]) - - return cmd - - -class JestSnapshotTestSuite(TestSuite): - """ - A class for running Jest Snapshot tests. - """ - @property - def cmd(self): - """ - Run the tests using Jest. - """ - return ["jest"] diff --git a/scripts/quality_test/suites/suite.py b/scripts/quality_test/suites/suite.py deleted file mode 100644 index 19a749bc8344..000000000000 --- a/scripts/quality_test/suites/suite.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -A class used for defining and running test suites -""" - - -import os -import subprocess -import sys -import signal -import psutil - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -class TestSuite: - """ - TestSuite is a class that defines how groups of tests run. - """ - def __init__(self, *args, **kwargs): - self.root = args[0] - self.subsuites = kwargs.get('subsuites', []) - self.failed_suites = [] - self.verbosity = int(kwargs.get('verbosity', 1)) - self.skip_clean = kwargs.get('skip_clean', False) - self.passthrough_options = kwargs.get('passthrough_options', []) - - def __enter__(self): - """ - This will run before the test suite is run with the run_suite_tests method. - If self.run_test is called directly, it should be run in a 'with' block to - ensure that the proper context is created. - - Specific setup tasks should be defined in each subsuite. - - i.e. Checking for and defining required directories. - """ - print(f"\nSetting up for {self.root}") - self.failed_suites = [] - - def __exit__(self, exc_type, exc_value, traceback): - """ - This is run after the tests run with the run_suite_tests method finish. - Specific clean up tasks should be defined in each subsuite. - - If self.run_test is called directly, it should be run in a 'with' block - to ensure that clean up happens properly. - - i.e. Cleaning mongo after the lms tests run. - """ - print(f"\nCleaning up after {self.root}") - - @property - def cmd(self): - """ - The command to run tests (as a string). For this base class there is none. - """ - return None - - @staticmethod - def kill_process(proc): - """ - Kill the process `proc` created with `subprocess`. - """ - p1_group = psutil.Process(proc.pid) - child_pids = p1_group.children(recursive=True) - - for child_pid in child_pids: - os.kill(child_pid.pid, signal.SIGKILL) - - @staticmethod - def is_success(exit_code): - """ - Determine if the given exit code represents a success of the test - suite. By default, only a zero counts as a success. - """ - return exit_code == 0 - - def run_test(self): - """ - Runs a self.cmd in a subprocess and waits for it to finish. - It returns False if errors or failures occur. Otherwise, it - returns True. - """ - # cmd = " ".join(self.cmd) - cmd = " ".join(str(part) for part in self.cmd) - sys.stdout.write(cmd) - - msg = colorize( - 'green', - '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), - ) - - sys.stdout.write(msg) - sys.stdout.flush() - - if 'TEST_SUITE' not in os.environ: - os.environ['TEST_SUITE'] = self.root.replace("/", "_") - kwargs = {'shell': True, 'cwd': None} - process = None - - try: - process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with - return self.is_success(process.wait()) - except KeyboardInterrupt: - self.kill_process(process) - sys.exit(1) - - def run_suite_tests(self): - """ - Runs each of the suites in self.subsuites while tracking failures - """ - # Uses __enter__ and __exit__ for context - with self: - # run the tests for this class, and for all subsuites - if self.cmd: - passed = self.run_test() - if not passed: - self.failed_suites.append(self) - - for suite in self.subsuites: - suite.run_suite_tests() - if suite.failed_suites: - self.failed_suites.extend(suite.failed_suites) - - def report_test_results(self): - """ - Writes a list of failed_suites to sys.stderr - """ - if self.failed_suites: - msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) - msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') - else: - msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) - - print(msg) - - def run(self): - """ - Runs the tests in the suite while tracking and reporting failures. - """ - self.run_suite_tests() - - # if tasks.environment.dry_run: - # return - - self.report_test_results() - - if self.failed_suites: - sys.exit(1) diff --git a/scripts/quality_test/utils/envs.py b/scripts/quality_test/utils/envs.py deleted file mode 100644 index 37dac514d6c7..000000000000 --- a/scripts/quality_test/utils/envs.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -Helper functions for loading environment settings. -""" - -# import json -import os -import sys -# import subprocess -from time import sleep -from path import Path as path - - -def repo_root(): - """ - Get the root of the git repository (edx-platform). - - This sometimes fails on Docker Devstack, so it's been broken - down with some additional error handling. It usually starts - working within 30 seconds or so; for more details, see - https://openedx.atlassian.net/browse/PLAT-1629 and - https://github.com/docker/for-mac/issues/1509 - """ - file_path = path(__file__) - attempt = 1 - while True: - try: - absolute_path = file_path.abspath() - break - except OSError: - print(f'Attempt {attempt}/180 to get an absolute path failed') - if attempt < 180: - attempt += 1 - sleep(1) - else: - print('Unable to determine the absolute path of the edx-platform repo, aborting') - raise - return absolute_path.parent.parent.parent.parent - - -class Env: - """ - Load information about the execution environment. - """ - - # Root of the git repository (edx-platform) - REPO_ROOT = repo_root() - - # Reports Directory - REPORT_DIR = REPO_ROOT / 'reports' - METRICS_DIR = REPORT_DIR / 'metrics' - QUALITY_DIR = REPORT_DIR / 'quality_junitxml' - - # Generic log dir - GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" - - # Python unittest dirs - PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" - - # Which Python version should be used in xdist workers? - PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") - - # Directory that videos are served from - VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" - - PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" - - # Detect if in a Docker container, and if so which one - FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') - USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' - DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack' - TEST_SETTINGS = 'test' - - # Mongo databases that will be dropped before/after the tests run - MONGO_HOST = 'localhost' - - # Test Ids Directory - TEST_DIR = REPO_ROOT / ".testids" - - # Configured browser to use for the js test suites - SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') - if USING_DOCKER: - KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' - else: - KARMA_BROWSER = 'FirefoxNoUpdates' - - # Files used to run each of the js test suites - # TODO: Store this as a dict. Order seems to matter for some - # reason. See issue TE-415. - KARMA_CONFIG_FILES = [ - REPO_ROOT / 'cms/static/karma_cms.conf.js', - REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', - REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', - REPO_ROOT / 'lms/static/karma_lms.conf.js', - REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', - REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', - REPO_ROOT / 'common/static/karma_common.conf.js', - REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', - ] - - JS_TEST_ID_KEYS = [ - 'cms', - 'cms-squire', - 'cms-webpack', - 'lms', - 'xmodule', - 'xmodule-webpack', - 'common', - 'common-requirejs', - 'jest-snapshot' - ] - - JS_REPORT_DIR = REPORT_DIR / 'javascript' - - # Directories used for pavelib/ tests - IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') - LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] - - # Directory for i18n test reports - I18N_REPORT_DIR = REPORT_DIR / 'i18n' - - # Directory for keeping src folder that comes with pip installation. - # Setting this is equivalent to passing `--src ` to pip directly. - PIP_SRC = os.environ.get("PIP_SRC") - - # Service variant (lms, cms, etc.) configured with an environment variable - # We use this to determine which envs.json file to load. - SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) - - # If service variant not configured in env, then pass the correct - # environment for lms / cms - if not SERVICE_VARIANT: # this will intentionally catch ""; - if any(i in sys.argv[1:] for i in ('cms', 'studio')): - SERVICE_VARIANT = 'cms' - else: - SERVICE_VARIANT = 'lms' diff --git a/scripts/quality_test/utils/utils.py b/scripts/quality_test/utils/utils.py deleted file mode 100644 index 6913dc66fc24..000000000000 --- a/scripts/quality_test/utils/utils.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -Helper functions for test tasks -""" - - -import os -import subprocess -from .envs import Env - - -MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) - - -def clean_test_files(): - """ - Clean fixture files used by tests and .pyc files - """ - # "git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads" - subprocess.run("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") - # This find command removes all the *.pyc files that aren't in the .git - # directory. See this blog post for more details: - # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html - subprocess.run(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") - subprocess.run("rm -rf test_root/log/auto_screenshots/*") - subprocess.run("rm -rf /tmp/mako_[cl]ms") - - -def ensure_clean_package_lock(): - """ - Ensure no untracked changes have been made in the current git context. - """ - try: - # Run git diff command to check for changes in package-lock.json - result = subprocess.run( - ["git", "diff", "--name-only", "--exit-code", "package-lock.json"], - capture_output=True, # Capture stdout and stderr - text=True, # Decode output to text - check=True # Raise error for non-zero exit code - ) - # No differences found in package-lock.json - print("package-lock.json is clean.") - except subprocess.CalledProcessError as e: - # Git diff command returned non-zero exit code (changes detected) - print("Dirty package-lock.json, run 'npm install' and commit the generated changes.") - print(e.stderr) # Print any error output from the command - raise # Re-raise the exception to propagate the error - - -def clean_dir(directory): - """ - Delete all the files from the specified directory. - """ - # We delete the files but preserve the directory structure - # so that coverage.py has a place to put the reports. - subprocess.run(f'find {directory} -type f -delete') - - -# @task -# @cmdopts([ -# ('skip-clean', 'C', 'skip cleaning repository before running tests'), -# ('skip_clean', None, 'deprecated in favor of skip-clean'), -# ]) - -def clean_reports_dir(options): - """ - Clean coverage files, to ensure that we don't use stale data to generate reports. - """ - if getattr(options, 'skip_clean', False): - print('--skip-clean is set, skipping...') - return - - # We delete the files but preserve the directory structure - # so that coverage.py has a place to put the reports. - reports_dir = Env.REPORT_DIR.makedirs_p() - clean_dir(reports_dir) diff --git a/stylelint.config.js b/stylelint.config.js index fd5566ce6c77..bd7769911708 100644 --- a/stylelint.config.js +++ b/stylelint.config.js @@ -1,10 +1,3 @@ module.exports = { - extends: '@edx/stylelint-config-edx', - rules: { - 'selector-anb-no-unmatchable': null, // Disable the unknown rule - 'no-descending-specificity': null, - 'declaration-block-no-duplicate-properties': [true, { - ignore: ['consecutive-duplicates'] - }] - } + extends: '@edx/stylelint-config-edx' }; From 515080e8c9e9d01916ec3b85bd67b6fc7bf6344d Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 26 Aug 2024 15:55:07 +0500 Subject: [PATCH 68/78] fix: fix tests --- .github/workflows/js-tests.yml | 2 +- Makefile | 4 ++-- scripts/js_test.py | 7 ++++--- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index b2a3d1effa26..cc01f83ab0d1 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -71,7 +71,7 @@ jobs: - name: Run JS Tests run: | npm install -g jest - make test-js + xvfb-run --auto-servernum make test-js make test-coverage - name: Save Job Artifacts diff --git a/Makefile b/Makefile index ba0737c7d25e..f463dac73652 100644 --- a/Makefile +++ b/Makefile @@ -224,9 +224,9 @@ test-check_keyword: python scripts/quality_test/quality_test.py check_keywords test-js: - xvfb-run --auto-servernum python scripts/quality_test/js_test.py --option jstest + python scripts/js_test.py --option jstest test-coverage: - python scripts/quality_test/js_test.py --option coverage + python scripts/js_test.py --option coverage quality-test: test-lint test-eslint test-stylelint test-xsslint test-pi_check test-check_keyword \ No newline at end of file diff --git a/scripts/js_test.py b/scripts/js_test.py index 5d72c5c8df8d..710942590b95 100644 --- a/scripts/js_test.py +++ b/scripts/js_test.py @@ -8,6 +8,8 @@ import sys import subprocess +from path import Path as path + try: from pygments.console import colorize except ImportError: @@ -20,7 +22,7 @@ class Env: """ Load information about the execution environment. """ - + @staticmethod def repo_root(): """ @@ -54,11 +56,10 @@ def repo_root(): # Reports Directory REPORT_DIR = REPO_ROOT / 'reports' - # Detect if in a Docker container, and if so which one FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' - + # Configured browser to use for the js test suites SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') if USING_DOCKER: From ba666b13a372e3219f7e06d7fea604da9c48c263 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 26 Aug 2024 16:03:47 +0500 Subject: [PATCH 69/78] fix: fix tests --- .github/workflows/js-tests.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index cc01f83ab0d1..b2a3d1effa26 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -71,7 +71,7 @@ jobs: - name: Run JS Tests run: | npm install -g jest - xvfb-run --auto-servernum make test-js + make test-js make test-coverage - name: Save Job Artifacts diff --git a/Makefile b/Makefile index f463dac73652..5b72029f6ec0 100644 --- a/Makefile +++ b/Makefile @@ -224,7 +224,7 @@ test-check_keyword: python scripts/quality_test/quality_test.py check_keywords test-js: - python scripts/js_test.py --option jstest + xvfb-run --auto-servernum python scripts/js_test.py --option jstest test-coverage: python scripts/js_test.py --option coverage From 75ee133a0d29dd5abd00ed860a1cb313970a408e Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 26 Aug 2024 16:36:28 +0500 Subject: [PATCH 70/78] fix: fix tests --- scripts/js_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/js_test.py b/scripts/js_test.py index 710942590b95..69be37f602fe 100644 --- a/scripts/js_test.py +++ b/scripts/js_test.py @@ -34,6 +34,7 @@ def repo_root(): https://openedx.atlassian.net/browse/PLAT-1629 and https://github.com/docker/for-mac/issues/1509 """ + file_path = path(__file__) attempt = 1 while True: @@ -48,7 +49,7 @@ def repo_root(): else: print('Unable to determine the absolute path of the edx-platform repo, aborting') raise - return absolute_path.parent.parent.parent.parent + return absolute_path.parent.parent # Root of the git repository (edx-platform) REPO_ROOT = repo_root() From c63892ff2d23b9ebcfb1a537d47f40fafbddb240 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 27 Aug 2024 11:36:28 +0500 Subject: [PATCH 71/78] fix: fix the pylint and unit tests --- pavelib/paver_tests/test_assets.py | 2 +- pavelib/utils/envs.py | 163 ++++++++++++++++++++++++++++- 2 files changed, 162 insertions(+), 3 deletions(-) diff --git a/pavelib/paver_tests/test_assets.py b/pavelib/paver_tests/test_assets.py index bb943a4ca195..f7100a7f03c3 100644 --- a/pavelib/paver_tests/test_assets.py +++ b/pavelib/paver_tests/test_assets.py @@ -1,4 +1,4 @@ -# """Unit tests for the Paver asset tasks.""" +"""Unit tests for the Paver asset tasks.""" import json import os diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py index c537d4e08dde..42c25f9ea942 100644 --- a/pavelib/utils/envs.py +++ b/pavelib/utils/envs.py @@ -1,12 +1,18 @@ + """ Helper functions for loading environment settings. """ - +import configparser +import json import os import sys from time import sleep +from lazy import lazy from path import Path as path +from paver.easy import BuildFailure, sh + +from pavelib.utils.cmd import django_cmd def repo_root(): @@ -33,7 +39,7 @@ def repo_root(): else: print('Unable to determine the absolute path of the edx-platform repo, aborting') raise - return absolute_path.parent.parent.parent.parent + return absolute_path.parent.parent.parent class Env: @@ -49,6 +55,20 @@ class Env: METRICS_DIR = REPORT_DIR / 'metrics' QUALITY_DIR = REPORT_DIR / 'quality_junitxml' + # Generic log dir + GEN_LOG_DIR = REPO_ROOT / "test_root" / "log" + + # Python unittest dirs + PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc" + + # Which Python version should be used in xdist workers? + PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7") + + # Directory that videos are served from + VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video" + + PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log" + # Detect if in a Docker container, and if so which one FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' @@ -96,6 +116,17 @@ class Env: JS_REPORT_DIR = REPORT_DIR / 'javascript' + # Directories used for pavelib/ tests + IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache') + LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")] + + # Directory for i18n test reports + I18N_REPORT_DIR = REPORT_DIR / 'i18n' + + # Directory for keeping src folder that comes with pip installation. + # Setting this is equivalent to passing `--src ` to pip directly. + PIP_SRC = os.environ.get("PIP_SRC") + # Service variant (lms, cms, etc.) configured with an environment variable # We use this to determine which envs.json file to load. SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) @@ -107,3 +138,131 @@ class Env: SERVICE_VARIANT = 'cms' else: SERVICE_VARIANT = 'lms' + + @classmethod + def get_django_settings(cls, django_settings, system, settings=None, print_setting_args=None): + """ + Interrogate Django environment for specific settings values + :param django_settings: list of django settings values to get + :param system: the django app to use when asking for the setting (lms | cms) + :param settings: the settings file to use when asking for the value + :param print_setting_args: the additional arguments to send to print_settings + :return: unicode value of the django setting + """ + if not settings: + settings = os.environ.get("EDX_PLATFORM_SETTINGS", "aws") + log_dir = os.path.dirname(cls.PRINT_SETTINGS_LOG_FILE) + if not os.path.exists(log_dir): + os.makedirs(log_dir) + settings_length = len(django_settings) + django_settings = ' '.join(django_settings) # parse_known_args makes a list again + print_setting_args = ' '.join(print_setting_args or []) + try: + value = sh( + django_cmd( + system, + settings, + "print_setting {django_settings} 2>{log_file} {print_setting_args}".format( + django_settings=django_settings, + print_setting_args=print_setting_args, + log_file=cls.PRINT_SETTINGS_LOG_FILE + ).strip() + ), + capture=True + ) + # else for cases where values are not found & sh returns one None value + return tuple(str(value).splitlines()) if value else tuple(None for _ in range(settings_length)) + except BuildFailure: + print(f"Unable to print the value of the {django_settings} setting:") + with open(cls.PRINT_SETTINGS_LOG_FILE) as f: + print(f.read()) + sys.exit(1) + + @classmethod + def get_django_json_settings(cls, django_settings, system, settings=None): + """ + Interrogate Django environment for specific settings value + :param django_settings: list of django settings values to get + :param system: the django app to use when asking for the setting (lms | cms) + :param settings: the settings file to use when asking for the value + :return: json string value of the django setting + """ + return cls.get_django_settings( + django_settings, + system, + settings=settings, + print_setting_args=["--json"], + ) + + @classmethod + def covered_modules(cls): + """ + List the source modules listed in .coveragerc for which coverage + will be measured. + """ + coveragerc = configparser.RawConfigParser() + coveragerc.read(cls.PYTHON_COVERAGERC) + modules = coveragerc.get('run', 'source') + result = [] + for module in modules.split('\n'): + module = module.strip() + if module: + result.append(module) + return result + + @lazy + def env_tokens(self): + """ + Return a dict of environment settings. + If we couldn't find the JSON file, issue a warning and return an empty dict. + """ + + # Find the env JSON file + if self.SERVICE_VARIANT: + env_path = self.REPO_ROOT.parent / f"{self.SERVICE_VARIANT}.env.json" + else: + env_path = path("env.json").abspath() + + # If the file does not exist, here or one level up, + # issue a warning and return an empty dict + if not env_path.isfile(): + env_path = env_path.parent.parent / env_path.basename() + if not env_path.isfile(): + print( + "Warning: could not find environment JSON file " + "at '{path}'".format(path=env_path), + file=sys.stderr, + ) + return {} + + # Otherwise, load the file as JSON and return the resulting dict + try: + with open(env_path) as env_file: + return json.load(env_file) + + except ValueError: + print( + "Error: Could not parse JSON " + "in {path}".format(path=env_path), + file=sys.stderr, + ) + sys.exit(1) + + @lazy + def feature_flags(self): + """ + Return a dictionary of feature flags configured by the environment. + """ + return self.env_tokens.get('FEATURES', {}) + + @classmethod + def rsync_dirs(cls): + """ + List the directories that should be synced during pytest-xdist + execution. Needs to include all modules for which coverage is + measured, not just the tests being run. + """ + result = set() + for module in cls.covered_modules(): + result.add(module.split('/')[0]) + return result From 53091602451c52efcc39322dc6327d70fa5c4750 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 27 Aug 2024 11:59:48 +0500 Subject: [PATCH 72/78] fix: code improvements --- .github/workflows/js-tests.yml | 2 +- .github/workflows/quality-checks.yml | 12 ++++++------ Makefile | 16 ++++++++-------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index b2a3d1effa26..cc01f83ab0d1 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -71,7 +71,7 @@ jobs: - name: Run JS Tests run: | npm install -g jest - make test-js + xvfb-run --auto-servernum make test-js make test-coverage - name: Save Job Artifacts diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 1bc50014754e..68e980bf3440 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -77,12 +77,12 @@ jobs: PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} run: | - make test-lint - make test-eslint - make test-stylelint - make test-xsslint - make test-pi_check - make test-check_keyword + make pycodestyle + make eslint + make stylelint + make xsslint + make pi_check + make check_keyword - name: Save Job Artifacts if: always() diff --git a/Makefile b/Makefile index 5b72029f6ec0..fbe5207cbfc7 100644 --- a/Makefile +++ b/Makefile @@ -205,28 +205,28 @@ migrate: migrate-lms migrate-cms ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev -test-eslint: +eslint: python scripts/quality_test/quality_test.py eslint -test-stylelint: +stylelint: python scripts/quality_test/quality_test.py stylelint -test-xsslint: +xsslint: python scripts/quality_test/quality_test.py xsslint -test-lint: +pycodestyle: pycodestyle . -test-pi_check: +pi_check: python scripts/quality_test/quality_test.py pii_check -test-check_keyword: +check_keyword: python scripts/quality_test/quality_test.py check_keywords test-js: - xvfb-run --auto-servernum python scripts/js_test.py --option jstest + python scripts/js_test.py --option jstest test-coverage: python scripts/js_test.py --option coverage -quality-test: test-lint test-eslint test-stylelint test-xsslint test-pi_check test-check_keyword \ No newline at end of file +quality-test: pycodestyle eslint stylelint xsslint pi_check check_keyword \ No newline at end of file From 2f3169a1dae30f2229fc6200056bd5a96f88a982 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Tue, 10 Sep 2024 08:53:09 +0500 Subject: [PATCH 73/78] fix: fix the comments found in PR review --- .github/workflows/js-tests.yml | 2 +- .github/workflows/quality-checks.yml | 4 +- Makefile | 31 +++---- pavelib/paver_tests/utils.py | 1 - pavelib/utils/envs.py | 1 - scripts/{quality_test => }/quality_test.py | 98 +++++++++++++++++----- scripts/quality_test/__init__.py | 0 7 files changed, 97 insertions(+), 40 deletions(-) rename scripts/{quality_test => }/quality_test.py (87%) delete mode 100644 scripts/quality_test/__init__.py diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index cc01f83ab0d1..597f7e0bb4c3 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -72,7 +72,7 @@ jobs: run: | npm install -g jest xvfb-run --auto-servernum make test-js - make test-coverage + make coverage-js - name: Save Job Artifacts uses: actions/upload-artifact@v4 diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 68e980bf3440..28c3841acc54 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -81,8 +81,8 @@ jobs: make eslint make stylelint make xsslint - make pi_check - make check_keyword + make pii_check + make check_keywords - name: Save Job Artifacts if: always() diff --git a/Makefile b/Makefile index fbe5207cbfc7..35db3543600a 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ guides help lint-imports local-requirements migrate migrate-lms migrate-cms \ pre-requirements pull pull_xblock_translations pull_translations push_translations \ requirements shell swagger \ - technical-docs test-requirements ubuntu-requirements upgrade-package upgrade pep8_test + technical-docs test-requirements ubuntu-requirements upgrade-package upgrade # Careful with mktemp syntax: it has to work on Mac and Ubuntu, which have differences. PRIVATE_FILES := $(shell mktemp -u /tmp/private_files.XXXXXX) @@ -205,28 +205,29 @@ migrate: migrate-lms migrate-cms ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev -eslint: - python scripts/quality_test/quality_test.py eslint +eslint: ## check javascript for quality issues + python scripts/quality_test.py eslint -stylelint: - python scripts/quality_test/quality_test.py stylelint +stylelint: ## check css/scss for quality issues + python scripts/quality_test.py stylelint -xsslint: - python scripts/quality_test/quality_test.py xsslint +xsslint: ## check xss for quality issues + python scripts/quality_test.py xsslint -pycodestyle: +pycodestyle: ## check python files for quality issues pycodestyle . -pi_check: - python scripts/quality_test/quality_test.py pii_check +pii_check: ## check django models for pii annotations + python scripts/quality_test.py pii_check -check_keyword: - python scripts/quality_test/quality_test.py check_keywords +check_keywords: ## check django models for reserve keywords + python scripts/quality_test.py check_keywords -test-js: +test-js: ## run javascript tests python scripts/js_test.py --option jstest -test-coverage: +coverage-js: ## run javascript coverage test python scripts/js_test.py --option coverage -quality-test: pycodestyle eslint stylelint xsslint pi_check check_keyword \ No newline at end of file +quality: ## run all quality tests + pycodestyle eslint stylelint xsslint pii_check check_keywords \ No newline at end of file diff --git a/pavelib/paver_tests/utils.py b/pavelib/paver_tests/utils.py index 9abe60e4730e..1db26cf76a4c 100644 --- a/pavelib/paver_tests/utils.py +++ b/pavelib/paver_tests/utils.py @@ -13,7 +13,6 @@ class PaverTestCase(TestCase): """ Base class for Paver test cases. """ - def setUp(self): super().setUp() diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py index 42c25f9ea942..d2cdd4a77d7a 100644 --- a/pavelib/utils/envs.py +++ b/pavelib/utils/envs.py @@ -1,4 +1,3 @@ - """ Helper functions for loading environment settings. """ diff --git a/scripts/quality_test/quality_test.py b/scripts/quality_test.py similarity index 87% rename from scripts/quality_test/quality_test.py rename to scripts/quality_test.py index e2c12aab0aa8..eb4d5cf812ba 100644 --- a/scripts/quality_test/quality_test.py +++ b/scripts/quality_test.py @@ -133,7 +133,7 @@ def _get_stylelint_violations(): result = subprocess.run( command, shell=True, - check=False, + check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True @@ -150,7 +150,6 @@ def _get_stylelint_violations(): ) ) - def run_eslint(): """ Runs eslint on static asset directories. @@ -159,52 +158,111 @@ def run_eslint(): REPO_ROOT = repo_root() REPORT_DIR = REPO_ROOT / 'reports' - eslint_report_dir = (REPORT_DIR / "eslint") + eslint_report_dir = REPORT_DIR / "eslint" eslint_report = eslint_report_dir / "eslint.report" _prepare_report_dir(eslint_report_dir) violations_limit = 4950 - command = ( - "node --max_old_space_size=4096 node_modules/.bin/eslint " - "--ext .js --ext .jsx --format=compact ." - ) + command = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/eslint", + "--ext", ".js", + "--ext", ".jsx", + "--format=compact", + "." + ] + with open(eslint_report, 'w') as report_file: # Run the command result = subprocess.run( command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + stdout=report_file, + stderr=subprocess.STDOUT, text=True, check=False ) - - # Write the output to the report file - report_file.write(result.stdout) + import pdb; pdb.set_trace() + + # Print the content of the report file for debugging + with open(eslint_report, 'r') as report_file: + report_content = report_file.read() + print("ESLint report content:") + print(report_content) try: num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) except TypeError: fail_quality( 'eslint', - "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( - eslint_report=eslint_report - ) + f"FAILURE: Number of eslint violations could not be found in {eslint_report}" ) # Fail if number of violations is greater than the limit if num_violations > violations_limit > -1: fail_quality( 'eslint', - "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, violations_limit=violations_limit - ) + f"FAILURE: Too many eslint violations ({num_violations}).\nThe limit is {violations_limit}." ) else: print("successfully run eslint with violations") print(num_violations) +# def run_eslint(): +# """ +# Runs eslint on static asset directories. +# If limit option is passed, fails build if more violations than the limit are found. +# """ + +# REPO_ROOT = repo_root() +# REPORT_DIR = REPO_ROOT / 'reports' +# eslint_report_dir = (REPORT_DIR / "eslint") +# eslint_report = eslint_report_dir / "eslint.report" +# _prepare_report_dir(eslint_report_dir) +# violations_limit = 4950 + +# command = ( +# "node --max_old_space_size=4096 node_modules/.bin/eslint " +# "--ext .js --ext .jsx --format=compact ." +# ) +# with open(eslint_report, 'w') as report_file: +# # Run the command +# result = subprocess.run( +# command, +# shell=True, +# stdout=subprocess.PIPE, +# stderr=subprocess.PIPE, +# text=True, +# check=False +# ) + +# # Write the output to the report file +# report_file.write(result.stdout) + +# try: +# num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) +# except TypeError: +# fail_quality( +# 'eslint', +# "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( +# eslint_report=eslint_report +# ) +# ) + +# # Fail if number of violations is greater than the limit +# if num_violations > violations_limit > -1: +# fail_quality( +# 'eslint', +# "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( +# count=num_violations, violations_limit=violations_limit +# ) +# ) +# else: +# print("successfully run eslint with violations") +# print(num_violations) + + def run_stylelint(): """ Runs stylelint on Sass files. @@ -323,7 +381,7 @@ def run_pii_check(): if not pii_check_passed: fail_quality('pii', full_log) else: - print("successfully run pi_check") + print("successfully run pii_check") def check_keywords(): diff --git a/scripts/quality_test/__init__.py b/scripts/quality_test/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 From 75cf11c8716725651e6abaaa635b796b6331f8df Mon Sep 17 00:00:00 2001 From: salman2013 Date: Thu, 12 Sep 2024 15:37:32 +0500 Subject: [PATCH 74/78] fix: fix the commands with check=True --- Makefile | 3 +- scripts/quality_test.py | 203 +++++++++++++++++----------------------- 2 files changed, 85 insertions(+), 121 deletions(-) diff --git a/Makefile b/Makefile index 35db3543600a..0fc07aab8f13 100644 --- a/Makefile +++ b/Makefile @@ -229,5 +229,4 @@ test-js: ## run javascript tests coverage-js: ## run javascript coverage test python scripts/js_test.py --option coverage -quality: ## run all quality tests - pycodestyle eslint stylelint xsslint pii_check check_keywords \ No newline at end of file +quality: pycodestyle eslint stylelint xsslint pii_check check_keywords \ No newline at end of file diff --git a/scripts/quality_test.py b/scripts/quality_test.py index eb4d5cf812ba..fb7d1e481eb9 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -3,6 +3,7 @@ """ import argparse +import glob import json import os import re @@ -101,7 +102,6 @@ def _get_count_from_last_line(filename, file_type): It is returning only the value (as a floating number). """ report_contents = _get_report_contents(filename, file_type, last_line_only=True) - if report_contents is None: return 0 @@ -126,19 +126,21 @@ def _get_stylelint_violations(): stylelint_report_dir = (REPORT_DIR / "stylelint") stylelint_report = stylelint_report_dir / "stylelint.report" _prepare_report_dir(stylelint_report_dir) - formatter = 'node_modules/stylelint-formatter-pretty' - command = f"stylelint **/*.scss --custom-formatter={formatter}" + command = [ + 'node', 'node_modules/stylelint', + '*scss_files', + '--custom-formatter', 'stylelint-formatter-pretty/index.js' + ] + with open(stylelint_report, 'w') as report_file: - result = subprocess.run( + subprocess.run( command, - shell=True, check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + stdout=report_file, + stderr=subprocess.STDOUT, text=True ) - report_file.write(result.stdout) try: return int(_get_count_from_last_line(stylelint_report, "stylelint")) @@ -150,6 +152,7 @@ def _get_stylelint_violations(): ) ) + def run_eslint(): """ Runs eslint on static asset directories. @@ -174,95 +177,37 @@ def run_eslint(): ] with open(eslint_report, 'w') as report_file: - # Run the command - result = subprocess.run( + subprocess.run( command, stdout=report_file, stderr=subprocess.STDOUT, text=True, check=False ) - import pdb; pdb.set_trace() - - # Print the content of the report file for debugging - with open(eslint_report, 'r') as report_file: - report_content = report_file.read() - print("ESLint report content:") - print(report_content) try: num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) except TypeError: fail_quality( 'eslint', - f"FAILURE: Number of eslint violations could not be found in {eslint_report}" + "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( + eslint_report=eslint_report + ) ) # Fail if number of violations is greater than the limit if num_violations > violations_limit > -1: fail_quality( 'eslint', - f"FAILURE: Too many eslint violations ({num_violations}).\nThe limit is {violations_limit}." + "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, violations_limit=violations_limit + ) ) else: print("successfully run eslint with violations") print(num_violations) -# def run_eslint(): -# """ -# Runs eslint on static asset directories. -# If limit option is passed, fails build if more violations than the limit are found. -# """ - -# REPO_ROOT = repo_root() -# REPORT_DIR = REPO_ROOT / 'reports' -# eslint_report_dir = (REPORT_DIR / "eslint") -# eslint_report = eslint_report_dir / "eslint.report" -# _prepare_report_dir(eslint_report_dir) -# violations_limit = 4950 - -# command = ( -# "node --max_old_space_size=4096 node_modules/.bin/eslint " -# "--ext .js --ext .jsx --format=compact ." -# ) -# with open(eslint_report, 'w') as report_file: -# # Run the command -# result = subprocess.run( -# command, -# shell=True, -# stdout=subprocess.PIPE, -# stderr=subprocess.PIPE, -# text=True, -# check=False -# ) - -# # Write the output to the report file -# report_file.write(result.stdout) - -# try: -# num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) -# except TypeError: -# fail_quality( -# 'eslint', -# "FAILURE: Number of eslint violations could not be found in {eslint_report}".format( -# eslint_report=eslint_report -# ) -# ) - -# # Fail if number of violations is greater than the limit -# if num_violations > violations_limit > -1: -# fail_quality( -# 'eslint', -# "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format( -# count=num_violations, violations_limit=violations_limit -# ) -# ) -# else: -# print("successfully run eslint with violations") -# print(num_violations) - - def run_stylelint(): """ Runs stylelint on Sass files. @@ -271,7 +216,6 @@ def run_stylelint(): violations_limit = 0 num_violations = _get_stylelint_violations() - # Fail if number of violations is greater than the limit if num_violations > violations_limit: fail_quality( @@ -341,30 +285,41 @@ def run_pii_check(): output_file = os.path.join(report_dir, 'pii_check_{}.report') env_report = [] pii_check_passed = True + for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): try: print(f"Running {env_name} PII Annotation check and report") print("-" * 45) + run_output_file = str(output_file).format(env_name.lower()) os.makedirs(report_dir, exist_ok=True) - command = ( - f"export DJANGO_SETTINGS_MODULE={env_settings_file};" - f"code_annotations django_find_annotations" - f"--config_file .pii_annotations.yml --report_path {report_dir} --app_name {env_name.lower()}" - f"--lint --report --coverage | tee {run_output_file}" - ) - result = subprocess.run( - command, - shell=True, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - with open(run_output_file, 'w') as f: - f.write(result.stdout) + # Prepare the environment for the command + env = { + **os.environ, # Include the current environment variables + "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment + } + + command = [ + "code_annotations", + "django_find_annotations", + "--config_file", ".pii_annotations.yml", + "--report_path", str(report_dir), + "--app_name", env_name.lower() + ] + + # Run the command without shell=True + with open(run_output_file, 'w') as report_file: + subprocess.run( + command, + env=env, # Pass the environment with DJANGO_SETTINGS_MODULE + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) + # Extract results uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) env_report.append(( uncovered_model_count, @@ -374,14 +329,15 @@ def run_pii_check(): except BuildFailure as error_message: fail_quality(pii_report_name, f'FAILURE: {error_message}') + # Update pii_check_passed based on the result of the current environment if not pii_check_passed_env: pii_check_passed = False - # Finally, fail the paver task if code_annotations suggests that the check failed. + # If the PII check failed in any environment, fail the task if not pii_check_passed: fail_quality('pii', full_log) else: - print("successfully run pii_check") + print("Successfully ran pii_check") def check_keywords(): @@ -390,30 +346,33 @@ def check_keywords(): """ REPO_ROOT = repo_root() REPORT_DIR = REPO_ROOT / 'reports' - report_path = os.path.join(REPORT_DIR, 'reserved_keywords') - os.makedirs(report_path, exist_ok=True) + report_path = REPORT_DIR / 'reserved_keywords' + report_path.mkdir(parents=True, exist_ok=True) overall_status = True - for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: - report_file = f"{env}_reserved_keyword_report.csv" + for env_name, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: + report_file_path = report_path / f"{env_name}_reserved_keyword_report.csv" override_file = os.path.join(REPO_ROOT, "db_keyword_overrides.yml") try: - command = ( - f"export DJANGO_SETTINGS_MODULE={env_settings_file}; " - f"python manage.py {env} check_reserved_keywords " - f"--override_file {override_file} " - f"--report_path {report_path} " - f"--report_file {report_file}" - ) - - subprocess.run( - command, - shell=True, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) + env = { + **os.environ, # Include the current environment variables + "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment + } + command = [ + "python", "manage.py", env_name, "check_reserved_keywords", + "--override_file", str(override_file), + "--report_path", str(report_path), + "--report_file", str(report_file_path) + ] + with open(report_file_path, 'w') as report_file: + subprocess.run( + command, + env=env, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) except BuildFailure: overall_status = False if not overall_status: @@ -491,13 +450,19 @@ def run_xsslint(): xsslint_report = xsslint_report_dir / "xsslint.report" _prepare_report_dir(xsslint_report_dir) - # Prepare the command to run the xsslint script - command = ( - f"{REPO_ROOT}/scripts/xsslint/{xsslint_script} " - f"--rule-totals --config=scripts.xsslint_config >> {xsslint_report}" - ) - - result = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + command = [ + f"{REPO_ROOT}/scripts/xsslint/{xsslint_script}", + "--rule-totals", + "--config=scripts.xsslint_config" + ] + with open(xsslint_report, 'w') as report_file: + subprocess.run( + command, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) xsslint_counts = _get_xsslint_counts(xsslint_report) try: From 2c68035ec4c9b4d657df5858afcab433dbb71371 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 11 Nov 2024 17:06:18 +0500 Subject: [PATCH 75/78] fix: fix the comments, code improvements --- .pii_annotations.yml | 2 +- Makefile | 38 ++- scripts/js_test.py | 492 -------------------------------- scripts/quality_test.py | 369 +----------------------- scripts/xsslint/xsslint/main.py | 117 +++++++- 5 files changed, 142 insertions(+), 876 deletions(-) delete mode 100644 scripts/js_test.py diff --git a/.pii_annotations.yml b/.pii_annotations.yml index 328520738f10..7a3b80e5d255 100644 --- a/.pii_annotations.yml +++ b/.pii_annotations.yml @@ -1,7 +1,7 @@ source_path: ./ report_path: pii_report safelist_path: .annotation_safe_list.yml -coverage_target: 94.5 +coverage_target: 71.6 # See OEP-30 for more information on these values and what they mean: # https://open-edx-proposals.readthedocs.io/en/latest/oep-0030-arch-pii-markup-and-auditing.html#docstring-annotations annotations: diff --git a/Makefile b/Makefile index 0fc07aab8f13..74751340cddb 100644 --- a/Makefile +++ b/Makefile @@ -208,25 +208,37 @@ ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip ins eslint: ## check javascript for quality issues python scripts/quality_test.py eslint -stylelint: ## check css/scss for quality issues - python scripts/quality_test.py stylelint - -xsslint: ## check xss for quality issues - python scripts/quality_test.py xsslint +xsslint: ## check xss for quality issuest + python scripts/xsslint/xss_linter.py \ + --rule-totals \ + --config=scripts.xsslint_config \ + --thresholds=scripts/xsslint_thresholds.json pycodestyle: ## check python files for quality issues pycodestyle . +## Re-enable --lint flag when this issue https://github.com/openedx/edx-platform/issues/35775 is resolved pii_check: ## check django models for pii annotations - python scripts/quality_test.py pii_check + code_annotations django_find_annotations \ + --config_file .pii_annotations.yml \ + --app_name cms \ + --coverage + code_annotations django_find_annotations \ + --config_file .pii_annotations.yml \ + --app_name lms \ + --coverage check_keywords: ## check django models for reserve keywords - python scripts/quality_test.py check_keywords - -test-js: ## run javascript tests - python scripts/js_test.py --option jstest + DJANGO_SETTINGS_MODULE=cms.envs.test \ + python manage.py cms check_reserved_keywords \ + --override_file db_keyword_overrides.yml -coverage-js: ## run javascript coverage test - python scripts/js_test.py --option coverage + DJANGO_SETTINGS_MODULE=lms.envs.test \ + python manage.py lms check_reserved_keywords \ + --override_file db_keyword_overrides.yml -quality: pycodestyle eslint stylelint xsslint pii_check check_keywords \ No newline at end of file +test-js: ## run javascript tests + node --max_old_space_size=4096 node_modules/.bin/karma start common/static/karma_common.conf.js \ + --single-run=false \ + --capture-timeout=60000 \ + --browsers=FirefoxNoUpdates diff --git a/scripts/js_test.py b/scripts/js_test.py deleted file mode 100644 index 69be37f602fe..000000000000 --- a/scripts/js_test.py +++ /dev/null @@ -1,492 +0,0 @@ -""" -Javascript test tasks -""" - -import click -import os -import re -import sys -import subprocess - -from path import Path as path - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -class Env: - """ - Load information about the execution environment. - """ - - @staticmethod - def repo_root(): - """ - Get the root of the git repository (edx-platform). - - This sometimes fails on Docker Devstack, so it's been broken - down with some additional error handling. It usually starts - working within 30 seconds or so; for more details, see - https://openedx.atlassian.net/browse/PLAT-1629 and - https://github.com/docker/for-mac/issues/1509 - """ - - file_path = path(__file__) - attempt = 1 - while True: - try: - absolute_path = file_path.abspath() - break - except OSError: - print(f'Attempt {attempt}/180 to get an absolute path failed') - if attempt < 180: - attempt += 1 - sleep(1) - else: - print('Unable to determine the absolute path of the edx-platform repo, aborting') - raise - return absolute_path.parent.parent - - # Root of the git repository (edx-platform) - REPO_ROOT = repo_root() - - # Reports Directory - REPORT_DIR = REPO_ROOT / 'reports' - - # Detect if in a Docker container, and if so which one - FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') - USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' - - # Configured browser to use for the js test suites - SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') - if USING_DOCKER: - KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' - else: - KARMA_BROWSER = 'FirefoxNoUpdates' - - # Files used to run each of the js test suites - # TODO: Store this as a dict. Order seems to matter for some - # reason. See issue TE-415. - KARMA_CONFIG_FILES = [ - REPO_ROOT / 'cms/static/karma_cms.conf.js', - REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', - REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', - REPO_ROOT / 'lms/static/karma_lms.conf.js', - REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', - REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', - REPO_ROOT / 'common/static/karma_common.conf.js', - REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', - ] - - JS_TEST_ID_KEYS = [ - 'cms', - 'cms-squire', - 'cms-webpack', - 'lms', - 'xmodule', - 'xmodule-webpack', - 'common', - 'common-requirejs', - 'jest-snapshot' - ] - - JS_REPORT_DIR = REPORT_DIR / 'javascript' - - # Service variant (lms, cms, etc.) configured with an environment variable - # We use this to determine which envs.json file to load. - SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) - - # If service variant not configured in env, then pass the correct - # environment for lms / cms - if not SERVICE_VARIANT: # this will intentionally catch ""; - if any(i in sys.argv[1:] for i in ('cms', 'studio')): - SERVICE_VARIANT = 'cms' - else: - SERVICE_VARIANT = 'lms' - - -# def clean_test_files(): -# """ -# Clean fixture files used by tests and .pyc files -# """ -# # "git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads" -# subprocess.run("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") -# # This find command removes all the *.pyc files that aren't in the .git -# # directory. See this blog post for more details: -# # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html -# subprocess.run(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") -# subprocess.run("rm -rf test_root/log/auto_screenshots/*") -# subprocess.run("rm -rf /tmp/mako_[cl]ms") - - -# def clean_dir(directory): -# """ -# Delete all the files from the specified directory. -# """ -# # We delete the files but preserve the directory structure -# # so that coverage.py has a place to put the reports. -# subprocess.run(f'find {directory} -type f -delete') - - -# @task -# @cmdopts([ -# ('skip-clean', 'C', 'skip cleaning repository before running tests'), -# ('skip_clean', None, 'deprecated in favor of skip-clean'), -# ]) - -# def clean_reports_dir(options): -# """ -# Clean coverage files, to ensure that we don't use stale data to generate reports. -# """ -# if getattr(options, 'skip_clean', False): -# print('--skip-clean is set, skipping...') -# return - -# # We delete the files but preserve the directory structure -# # so that coverage.py has a place to put the reports. -# reports_dir = Env.REPORT_DIR.makedirs_p() -# clean_dir(reports_dir) - - -class TestSuite: - """ - TestSuite is a class that defines how groups of tests run. - """ - def __init__(self, *args, **kwargs): - self.root = args[0] - self.subsuites = kwargs.get('subsuites', []) - self.failed_suites = [] - self.verbosity = int(kwargs.get('verbosity', 1)) - self.skip_clean = kwargs.get('skip_clean', False) - self.passthrough_options = kwargs.get('passthrough_options', []) - - def __enter__(self): - """ - This will run before the test suite is run with the run_suite_tests method. - If self.run_test is called directly, it should be run in a 'with' block to - ensure that the proper context is created. - - Specific setup tasks should be defined in each subsuite. - - i.e. Checking for and defining required directories. - """ - print(f"\nSetting up for {self.root}") - self.failed_suites = [] - - def __exit__(self, exc_type, exc_value, traceback): - """ - This is run after the tests run with the run_suite_tests method finish. - Specific clean up tasks should be defined in each subsuite. - - If self.run_test is called directly, it should be run in a 'with' block - to ensure that clean up happens properly. - - i.e. Cleaning mongo after the lms tests run. - """ - print(f"\nCleaning up after {self.root}") - - @property - def cmd(self): - """ - The command to run tests (as a string). For this base class there is none. - """ - return None - - @staticmethod - def kill_process(proc): - """ - Kill the process `proc` created with `subprocess`. - """ - p1_group = psutil.Process(proc.pid) - child_pids = p1_group.children(recursive=True) - - for child_pid in child_pids: - os.kill(child_pid.pid, signal.SIGKILL) - - @staticmethod - def is_success(exit_code): - """ - Determine if the given exit code represents a success of the test - suite. By default, only a zero counts as a success. - """ - return exit_code == 0 - - def run_test(self): - """ - Runs a self.cmd in a subprocess and waits for it to finish. - It returns False if errors or failures occur. Otherwise, it - returns True. - """ - # cmd = " ".join(self.cmd) - cmd = " ".join(str(part) for part in self.cmd) - sys.stdout.write(cmd) - - msg = colorize( - 'green', - '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), - ) - - sys.stdout.write(msg) - sys.stdout.flush() - - if 'TEST_SUITE' not in os.environ: - os.environ['TEST_SUITE'] = self.root.replace("/", "_") - kwargs = {'shell': True, 'cwd': None} - process = None - - try: - process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with - return self.is_success(process.wait()) - except KeyboardInterrupt: - self.kill_process(process) - sys.exit(1) - - def run_suite_tests(self): - """ - Runs each of the suites in self.subsuites while tracking failures - """ - # Uses __enter__ and __exit__ for context - with self: - # run the tests for this class, and for all subsuites - if self.cmd: - passed = self.run_test() - if not passed: - self.failed_suites.append(self) - - for suite in self.subsuites: - suite.run_suite_tests() - if suite.failed_suites: - self.failed_suites.extend(suite.failed_suites) - - def report_test_results(self): - """ - Writes a list of failed_suites to sys.stderr - """ - if self.failed_suites: - msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) - msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') - else: - msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) - - print(msg) - - def run(self): - """ - Runs the tests in the suite while tracking and reporting failures. - """ - self.run_suite_tests() - - # if tasks.environment.dry_run: - # return - - self.report_test_results() - - if self.failed_suites: - sys.exit(1) - - -class JsTestSuite(TestSuite): - """ - A class for running JavaScript tests. - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.report_dir = Env.JS_REPORT_DIR - self.opts = kwargs - - suite = args[0] - self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] - - def __enter__(self): - super().__enter__() - self.report_dir.makedirs_p() - # self.report_dir.mkdir(exist_ok=True) - # if not self.skip_clean: - # test_utils.clean_test_files() - - # if self.mode == 'run' and not self.run_under_coverage: - # test_utils.clean_dir(self.report_dir) - - @property - def _default_subsuites(self): - """ - Returns all JS test suites - """ - return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] - - -class JsTestSubSuite(TestSuite): - """ - Class for JS suites like cms, cms-squire, lms, common, - common-requirejs and xmodule - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.test_id = args[0] - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.port = kwargs.get('port') - self.root = self.root + ' javascript' - self.report_dir = Env.JS_REPORT_DIR - - try: - self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] - except ValueError: - self.test_conf_file = Env.KARMA_CONFIG_FILES[0] - - self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' - self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' - - @property - def cmd(self): - """ - Run the tests using karma runner. - """ - cmd = [ - "node", - "--max_old_space_size=4096", - "node_modules/.bin/karma", - "start", - self.test_conf_file, - "--single-run={}".format('false' if self.mode == 'dev' else 'true'), - "--capture-timeout=60000", - f"--junitreportpath={self.xunit_report}", - f"--browsers={Env.KARMA_BROWSER}", - ] - - if self.port: - cmd.append(f"--port={self.port}") - - if self.run_under_coverage: - cmd.extend([ - "--coverage", - f"--coveragereportpath={self.coverage_report}", - ]) - - return cmd - - -class JestSnapshotTestSuite(TestSuite): - """ - A class for running Jest Snapshot tests. - """ - @property - def cmd(self): - """ - Run the tests using Jest. - """ - return ["jest"] - - -def test_js(suite, mode, coverage, port, skip_clean): - """ - Run the JavaScript tests - """ - - if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): - sys.stderr.write( - "Unknown test suite. Please choose from ({suites})\n".format( - suites=", ".join(Env.JS_TEST_ID_KEYS) - ) - ) - return - - if suite != 'jest-snapshot': - test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) - test_suite.run() - - if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in - test_suite = JestSnapshotTestSuite('jest') - test_suite.run() - - -# @needs('pavelib.prereqs.install_coverage_prereqs') -# @cmdopts([ -# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), -# ], share_with=['coverage']) - -def diff_coverage(): - """ - Build the diff coverage reports - """ - - compare_branch = 'origin/master' - - # Find all coverage XML files (both Python and JavaScript) - xml_reports = [] - for filepath in Env.REPORT_DIR.walk(): - if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): - xml_reports.append(filepath) - - if not xml_reports: - err_msg = colorize( - 'red', - "No coverage info found. Run `quality test` before running " - "`coverage test`.\n" - ) - sys.stderr.write(err_msg) - else: - xml_report_str = ' '.join(xml_reports) - diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') - - # Generate the diff coverage reports (HTML and console) - # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 - command = ( - f"diff-cover {xml_report_str}" - f"--diff-range-notation '..'" - f"--compare-branch={compare_branch} " - f"--html-report {diff_html_path}" - ) - subprocess.run(command, - shell=True, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True) - - -@click.command("main") -@click.option( - '--option', 'option', - help='Run javascript tests or coverage test as per given option' -) -@click.option( - '--s', 'suite', - default='all', - help='Test suite to run.' -) -@click.option( - '--m', 'mode', - default='run', - help='dev or run' -) -@click.option( - '--coverage', 'coverage', - default=True, - help='Run test under coverage' -) -@click.option( - '--p', 'port', - default=None, - help='Port to run test server on (dev mode only)' -) -@click.option( - '--C', 'skip_clean', - default=False, - help='skip cleaning repository before running tests' -) -def main(option, suite, mode, coverage, port, skip_clean): - if option == 'jstest': - test_js(suite, mode, coverage, port, skip_clean) - elif option == 'coverage': - diff_coverage() - - -if __name__ == "__main__": - main() diff --git a/scripts/quality_test.py b/scripts/quality_test.py index fb7d1e481eb9..2f8438ac99e4 100644 --- a/scripts/quality_test.py +++ b/scripts/quality_test.py @@ -117,42 +117,6 @@ def _get_count_from_last_line(filename, file_type): return None -def _get_stylelint_violations(): - """ - Returns the number of Stylelint violations. - """ - REPO_ROOT = repo_root() - REPORT_DIR = REPO_ROOT / 'reports' - stylelint_report_dir = (REPORT_DIR / "stylelint") - stylelint_report = stylelint_report_dir / "stylelint.report" - _prepare_report_dir(stylelint_report_dir) - - command = [ - 'node', 'node_modules/stylelint', - '*scss_files', - '--custom-formatter', 'stylelint-formatter-pretty/index.js' - ] - - with open(stylelint_report, 'w') as report_file: - subprocess.run( - command, - check=True, - stdout=report_file, - stderr=subprocess.STDOUT, - text=True - ) - - try: - return int(_get_count_from_last_line(stylelint_report, "stylelint")) - except TypeError: - fail_quality( - 'stylelint', - "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( - stylelint_report=stylelint_report - ) - ) - - def run_eslint(): """ Runs eslint on static asset directories. @@ -182,7 +146,7 @@ def run_eslint(): stdout=report_file, stderr=subprocess.STDOUT, text=True, - check=False + check=True ) try: @@ -208,340 +172,11 @@ def run_eslint(): print(num_violations) -def run_stylelint(): - """ - Runs stylelint on Sass files. - If limit option is passed, fails build if more violations than the limit are found. - """ - - violations_limit = 0 - num_violations = _get_stylelint_violations() - # Fail if number of violations is greater than the limit - if num_violations > violations_limit: - fail_quality( - 'stylelint', - "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, - violations_limit=violations_limit, - ) - ) - else: - print("successfully run stylelint with violations") - print(num_violations) - - -def _extract_missing_pii_annotations(filename): - """ - Returns the number of uncovered models from the stdout report of django_find_annotations. - - Arguments: - filename: Filename where stdout of django_find_annotations was captured. - - Returns: - three-tuple containing: - 1. The number of uncovered models, - 2. A bool indicating whether the coverage is still below the threshold, and - 3. The full report as a string. - """ - uncovered_models = 0 - pii_check_passed = True - if os.path.isfile(filename): - with open(filename) as report_file: - lines = report_file.readlines() - - # Find the count of uncovered models. - uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') - for line in lines: - uncovered_match = uncovered_regex.match(line) - if uncovered_match: - uncovered_models = int(uncovered_match.groups()[0]) - break - - # Find a message which suggests the check failed. - failure_regex = re.compile(r'^Coverage threshold not met!') - for line in lines: - failure_match = failure_regex.match(line) - if failure_match: - pii_check_passed = False - break - - # Each line in lines already contains a newline. - full_log = ''.join(lines) - else: - fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') - - return (uncovered_models, pii_check_passed, full_log) - - -def run_pii_check(): - """ - Guarantee that all Django models are PII-annotated. - """ - REPO_ROOT = repo_root() - REPORT_DIR = REPO_ROOT / 'reports' - pii_report_name = 'pii' - default_report_dir = (REPORT_DIR / pii_report_name) - report_dir = default_report_dir - output_file = os.path.join(report_dir, 'pii_check_{}.report') - env_report = [] - pii_check_passed = True - - for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): - try: - print(f"Running {env_name} PII Annotation check and report") - print("-" * 45) - - run_output_file = str(output_file).format(env_name.lower()) - os.makedirs(report_dir, exist_ok=True) - - # Prepare the environment for the command - env = { - **os.environ, # Include the current environment variables - "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment - } - - command = [ - "code_annotations", - "django_find_annotations", - "--config_file", ".pii_annotations.yml", - "--report_path", str(report_dir), - "--app_name", env_name.lower() - ] - - # Run the command without shell=True - with open(run_output_file, 'w') as report_file: - subprocess.run( - command, - env=env, # Pass the environment with DJANGO_SETTINGS_MODULE - check=True, - stdout=report_file, - stderr=subprocess.STDOUT, - text=True - ) - - # Extract results - uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) - env_report.append(( - uncovered_model_count, - full_log, - )) - - except BuildFailure as error_message: - fail_quality(pii_report_name, f'FAILURE: {error_message}') - - # Update pii_check_passed based on the result of the current environment - if not pii_check_passed_env: - pii_check_passed = False - - # If the PII check failed in any environment, fail the task - if not pii_check_passed: - fail_quality('pii', full_log) - else: - print("Successfully ran pii_check") - - -def check_keywords(): - """ - Check Django model fields for names that conflict with a list of reserved keywords - """ - REPO_ROOT = repo_root() - REPORT_DIR = REPO_ROOT / 'reports' - report_path = REPORT_DIR / 'reserved_keywords' - report_path.mkdir(parents=True, exist_ok=True) - - overall_status = True - for env_name, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: - report_file_path = report_path / f"{env_name}_reserved_keyword_report.csv" - override_file = os.path.join(REPO_ROOT, "db_keyword_overrides.yml") - try: - env = { - **os.environ, # Include the current environment variables - "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment - } - command = [ - "python", "manage.py", env_name, "check_reserved_keywords", - "--override_file", str(override_file), - "--report_path", str(report_path), - "--report_file", str(report_file_path) - ] - with open(report_file_path, 'w') as report_file: - subprocess.run( - command, - env=env, - check=True, - stdout=report_file, - stderr=subprocess.STDOUT, - text=True - ) - except BuildFailure: - overall_status = False - if not overall_status: - fail_quality( - 'keywords', - 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( - report_path - ) - ) - else: - print("successfully run check_keywords") - - -def _get_xsslint_counts(filename): - """ - This returns a dict of violations from the xsslint report. - - Arguments: - filename: The name of the xsslint report. - - Returns: - A dict containing the following: - rules: A dict containing the count for each rule as follows: - violation-rule-id: N, where N is the number of violations - total: M, where M is the number of total violations - - """ - report_contents = _get_report_contents(filename, 'xsslint') - rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) - total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) - violations = {'rules': {}} - for violation_match in rule_count_regex.finditer(report_contents): - try: - violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) - except ValueError: - violations['rules'][violation_match.group('rule_id')] = None - try: - violations['total'] = int(total_count_regex.search(report_contents).group('count')) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - violations['total'] = None - return violations - - -def run_xsslint(): - """ - Runs xsslint/xss_linter.py on the codebase - """ - - try: - thresholds_option = 'scripts/xsslint_thresholds.json' - # Read the JSON file - with open(thresholds_option, 'r') as file: - violation_thresholds = json.load(file) - - except ValueError: - violation_thresholds = None - if isinstance(violation_thresholds, dict) is False or \ - any(key not in ("total", "rules") for key in violation_thresholds.keys()): - - fail_quality( - 'xsslint', - """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" - """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ - """with property names in double-quotes.""".format( - thresholds_option=thresholds_option - ) - ) - - xsslint_script = "xss_linter.py" - REPO_ROOT = repo_root() - REPORT_DIR = REPO_ROOT / 'reports' - xsslint_report_dir = (REPORT_DIR / "xsslint") - xsslint_report = xsslint_report_dir / "xsslint.report" - _prepare_report_dir(xsslint_report_dir) - - command = [ - f"{REPO_ROOT}/scripts/xsslint/{xsslint_script}", - "--rule-totals", - "--config=scripts.xsslint_config" - ] - with open(xsslint_report, 'w') as report_file: - subprocess.run( - command, - check=True, - stdout=report_file, - stderr=subprocess.STDOUT, - text=True - ) - xsslint_counts = _get_xsslint_counts(xsslint_report) - - try: - metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( - xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total']) - ) - if 'rules' in xsslint_counts and any(xsslint_counts['rules']): - metrics_str += "\n" - rule_keys = sorted(xsslint_counts['rules'].keys()) - for rule in rule_keys: - metrics_str += "{rule} violations: {count}\n".format( - rule=rule, - count=int(xsslint_counts['rules'][rule]) - ) - except TypeError: - fail_quality( - 'xsslint', - "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format( - xsslint_script=xsslint_script, xsslint_report=xsslint_report - ) - ) - - error_message = "" - # Test total violations against threshold. - if 'total' in list(violation_thresholds.keys()): - if violation_thresholds['total'] < xsslint_counts['total']: - error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( - count=xsslint_counts['total'], violations_limit=violation_thresholds['total'] - ) - - # Test rule violations against thresholds. - if 'rules' in violation_thresholds: - threshold_keys = sorted(violation_thresholds['rules'].keys()) - for threshold_key in threshold_keys: - if threshold_key not in xsslint_counts['rules']: - error_message += ( - "\nNumber of {xsslint_script} violations for {rule} could not be found in " - "{xsslint_report}." - ).format( - xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report - ) - elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]: - error_message += \ - "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( - rule=threshold_key, count=xsslint_counts['rules'][threshold_key], - violations_limit=violation_thresholds['rules'][threshold_key], - ) - - if error_message: - fail_quality( - 'xsslint', - "FAILURE: XSSLinter Failed.\n{error_message}\n" - "See {xsslint_report} or run the following command to hone in on the problem:\n" - " ./scripts/xss-commit-linter.sh -h".format( - error_message=error_message, xsslint_report=xsslint_report - ) - ) - else: - print("successfully run xsslint") - - if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("command", choices=['eslint', 'stylelint', - 'xsslint', 'pii_check', 'check_keywords']) + parser.add_argument("command", choices=['eslint']) argument = parser.parse_args() if argument.command == 'eslint': run_eslint() - - elif argument.command == 'stylelint': - run_stylelint() - - elif argument.command == 'xsslint': - run_xsslint() - - elif argument.command == 'pii_check': - run_pii_check() - - elif argument.command == 'check_keywords': - check_keywords() diff --git a/scripts/xsslint/xsslint/main.py b/scripts/xsslint/xsslint/main.py index f8f8672b74b3..0a8afc1febae 100644 --- a/scripts/xsslint/xsslint/main.py +++ b/scripts/xsslint/xsslint/main.py @@ -5,10 +5,13 @@ import argparse import importlib +import json import os +import re import sys -from functools import reduce +from functools import reduce +from io import StringIO from xsslint.reporting import SummaryResults from xsslint.rules import RuleSet from xsslint.utils import is_skip_dir @@ -102,6 +105,107 @@ def _process_os_dirs(starting_dir, template_linters, options, summary_results, o _process_os_dir(root, files, template_linters, options, summary_results, out) +def _get_xsslint_counts(result_contents): + """ + This returns a dict of violations from the xsslint report. + + Arguments: + filename: The name of the xsslint report. + + Returns: + A dict containing the following: + rules: A dict containing the count for each rule as follows: + violation-rule-id: N, where N is the number of violations + total: M, where M is the number of total violations + + """ + + rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) + total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) + violations = {'rules': {}} + for violation_match in rule_count_regex.finditer(result_contents): + try: + violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) + except ValueError: + violations['rules'][violation_match.group('rule_id')] = None + try: + violations['total'] = int(total_count_regex.search(result_contents).group('count')) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + violations['total'] = None + return violations + + +def _check_violations(options, results): + try: + thresholds_option = options['thresholds'] + # Read the JSON file + with open(thresholds_option, 'r') as file: + violation_thresholds = json.load(file) + + except ValueError: + violation_thresholds = None + if isinstance(violation_thresholds, dict) is False or \ + any(key not in ("total", "rules") for key in violation_thresholds.keys()): + print('xsslint') + print("""FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n""" + """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """ + """with property names in double-quotes.""".format(thresholds_option=thresholds_option)) + xsslint_script = "xss_linter.py" + + try: + metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format( + xsslint_script=xsslint_script, num_violations=int(results['total']) + ) + if 'rules' in results and any(results['rules']): + metrics_str += "\n" + rule_keys = sorted(results['rules'].keys()) + for rule in rule_keys: + metrics_str += "{rule} violations: {count}\n".format( + rule=rule, + count=int(results['rules'][rule]) + ) + except TypeError: + print('xsslint') + print("FAILURE: Number of {xsslint_script} violations could not be found".format( + xsslint_script=xsslint_script + )) + + error_message = "" + # Test total violations against threshold. + if 'total' in list(violation_thresholds.keys()): + if violation_thresholds['total'] < results['total']: + error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format( + count=results['total'], violations_limit=violation_thresholds['total'] + ) + + # Test rule violations against thresholds. + if 'rules' in violation_thresholds: + threshold_keys = sorted(violation_thresholds['rules'].keys()) + for threshold_key in threshold_keys: + if threshold_key not in results['rules']: + error_message += ( + "\nNumber of {xsslint_script} violations for {rule} could not be found" + ).format( + xsslint_script=xsslint_script, rule=threshold_key + ) + elif violation_thresholds['rules'][threshold_key] < results['rules'][threshold_key]: + error_message += \ + "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format( + rule=threshold_key, count=results['rules'][threshold_key], + violations_limit=violation_thresholds['rules'][threshold_key], + ) + + if error_message: + print('xsslint') + print("FAILURE: XSSLinter Failed.\n{error_message}\n" + "run the following command to hone in on the problem:\n" + " ./scripts/xss-commit-linter.sh -h".format(error_message=error_message)) + else: + print("successfully run xsslint") + + def _lint(file_or_dir, template_linters, options, summary_results, out): """ For each linter, lints the provided file or directory. @@ -127,6 +231,8 @@ def _lint(file_or_dir, template_linters, options, summary_results, out): _process_os_dirs(directory, template_linters, options, summary_results, out) summary_results.print_results(options, out) + result_output = _get_xsslint_counts(out.getvalue()) + _check_violations(options, result_output) def main(): @@ -167,6 +273,10 @@ def main(): '--config', dest='config', action='store', default='xsslint.default_config', help='Specifies the config module to use. The config module should be in Python package syntax.' ) + parser.add_argument( + '--thresholds', dest='thresholds', action='store', + help='Specifies the config module to use. The config module should be in Python package syntax.' + ) parser.add_argument('path', nargs="?", default=None, help='A file to lint or directory to recursively lint.') args = parser.parse_args() @@ -176,7 +286,8 @@ def main(): 'rule_totals': args.rule_totals, 'summary_format': args.summary_format, 'verbose': args.verbose, - 'skip_dirs': getattr(config, 'SKIP_DIRS', ()) + 'skip_dirs': getattr(config, 'SKIP_DIRS', ()), + 'thresholds': args.thresholds } template_linters = getattr(config, 'LINTERS', ()) if not template_linters: @@ -184,4 +295,4 @@ def main(): ruleset = _build_ruleset(template_linters) summary_results = SummaryResults(ruleset) - _lint(args.path, template_linters, options, summary_results, out=sys.stdout) + _lint(args.path, template_linters, options, summary_results, out=StringIO()) From f53eab8137a47470629464e1f8a3ce827cbf5882 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 11 Nov 2024 18:54:15 +0500 Subject: [PATCH 76/78] fix: testing changes in commands --- .github/workflows/quality-checks.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 510059a9d62a..96f1ab1f284c 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -78,7 +78,6 @@ jobs: TARGET_BRANCH: ${{ github.base_ref }} run: | make pycodestyle - make eslint make stylelint make xsslint make pii_check From 77476dd8a6794f890b62d8a516947cc67c4b78bb Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 11 Nov 2024 19:03:37 +0500 Subject: [PATCH 77/78] fix: fix the code for stylelint removal --- .github/workflows/quality-checks.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 96f1ab1f284c..73fec6bb466e 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -78,7 +78,6 @@ jobs: TARGET_BRANCH: ${{ github.base_ref }} run: | make pycodestyle - make stylelint make xsslint make pii_check make check_keywords From 26d5153619c3d75c4119f933df97a45d71682280 Mon Sep 17 00:00:00 2001 From: salman2013 Date: Mon, 11 Nov 2024 19:34:22 +0500 Subject: [PATCH 78/78] fix: fix the pii_check command for django setting module --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 74751340cddb..b79ba8c10a7f 100644 --- a/Makefile +++ b/Makefile @@ -219,10 +219,13 @@ pycodestyle: ## check python files for quality issues ## Re-enable --lint flag when this issue https://github.com/openedx/edx-platform/issues/35775 is resolved pii_check: ## check django models for pii annotations + DJANGO_SETTINGS_MODULE=cms.envs.test \ code_annotations django_find_annotations \ --config_file .pii_annotations.yml \ --app_name cms \ --coverage + + DJANGO_SETTINGS_MODULE=lms.envs.test \ code_annotations django_find_annotations \ --config_file .pii_annotations.yml \ --app_name lms \