diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index 4d025e540163..597f7e0bb4c3 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -64,13 +64,15 @@ jobs: make base-requirements - uses: c-hive/gha-npm-cache@v1 + + - name: Install npm + run: npm ci + - name: Run JS Tests - env: - TEST_SUITE: js-unit - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh run: | npm install -g jest - xvfb-run --auto-servernum ./scripts/all-tests.sh + xvfb-run --auto-servernum make test-js + make coverage-js - name: Save Job Artifacts uses: actions/upload-artifact@v4 diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index cf8ffd5d2910..28c3841acc54 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -60,16 +60,30 @@ jobs: PIP_SRC: ${{ runner.temp }} run: | make test-requirements - + + - name: Install npm + env: + PIP_SRC: ${{ runner.temp }} + run: npm ci + + - name: Install python packages + env: + PIP_SRC: ${{ runner.temp }} + run: | + pip install -e . + - name: Run Quality Tests env: - TEST_SUITE: quality - SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh PIP_SRC: ${{ runner.temp }} TARGET_BRANCH: ${{ github.base_ref }} run: | - ./scripts/all-tests.sh - + make pycodestyle + make eslint + make stylelint + make xsslint + make pii_check + make check_keywords + - name: Save Job Artifacts if: always() uses: actions/upload-artifact@v4 diff --git a/Makefile b/Makefile index 15bab5df67a9..0fc07aab8f13 100644 --- a/Makefile +++ b/Makefile @@ -204,3 +204,29 @@ migrate: migrate-lms migrate-cms # Part of https://github.com/openedx/wg-developer-experience/issues/136 ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu. sudo apt install libmysqlclient-dev libxmlsec1-dev + +eslint: ## check javascript for quality issues + python scripts/quality_test.py eslint + +stylelint: ## check css/scss for quality issues + python scripts/quality_test.py stylelint + +xsslint: ## check xss for quality issues + python scripts/quality_test.py xsslint + +pycodestyle: ## check python files for quality issues + pycodestyle . + +pii_check: ## check django models for pii annotations + python scripts/quality_test.py pii_check + +check_keywords: ## check django models for reserved keywords + python scripts/quality_test.py check_keywords + +test-js: ## run javascript tests + python scripts/js_test.py --option jstest + +coverage-js: ## run javascript coverage test + python scripts/js_test.py --option coverage + +quality: pycodestyle eslint stylelint xsslint pii_check check_keywords \ No newline at end of file diff --git a/pavelib/__init__.py b/pavelib/__init__.py index 875068166ff5..24f05618bdd7 100644 --- a/pavelib/__init__.py +++ b/pavelib/__init__.py @@ -3,4 +3,4 @@ """ -from . import assets, js_test, prereqs, quality +from . 
import assets diff --git a/pavelib/js_test.py b/pavelib/js_test.py deleted file mode 100644 index fb9c213499ac..000000000000 --- a/pavelib/js_test.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -Javascript test tasks -""" - - -import os -import re -import sys - -from paver.easy import cmdopts, needs, sh, task - -from pavelib.utils.envs import Env -from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite -from pavelib.utils.timer import timed - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -@task -@needs( - 'pavelib.prereqs.install_node_prereqs', - 'pavelib.utils.test.utils.clean_reports_dir', -) -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("mode=", "m", "dev or run"), - ("coverage", "c", "Run test under coverage"), - ("port=", "p", "Port to run test server on (dev mode only)"), - ('skip-clean', 'C', 'skip cleaning repository before running tests'), - ('skip_clean', None, 'deprecated in favor of skip-clean'), -], share_with=["pavelib.utils.tests.utils.clean_reports_dir"]) -@timed -def test_js(options): - """ - Run the JavaScript tests - """ - mode = getattr(options, 'mode', 'run') - port = None - skip_clean = getattr(options, 'skip_clean', False) - - if mode == 'run': - suite = getattr(options, 'suite', 'all') - coverage = getattr(options, 'coverage', False) - elif mode == 'dev': - suite = getattr(options, 'suite', None) - coverage = False - port = getattr(options, 'port', None) - else: - sys.stderr.write("Invalid mode. Please choose 'dev' or 'run'.") - return - - if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): - sys.stderr.write( - "Unknown test suite. Please choose from ({suites})\n".format( - suites=", ".join(Env.JS_TEST_ID_KEYS) - ) - ) - return - - if suite != 'jest-snapshot': - test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) - test_suite.run() - - if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in - test_suite = JestSnapshotTestSuite('jest') - test_suite.run() - - -@task -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("coverage", "c", "Run test under coverage"), -]) -@timed -def test_js_run(options): - """ - Run the JavaScript tests and print results to the console - """ - options.mode = 'run' - test_js(options) - - -@task -@cmdopts([ - ("suite=", "s", "Test suite to run"), - ("port=", "p", "Port to run test server on"), -]) -@timed -def test_js_dev(options): - """ - Run the JavaScript tests in your default browsers - """ - options.mode = 'dev' - test_js(options) - - -@task -@needs('pavelib.prereqs.install_coverage_prereqs') -@cmdopts([ - ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), -], share_with=['coverage']) -@timed -def diff_coverage(options): - """ - Build the diff coverage reports - """ - compare_branch = options.get('compare_branch', 'origin/master') - - # Find all coverage XML files (both Python and JavaScript) - xml_reports = [] - - for filepath in Env.REPORT_DIR.walk(): - if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): - xml_reports.append(filepath) - - if not xml_reports: - err_msg = colorize( - 'red', - "No coverage info found. 
Run `paver test` before running " - "`paver coverage`.\n" - ) - sys.stderr.write(err_msg) - else: - xml_report_str = ' '.join(xml_reports) - diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') - - # Generate the diff coverage reports (HTML and console) - # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 - sh( - "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} " - "--html-report {diff_html_path}".format( - xml_report_str=xml_report_str, - compare_branch=compare_branch, - diff_html_path=diff_html_path, - ) - ) - - print("\n") diff --git a/pavelib/paver_tests/conftest.py b/pavelib/paver_tests/conftest.py deleted file mode 100644 index 214a35e3fe85..000000000000 --- a/pavelib/paver_tests/conftest.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Pytest fixtures for the pavelib unit tests. -""" - - -import os -from shutil import rmtree - -import pytest - -from pavelib.utils.envs import Env - - -@pytest.fixture(autouse=True, scope='session') -def delete_quality_junit_xml(): - """ - Delete the JUnit XML results files for quality check tasks run during the - unit tests. - """ - yield - if os.path.exists(Env.QUALITY_DIR): - rmtree(Env.QUALITY_DIR, ignore_errors=True) diff --git a/pavelib/paver_tests/test_eslint.py b/pavelib/paver_tests/test_eslint.py deleted file mode 100644 index 5802d7d0d21b..000000000000 --- a/pavelib/paver_tests/test_eslint.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Tests for Paver's Stylelint tasks. -""" - - -import unittest -from unittest.mock import patch - -import pytest -from paver.easy import BuildFailure, call_task - -import pavelib.quality - - -class TestPaverESLint(unittest.TestCase): - """ - For testing run_eslint - """ - - def setUp(self): - super().setUp() - - # Mock the paver @needs decorator - self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start() - self._mock_paver_needs.return_value = 0 - - # Mock shell commands - patcher = patch('pavelib.quality.sh') - self._mock_paver_sh = patcher.start() - - # Cleanup mocks - self.addCleanup(patcher.stop) - self.addCleanup(self._mock_paver_needs.stop) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_count_from_last_line') - def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument - """ - run_eslint encounters an error parsing the eslint output log - """ - mock_count.return_value = None - with pytest.raises(BuildFailure): - call_task('pavelib.quality.run_eslint', args=['']) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_count_from_last_line') - def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument - """ - eslint finds violations, but a limit was not set - """ - mock_count.return_value = 1 - pavelib.quality.run_eslint("") diff --git a/pavelib/paver_tests/test_js_test.py b/pavelib/paver_tests/test_js_test.py deleted file mode 100644 index 4b165a156674..000000000000 --- a/pavelib/paver_tests/test_js_test.py +++ /dev/null @@ -1,148 +0,0 @@ -"""Unit tests for the Paver JavaScript testing tasks.""" - -from unittest.mock import patch - -import ddt -from paver.easy import call_task - -import pavelib.js_test -from pavelib.utils.envs import Env - -from .utils import 
PaverTestCase - - -@ddt.ddt -class TestPaverJavaScriptTestTasks(PaverTestCase): - """ - Test the Paver JavaScript testing tasks. - """ - - EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete' - EXPECTED_KARMA_OPTIONS = ( - "{config_file} " - "--single-run={single_run} " - "--capture-timeout=60000 " - "--junitreportpath=" - "{platform_root}/reports/javascript/javascript_xunit-{suite}.xml " - "--browsers={browser}" - ) - EXPECTED_COVERAGE_OPTIONS = ( - ' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml' - ) - - EXPECTED_COMMANDS = [ - "make report_dir", - 'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads', - "find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;", - 'rm -rf test_root/log/auto_screenshots/*', - "rm -rf /tmp/mako_[cl]ms", - ] - - def setUp(self): - super().setUp() - - # Mock the paver @needs decorator - self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start() - self._mock_paver_needs.return_value = 0 - - # Cleanup mocks - self.addCleanup(self._mock_paver_needs.stop) - - @ddt.data( - [""], - ["--coverage"], - ["--suite=lms"], - ["--suite=lms --coverage"], - ) - @ddt.unpack - def test_test_js_run(self, options_string): - """ - Test the "test_js_run" task. - """ - options = self.parse_options_string(options_string) - self.reset_task_messages() - call_task("pavelib.js_test.test_js_run", options=options) - self.verify_messages(options=options, dev_mode=False) - - @ddt.data( - [""], - ["--port=9999"], - ["--suite=lms"], - ["--suite=lms --port=9999"], - ) - @ddt.unpack - def test_test_js_dev(self, options_string): - """ - Test the "test_js_run" task. - """ - options = self.parse_options_string(options_string) - self.reset_task_messages() - call_task("pavelib.js_test.test_js_dev", options=options) - self.verify_messages(options=options, dev_mode=True) - - def parse_options_string(self, options_string): - """ - Parse a string containing the options for a test run - """ - parameters = options_string.split(" ") - suite = "all" - if "--system=lms" in parameters: - suite = "lms" - elif "--system=common" in parameters: - suite = "common" - coverage = "--coverage" in parameters - port = None - if "--port=9999" in parameters: - port = 9999 - return { - "suite": suite, - "coverage": coverage, - "port": port, - } - - def verify_messages(self, options, dev_mode): - """ - Verify that the messages generated when running tests are as expected - for the specified options and dev_mode. 
- """ - is_coverage = options['coverage'] - port = options['port'] - expected_messages = [] - suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']] - - expected_messages.extend(self.EXPECTED_COMMANDS) - if not dev_mode and not is_coverage: - expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format( - platform_root=self.platform_root - )) - - command_template = ( - 'node --max_old_space_size=4096 node_modules/.bin/karma start {options}' - ) - - for suite in suites: - # Karma test command - if suite != 'jest-snapshot': - karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)] - expected_test_tool_command = command_template.format( - options=self.EXPECTED_KARMA_OPTIONS.format( - config_file=karma_config_file, - single_run='false' if dev_mode else 'true', - suite=suite, - platform_root=self.platform_root, - browser=Env.KARMA_BROWSER, - ), - ) - if is_coverage: - expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format( - platform_root=self.platform_root, - suite=suite - ) - if port: - expected_test_tool_command += f" --port={port}" - else: - expected_test_tool_command = 'jest' - - expected_messages.append(expected_test_tool_command) - - assert self.task_messages == expected_messages diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py deleted file mode 100644 index 36d6dd59e172..000000000000 --- a/pavelib/paver_tests/test_paver_quality.py +++ /dev/null @@ -1,156 +0,0 @@ -""" # lint-amnesty, pylint: disable=django-not-configured -Tests for paver quality tasks -""" - - -import os -import shutil # lint-amnesty, pylint: disable=unused-import -import tempfile -import textwrap -import unittest -from unittest.mock import MagicMock, mock_open, patch # lint-amnesty, pylint: disable=unused-import - -import pytest # lint-amnesty, pylint: disable=unused-import -from ddt import data, ddt, file_data, unpack # lint-amnesty, pylint: disable=unused-import -from path import Path as path -from paver.easy import BuildFailure # lint-amnesty, pylint: disable=unused-import - -import pavelib.quality -from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint # lint-amnesty, pylint: disable=unused-import - -OPEN_BUILTIN = 'builtins.open' - - -@ddt -class TestPaverQualityViolations(unittest.TestCase): - """ - For testing the paver violations-counting tasks - """ - def setUp(self): - super().setUp() - self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with - self.f.close() - self.addCleanup(os.remove, self.f.name) - - def test_pep8_parser(self): - with open(self.f.name, 'w') as f: - f.write("hello\nhithere") - num = len(pavelib.quality._pep8_violations(f.name)) # pylint: disable=protected-access - assert num == 2 - - -class TestPaverReportViolationsCounts(unittest.TestCase): - """ - For testing utility functions for getting counts from reports for - run_eslint and run_xsslint. 
- """ - - def setUp(self): - super().setUp() - - # Temporary file infrastructure - self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with - self.f.close() - - # Cleanup various mocks and tempfiles - self.addCleanup(os.remove, self.f.name) - - def test_get_eslint_violations_count(self): - with open(self.f.name, 'w') as f: - f.write("3000 violations found") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count == 3000 - - def test_get_eslint_violations_no_number_found(self): - with open(self.f.name, 'w') as f: - f.write("Not expected string regex") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count is None - - def test_get_eslint_violations_count_truncated_report(self): - """ - A truncated report (i.e. last line is just a violation) - """ - with open(self.f.name, 'w') as f: - f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access - assert actual_count is None - - def test_generic_value(self): - """ - Default behavior is to look for an integer appearing at head of line - """ - with open(self.f.name, 'w') as f: - f.write("5.777 good to see you") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access - assert actual_count == 5 - - def test_generic_value_none_found(self): - """ - Default behavior is to look for an integer appearing at head of line - """ - with open(self.f.name, 'w') as f: - f.write("hello 5.777 good to see you") - actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access - assert actual_count is None - - def test_get_xsslint_counts_happy(self): - """ - Test happy path getting violation counts from xsslint report. - """ - report = textwrap.dedent(""" - test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos); - - javascript-concat-html: 310 violations - javascript-escape: 7 violations - - 2608 violations total - """) - with open(self.f.name, 'w') as f: - f.write(report) - counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access - self.assertDictEqual(counts, { - 'rules': { - 'javascript-concat-html': 310, - 'javascript-escape': 7, - }, - 'total': 2608, - }) - - def test_get_xsslint_counts_bad_counts(self): - """ - Test getting violation counts from truncated and malformed xsslint - report. 
- """ - report = textwrap.dedent(""" - javascript-concat-html: violations - """) - with open(self.f.name, 'w') as f: - f.write(report) - counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access - self.assertDictEqual(counts, { - 'rules': {}, - 'total': None, - }) - - -class TestPrepareReportDir(unittest.TestCase): - """ - Tests the report directory preparation - """ - - def setUp(self): - super().setUp() - self.test_dir = tempfile.mkdtemp() - self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir) # lint-amnesty, pylint: disable=consider-using-with - self.addCleanup(os.removedirs, self.test_dir) - - def test_report_dir_with_files(self): - assert os.path.exists(self.test_file.name) - pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access - assert not os.path.exists(self.test_file.name) - - def test_report_dir_without_files(self): - os.remove(self.test_file.name) - pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access - assert os.listdir(path(self.test_dir)) == [] diff --git a/pavelib/paver_tests/test_pii_check.py b/pavelib/paver_tests/test_pii_check.py deleted file mode 100644 index d034360acde0..000000000000 --- a/pavelib/paver_tests/test_pii_check.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Tests for Paver's PII checker task. -""" - -import shutil -import tempfile -import unittest -from unittest.mock import patch - -from path import Path as path -from paver.easy import call_task, BuildFailure - -import pavelib.quality -from pavelib.utils.envs import Env - - -class TestPaverPIICheck(unittest.TestCase): - """ - For testing the paver run_pii_check task - """ - def setUp(self): - super().setUp() - self.report_dir = path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, self.report_dir) - - @patch.object(pavelib.quality.run_pii_check, 'needs') - @patch('pavelib.quality.sh') - def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs): - """ - run_pii_check succeeds with proper report dir - """ - # Make the expected stdout files. - cms_stdout_report = self.report_dir / 'pii_check_cms.report' - cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) - lms_stdout_report = self.report_dir / 'pii_check_lms.report' - lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n']) - - mock_needs.return_value = 0 - call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) - mock_calls = [str(call) for call in mock_paver_sh.mock_calls] - assert len(mock_calls) == 2 - assert any('lms.envs.test' in call for call in mock_calls) - assert any('cms.envs.test' in call for call in mock_calls) - assert all(str(self.report_dir) in call for call in mock_calls) - metrics_file = Env.METRICS_DIR / 'pii' - assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' - - @patch.object(pavelib.quality.run_pii_check, 'needs') - @patch('pavelib.quality.sh') - def test_pii_check_failed(self, mock_paver_sh, mock_needs): - """ - run_pii_check fails due to crossing the threshold. - """ - # Make the expected stdout files. - cms_stdout_report = self.report_dir / 'pii_check_cms.report' - cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n']) - lms_stdout_report = self.report_dir / 'pii_check_lms.report' - lms_stdout_report.write_lines([ - 'Coverage found 66 uncovered models:', - 'Coverage threshold not met! 
Needed 100.0, actually 95.0!', - ]) - - mock_needs.return_value = 0 - try: - with self.assertRaises(BuildFailure): - call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)}) - except SystemExit: - # Sometimes the BuildFailure raises a SystemExit, sometimes it doesn't, not sure why. - # As a hack, we just wrap it in try-except. - # This is not good, but these tests weren't even running for years, and we're removing this whole test - # suite soon anyway. - pass - mock_calls = [str(call) for call in mock_paver_sh.mock_calls] - assert len(mock_calls) == 2 - assert any('lms.envs.test' in call for call in mock_calls) - assert any('cms.envs.test' in call for call in mock_calls) - assert all(str(self.report_dir) in call for call in mock_calls) - metrics_file = Env.METRICS_DIR / 'pii' - assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n' diff --git a/pavelib/paver_tests/test_stylelint.py b/pavelib/paver_tests/test_stylelint.py deleted file mode 100644 index 3e1c79c93f28..000000000000 --- a/pavelib/paver_tests/test_stylelint.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Tests for Paver's Stylelint tasks. -""" - -from unittest.mock import MagicMock, patch - -import pytest -import ddt -from paver.easy import call_task - -from .utils import PaverTestCase - - -@ddt.ddt -class TestPaverStylelint(PaverTestCase): - """ - Tests for Paver's Stylelint tasks. - """ - @ddt.data( - [False], - [True], - ) - @ddt.unpack - def test_run_stylelint(self, should_pass): - """ - Verify that the quality task fails with Stylelint violations. - """ - if should_pass: - _mock_stylelint_violations = MagicMock(return_value=0) - with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): - call_task('pavelib.quality.run_stylelint') - else: - _mock_stylelint_violations = MagicMock(return_value=100) - with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations): - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_stylelint') diff --git a/pavelib/paver_tests/test_timer.py b/pavelib/paver_tests/test_timer.py deleted file mode 100644 index 5ccbf74abcf9..000000000000 --- a/pavelib/paver_tests/test_timer.py +++ /dev/null @@ -1,190 +0,0 @@ -""" -Tests of the pavelib.utils.timer module. -""" - - -from datetime import datetime, timedelta -from unittest import TestCase - -from unittest.mock import MagicMock, patch - -from pavelib.utils import timer - - -@timer.timed -def identity(*args, **kwargs): - """ - An identity function used as a default task to test the timing of. - """ - return args, kwargs - - -MOCK_OPEN = MagicMock(spec=open) - - -@patch.dict('pavelib.utils.timer.__builtins__', open=MOCK_OPEN) -class TimedDecoratorTests(TestCase): - """ - Tests of the pavelib.utils.timer:timed decorator. 
- """ - def setUp(self): - super().setUp() - - patch_dumps = patch.object(timer.json, 'dump', autospec=True) - self.mock_dump = patch_dumps.start() - self.addCleanup(patch_dumps.stop) - - patch_makedirs = patch.object(timer.os, 'makedirs', autospec=True) - self.mock_makedirs = patch_makedirs.start() - self.addCleanup(patch_makedirs.stop) - - patch_datetime = patch.object(timer, 'datetime', autospec=True) - self.mock_datetime = patch_datetime.start() - self.addCleanup(patch_datetime.stop) - - patch_exists = patch.object(timer, 'exists', autospec=True) - self.mock_exists = patch_exists.start() - self.addCleanup(patch_exists.stop) - - MOCK_OPEN.reset_mock() - - def get_log_messages(self, task=identity, args=None, kwargs=None, raises=None): - """ - Return all timing messages recorded during the execution of ``task``. - """ - if args is None: - args = [] - if kwargs is None: - kwargs = {} - - if raises is None: - task(*args, **kwargs) - else: - self.assertRaises(raises, task, *args, **kwargs) - - return [ - call[0][0] # log_message - for call in self.mock_dump.call_args_list - ] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_times(self): - start = datetime(2016, 7, 20, 10, 56, 19) - end = start + timedelta(seconds=35.6) - - self.mock_datetime.utcnow.side_effect = [start, end] - - messages = self.get_log_messages() - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'duration' in messages[0] - assert 35.6 == messages[0]['duration'] - - assert 'started_at' in messages[0] - assert start.isoformat(' ') == messages[0]['started_at'] - - assert 'ended_at' in messages[0] - assert end.isoformat(' ') == messages[0]['ended_at'] - - @patch.object(timer, 'PAVER_TIMER_LOG', None) - def test_no_logs(self): - messages = self.get_log_messages() - assert len(messages) == 0 - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_arguments(self): - messages = self.get_log_messages(args=(1, 'foo'), kwargs=dict(bar='baz')) - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'args' in messages[0] - assert [repr(1), repr('foo')] == messages[0]['args'] - assert 'kwargs' in messages[0] - assert {'bar': repr('baz')} == messages[0]['kwargs'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_task_name(self): - messages = self.get_log_messages() - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'task' in messages[0] - assert 'pavelib.paver_tests.test_timer.identity' == messages[0]['task'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_exceptions(self): - - @timer.timed - def raises(): - """ - A task used for testing exception handling of the timed decorator. - """ - raise Exception('The Message!') - - messages = self.get_log_messages(task=raises, raises=Exception) - assert len(messages) == 1 - - # I'm not using assertDictContainsSubset because it is - # removed in python 3.2 (because the arguments were backwards) - # and it wasn't ever replaced by anything *headdesk* - assert 'exception' in messages[0] - assert 'Exception: The Message!' 
== messages[0]['exception'] - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log') - def test_date_formatting(self): - start = datetime(2016, 7, 20, 10, 56, 19) - end = start + timedelta(seconds=35.6) - - self.mock_datetime.utcnow.side_effect = [start, end] - - messages = self.get_log_messages() - assert len(messages) == 1 - - MOCK_OPEN.assert_called_once_with('/tmp/some-log-2016-07-20-10-56-19.log', 'a') - - @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log') - def test_nested_tasks(self): - - @timer.timed - def parent(): - """ - A timed task that calls another task - """ - identity() - - parent_start = datetime(2016, 7, 20, 10, 56, 19) - parent_end = parent_start + timedelta(seconds=60) - child_start = parent_start + timedelta(seconds=10) - child_end = parent_end - timedelta(seconds=10) - - self.mock_datetime.utcnow.side_effect = [parent_start, child_start, child_end, parent_end] - - messages = self.get_log_messages(task=parent) - assert len(messages) == 2 - - # Child messages first - assert 'duration' in messages[0] - assert 40 == messages[0]['duration'] - - assert 'started_at' in messages[0] - assert child_start.isoformat(' ') == messages[0]['started_at'] - - assert 'ended_at' in messages[0] - assert child_end.isoformat(' ') == messages[0]['ended_at'] - - # Parent messages after - assert 'duration' in messages[1] - assert 60 == messages[1]['duration'] - - assert 'started_at' in messages[1] - assert parent_start.isoformat(' ') == messages[1]['started_at'] - - assert 'ended_at' in messages[1] - assert parent_end.isoformat(' ') == messages[1]['ended_at'] diff --git a/pavelib/paver_tests/test_xsslint.py b/pavelib/paver_tests/test_xsslint.py deleted file mode 100644 index a9b4a41e1600..000000000000 --- a/pavelib/paver_tests/test_xsslint.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -Tests for paver xsslint quality tasks -""" -from unittest.mock import patch - -import pytest -from paver.easy import call_task - -import pavelib.quality - -from .utils import PaverTestCase - - -class PaverXSSLintTest(PaverTestCase): - """ - Test run_xsslint with a mocked environment in order to pass in opts - """ - - def setUp(self): - super().setUp() - self.reset_task_messages() - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint encounters an error parsing the xsslint output log - """ - _mock_counts.return_value = {} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint') - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds violations, but a limit was not set - """ - _mock_counts.return_value = {'total': 0} - call_task('pavelib.quality.run_xsslint') - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint fails when thresholds option is poorly formatted - """ - _mock_counts.return_value = {'total': 0} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', 
options={"thresholds": "invalid"}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint fails when thresholds option is poorly formatted - """ - _mock_counts.return_value = {'total': 0} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"invalid": 3}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds more violations than are allowed - """ - _mock_counts.return_value = {'total': 4} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 3}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds fewer violations than are allowed - """ - _mock_counts.return_value = {'total': 4} - # No System Exit is expected - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 5}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint encounters an error parsing the xsslint output log for a - given rule threshold that was set. 
- """ - _mock_counts.return_value = {'total': 4} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds more rule violations than are allowed - """ - _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} - with pytest.raises(SystemExit): - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'}) - - @patch.object(pavelib.quality, '_write_metric') - @patch.object(pavelib.quality, '_prepare_report_dir') - @patch.object(pavelib.quality, '_get_xsslint_counts') - def test_xsslint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric): - """ - run_xsslint finds fewer rule violations than are allowed - """ - _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}} - # No System Exit is expected - call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'}) diff --git a/pavelib/utils/test/suites/__init__.py b/pavelib/utils/test/suites/__init__.py deleted file mode 100644 index 34ecd49c1c74..000000000000 --- a/pavelib/utils/test/suites/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -TestSuite class and subclasses -""" -from .js_suite import JestSnapshotTestSuite, JsTestSuite -from .suite import TestSuite diff --git a/pavelib/utils/test/suites/js_suite.py b/pavelib/utils/test/suites/js_suite.py deleted file mode 100644 index 4e53d454fee5..000000000000 --- a/pavelib/utils/test/suites/js_suite.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Javascript test tasks -""" - - -from paver import tasks - -from pavelib.utils.envs import Env -from pavelib.utils.test import utils as test_utils -from pavelib.utils.test.suites.suite import TestSuite - -__test__ = False # do not collect - - -class JsTestSuite(TestSuite): - """ - A class for running JavaScript tests. 
- """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.report_dir = Env.JS_REPORT_DIR - self.opts = kwargs - - suite = args[0] - self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] - - def __enter__(self): - super().__enter__() - if tasks.environment.dry_run: - tasks.environment.info("make report_dir") - else: - self.report_dir.makedirs_p() - if not self.skip_clean: - test_utils.clean_test_files() - - if self.mode == 'run' and not self.run_under_coverage: - test_utils.clean_dir(self.report_dir) - - @property - def _default_subsuites(self): - """ - Returns all JS test suites - """ - return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] - - -class JsTestSubSuite(TestSuite): - """ - Class for JS suites like cms, cms-squire, lms, common, - common-requirejs and xmodule - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.test_id = args[0] - self.run_under_coverage = kwargs.get('with_coverage', True) - self.mode = kwargs.get('mode', 'run') - self.port = kwargs.get('port') - self.root = self.root + ' javascript' - self.report_dir = Env.JS_REPORT_DIR - - try: - self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] - except ValueError: - self.test_conf_file = Env.KARMA_CONFIG_FILES[0] - - self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' - self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' - - @property - def cmd(self): - """ - Run the tests using karma runner. - """ - cmd = [ - "node", - "--max_old_space_size=4096", - "node_modules/.bin/karma", - "start", - self.test_conf_file, - "--single-run={}".format('false' if self.mode == 'dev' else 'true'), - "--capture-timeout=60000", - f"--junitreportpath={self.xunit_report}", - f"--browsers={Env.KARMA_BROWSER}", - ] - - if self.port: - cmd.append(f"--port={self.port}") - - if self.run_under_coverage: - cmd.extend([ - "--coverage", - f"--coveragereportpath={self.coverage_report}", - ]) - - return cmd - - -class JestSnapshotTestSuite(TestSuite): - """ - A class for running Jest Snapshot tests. - """ - @property - def cmd(self): - """ - Run the tests using Jest. - """ - return ["jest"] diff --git a/pavelib/utils/test/suites/suite.py b/pavelib/utils/test/suites/suite.py deleted file mode 100644 index 5a423c827c21..000000000000 --- a/pavelib/utils/test/suites/suite.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -A class used for defining and running test suites -""" - - -import os -import subprocess -import sys - -from paver import tasks - -from pavelib.utils.process import kill_process - -try: - from pygments.console import colorize -except ImportError: - colorize = lambda color, text: text - -__test__ = False # do not collect - - -class TestSuite: - """ - TestSuite is a class that defines how groups of tests run. - """ - def __init__(self, *args, **kwargs): - self.root = args[0] - self.subsuites = kwargs.get('subsuites', []) - self.failed_suites = [] - self.verbosity = int(kwargs.get('verbosity', 1)) - self.skip_clean = kwargs.get('skip_clean', False) - self.passthrough_options = kwargs.get('passthrough_options', []) - - def __enter__(self): - """ - This will run before the test suite is run with the run_suite_tests method. 
- If self.run_test is called directly, it should be run in a 'with' block to - ensure that the proper context is created. - - Specific setup tasks should be defined in each subsuite. - - i.e. Checking for and defining required directories. - """ - print(f"\nSetting up for {self.root}") - self.failed_suites = [] - - def __exit__(self, exc_type, exc_value, traceback): - """ - This is run after the tests run with the run_suite_tests method finish. - Specific clean up tasks should be defined in each subsuite. - - If self.run_test is called directly, it should be run in a 'with' block - to ensure that clean up happens properly. - - i.e. Cleaning mongo after the lms tests run. - """ - print(f"\nCleaning up after {self.root}") - - @property - def cmd(self): - """ - The command to run tests (as a string). For this base class there is none. - """ - return None - - @staticmethod - def is_success(exit_code): - """ - Determine if the given exit code represents a success of the test - suite. By default, only a zero counts as a success. - """ - return exit_code == 0 - - def run_test(self): - """ - Runs a self.cmd in a subprocess and waits for it to finish. - It returns False if errors or failures occur. Otherwise, it - returns True. - """ - cmd = " ".join(self.cmd) - - if tasks.environment.dry_run: - tasks.environment.info(cmd) - return - - sys.stdout.write(cmd) - - msg = colorize( - 'green', - '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), - ) - - sys.stdout.write(msg) - sys.stdout.flush() - - if 'TEST_SUITE' not in os.environ: - os.environ['TEST_SUITE'] = self.root.replace("/", "_") - kwargs = {'shell': True, 'cwd': None} - process = None - - try: - process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with - return self.is_success(process.wait()) - except KeyboardInterrupt: - kill_process(process) - sys.exit(1) - - def run_suite_tests(self): - """ - Runs each of the suites in self.subsuites while tracking failures - """ - # Uses __enter__ and __exit__ for context - with self: - # run the tests for this class, and for all subsuites - if self.cmd: - passed = self.run_test() - if not passed: - self.failed_suites.append(self) - - for suite in self.subsuites: - suite.run_suite_tests() - if suite.failed_suites: - self.failed_suites.extend(suite.failed_suites) - - def report_test_results(self): - """ - Writes a list of failed_suites to sys.stderr - """ - if self.failed_suites: - msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) - msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') - else: - msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) - - print(msg) - - def run(self): - """ - Runs the tests in the suite while tracking and reporting failures. 
- """ - self.run_suite_tests() - - if tasks.environment.dry_run: - return - - self.report_test_results() - - if self.failed_suites: - sys.exit(1) diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py deleted file mode 100644 index 0851251e2222..000000000000 --- a/pavelib/utils/test/utils.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Helper functions for test tasks -""" - - -import os - -from paver.easy import cmdopts, sh, task - -from pavelib.utils.envs import Env -from pavelib.utils.timer import timed - - -MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) - -COVERAGE_CACHE_BUCKET = "edx-tools-coverage-caches" -COVERAGE_CACHE_BASEPATH = "test_root/who_tests_what" -COVERAGE_CACHE_BASELINE = "who_tests_what.{}.baseline".format(os.environ.get('WTW_CONTEXT', 'all')) -WHO_TESTS_WHAT_DIFF = "who_tests_what.diff" - - -__test__ = False # do not collect - - -@task -@timed -def clean_test_files(): - """ - Clean fixture files used by tests and .pyc files - """ - sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") - # This find command removes all the *.pyc files that aren't in the .git - # directory. See this blog post for more details: - # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html - sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") - sh("rm -rf test_root/log/auto_screenshots/*") - sh("rm -rf /tmp/mako_[cl]ms") - - -@task -@timed -def ensure_clean_package_lock(): - """ - Ensure no untracked changes have been made in the current git context. - """ - sh(""" - git diff --name-only --exit-code package-lock.json || - (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1) - """) - - -def clean_dir(directory): - """ - Delete all the files from the specified directory. - """ - # We delete the files but preserve the directory structure - # so that coverage.py has a place to put the reports. - sh(f'find {directory} -type f -delete') - - -@task -@cmdopts([ - ('skip-clean', 'C', 'skip cleaning repository before running tests'), - ('skip_clean', None, 'deprecated in favor of skip-clean'), -]) -@timed -def clean_reports_dir(options): - """ - Clean coverage files, to ensure that we don't use stale data to generate reports. - """ - if getattr(options, 'skip_clean', False): - print('--skip-clean is set, skipping...') - return - - # We delete the files but preserve the directory structure - # so that coverage.py has a place to put the reports. - reports_dir = Env.REPORT_DIR.makedirs_p() - clean_dir(reports_dir) - - -@task -@timed -def clean_mongo(): - """ - Clean mongo test databases - """ - sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format( - host=Env.MONGO_HOST, - port=MONGO_PORT_NUM, - repo_root=Env.REPO_ROOT, - )) diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh deleted file mode 100755 index 54b9cbb9d500..000000000000 --- a/scripts/generic-ci-tests.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bash -set -e - -############################################################################### -# -# generic-ci-tests.sh -# -# Execute some tests for edx-platform. -# (Most other tests are run by invoking `pytest`, `pylint`, etc. directly) -# -# This script can be called from CI jobs that define -# these environment variables: -# -# `TEST_SUITE` defines which kind of test to run. 
-# Possible values are: -# -# - "quality": Run the quality (pycodestyle/pylint) checks -# - "js-unit": Run the JavaScript tests -# - "pavelib-js-unit": Run the JavaScript tests and the Python unit -# tests from the pavelib/lib directory -# -############################################################################### - -# Clean up previous builds -git clean -qxfd - -function emptyxunit { - - cat > "reports/$1.xml" < - - - -END - -} - -# if specified tox environment is supported, prepend paver commands -# with tox env invocation -if [ -z ${TOX_ENV+x} ] || [[ ${TOX_ENV} == 'null' ]]; then - echo "TOX_ENV: ${TOX_ENV}" - TOX="" -elif tox -l |grep -q "${TOX_ENV}"; then - if [[ "${TOX_ENV}" == 'quality' ]]; then - TOX="" - else - TOX="tox -r -e ${TOX_ENV} --" - fi -else - echo "${TOX_ENV} is not currently supported. Please review the" - echo "tox.ini file to see which environments are supported" - exit 1 -fi - -PAVER_ARGS="-v" -export SUBSET_JOB=$JOB_NAME - -function run_paver_quality { - QUALITY_TASK=$1 - shift - mkdir -p test_root/log/ - LOG_PREFIX="test_root/log/$QUALITY_TASK" - $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || { - echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):"; - tail -n 100 "$LOG_PREFIX.out.log" - echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):"; - tail -n 100 "$LOG_PREFIX.err.log" - return 1; - } - return 0; -} - -case "$TEST_SUITE" in - - "quality") - EXIT=0 - - mkdir -p reports - - echo "Finding pycodestyle violations and storing report..." - run_paver_quality run_pep8 || { EXIT=1; } - echo "Finding ESLint violations and storing report..." - run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; } - echo "Finding Stylelint violations and storing report..." - run_paver_quality run_stylelint || { EXIT=1; } - echo "Running xss linter report." - run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; } - echo "Running PII checker on all Django models..." - run_paver_quality run_pii_check || { EXIT=1; } - echo "Running reserved keyword checker on all Django models..." - run_paver_quality check_keywords || { EXIT=1; } - - # Need to create an empty test result so the post-build - # action doesn't fail the build. - emptyxunit "stub" - exit "$EXIT" - ;; - - "js-unit") - $TOX paver test_js --coverage - $TOX paver diff_coverage - ;; - - "pavelib-js-unit") - EXIT=0 - $TOX paver test_js --coverage --skip-clean || { EXIT=1; } - paver test_lib --skip-clean $PAVER_ARGS || { EXIT=1; } - - # This is to ensure that the build status of the shard is properly set. - # Because we are running two paver commands in a row, we need to capture - # their return codes in order to exit with a non-zero code if either of - # them fail. We put the || clause there because otherwise, when a paver - # command fails, this entire script will exit, and not run the second - # paver command in this case statement. So instead of exiting, the value - # of a variable named EXIT will be set to 1 if either of the paver - # commands fail. We then use this variable's value as our exit code. - # Note that by default the value of this variable EXIT is not set, so if - # neither command fails then the exit command resolves to simply exit - # which is considered successful. 
- exit "$EXIT" - ;; -esac diff --git a/scripts/js_test.py b/scripts/js_test.py new file mode 100644 index 000000000000..69be37f602fe --- /dev/null +++ b/scripts/js_test.py @@ -0,0 +1,495 @@ +""" +Javascript test tasks +""" + +import click +import os +import re +import signal +import sys +import subprocess +from time import sleep + +import psutil +from path import Path as path + +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text + +__test__ = False # do not collect + + +class Env: + """ + Load information about the execution environment. + """ + + @staticmethod + def repo_root(): + """ + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 + """ + + file_path = path(__file__) + attempt = 1 + while True: + try: + absolute_path = file_path.abspath() + break + except OSError: + print(f'Attempt {attempt}/180 to get an absolute path failed') + if attempt < 180: + attempt += 1 + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise + return absolute_path.parent.parent + + # Root of the git repository (edx-platform) + REPO_ROOT = repo_root() + + # Reports Directory + REPORT_DIR = REPO_ROOT / 'reports' + + # Detect if in a Docker container, and if so which one + FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0') + USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0' + + # Configured browser to use for the js test suites + SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox') + if USING_DOCKER: + KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker' + else: + KARMA_BROWSER = 'FirefoxNoUpdates' + + # Files used to run each of the js test suites + # TODO: Store this as a dict. Order seems to matter for some + # reason. See issue TE-415. + KARMA_CONFIG_FILES = [ + REPO_ROOT / 'cms/static/karma_cms.conf.js', + REPO_ROOT / 'cms/static/karma_cms_squire.conf.js', + REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js', + REPO_ROOT / 'lms/static/karma_lms.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js', + REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js', + REPO_ROOT / 'common/static/karma_common.conf.js', + REPO_ROOT / 'common/static/karma_common_requirejs.conf.js', + ] + + JS_TEST_ID_KEYS = [ + 'cms', + 'cms-squire', + 'cms-webpack', + 'lms', + 'xmodule', + 'xmodule-webpack', + 'common', + 'common-requirejs', + 'jest-snapshot' + ] + + JS_REPORT_DIR = REPORT_DIR / 'javascript' + + # Service variant (lms, cms, etc.) configured with an environment variable + # We use this to determine which envs.json file to load. 
+ SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None) + + # If service variant not configured in env, then pass the correct + # environment for lms / cms + if not SERVICE_VARIANT: # this will intentionally catch ""; + if any(i in sys.argv[1:] for i in ('cms', 'studio')): + SERVICE_VARIANT = 'cms' + else: + SERVICE_VARIANT = 'lms' + + +# def clean_test_files(): +# """ +# Clean fixture files used by tests and .pyc files +# """ +# # "git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads" +# subprocess.run("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") +# # This find command removes all the *.pyc files that aren't in the .git +# # directory. See this blog post for more details: +# # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html +# subprocess.run(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;") +# subprocess.run("rm -rf test_root/log/auto_screenshots/*") +# subprocess.run("rm -rf /tmp/mako_[cl]ms") + + +# def clean_dir(directory): +# """ +# Delete all the files from the specified directory. +# """ +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# subprocess.run(f'find {directory} -type f -delete') + + +# @task +# @cmdopts([ +# ('skip-clean', 'C', 'skip cleaning repository before running tests'), +# ('skip_clean', None, 'deprecated in favor of skip-clean'), +# ]) + +# def clean_reports_dir(options): +# """ +# Clean coverage files, to ensure that we don't use stale data to generate reports. +# """ +# if getattr(options, 'skip_clean', False): +# print('--skip-clean is set, skipping...') +# return + +# # We delete the files but preserve the directory structure +# # so that coverage.py has a place to put the reports. +# reports_dir = Env.REPORT_DIR.makedirs_p() +# clean_dir(reports_dir) + + +class TestSuite: + """ + TestSuite is a class that defines how groups of tests run. + """ + def __init__(self, *args, **kwargs): + self.root = args[0] + self.subsuites = kwargs.get('subsuites', []) + self.failed_suites = [] + self.verbosity = int(kwargs.get('verbosity', 1)) + self.skip_clean = kwargs.get('skip_clean', False) + self.passthrough_options = kwargs.get('passthrough_options', []) + + def __enter__(self): + """ + This will run before the test suite is run with the run_suite_tests method. + If self.run_test is called directly, it should be run in a 'with' block to + ensure that the proper context is created. + + Specific setup tasks should be defined in each subsuite. + + i.e. Checking for and defining required directories. + """ + print(f"\nSetting up for {self.root}") + self.failed_suites = [] + + def __exit__(self, exc_type, exc_value, traceback): + """ + This is run after the tests run with the run_suite_tests method finish. + Specific clean up tasks should be defined in each subsuite. + + If self.run_test is called directly, it should be run in a 'with' block + to ensure that clean up happens properly. + + i.e. Cleaning mongo after the lms tests run. + """ + print(f"\nCleaning up after {self.root}") + + @property + def cmd(self): + """ + The command to run tests (as a string). For this base class there is none. + """ + return None + + @staticmethod + def kill_process(proc): + """ + Kill the process `proc` created with `subprocess`. 
+ """ + p1_group = psutil.Process(proc.pid) + child_pids = p1_group.children(recursive=True) + + for child_pid in child_pids: + os.kill(child_pid.pid, signal.SIGKILL) + + @staticmethod + def is_success(exit_code): + """ + Determine if the given exit code represents a success of the test + suite. By default, only a zero counts as a success. + """ + return exit_code == 0 + + def run_test(self): + """ + Runs a self.cmd in a subprocess and waits for it to finish. + It returns False if errors or failures occur. Otherwise, it + returns True. + """ + # cmd = " ".join(self.cmd) + cmd = " ".join(str(part) for part in self.cmd) + sys.stdout.write(cmd) + + msg = colorize( + 'green', + '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40), + ) + + sys.stdout.write(msg) + sys.stdout.flush() + + if 'TEST_SUITE' not in os.environ: + os.environ['TEST_SUITE'] = self.root.replace("/", "_") + kwargs = {'shell': True, 'cwd': None} + process = None + + try: + process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with + return self.is_success(process.wait()) + except KeyboardInterrupt: + self.kill_process(process) + sys.exit(1) + + def run_suite_tests(self): + """ + Runs each of the suites in self.subsuites while tracking failures + """ + # Uses __enter__ and __exit__ for context + with self: + # run the tests for this class, and for all subsuites + if self.cmd: + passed = self.run_test() + if not passed: + self.failed_suites.append(self) + + for suite in self.subsuites: + suite.run_suite_tests() + if suite.failed_suites: + self.failed_suites.extend(suite.failed_suites) + + def report_test_results(self): + """ + Writes a list of failed_suites to sys.stderr + """ + if self.failed_suites: + msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48)) + msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n') + else: + msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48)) + + print(msg) + + def run(self): + """ + Runs the tests in the suite while tracking and reporting failures. + """ + self.run_suite_tests() + + # if tasks.environment.dry_run: + # return + + self.report_test_results() + + if self.failed_suites: + sys.exit(1) + + +class JsTestSuite(TestSuite): + """ + A class for running JavaScript tests. 
+ """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.report_dir = Env.JS_REPORT_DIR + self.opts = kwargs + + suite = args[0] + self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)] + + def __enter__(self): + super().__enter__() + self.report_dir.makedirs_p() + # self.report_dir.mkdir(exist_ok=True) + # if not self.skip_clean: + # test_utils.clean_test_files() + + # if self.mode == 'run' and not self.run_under_coverage: + # test_utils.clean_dir(self.report_dir) + + @property + def _default_subsuites(self): + """ + Returns all JS test suites + """ + return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot'] + + +class JsTestSubSuite(TestSuite): + """ + Class for JS suites like cms, cms-squire, lms, common, + common-requirejs and xmodule + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.test_id = args[0] + self.run_under_coverage = kwargs.get('with_coverage', True) + self.mode = kwargs.get('mode', 'run') + self.port = kwargs.get('port') + self.root = self.root + ' javascript' + self.report_dir = Env.JS_REPORT_DIR + + try: + self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)] + except ValueError: + self.test_conf_file = Env.KARMA_CONFIG_FILES[0] + + self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml' + self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml' + + @property + def cmd(self): + """ + Run the tests using karma runner. + """ + cmd = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/karma", + "start", + self.test_conf_file, + "--single-run={}".format('false' if self.mode == 'dev' else 'true'), + "--capture-timeout=60000", + f"--junitreportpath={self.xunit_report}", + f"--browsers={Env.KARMA_BROWSER}", + ] + + if self.port: + cmd.append(f"--port={self.port}") + + if self.run_under_coverage: + cmd.extend([ + "--coverage", + f"--coveragereportpath={self.coverage_report}", + ]) + + return cmd + + +class JestSnapshotTestSuite(TestSuite): + """ + A class for running Jest Snapshot tests. + """ + @property + def cmd(self): + """ + Run the tests using Jest. + """ + return ["jest"] + + +def test_js(suite, mode, coverage, port, skip_clean): + """ + Run the JavaScript tests + """ + + if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS): + sys.stderr.write( + "Unknown test suite. 
Please choose from ({suites})\n".format( + suites=", ".join(Env.JS_TEST_ID_KEYS) + ) + ) + return + + if suite != 'jest-snapshot': + test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean) + test_suite.run() + + if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in + test_suite = JestSnapshotTestSuite('jest') + test_suite.run() + + +# @needs('pavelib.prereqs.install_coverage_prereqs') +# @cmdopts([ +# ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"), +# ], share_with=['coverage']) + +def diff_coverage(): + """ + Build the diff coverage reports + """ + + compare_branch = 'origin/master' + + # Find all coverage XML files (both Python and JavaScript) + xml_reports = [] + for filepath in Env.REPORT_DIR.walk(): + if bool(re.match(r'^coverage.*\.xml$', filepath.basename())): + xml_reports.append(filepath) + + if not xml_reports: + err_msg = colorize( + 'red', + "No coverage info found. Run `quality test` before running " + "`coverage test`.\n" + ) + sys.stderr.write(err_msg) + else: + xml_report_str = ' '.join(xml_reports) + diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html') + + # Generate the diff coverage reports (HTML and console) + # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153 + command = ( + f"diff-cover {xml_report_str}" + f"--diff-range-notation '..'" + f"--compare-branch={compare_branch} " + f"--html-report {diff_html_path}" + ) + subprocess.run(command, + shell=True, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True) + + +@click.command("main") +@click.option( + '--option', 'option', + help='Run javascript tests or coverage test as per given option' +) +@click.option( + '--s', 'suite', + default='all', + help='Test suite to run.' +) +@click.option( + '--m', 'mode', + default='run', + help='dev or run' +) +@click.option( + '--coverage', 'coverage', + default=True, + help='Run test under coverage' +) +@click.option( + '--p', 'port', + default=None, + help='Port to run test server on (dev mode only)' +) +@click.option( + '--C', 'skip_clean', + default=False, + help='skip cleaning repository before running tests' +) +def main(option, suite, mode, coverage, port, skip_clean): + if option == 'jstest': + test_js(suite, mode, coverage, port, skip_clean) + elif option == 'coverage': + diff_coverage() + + +if __name__ == "__main__": + main() diff --git a/pavelib/quality.py b/scripts/quality_test.py similarity index 58% rename from pavelib/quality.py rename to scripts/quality_test.py index 774179f45048..fb7d1e481eb9 100644 --- a/pavelib/quality.py +++ b/scripts/quality_test.py @@ -2,171 +2,188 @@ Check code quality using pycodestyle, pylint, and diff_quality. 
""" +import argparse +import glob import json import os import re -from datetime import datetime -from xml.sax.saxutils import quoteattr +import sys +import subprocess +import shutil +from pathlib import Path +from time import sleep -from paver.easy import BuildFailure, cmdopts, needs, sh, task +try: + from pygments.console import colorize +except ImportError: + colorize = lambda color, text: text -from .utils.envs import Env -from .utils.timer import timed -ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts' -JUNIT_XML_TEMPLATE = """ - -{failure_element} - -""" -JUNIT_XML_FAILURE_TEMPLATE = '' -START_TIME = datetime.utcnow() +class BuildFailure(Exception): + """Represents a problem with some part of the build's execution.""" -def write_junit_xml(name, message=None): +def fail_quality(name, message): """ - Write a JUnit results XML file describing the outcome of a quality check. + Fail the specified quality check. """ - if message: - failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message)) - else: - failure_element = '' - data = { - 'failure_count': 1 if message else 0, - 'failure_element': failure_element, - 'name': name, - 'seconds': (datetime.utcnow() - START_TIME).total_seconds(), - } - Env.QUALITY_DIR.makedirs_p() - filename = Env.QUALITY_DIR / f'{name}.xml' - with open(filename, 'w') as f: - f.write(JUNIT_XML_TEMPLATE.format(**data)) + print(name) + print(message) + sys.exit() -def fail_quality(name, message): +def _prepare_report_dir(dir_name): """ - Fail the specified quality check by generating the JUnit XML results file - and raising a ``BuildFailure``. + Sets a given directory to a created, but empty state """ - write_junit_xml(name, message) - raise BuildFailure(message) + if os.path.isdir(dir_name): + shutil.rmtree(dir_name) + os.makedirs(dir_name, exist_ok=True) -def top_python_dirs(dirname): +def repo_root(): """ - Find the directories to start from in order to find all the Python files in `dirname`. + Get the root of the git repository (edx-platform). + + This sometimes fails on Docker Devstack, so it's been broken + down with some additional error handling. It usually starts + working within 30 seconds or so; for more details, see + https://openedx.atlassian.net/browse/PLAT-1629 and + https://github.com/docker/for-mac/issues/1509 """ - top_dirs = [] - dir_init = os.path.join(dirname, "__init__.py") - if os.path.exists(dir_init): - top_dirs.append(dirname) + file_path = Path(__file__) + max_attempts = 180 + for attempt in range(1, max_attempts + 1): + try: + absolute_path = file_path.resolve(strict=True) + return absolute_path.parents[1] + except OSError: + print(f'Attempt {attempt}/{max_attempts} to get an absolute path failed') + if attempt < max_attempts: + sleep(1) + else: + print('Unable to determine the absolute path of the edx-platform repo, aborting') + raise RuntimeError('Could not determine the repository root after multiple attempts') - for directory in ['djangoapps', 'lib']: - subdir = os.path.join(dirname, directory) - subdir_init = os.path.join(subdir, "__init__.py") - if os.path.exists(subdir) and not os.path.exists(subdir_init): - dirs = os.listdir(subdir) - top_dirs.extend(d for d in dirs if os.path.isdir(os.path.join(subdir, d))) - modules_to_remove = ['__pycache__'] - for module in modules_to_remove: - if module in top_dirs: - top_dirs.remove(module) +def _get_report_contents(filename, report_name, last_line_only=False): + """ + Returns the contents of the given file. 
Use last_line_only to only return + the last line, which can be used for getting output from quality output + files. - return top_dirs + Arguments: + last_line_only: True to return the last line only, False to return a + string with full contents. + Returns: + String containing full contents of the report, or the last line. -def _get_pep8_violations(clean=True): - """ - Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string) - where violations_string is a string of all PEP 8 violations found, separated - by new lines. """ - report_dir = (Env.REPORT_DIR / 'pep8') - if clean: - report_dir.rmtree(ignore_errors=True) - report_dir.makedirs_p() - report = report_dir / 'pep8.report' + if os.path.isfile(filename): + with open(filename) as report_file: + if last_line_only: + lines = report_file.readlines() + for line in reversed(lines): + if line != '\n': + return line + return None + else: + return report_file.read() + else: + file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" + fail_quality(report_name, file_not_found_message) - # Make sure the metrics subdirectory exists - Env.METRICS_DIR.makedirs_p() - if not report.exists(): - sh(f'pycodestyle . | tee {report} -a') +def _get_count_from_last_line(filename, file_type): + """ + This will return the number in the last line of a file. + It is returning only the value (as a floating number). + """ + report_contents = _get_report_contents(filename, file_type, last_line_only=True) + if report_contents is None: + return 0 - violations_list = _pep8_violations(report) + last_line = report_contents.strip() + # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" + regex = r'^\d+' - return len(violations_list), violations_list + try: + return float(re.search(regex, last_line).group(0)) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + return None -def _pep8_violations(report_file): +def _get_stylelint_violations(): """ - Returns the list of all PEP 8 violations in the given report_file. + Returns the number of Stylelint violations. """ - with open(report_file) as f: - return f.readlines() + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + stylelint_report_dir = (REPORT_DIR / "stylelint") + stylelint_report = stylelint_report_dir / "stylelint.report" + _prepare_report_dir(stylelint_report_dir) + command = [ + 'node', 'node_modules/stylelint', + '*scss_files', + '--custom-formatter', 'stylelint-formatter-pretty/index.js' + ] + + with open(stylelint_report, 'w') as report_file: + subprocess.run( + command, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) -@task -@cmdopts([ - ("system=", "s", "System to act on"), -]) -@timed -def run_pep8(options): # pylint: disable=unused-argument - """ - Run pycodestyle on system code. - Fail the task if any violations are found. 
- """ - (count, violations_list) = _get_pep8_violations() - violations_list = ''.join(violations_list) - - # Print number of violations to log - violations_count_str = f"Number of PEP 8 violations: {count}" - print(violations_count_str) - print(violations_list) - - # Also write the number of violations to a file - with open(Env.METRICS_DIR / "pep8", "w") as f: - f.write(violations_count_str + '\n\n') - f.write(violations_list) - - # Fail if any violations are found - if count: - failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str - failure_string += f"\n\nViolations:\n{violations_list}" - fail_quality('pep8', failure_string) - else: - write_junit_xml('pep8') - - -@task -@needs( - 'pavelib.prereqs.install_node_prereqs', - 'pavelib.utils.test.utils.ensure_clean_package_lock', -) -@cmdopts([ - ("limit=", "l", "limit for number of acceptable violations"), -]) -@timed -def run_eslint(options): + try: + return int(_get_count_from_last_line(stylelint_report, "stylelint")) + except TypeError: + fail_quality( + 'stylelint', + "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( + stylelint_report=stylelint_report + ) + ) + + +def run_eslint(): """ Runs eslint on static asset directories. If limit option is passed, fails build if more violations than the limit are found. """ - eslint_report_dir = (Env.REPORT_DIR / "eslint") + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + eslint_report_dir = REPORT_DIR / "eslint" eslint_report = eslint_report_dir / "eslint.report" _prepare_report_dir(eslint_report_dir) - violations_limit = int(getattr(options, 'limit', -1)) - - sh( - "node --max_old_space_size=4096 node_modules/.bin/eslint " - "--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format( - eslint_report=eslint_report - ), - ignore_error=True - ) + violations_limit = 4950 + + command = [ + "node", + "--max_old_space_size=4096", + "node_modules/.bin/eslint", + "--ext", ".js", + "--ext", ".jsx", + "--format=compact", + "." + ] + + with open(eslint_report, 'w') as report_file: + subprocess.run( + command, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True, + check=False + ) try: num_violations = int(_get_count_from_last_line(eslint_report, "eslint")) @@ -178,9 +195,6 @@ def run_eslint(options): ) ) - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "eslint")) - # Fail if number of violations is greater than the limit if num_violations > violations_limit > -1: fail_quality( @@ -190,81 +204,231 @@ def run_eslint(options): ) ) else: - write_junit_xml('eslint') + print("successfully run eslint with violations") + print(num_violations) -def _get_stylelint_violations(): +def run_stylelint(): """ - Returns the number of Stylelint violations. + Runs stylelint on Sass files. + If limit option is passed, fails build if more violations than the limit are found. 
""" - stylelint_report_dir = (Env.REPORT_DIR / "stylelint") - stylelint_report = stylelint_report_dir / "stylelint.report" - _prepare_report_dir(stylelint_report_dir) - formatter = 'node_modules/stylelint-formatter-pretty' - - sh( - "stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format( - formatter=formatter, - stylelint_report=stylelint_report, - ), - ignore_error=True - ) - try: - return int(_get_count_from_last_line(stylelint_report, "stylelint")) - except TypeError: + violations_limit = 0 + num_violations = _get_stylelint_violations() + # Fail if number of violations is greater than the limit + if num_violations > violations_limit: fail_quality( 'stylelint', - "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format( - stylelint_report=stylelint_report + "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( + count=num_violations, + violations_limit=violations_limit, ) ) + else: + print("successfully run stylelint with violations") + print(num_violations) -@task -@needs('pavelib.prereqs.install_node_prereqs') -@cmdopts([ - ("limit=", "l", "limit for number of acceptable violations"), -]) -@timed -def run_stylelint(options): +def _extract_missing_pii_annotations(filename): """ - Runs stylelint on Sass files. - If limit option is passed, fails build if more violations than the limit are found. + Returns the number of uncovered models from the stdout report of django_find_annotations. + + Arguments: + filename: Filename where stdout of django_find_annotations was captured. + + Returns: + three-tuple containing: + 1. The number of uncovered models, + 2. A bool indicating whether the coverage is still below the threshold, and + 3. The full report as a string. """ - violations_limit = 0 - num_violations = _get_stylelint_violations() + uncovered_models = 0 + pii_check_passed = True + if os.path.isfile(filename): + with open(filename) as report_file: + lines = report_file.readlines() - # Record the metric - _write_metric(num_violations, (Env.METRICS_DIR / "stylelint")) + # Find the count of uncovered models. + uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') + for line in lines: + uncovered_match = uncovered_regex.match(line) + if uncovered_match: + uncovered_models = int(uncovered_match.groups()[0]) + break - # Fail if number of violations is greater than the limit - if num_violations > violations_limit: + # Find a message which suggests the check failed. + failure_regex = re.compile(r'^Coverage threshold not met!') + for line in lines: + failure_match = failure_regex.match(line) + if failure_match: + pii_check_passed = False + break + + # Each line in lines already contains a newline. + full_log = ''.join(lines) + else: + fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') + + return (uncovered_models, pii_check_passed, full_log) + + +def run_pii_check(): + """ + Guarantee that all Django models are PII-annotated. 
+ """ + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + pii_report_name = 'pii' + default_report_dir = (REPORT_DIR / pii_report_name) + report_dir = default_report_dir + output_file = os.path.join(report_dir, 'pii_check_{}.report') + env_report = [] + pii_check_passed = True + + for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): + try: + print(f"Running {env_name} PII Annotation check and report") + print("-" * 45) + + run_output_file = str(output_file).format(env_name.lower()) + os.makedirs(report_dir, exist_ok=True) + + # Prepare the environment for the command + env = { + **os.environ, # Include the current environment variables + "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment + } + + command = [ + "code_annotations", + "django_find_annotations", + "--config_file", ".pii_annotations.yml", + "--report_path", str(report_dir), + "--app_name", env_name.lower() + ] + + # Run the command without shell=True + with open(run_output_file, 'w') as report_file: + subprocess.run( + command, + env=env, # Pass the environment with DJANGO_SETTINGS_MODULE + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) + + # Extract results + uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) + env_report.append(( + uncovered_model_count, + full_log, + )) + + except BuildFailure as error_message: + fail_quality(pii_report_name, f'FAILURE: {error_message}') + + # Update pii_check_passed based on the result of the current environment + if not pii_check_passed_env: + pii_check_passed = False + + # If the PII check failed in any environment, fail the task + if not pii_check_passed: + fail_quality('pii', full_log) + else: + print("Successfully ran pii_check") + + +def check_keywords(): + """ + Check Django model fields for names that conflict with a list of reserved keywords + """ + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + report_path = REPORT_DIR / 'reserved_keywords' + report_path.mkdir(parents=True, exist_ok=True) + + overall_status = True + for env_name, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: + report_file_path = report_path / f"{env_name}_reserved_keyword_report.csv" + override_file = os.path.join(REPO_ROOT, "db_keyword_overrides.yml") + try: + env = { + **os.environ, # Include the current environment variables + "DJANGO_SETTINGS_MODULE": env_settings_file # Set DJANGO_SETTINGS_MODULE for each environment + } + command = [ + "python", "manage.py", env_name, "check_reserved_keywords", + "--override_file", str(override_file), + "--report_path", str(report_path), + "--report_file", str(report_file_path) + ] + with open(report_file_path, 'w') as report_file: + subprocess.run( + command, + env=env, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) + except BuildFailure: + overall_status = False + if not overall_status: fail_quality( - 'stylelint', - "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format( - count=num_violations, - violations_limit=violations_limit, + 'keywords', + 'Failure: reserved keyword checker failed. 
Reports can be found here: {}'.format( + report_path + ) + ) + else: + print("successfully run check_keywords") + + -@task -@needs('pavelib.prereqs.install_python_prereqs') -@cmdopts([ - ("thresholds=", "t", "json containing limit for number of acceptable violations per rule"), -]) -@timed -def run_xsslint(options): +def _get_xsslint_counts(filename): + """ + This returns a dict of violations from the xsslint report. + + Arguments: + filename: The name of the xsslint report. + + Returns: + A dict containing the following: + rules: A dict containing the count for each rule as follows: + violation-rule-id: N, where N is the number of violations + total: M, where M is the number of total violations + + """ + report_contents = _get_report_contents(filename, 'xsslint') + rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE) + total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE) + violations = {'rules': {}} + for violation_match in rule_count_regex.finditer(report_contents): + try: + violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) + except ValueError: + violations['rules'][violation_match.group('rule_id')] = None + try: + violations['total'] = int(total_count_regex.search(report_contents).group('count')) + # An AttributeError will occur if the regex finds no matches. + # A ValueError will occur if the returned regex cannot be cast as a float. + except (AttributeError, ValueError): + violations['total'] = None + return violations + + +def run_xsslint(): """ Runs xsslint/xss_linter.py on the codebase """ - thresholds_option = getattr(options, 'thresholds', '{}') try: - violation_thresholds = json.loads(thresholds_option) + thresholds_option = 'scripts/xsslint_thresholds.json' + # Read the JSON file + with open(thresholds_option, 'r') as file: + violation_thresholds = json.load(file) + except ValueError: violation_thresholds = None if isinstance(violation_thresholds, dict) is False or \ @@ -280,20 +444,25 @@ def run_xsslint(options): ) xsslint_script = "xss_linter.py" - xsslint_report_dir = (Env.REPORT_DIR / "xsslint") + REPO_ROOT = repo_root() + REPORT_DIR = REPO_ROOT / 'reports' + xsslint_report_dir = (REPORT_DIR / "xsslint") xsslint_report = xsslint_report_dir / "xsslint.report" _prepare_report_dir(xsslint_report_dir) - sh( - "{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format( - repo_root=Env.REPO_ROOT, - xsslint_script=xsslint_script, - xsslint_report=xsslint_report, - cfg_module='scripts.xsslint_config' - ), - ignore_error=True - ) - + command = [ + f"{REPO_ROOT}/scripts/xsslint/{xsslint_script}", + "--rule-totals", + "--config=scripts.xsslint_config" + ] + with open(xsslint_report, 'w') as report_file: + subprocess.run( + command, + check=True, + stdout=report_file, + stderr=subprocess.STDOUT, + text=True + ) xsslint_counts = _get_xsslint_counts(xsslint_report) @@ -316,14 +485,7 @@ def run_xsslint(options): ) ) - metrics_report = (Env.METRICS_DIR / "xsslint") - # Record the metric - _write_metric(metrics_str, metrics_report) - # Print number of violations to log. - sh(f"cat {metrics_report}", ignore_error=True) - error_message = "" - # Test total violations against threshold.
if 'total' in list(violation_thresholds.keys()): if violation_thresholds['total'] < xsslint_counts['total']: @@ -359,244 +521,27 @@ def run_xsslint(options): ) ) else: - write_junit_xml('xsslint') - - -def _write_metric(metric, filename): - """ - Write a given metric to a given file - Used for things like reports/metrics/eslint, which will simply tell you the number of - eslint violations found - """ - Env.METRICS_DIR.makedirs_p() - - with open(filename, "w") as metric_file: - metric_file.write(str(metric)) - - -def _prepare_report_dir(dir_name): - """ - Sets a given directory to a created, but empty state - """ - dir_name.rmtree_p() - dir_name.mkdir_p() - - -def _get_report_contents(filename, report_name, last_line_only=False): - """ - Returns the contents of the given file. Use last_line_only to only return - the last line, which can be used for getting output from quality output - files. - - Arguments: - last_line_only: True to return the last line only, False to return a - string with full contents. - - Returns: - String containing full contents of the report, or the last line. - - """ - if os.path.isfile(filename): - with open(filename) as report_file: - if last_line_only: - lines = report_file.readlines() - for line in reversed(lines): - if line != '\n': - return line - return None - else: - return report_file.read() - else: - file_not_found_message = f"FAILURE: The following log file could not be found: {filename}" - fail_quality(report_name, file_not_found_message) + print("successfully run xsslint") -def _get_count_from_last_line(filename, file_type): - """ - This will return the number in the last line of a file. - It is returning only the value (as a floating number). - """ - report_contents = _get_report_contents(filename, file_type, last_line_only=True) +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("command", choices=['eslint', 'stylelint', + 'xsslint', 'pii_check', 'check_keywords']) - if report_contents is None: - return 0 + argument = parser.parse_args() - last_line = report_contents.strip() - # Example of the last line of a compact-formatted eslint report (for example): "62829 problems" - regex = r'^\d+' + if argument.command == 'eslint': + run_eslint() - try: - return float(re.search(regex, last_line).group(0)) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - return None + elif argument.command == 'stylelint': + run_stylelint() + elif argument.command == 'xsslint': + run_xsslint() -def _get_xsslint_counts(filename): - """ - This returns a dict of violations from the xsslint report. - - Arguments: - filename: The name of the xsslint report. 
- - Returns: - A dict containing the following: - rules: A dict containing the count for each rule as follows: - violation-rule-id: N, where N is the number of violations - total: M, where M is the number of total violations + elif argument.command == 'pii_check': + run_pii_check() - """ - report_contents = _get_report_contents(filename, 'xsslint') - rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE) - total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE) - violations = {'rules': {}} - for violation_match in rule_count_regex.finditer(report_contents): - try: - violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count')) - except ValueError: - violations['rules'][violation_match.group('rule_id')] = None - try: - violations['total'] = int(total_count_regex.search(report_contents).group('count')) - # An AttributeError will occur if the regex finds no matches. - # A ValueError will occur if the returned regex cannot be cast as a float. - except (AttributeError, ValueError): - violations['total'] = None - return violations - - -def _extract_missing_pii_annotations(filename): - """ - Returns the number of uncovered models from the stdout report of django_find_annotations. - - Arguments: - filename: Filename where stdout of django_find_annotations was captured. - - Returns: - three-tuple containing: - 1. The number of uncovered models, - 2. A bool indicating whether the coverage is still below the threshold, and - 3. The full report as a string. - """ - uncovered_models = 0 - pii_check_passed = True - if os.path.isfile(filename): - with open(filename) as report_file: - lines = report_file.readlines() - - # Find the count of uncovered models. - uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered') - for line in lines: - uncovered_match = uncovered_regex.match(line) - if uncovered_match: - uncovered_models = int(uncovered_match.groups()[0]) - break - - # Find a message which suggests the check failed. - failure_regex = re.compile(r'^Coverage threshold not met!') - for line in lines: - failure_match = failure_regex.match(line) - if failure_match: - pii_check_passed = False - break - - # Each line in lines already contains a newline. - full_log = ''.join(lines) - else: - fail_quality('pii', f'FAILURE: Log file could not be found: {filename}') - - return (uncovered_models, pii_check_passed, full_log) - - -@task -@needs('pavelib.prereqs.install_python_prereqs') -@cmdopts([ - ("report-dir=", "r", "Directory in which to put PII reports"), -]) -@timed -def run_pii_check(options): - """ - Guarantee that all Django models are PII-annotated. 
- """ - pii_report_name = 'pii' - default_report_dir = (Env.REPORT_DIR / pii_report_name) - report_dir = getattr(options, 'report_dir', default_report_dir) - output_file = os.path.join(report_dir, 'pii_check_{}.report') - env_report = [] - pii_check_passed = True - for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")): - try: - print() - print(f"Running {env_name} PII Annotation check and report") - print("-" * 45) - run_output_file = str(output_file).format(env_name.lower()) - sh( - "mkdir -p {} && " # lint-amnesty, pylint: disable=duplicate-string-formatting-argument - "export DJANGO_SETTINGS_MODULE={}; " - "code_annotations django_find_annotations " - "--config_file .pii_annotations.yml --report_path {} --app_name {} " - "--lint --report --coverage | tee {}".format( - report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file - ) - ) - uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file) - env_report.append(( - uncovered_model_count, - full_log, - )) - - except BuildFailure as error_message: - fail_quality(pii_report_name, f'FAILURE: {error_message}') - - if not pii_check_passed_env: - pii_check_passed = False - - # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count. - uncovered_count, full_log = max(env_report, key=lambda r: r[0]) - - # Write metric file. - if uncovered_count is None: - uncovered_count = 0 - metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n" - _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name)) - - # Finally, fail the paver task if code_annotations suggests that the check failed. - if not pii_check_passed: - fail_quality('pii', full_log) - - -@task -@needs('pavelib.prereqs.install_python_prereqs') -@timed -def check_keywords(): - """ - Check Django model fields for names that conflict with a list of reserved keywords - """ - report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords') - sh(f"mkdir -p {report_path}") - - overall_status = True - for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]: - report_file = f"{env}_reserved_keyword_report.csv" - override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml") - try: - sh( - "export DJANGO_SETTINGS_MODULE={settings_file}; " - "python manage.py {app} check_reserved_keywords " - "--override_file {override_file} " - "--report_path {report_path} " - "--report_file {report_file}".format( - settings_file=env_settings_file, app=env, override_file=override_file, - report_path=report_path, report_file=report_file - ) - ) - except BuildFailure: - overall_status = False - - if not overall_status: - fail_quality( - 'keywords', - 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format( - report_path - ) - ) + elif argument.command == 'check_keywords': + check_keywords()