diff --git a/pavelib/__init__.py b/pavelib/__init__.py
index f195d22007ff..875068166ff5 100644
--- a/pavelib/__init__.py
+++ b/pavelib/__init__.py
@@ -3,4 +3,4 @@
 """
 
-from . import assets, docs, i18n, js_test, prereqs, quality, servers
+from . import assets, js_test, prereqs, quality
 
diff --git a/pavelib/docs.py b/pavelib/docs.py
deleted file mode 100644
index c2075e10daa1..000000000000
--- a/pavelib/docs.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-Open edX Documentation Builder
-Ties into Sphinx to generate files at the specified location(s)
-"""
-
-
-import sys
-
-from paver.easy import cmdopts, needs, sh, task
-
-from .utils.timer import timed
-
-DOC_PATHS = {
-    "dev": "docs/en_us/developers",
-    "author": "docs/en_us/course_authors",
-    "data": "docs/en_us/data",
-    "default": "docs/en_us"
-}
-
-
-def valid_doc_types():
-    """
-    Return a comma-separated string of valid doc types.
-    """
-    return ", ".join(list(DOC_PATHS.keys()))
-
-
-def doc_path(options, allow_default=True):
-    """
-    Parse `options` (from the Paver task args) to determine the path
-    to the documentation directory.
-    If the specified path is not one of the valid options, print an error
-    message and exit.
-
-    If `allow_default` is False, then require that a type is specified,
-    and exit with an error message if it isn't.
-    """
-    doc_type = getattr(options, 'type', 'default')
-    path = DOC_PATHS.get(doc_type)
-
-    if doc_type == 'default' and not allow_default:
-        print(
-            "You must specify a documentation type using '--type'. "
-            "Valid options are: {options}".format(
-                options=valid_doc_types()
-            )
-        )
-        sys.exit(1)
-
-    if path is None:
-        print(
-            "Invalid documentation type '{doc_type}'. "
-            "Valid options are: {options}".format(
-                doc_type=doc_type, options=valid_doc_types()
-            )
-        )
-        sys.exit(1)
-
-    else:
-        return path
-
-
-@task
-@needs('pavelib.prereqs.install_prereqs')
-@cmdopts([
-    ("type=", "t", "Type of docs to compile"),
-    ("verbose", "v", "Display verbose output"),
-])
-@timed
-def build_docs(options):
-    """
-    Invoke sphinx 'make build' to generate docs.
-    """
-    verbose = getattr(options, 'verbose', False)
-
-    cmd = "cd {dir}; make html quiet={quiet}".format(
-        dir=doc_path(options),
-        quiet="false" if verbose else "true"
-    )
-
-    sh(cmd)
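The deleted `build_docs` task was a thin wrapper: it resolved one of the `DOC_PATHS` entries and shelled out to the Sphinx Makefile. A minimal sketch of the equivalent direct invocation, assuming the `docs/en_us` Makefiles still expose the `html` target the task relied on:

    import subprocess

    # Equivalent of `paver build_docs --type=dev --verbose`: resolve the doc
    # directory, then drive the Sphinx Makefile directly.
    doc_dir = "docs/en_us/developers"  # one of the DOC_PATHS values above
    subprocess.run(["make", "html", "quiet=false"], cwd=doc_dir, check=True)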
diff --git a/pavelib/i18n.py b/pavelib/i18n.py
deleted file mode 100644
index cc960e3948a3..000000000000
--- a/pavelib/i18n.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Internationalization tasks
-"""
-
-import subprocess
-import sys
-
-from paver.easy import cmdopts, needs, sh, task
-
-from .utils.envs import Env
-from .utils.timer import timed
-
-try:
-    from pygments.console import colorize
-except ImportError:
-    colorize = lambda color, text: text
-
-DEFAULT_SETTINGS = Env.DEVSTACK_SETTINGS
-
-
-@task
-@needs(
-    "pavelib.prereqs.install_prereqs",
-    "pavelib.i18n.i18n_validate_gettext",
-)
-@cmdopts([
-    ("verbose", "v", "Sets 'verbose' to True"),
-])
-@timed
-def i18n_extract(options):
-    """
-    Extract localizable strings from sources
-    """
-    verbose = getattr(options, "verbose", None)
-    cmd = "i18n_tool extract"
-
-    if verbose:
-        cmd += " -v"
-
-    sh(cmd)
-
-
-@task
-@needs("pavelib.i18n.i18n_extract")
-@timed
-def i18n_generate():
-    """
-    Compile localizable strings from sources, extracting strings first.
-    """
-    sh("i18n_tool generate")
-
-
-@task
-@needs("pavelib.i18n.i18n_extract")
-@timed
-def i18n_generate_strict():
-    """
-    Compile localizable strings from sources, extracting strings first.
-    Complains if files are missing.
-    """
-    sh("i18n_tool generate --strict")
-
-
-@task
-@needs("pavelib.i18n.i18n_extract")
-@timed
-def i18n_dummy():
-    """
-    Simulate international translation by generating dummy strings
-    corresponding to source strings.
-    """
-    sh("i18n_tool dummy")
-    # Need to then compile the new dummy strings
-    sh("i18n_tool generate")
-
-
-@task
-@timed
-def i18n_validate_gettext():
-    """
-    Make sure GNU gettext utilities are available
-    """
-
-    returncode = subprocess.call(['which', 'xgettext'])
-
-    if returncode != 0:
-        msg = colorize(
-            'red',
-            "Cannot locate GNU gettext utilities, which are "
-            "required by django for internationalization.\n (see "
-            "https://docs.djangoproject.com/en/dev/topics/i18n/"
-            "translation/#message-files)\nTry downloading them from "
-            "http://www.gnu.org/software/gettext/ \n"
-        )
-
-        sys.stderr.write(msg)
-        sys.exit(1)
-
-
-@task
-@timed
-def i18n_clean():
-    """
-    Clean the i18n directory of artifacts
-    """
-    sh('git clean -fdX conf/locale')
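Each deleted i18n task delegated to the `i18n_tool` CLI from the edx-i18n-tools package, so the workflow itself outlives the Paver wrappers. A sketch of the command sequence the removed `i18n_dummy` task encoded (`i18n_generate_strict` corresponds to `i18n_tool generate --strict`):

    import subprocess

    # extract -> dummy -> generate, mirroring the removed i18n_dummy task.
    for cmd in ("i18n_tool extract", "i18n_tool dummy", "i18n_tool generate"):
        subprocess.run(cmd.split(), check=True)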
diff --git a/pavelib/paver_tests/test_extract_and_generate.py b/pavelib/paver_tests/test_extract_and_generate.py
deleted file mode 100644
index 7c8dff02eff7..000000000000
--- a/pavelib/paver_tests/test_extract_and_generate.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""
-This test tests that i18n extraction (`paver i18n_extract -v`) works properly.
-"""
-
-
-import os
-import random
-import re
-import string
-import subprocess
-import sys
-from datetime import datetime, timedelta
-from unittest import TestCase
-
-from i18n import config, dummy, extract, generate
-from polib import pofile
-from pytz import UTC
-
-
-class TestGenerate(TestCase):
-    """
-    Tests functionality of i18n/generate.py
-    """
-    generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po')
-
-    @classmethod
-    def setUpClass(cls):
-        super().setUpClass()
-
-        sys.stderr.write(
-            "\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
-            "If you experience failures, please check that all instances of `gettext` and "
-            "`ngettext` are used correctly. You can also try running `paver i18n_extract -v` "
-            "locally for more detail.\n"
-        )
-        sys.stderr.write(
-            "\nExtracting i18n strings and generating dummy translations; "
-            "this may take a few minutes\n"
-        )
-        sys.stderr.flush()
-        extract.main(verbose=0)
-        dummy.main(verbose=0)
-
-    @classmethod
-    def tearDownClass(cls):
-        # Clear the Esperanto & RTL directories of any test artifacts
-        cmd = "git checkout conf/locale/eo conf/locale/rtl"
-        sys.stderr.write("Cleaning up dummy language directories: " + cmd)
-        sys.stderr.flush()
-        returncode = subprocess.call(cmd, shell=True)
-        assert returncode == 0
-        super().tearDownClass()
-
-    def setUp(self):
-        super().setUp()
-
-        self.configuration = config.Configuration()
-
-        # Subtract 1 second to help comparisons with file-modify time succeed,
-        # since os.path.getmtime() is not millisecond-accurate
-        self.start_time = datetime.now(UTC) - timedelta(seconds=1)
-
-    def test_merge(self):
-        """
-        Tests merge script on English source files.
-        """
-        filename = os.path.join(self.configuration.source_messages_dir, random_name())
-        generate.merge(self.configuration, self.configuration.source_locale, target=filename)
-        assert os.path.exists(filename)
-        os.remove(filename)
-
-    def test_main(self):
-        """
-        Runs generate.main() which should merge source files,
-        then compile all sources in all configured languages.
-        Validates output by checking all .mo files in all configured languages.
-        .mo files should exist, and be recently created (modified
-        after start of test suite)
-        """
-        # Change dummy_locales to only contain Esperanto.
-        self.configuration.dummy_locales = ['eo']
-
-        # Clear previous files.
-        for locale in self.configuration.dummy_locales:
-            for filename in ('django', 'djangojs'):
-                mofile = filename + '.mo'
-                path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
-                if os.path.exists(path):
-                    os.remove(path)
-
-        # Regenerate files.
-        generate.main(verbosity=0, strict=False)
-        for locale in self.configuration.dummy_locales:
-            for filename in ('django', 'djangojs'):
-                mofile = filename + '.mo'
-                path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
-                exists = os.path.exists(path)
-                assert exists, (f'Missing file in locale {locale}: {mofile}')
-                assert datetime.fromtimestamp(os.path.getmtime(path), UTC) >= \
-                    self.start_time, ('File not recently modified: %s' % path)
-        # Segmenting means that the merge headers don't work they way they
-        # used to, so don't make this check for now. I'm not sure if we'll
-        # get the merge header back eventually, or delete this code eventually.
-        # self.assert_merge_headers(locale)
-
-    def assert_merge_headers(self, locale):
-        """
-        This is invoked by test_main to ensure that it runs after
-        calling generate.main().
-
-        There should be exactly three merge comment headers
-        in our merged .po file. This counts them to be sure.
-        A merge comment looks like this:
-        # #-#-#-#-# django-partial.po (0.1a) #-#-#-#-#
-
-        """
-        path = os.path.join(self.configuration.get_messages_dir(locale), 'django.po')
-        pof = pofile(path)
-        pattern = re.compile('^#-#-#-#-#', re.M)
-        match = pattern.findall(pof.header)
-        assert len(match) == 3, (f'Found {len(match)} (should be 3) merge comments in the header for {path}')
-
-
-def random_name(size=6):
-    """Returns random filename as string, like test-4BZ81W"""
-    chars = string.ascii_uppercase + string.digits
-    return 'test-' + ''.join(random.choice(chars) for x in range(size))
diff --git a/pavelib/paver_tests/test_i18n.py b/pavelib/paver_tests/test_i18n.py
deleted file mode 100644
index 2d546955a046..000000000000
--- a/pavelib/paver_tests/test_i18n.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-Tests for pavelib/i18n.py.
-"""
-
-import os
-from unittest.mock import patch
-
-from paver.easy import call_task
-
-import pavelib.i18n
-from pavelib.paver_tests.utils import PaverTestCase
-
-
-class TestI18nDummy(PaverTestCase):
-    """
-    Test the Paver i18n_dummy task.
-    """
-    def setUp(self):
-        super().setUp()
-
-        # Mock the paver @needs decorator for i18n_extract
-        self._mock_paver_needs = patch.object(pavelib.i18n.i18n_extract, 'needs').start()
-        self._mock_paver_needs.return_value = 0
-
-        # Cleanup mocks
-        self.addCleanup(self._mock_paver_needs.stop)
-
-    def test_i18n_dummy(self):
-        """
-        Test the "i18n_dummy" task.
-        """
-        self.reset_task_messages()
-        os.environ['NO_PREREQ_INSTALL'] = "true"
-        call_task('pavelib.i18n.i18n_dummy')
-        assert self.task_messages == ['i18n_tool extract', 'i18n_tool dummy', 'i18n_tool generate']
diff --git a/pavelib/paver_tests/test_paver_get_quality_reports.py b/pavelib/paver_tests/test_paver_get_quality_reports.py
deleted file mode 100644
index 4f4e21cd0524..000000000000
--- a/pavelib/paver_tests/test_paver_get_quality_reports.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-Tests to ensure only the report files we want are returned as part of run_quality.
-"""
-
-
-import unittest
-
-from unittest.mock import patch
-
-import pavelib.quality
-
-
-class TestGetReportFiles(unittest.TestCase):
-    """
-    Ensure only the report files we want are returned as part of run_quality.
-    """
-
-    @patch('os.walk')
-    def test_get_pylint_reports(self, my_mock):
-
-        my_mock.return_value = iter([
-            ('/foo', (None,), ('pylint.report',)),
-            ('/bar', ('/baz',), ('pylint.report',))
-        ])
-        reports = pavelib.quality.get_violations_reports("pylint")
-        assert len(reports) == 2
-
-    @patch('os.walk')
-    def test_get_pep8_reports(self, my_mock):
-        my_mock.return_value = iter([
-            ('/foo', (None,), ('pep8.report',)),
-            ('/bar', ('/baz',), ('pep8.report',))
-        ])
-        reports = pavelib.quality.get_violations_reports("pep8")
-        assert len(reports) == 2
-
-    @patch('os.walk')
-    def test_get_pep8_reports_noisy(self, my_mock):
-        """ Several conditions: different report types, different files, multiple files """
-        my_mock.return_value = iter([
-            ('/foo', (None,), ('pep8.report',)),
-            ('/fooz', ('/ball',), ('pylint.report',)),
-            ('/fooz', ('/ball',), ('non.report',)),
-            ('/fooz', ('/ball',), ('lms.xml',)),
-            ('/bar', ('/baz',), ('pep8.report',))
-        ])
-        reports = pavelib.quality.get_violations_reports("pep8")
-        assert len(reports) == 2
diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py
index b2a91a3475ec..36d6dd59e172 100644
--- a/pavelib/paver_tests/test_paver_quality.py
+++ b/pavelib/paver_tests/test_paver_quality.py
@@ -32,31 +32,6 @@ def setUp(self):
         self.f.close()
         self.addCleanup(os.remove, self.f.name)
 
-    def test_pylint_parser_other_string(self):
-        with open(self.f.name, 'w') as f:
-            f.write("hello")
-        num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
-        assert num == 0
-
-    def test_pylint_parser_pep8(self):
-        # Pep8 violations should be ignored.
-        with open(self.f.name, 'w') as f:
-            f.write("foo/hello/test.py:304:15: E203 whitespace before ':'")
-        num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
-        assert num == 0
-
-    @file_data('pylint_test_list.json')
-    def test_pylint_parser_count_violations(self, value):
-        """
-        Tests:
-        - Different types of violations
-        - One violation covering multiple lines
-        """
-        with open(self.f.name, 'w') as f:
-            f.write(value)
-        num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
-        assert num == 1
-
     def test_pep8_parser(self):
         with open(self.f.name, 'w') as f:
             f.write("hello\nhithere")
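The parser tests deleted above covered `_count_pylint_violations`, which is itself removed from pavelib/quality.py further down in this diff. A standalone sketch of the behavior they pinned down, using the example lines from the deleted code:

    import re

    # Same pattern as the removed _count_pylint_violations helper.
    pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")

    pylint_line = ("xmodule/xmodule/tests/test_conditional.py:21: "
                   "[C0111(missing-docstring), DummySystem] Missing docstring")
    pep8_line = "foo/hello/test.py:304:15: E203 whitespace before ':'"

    # A pylint violation splits into four parts (file, line number, violation
    # name, details); a pep8-style line does not, so it is not counted.
    assert len(pylint_pattern.split(pylint_line)) == 4
    assert len(pylint_pattern.split(pep8_line)) != 4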
diff --git a/pavelib/paver_tests/test_prereqs.py b/pavelib/paver_tests/test_prereqs.py
deleted file mode 100644
index 4b542ae97b2e..000000000000
--- a/pavelib/paver_tests/test_prereqs.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-Tests covering the Open edX Paver prequisites installation workflow
-"""
-
-
-import os
-import unittest
-from unittest import mock
-from unittest.mock import patch
-
-import pytest
-from paver.easy import BuildFailure
-
-import pavelib.prereqs
-from pavelib.paver_tests.utils import PaverTestCase, fail_on_npm_install, unexpected_fail_on_npm_install
-
-
-class TestPaverPrereqInstall(unittest.TestCase):
-    """
-    Test the status of the NO_PREREQ_INSTALL variable, its presence and how
-    paver handles it.
-    """
-    def check_val(self, set_val, expected_val):
-        """
-        Verify that setting the variable to a certain value returns
-        the expected boolean for it.
-
-        As environment variables are only stored as strings, we have to cast
-        whatever it's set at to a boolean that does not violate expectations.
-        """
-        _orig_environ = dict(os.environ)
-        os.environ['NO_PREREQ_INSTALL'] = set_val
-        assert pavelib.prereqs.no_prereq_install() == expected_val,\
-            f'NO_PREREQ_INSTALL is set to {set_val}, but we read it as {expected_val}'
-
-        # Reset Environment back to original state
-        os.environ.clear()
-        os.environ.update(_orig_environ)
-
-    def test_no_prereq_install_true_lowercase(self):
-        """
-        Ensure that 'true' will be True.
-        """
-        self.check_val('true', True)
-
-    def test_no_prereq_install_false_lowercase(self):
-        """
-        Ensure that 'false' will be False.
-        """
-        self.check_val('false', False)
-
-    def test_no_prereq_install_true(self):
-        """
-        Ensure that 'True' will be True.
-        """
-        self.check_val('True', True)
-
-    def test_no_prereq_install_false(self):
-        """
-        Ensure that 'False' will be False.
-        """
-        self.check_val('False', False)
-
-    def test_no_prereq_install_0(self):
-        """
-        Ensure that '0' will be False.
-        """
-        self.check_val('0', False)
-
-    def test_no_prereq_install_1(self):
-        """
-        Ensure that '1' will be True.
-        """
-        self.check_val('1', True)
-
-
-class TestPaverNodeInstall(PaverTestCase):
-    """
-    Test node install logic
-    """
-
-    def setUp(self):
-        super().setUp()
-
-        # Ensure prereqs will be run
-        os.environ['NO_PREREQ_INSTALL'] = 'false'
-
-    def test_npm_install_with_subprocess_error(self):
-        """
-        Test an error in 'npm ci' execution
-        """
-        with patch('subprocess.Popen') as _mock_popen:
-            _mock_subprocess = mock.Mock()
-            attrs = {'wait': fail_on_npm_install}
-            _mock_subprocess.configure_mock(**attrs)
-            _mock_popen.return_value = _mock_subprocess
-            with pytest.raises(Exception):
-                pavelib.prereqs.node_prereqs_installation()
-
-        # npm ci will be called twice
-        assert _mock_popen.call_count == 2
-
-    def test_npm_install_called_once_when_successful(self):
-        """
-        Vanilla npm ci should only be calling npm ci one time
-        """
-        with patch('subprocess.Popen') as _mock_popen:
-            pavelib.prereqs.node_prereqs_installation()
-        # when there's no failure, npm ci is only called once
-        assert _mock_popen.call_count == 1
-
-    def test_npm_install_with_unexpected_subprocess_error(self):
-        """
-        If there's some other error, only call npm ci once, and raise a failure
-        """
-        with patch('subprocess.Popen') as _mock_popen:
-            _mock_popen.side_effect = unexpected_fail_on_npm_install
-            with pytest.raises(BuildFailure):
-                pavelib.prereqs.node_prereqs_installation()
-        assert _mock_popen.call_count == 1
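The deleted `check_val` cases document how `pavelib.prereqs.no_prereq_install()` reads its environment variable. A hypothetical re-implementation that satisfies exactly those assertions (a sketch, not the actual `pavelib.prereqs` source):

    import os

    def no_prereq_install():
        # 'true'/'True'/'1' mean True; 'false'/'False'/'0' mean False.
        return os.environ.get("NO_PREREQ_INSTALL", "False").lower() in ("true", "1")

    os.environ["NO_PREREQ_INSTALL"] = "1"
    assert no_prereq_install() is True
    os.environ["NO_PREREQ_INSTALL"] = "False"
    assert no_prereq_install() is False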
"xmodule_js" ' - '--ignore "geoip" --ignore "sass" ' - '--noinput {log_string}' -) -EXPECTED_CELERY_COMMAND = ( - "DJANGO_SETTINGS_MODULE=lms.envs.{settings} celery worker " - "--app=lms.celery:APP --beat --loglevel=INFO --pythonpath=." -) -EXPECTED_RUN_SERVER_COMMAND = ( - "python manage.py {system} --settings={settings} runserver --traceback --pythonpath=. 0.0.0.0:{port}" -) -EXPECTED_INDEX_COURSE_COMMAND = ( - "python manage.py {system} --settings={settings} reindex_course --setup" -) -EXPECTED_PRINT_SETTINGS_COMMAND = [ - "python manage.py lms --settings={settings} print_setting STATIC_ROOT WEBPACK_CONFIG_PATH 2>{log_file}", - "python manage.py cms --settings={settings} print_setting STATIC_ROOT 2>{log_file}", - "python manage.py cms --settings={settings} print_setting JS_ENV_EXTRA_CONFIG 2>{log_file} --json", -] -EXPECTED_WEBPACK_COMMAND = ( - "NODE_ENV={node_env} STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} " - "JS_ENV_EXTRA_CONFIG={js_env_extra_config} " - "$(npm bin)/webpack --config={webpack_config_path}" -) - - -@ddt.ddt -class TestPaverServerTasks(PaverTestCase): - """ - Test the Paver server tasks. - """ - - @ddt.data( - [{}], - [{"settings": "aws"}], - [{"asset-settings": "test_static_optimized"}], - [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}], - [{"fast": True}], - [{"port": 8030}], - ) - @ddt.unpack - def test_lms(self, options): - """ - Test the "devstack" task. - """ - self.verify_server_task("lms", options) - - @ddt.data( - [{}], - [{"settings": "aws"}], - [{"asset-settings": "test_static_optimized"}], - [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}], - [{"fast": True}], - [{"port": 8031}], - ) - @ddt.unpack - def test_studio(self, options): - """ - Test the "devstack" task. - """ - self.verify_server_task("studio", options) - - @ddt.data( - [{}], - [{"settings": "aws"}], - [{"asset-settings": "test_static_optimized"}], - [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}], - [{"fast": True}], - [{"optimized": True}], - [{"optimized": True, "fast": True}], - ) - @ddt.unpack - def test_devstack(self, server_options): - """ - Test the "devstack" task. - """ - options = server_options.copy() - is_optimized = options.get("optimized", False) - expected_settings = "devstack_optimized" if is_optimized else options.get("settings", Env.DEVSTACK_SETTINGS) - - # First test with LMS - options["system"] = "lms" - options["expected_messages"] = [ - EXPECTED_INDEX_COURSE_COMMAND.format( - system="cms", - settings=expected_settings, - ) - ] - self.verify_server_task("devstack", options) - - # Then test with Studio - options["system"] = "cms" - options["expected_messages"] = [ - EXPECTED_INDEX_COURSE_COMMAND.format( - system="cms", - settings=expected_settings, - ) - ] - self.verify_server_task("devstack", options) - - @ddt.data( - [{}], - [{"settings": "aws"}], - [{"asset_settings": "test_static_optimized"}], - [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}], - [{"fast": True}], - [{"optimized": True}], - [{"optimized": True, "fast": True}], - ) - @ddt.unpack - def test_run_all_servers(self, options): - """ - Test the "run_all_servers" task. - """ - self.verify_run_all_servers_task(options) - - @ddt.data( - [{}], - [{"settings": "aws"}], - ) - @ddt.unpack - def test_celery(self, options): - """ - Test the "celery" task. 
-
-    @ddt.data(
-        ["lms", {}],
-        ["lms", {"settings": "aws"}],
-        ["cms", {}],
-        ["cms", {"settings": "aws"}],
-    )
-    @ddt.unpack
-    def test_check_settings(self, system, options):
-        """
-        Test the "check_settings" task.
-        """
-        settings = options.get("settings", Env.DEVSTACK_SETTINGS)
-        call_task("pavelib.servers.check_settings", args=[system, settings])
-        assert self.task_messages ==\
-            ["echo 'import {system}.envs.{settings}' | python manage.py {system} "
-             "--settings={settings} shell --plain --pythonpath=.".format(system=system, settings=settings)]
-
-    def verify_server_task(self, task_name, options):
-        """
-        Verify the output of a server task.
-        """
-        log_string = options.get("log_string", "> /dev/null")
-        settings = options.get("settings", None)
-        asset_settings = options.get("asset-settings", None)
-        is_optimized = options.get("optimized", False)
-        is_fast = options.get("fast", False)
-        if task_name == "devstack":
-            system = options.get("system")
-        elif task_name == "studio":
-            system = "cms"
-        else:
-            system = "lms"
-        port = options.get("port", "8000" if system == "lms" else "8001")
-        self.reset_task_messages()
-        if task_name == "devstack":
-            args = ["studio" if system == "cms" else system]
-            if settings:
-                args.append(f"--settings={settings}")
-            if asset_settings:
-                args.append(f"--asset-settings={asset_settings}")
-            if is_optimized:
-                args.append("--optimized")
-            if is_fast:
-                args.append("--fast")
-            call_task("pavelib.servers.devstack", args=args)
-        else:
-            call_task(f"pavelib.servers.{task_name}", options=options)
-        expected_messages = options.get("expected_messages", [])
-        expected_settings = settings if settings else Env.DEVSTACK_SETTINGS
-        expected_asset_settings = asset_settings if asset_settings else expected_settings
-        if is_optimized:
-            expected_settings = "devstack_optimized"
-            expected_asset_settings = "test_static_optimized"
-        expected_collect_static = not is_fast and expected_settings != Env.DEVSTACK_SETTINGS
-        if not is_fast:
-            expected_messages.append("install npm_assets")
-            expected_messages.extend(
-                [c.format(settings=expected_asset_settings,
-                          log_file=Env.PRINT_SETTINGS_LOG_FILE) for c in EXPECTED_PRINT_SETTINGS_COMMAND]
-            )
-            expected_messages.append(EXPECTED_WEBPACK_COMMAND.format(
-                node_env="production",
-                static_root_lms=None,
-                static_root_cms=None,
-                js_env_extra_config=json.dumps("{}"),
-                webpack_config_path=None
-            ))
-            expected_messages.extend(self.expected_sass_commands(system=system, asset_settings=expected_asset_settings))
-        if expected_collect_static:
-            expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
-                system=system, asset_settings=expected_asset_settings, log_string=log_string
-            ))
-        expected_run_server_command = EXPECTED_RUN_SERVER_COMMAND.format(
-            system=system,
-            settings=expected_settings,
-            port=port,
-        )
-        expected_messages.append(expected_run_server_command)
-        assert self.task_messages == expected_messages
-
-    def verify_run_all_servers_task(self, options):
-        """
-        Verify the output of a server task.
-        """
-        log_string = options.get("log_string", "> /dev/null")
-        settings = options.get("settings", None)
-        asset_settings = options.get("asset_settings", None)
-        is_optimized = options.get("optimized", False)
-        is_fast = options.get("fast", False)
-        self.reset_task_messages()
-        call_task("pavelib.servers.run_all_servers", options=options)
-        expected_settings = settings if settings else Env.DEVSTACK_SETTINGS
-        expected_asset_settings = asset_settings if asset_settings else expected_settings
-        if is_optimized:
-            expected_settings = "devstack_optimized"
-            expected_asset_settings = "test_static_optimized"
-        expected_collect_static = not is_fast and expected_settings != Env.DEVSTACK_SETTINGS
-        expected_messages = []
-        if not is_fast:
-            expected_messages.append("install npm_assets")
-            expected_messages.extend(
-                [c.format(settings=expected_asset_settings,
-                          log_file=Env.PRINT_SETTINGS_LOG_FILE) for c in EXPECTED_PRINT_SETTINGS_COMMAND]
-            )
-            expected_messages.append(EXPECTED_WEBPACK_COMMAND.format(
-                node_env="production",
-                static_root_lms=None,
-                static_root_cms=None,
-                js_env_extra_config=json.dumps("{}"),
-                webpack_config_path=None
-            ))
-            expected_messages.extend(self.expected_sass_commands(asset_settings=expected_asset_settings))
-        if expected_collect_static:
-            expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
-                system="lms", asset_settings=expected_asset_settings, log_string=log_string
-            ))
-            expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
-                system="cms", asset_settings=expected_asset_settings, log_string=log_string
-            ))
-        expected_messages.append(
-            EXPECTED_RUN_SERVER_COMMAND.format(
-                system="lms",
-                settings=expected_settings,
-                port=8000,
-            )
-        )
-        expected_messages.append(
-            EXPECTED_RUN_SERVER_COMMAND.format(
-                system="cms",
-                settings=expected_settings,
-                port=8001,
-            )
-        )
-        expected_messages.append(EXPECTED_CELERY_COMMAND.format(settings="devstack_with_worker"))
-        assert self.task_messages == expected_messages
-
-    def expected_sass_commands(self, system=None, asset_settings="test_static_optimized"):
-        """
-        Returns the expected SASS commands for the specified system.
-        """
-        expected_sass_commands = []
-        if system != 'cms':
-            expected_sass_commands.extend(EXPECTED_LMS_SASS_COMMAND)
-        if system != 'lms':
-            expected_sass_commands.extend(EXPECTED_CMS_SASS_COMMAND)
-        return [command.format(asset_settings=asset_settings) for command in expected_sass_commands]
diff --git a/pavelib/quality.py b/pavelib/quality.py
index 2a5c686b62a3..774179f45048 100644
--- a/pavelib/quality.py
+++ b/pavelib/quality.py
@@ -10,8 +10,6 @@
 
 from paver.easy import BuildFailure, cmdopts, needs, sh, task
 
-from openedx.core.djangolib.markup import HTML
-
 from .utils.envs import Env
 from .utils.timer import timed
 
@@ -79,143 +77,6 @@ def top_python_dirs(dirname):
     return top_dirs
 
-
-@task
-@needs('pavelib.prereqs.install_python_prereqs')
-@cmdopts([
-    ("system=", "s", "System to act on"),
-])
-@timed
-def find_fixme(options):
-    """
-    Run pylint on system code, only looking for fixme items.
-    """
-    num_fixme = 0
-    systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
-
-    for system in systems:
-        # Directory to put the pylint report in.
-        # This makes the folder if it doesn't already exist.
-        report_dir = (Env.REPORT_DIR / system).makedirs_p()
-
-        apps_list = ' '.join(top_python_dirs(system))
-
-        cmd = (
-            "pylint --disable all --enable=fixme "
-            "--output-format=parseable {apps} "
-            "> {report_dir}/pylint_fixme.report".format(
-                apps=apps_list,
-                report_dir=report_dir
-            )
-        )
-
-        sh(cmd, ignore_error=True)
-
-        num_fixme += _count_pylint_violations(
-            f"{report_dir}/pylint_fixme.report")
-
-    print("Number of pylint fixmes: " + str(num_fixme))
-
-
-def _get_pylint_violations(systems=ALL_SYSTEMS.split(','), errors_only=False, clean=True):
-    """
-    Runs pylint. Returns a tuple of (number_of_violations, list_of_violations)
-    where list_of_violations is a list of all pylint violations found, separated
-    by new lines.
-    """
-    # Make sure the metrics subdirectory exists
-    Env.METRICS_DIR.makedirs_p()
-
-    num_violations = 0
-    violations_list = []
-
-    for system in systems:
-        # Directory to put the pylint report in.
-        # This makes the folder if it doesn't already exist.
-        report_dir = (Env.REPORT_DIR / system).makedirs_p()
-
-        flags = []
-        if errors_only:
-            flags.append("--errors-only")
-
-        apps_list = ' '.join(top_python_dirs(system))
-
-        system_report = report_dir / 'pylint.report'
-        if clean or not system_report.exists():
-            sh(
-                "export DJANGO_SETTINGS_MODULE={env}.envs.test; "
-                "pylint {flags} --output-format=parseable {apps} "
-                "> {report_dir}/pylint.report".format(
-                    flags=" ".join(flags),
-                    apps=apps_list,
-                    report_dir=report_dir,
-                    env=('cms' if system == 'cms' else 'lms')
-                ),
-                ignore_error=True,
-            )
-
-        num_violations += _count_pylint_violations(system_report)
-        with open(system_report) as report_contents:
-            violations_list.extend(report_contents)
-
-    # Print number of violations to log
-    return num_violations, violations_list
-
-
-@task
-@needs('pavelib.prereqs.install_python_prereqs')
-@cmdopts([
-    ("system=", "s", "System to act on"),
-    ("errors", "e", "Check for errors only"),
-])
-@timed
-def run_pylint(options):
-    """
-    Run pylint on system code. When violations limit is passed in,
-    fail the task if too many violations are found.
-    """
-    errors = getattr(options, 'errors', False)
-    systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
-    result_name = 'pylint_{}'.format('_'.join(systems))
-
-    num_violations, violations_list = _get_pylint_violations(systems, errors)
-
-    # Print number of violations to log
-    violations_count_str = "Number of pylint violations: " + str(num_violations)
-    print(violations_count_str)
-
-    # Also write the number of violations to a file
-    with open(Env.METRICS_DIR / "pylint", "w") as f:
-        f.write(violations_count_str)
-
-    # Fail if there are violations found in pylint report.
-    if num_violations > 0:
-        failure_message = "FAILURE: Pylint violations found.\n"
-        for violation in violations_list:
-            failure_message += violation  # lint-amnesty, pylint: disable=consider-using-join
-        fail_quality(result_name, failure_message)
-    else:
-        write_junit_xml(result_name)
-
-
-def _count_pylint_violations(report_file):
-    """
-    Parses a pylint report line-by-line and determines the number of violations reported
-    """
-    num_violations_report = 0
-    # An example string:
-    # xmodule/xmodule/tests/test_conditional.py:21: [C0111(missing-docstring), DummySystem] Missing docstring
-    # More examples can be found in the unit tests for this method
-    pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")
-
-    for line in open(report_file):
-        violation_list_for_line = pylint_pattern.split(line)
-        # If the string is parsed into four parts, then we've found a violation. Example of split parts:
-        # test file, line number, violation name, violation details
-        if len(violation_list_for_line) == 4:
-            num_violations_report += 1
-    return num_violations_report
-
-
 def _get_pep8_violations(clean=True):
     """
     Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string)
@@ -248,7 +109,6 @@ def _pep8_violations(report_file):
 
 
 @task
-@needs('pavelib.prereqs.install_python_prereqs')
 @cmdopts([
     ("system=", "s", "System to act on"),
 ])
@@ -740,70 +600,3 @@ def check_keywords():
             report_path
         )
     )
-
-
-@task
-@needs('pavelib.prereqs.install_python_prereqs')
-@timed
-# pylint: disable=too-many-statements
-def run_quality():
-    """
-    Build the html quality reports, and print the reports to the console.
-    """
-    failure_reasons = []
-
-    def _lint_output(linter, count, violations_list, is_html=False):
-        """
-        Given a count & list of pylint violations, pretty-print the output.
-        If `is_html`, will print out with HTML markup.
-        """
-        if is_html:
-            lines = ['<body>\n']
-            sep = '-------------