diff --git a/docs/markdown/snippets/test_dependencies.md b/docs/markdown/snippets/test_dependencies.md
new file mode 100644
index 000000000000..f69efa5cea9e
--- /dev/null
+++ b/docs/markdown/snippets/test_dependencies.md
@@ -0,0 +1,29 @@
+## Test targets no longer built by default
+
+`meson test` and the `ninja all` rule have been reworked to no longer force
+unnecessary rebuilds.
+
+`meson test` was invoking `ninja all` due to a bug if the chosen set of tests
+had no build dependencies. The behavior is now the same as when tests do have
+build dependencies, i.e. to only build the actual set of targets that are used
+by the test. This change could cause failures when upgrading to Meson 1.7.0, if
+the dependencies are not specified correctly in meson.build. Using `ninja test`
+has always been guaranteed to "do the right thing" and rebuild `all` as well;
+this continues to work.
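+
+For example, if a test runs a built program through a wrapper script, declare
+the program as a dependency of the test, either by passing the build target
+itself as an argument or by listing it explicitly in `depends:`. The following
+is an illustrative sketch (the `runner.py`, `main.c` and `prog` names are
+placeholders):
+
+```meson
+runner = find_program('runner.py')
+prog = executable('prog', 'main.c')
+
+# The build target is passed directly, so `meson test` rebuilds it as needed.
+test('with-target-arg', runner, args: prog)
+
+# Only a path is passed, so an explicit `depends:` keeps the rebuild tracked.
+test('with-explicit-depends', runner, args: prog.full_path(), depends: prog)
+```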
+
+`ninja all` does not rebuild all tests anymore; it should be noted that this
+change means test programs are no longer guaranteed to have been built,
+depending on whether those test programs were *also* defined to build by
+default / marked as installable. This avoids building test-only binaries as
+part of installing the project (`ninja && ninja install`), which is unnecessary
+and has no use case.
+
+Some users might have been relying on the "all" target building test
+dependencies in combination with `meson test --no-rebuild` in order to skip
+calling out to ninja when running tests. This might break with this change
+because, when given `--no-rebuild`, Meson provides no guarantee that test
+dependencies are present and up to date. The recommended workflow is to use
+either `ninja test` or `ninja && meson test` but, if you wish to build test
+programs and dependencies in a separate stage, you can use for example `ninja
+all meson-test-prereq meson-benchmark-prereq` before `meson test --no-rebuild`.
+These prereq targets have been available since meson 0.63.0.
diff --git a/mesonbuild/backend/ninjabackend.py b/mesonbuild/backend/ninjabackend.py
index 7244ea90b84f..7e353c0afd46 100644
--- a/mesonbuild/backend/ninjabackend.py
+++ b/mesonbuild/backend/ninjabackend.py
@@ -1327,7 +1327,7 @@ def generate_tests(self) -> None:
             cmd += ['--no-stdsplit']
         if self.environment.coredata.get_option(OptionKey('errorlogs')):
             cmd += ['--print-errorlogs']
-        elem = self.create_phony_target('test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
+        elem = self.create_phony_target('test', 'CUSTOM_COMMAND', ['all', 'meson-test-prereq', 'PHONY'])
         elem.add_item('COMMAND', cmd)
         elem.add_item('DESC', 'Running all tests')
         elem.add_item('pool', 'console')
@@ -1337,7 +1337,7 @@ def generate_tests(self) -> None:
         cmd = self.environment.get_build_command(True) + [
             'test', '--benchmark', '--logbase',
             'benchmarklog', '--num-processes=1', '--no-rebuild']
-        elem = self.create_phony_target('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
+        elem = self.create_phony_target('benchmark', 'CUSTOM_COMMAND', ['all', 'meson-benchmark-prereq', 'PHONY'])
         elem.add_item('COMMAND', cmd)
         elem.add_item('DESC', 'Running benchmark suite')
         elem.add_item('pool', 'console')
@@ -3740,10 +3740,6 @@ def generate_ending(self) -> None:
                            ('meson-test-prereq', self.get_testlike_targets()),
                            ('meson-benchmark-prereq', self.get_testlike_targets(True))]:
             targetlist = []
-            # These must also be built by default.
-            # XXX: Sometime in the future these should be built only before running tests.
-            if targ == 'all':
-                targetlist.extend(['meson-test-prereq', 'meson-benchmark-prereq'])
             for t in deps.values():
                 # Add the first output of each target to the 'all' target so that
                 # they are all built
diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index d0added78f8c..39970e530872 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -1825,9 +1825,10 @@ def doit(self) -> int:
             raise RuntimeError('Test harness object can only be used once.')
         self.is_run = True
         tests = self.get_tests()
+        rebuild_only_tests = tests if self.options.args else []
         if not tests:
             return 0
-        if not self.options.no_rebuild and not rebuild_deps(self.ninja, self.options.wd, tests):
+        if not self.options.no_rebuild and not rebuild_deps(self.ninja, self.options.wd, rebuild_only_tests, self.options.benchmark):
             # We return 125 here in case the build failed.
             # The reason is that exit code 125 tells `git bisect run` that the current
             # commit should be skipped. Thus users can directly use `meson test` to
@@ -2140,7 +2141,7 @@ def list_tests(th: TestHarness) -> bool:
         print(th.get_pretty_suite(t))
     return not tests
 
-def rebuild_deps(ninja: T.List[str], wd: str, tests: T.List[TestSerialisation]) -> bool:
+def rebuild_deps(ninja: T.List[str], wd: str, tests: T.List[TestSerialisation], benchmark: bool) -> bool:
     def convert_path_to_target(path: str) -> str:
         path = os.path.relpath(path, wd)
         if os.sep != '/':
@@ -2149,23 +2150,34 @@ def convert_path_to_target(path: str) -> str:
 
     assert len(ninja) > 0
 
-    targets_file = os.path.join(wd, 'meson-info/intro-targets.json')
-    with open(targets_file, encoding='utf-8') as fp:
-        targets_info = json.load(fp)
-
-    depends: T.Set[str] = set()
     targets: T.Set[str] = set()
-    intro_targets: T.Dict[str, T.List[str]] = {}
-    for target in targets_info:
-        intro_targets[target['id']] = [
-            convert_path_to_target(f)
-            for f in target['filename']]
-    for t in tests:
-        for d in t.depends:
-            if d in depends:
-                continue
-            depends.update(d)
-            targets.update(intro_targets[d])
+    if tests:
+        targets_file = os.path.join(wd, 'meson-info/intro-targets.json')
+        with open(targets_file, encoding='utf-8') as fp:
+            targets_info = json.load(fp)
+
+        depends: T.Set[str] = set()
+        intro_targets: T.Dict[str, T.List[str]] = {}
+        for target in targets_info:
+            intro_targets[target['id']] = [
+                convert_path_to_target(f)
+                for f in target['filename']]
+        for t in tests:
+            for d in t.depends:
+                if d in depends:
+                    continue
+                depends.update(d)
+                targets.update(intro_targets[d])
+    else:
+        if benchmark:
+            targets.add('meson-benchmark-prereq')
+        else:
+            targets.add('meson-test-prereq')
+
+    if not targets:
+        # We want to build minimal deps, but if the subset of targets have no
+        # deps then ninja falls back to 'all'.
+        return True
 
     ret = subprocess.run(ninja + ['-C', wd] + sorted(targets)).returncode
     if ret != 0:
diff --git a/run_project_tests.py b/run_project_tests.py
index ab34c27f21d2..0dc287191f21 100755
--- a/run_project_tests.py
+++ b/run_project_tests.py
@@ -712,7 +712,14 @@ def _run_test(test: TestDef,
     # Build with subprocess
     def build_step() -> None:
         build_start = time.time()
-        pc, o, _ = Popen_safe(compile_commands + dir_args, cwd=test_build_dir, stderr=subprocess.STDOUT)
+
+        if backend is Backend.ninja:
+            # FIXME: meson test inprocess does not handle running ninja via StringIO
+            targets = ['all', 'meson-test-prereq', 'meson-benchmark-prereq']
+        else:
+            targets = []
+
+        pc, o, _ = Popen_safe(compile_commands + dir_args + targets, cwd=test_build_dir, stderr=subprocess.STDOUT)
         testresult.add_step(BuildStep.build, o, '', '', time.time() - build_start)
         if should_fail == 'build':
             if pc.returncode != 0:
diff --git a/test cases/unit/106 underspecified mtest/main.c b/test cases/unit/106 underspecified mtest/main.c
new file mode 100644
index 000000000000..8842fc1226ef
--- /dev/null
+++ b/test cases/unit/106 underspecified mtest/main.c
@@ -0,0 +1 @@
+int main(void) { return 0 ; }
diff --git a/test cases/unit/106 underspecified mtest/meson.build b/test cases/unit/106 underspecified mtest/meson.build
new file mode 100644
index 000000000000..c0a88d6770c8
--- /dev/null
+++ b/test cases/unit/106 underspecified mtest/meson.build
@@ -0,0 +1,8 @@
+project('underspecified deps', 'c')
+
+runner = find_program('runner.py')
+exe1 = executable('main1', 'main.c')
+exe2 = executable('main2', 'main.c')
+
+test('runner-with-exedep', runner, args: exe1)
+test('runner-without-dep', runner, args: exe2.full_path())
diff --git a/test cases/unit/106 underspecified mtest/runner.py b/test cases/unit/106 underspecified mtest/runner.py
new file mode 100755
index 000000000000..9fb9ac40b94e
--- /dev/null
+++ b/test cases/unit/106 underspecified mtest/runner.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+
+import sys, subprocess
+
+subprocess.run(sys.argv[1:], check=True)
diff --git a/unittests/platformagnostictests.py b/unittests/platformagnostictests.py
index 2fb75f284993..aa38b07e0a2e 100644
--- a/unittests/platformagnostictests.py
+++ b/unittests/platformagnostictests.py
@@ -6,6 +6,7 @@
 import json
 import os
 import pickle
+import subprocess
 import tempfile
 import subprocess
 import textwrap
@@ -77,7 +78,7 @@ def write_file(code: str):
             with tempfile.NamedTemporaryFile('w', dir=self.builddir, encoding='utf-8', delete=False) as f:
                 f.write(code)
                 return f.name
-        
+
         fname = write_file("option('intminmax', type: 'integer', value: 10, min: 0, max: 5)")
         self.assertRaisesRegex(MesonException, 'Value 10 for option "intminmax" is more than maximum value 5.',
                                interp.process, fname)
@@ -85,7 +86,7 @@ def write_file(code: str):
         fname = write_file("option('array', type: 'array', choices : ['one', 'two', 'three'], value : ['one', 'four'])")
         self.assertRaisesRegex(MesonException, 'Value "four" for option "array" is not in allowed choices: "one, two, three"',
                                interp.process, fname)
-        
+
         fname = write_file("option('array', type: 'array', choices : ['one', 'two', 'three'], value : ['four', 'five', 'six'])")
         self.assertRaisesRegex(MesonException, 'Values "four, five, six" for option "array" are not in allowed choices: "one, two, three"',
                                interp.process, fname)
@@ -325,7 +326,7 @@ def test_editorconfig_match_path(self):
             ('a.txt', '{a,b,c}.txt', True),
             ('a.txt', '*.{txt,tex,cpp}', True),
             ('a.hpp', '*.{txt,tex,cpp}', False),
-        
+
             ('a1.txt', 'a{0..9}.txt', True),
             ('a001.txt', 'a{0..9}.txt', True),
             ('a-1.txt', 'a{-10..10}.txt', True),
@@ -375,14 +376,14 @@ def test_format_empty_file(self) -> None:
         for code in ('', '\n'):
             formatted = formatter.format(code, Path())
             self.assertEqual('\n', formatted)
-        
+
     def test_format_indent_comment_in_brackets(self) -> None:
         """Ensure comments in arrays and dicts are correctly indented"""
         formatter = Formatter(None, use_editor_config=False, fetch_subdirs=False)
         code = 'a = [\n    # comment\n]\n'
         formatted = formatter.format(code, Path())
         self.assertEqual(code, formatted)
-        
+
         code = 'a = [\n    # comment\n    1,\n]\n'
         formatted = formatter.format(code, Path())
         self.assertEqual(code, formatted)
@@ -390,7 +391,7 @@ def test_format_indent_comment_in_brackets(self) -> None:
         code = 'a = {\n    # comment\n}\n'
         formatted = formatter.format(code, Path())
         self.assertEqual(code, formatted)
-        
+
     def test_error_configuring_subdir(self):
         testdir = os.path.join(self.common_test_dir, '152 index customtarget')
         out = self.init(os.path.join(testdir, 'subdir'), allow_fail=True)
@@ -508,3 +509,17 @@ def test_configure_new_option_subproject(self) -> None:
             f.write("option('new_option', type : 'boolean', value : false)")
         self.setconf('-Dsubproject:new_option=true')
         self.assertEqual(self.getconf('subproject:new_option'), True)
+
+    def test_mtest_rebuild_deps(self):
+        testdir = os.path.join(self.unit_test_dir, '106 underspecified mtest')
+        self.init(testdir)
+
+        with self.assertRaises(subprocess.CalledProcessError):
+            self._run(self.mtest_command)
+        self.clean()
+
+        with self.assertRaises(subprocess.CalledProcessError):
+            self._run(self.mtest_command + ['runner-without-dep'])
+        self.clean()
+
+        self._run(self.mtest_command + ['runner-with-exedep'])