
mtest: fix rebuilding correct target list before running tests #10413

Merged · 3 commits · Jan 8, 2025
29 changes: 29 additions & 0 deletions docs/markdown/snippets/test_dependencies.md
@@ -0,0 +1,29 @@
## Test targets no longer built by default

`meson test` and the `ninja all` rule have been reworked to no longer force
unnecessary rebuilds.

`meson test` was invoking `ninja all` due to a bug if the chosen set of tests
had no build dependencies. The behavior is now the same as when tests do have
build dependencies, i.e. only the targets actually used by the selected tests
are built. This change could cause failures when upgrading to Meson 1.7.0 if
the dependencies are not specified correctly in meson.build. Using `ninja test`
has always been guaranteed to "do the right thing" and rebuild `all` as well;
this continues to work.

`ninja all` no longer rebuilds all tests; note that, as a result, test programs
are no longer guaranteed to have been built, unless they were *also* defined to
build by default or marked as installable. This avoids building test-only
binaries as part of installing the project (`ninja && ninja install`), which is
unnecessary and has no use case.

Some users might have been relying on the "all" target building test
dependencies in combination with `meson test --no-rebuild` in order to skip
calling out to ninja when running tests. This might break with this change
because, when given `--no-rebuild`, Meson provides no guarantee that test
dependencies are present and up to date. The recommended workflow is to use
either `ninja test` or `ninja && meson test`; if you wish to build test
programs and dependencies in a separate stage, you can, for example, run
`ninja all meson-test-prereq meson-benchmark-prereq` before
`meson test --no-rebuild`. These prereq targets have been available since
Meson 0.63.0.
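
A minimal sketch of the separate-stage workflow described in the snippet above,
written as a small Python driver; the `builddir` path is an assumption, while
the `ninja` and `meson test` invocations come straight from the text:

```python
import subprocess

builddir = 'builddir'  # hypothetical build directory created with `meson setup builddir`

# Stage 1: build everything the tests and benchmarks will need.
# The meson-*-prereq phony targets have existed since Meson 0.63.0.
subprocess.run(
    ['ninja', '-C', builddir, 'all', 'meson-test-prereq', 'meson-benchmark-prereq'],
    check=True)

# Stage 2: run the tests without letting meson invoke ninja again.
subprocess.run(['meson', 'test', '-C', builddir, '--no-rebuild'], check=True)
```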
8 changes: 2 additions & 6 deletions mesonbuild/backend/ninjabackend.py
@@ -1327,7 +1327,7 @@ def generate_tests(self) -> None:
cmd += ['--no-stdsplit']
if self.environment.coredata.get_option(OptionKey('errorlogs')):
cmd += ['--print-errorlogs']
elem = self.create_phony_target('test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem = self.create_phony_target('test', 'CUSTOM_COMMAND', ['all', 'meson-test-prereq', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests')
elem.add_item('pool', 'console')
@@ -1337,7 +1337,7 @@ def generate_tests(self) -> None:
cmd = self.environment.get_build_command(True) + [
'test', '--benchmark', '--logbase',
'benchmarklog', '--num-processes=1', '--no-rebuild']
elem = self.create_phony_target('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem = self.create_phony_target('benchmark', 'CUSTOM_COMMAND', ['all', 'meson-benchmark-prereq', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite')
elem.add_item('pool', 'console')
@@ -3740,10 +3740,6 @@ def generate_ending(self) -> None:
('meson-test-prereq', self.get_testlike_targets()),
('meson-benchmark-prereq', self.get_testlike_targets(True))]:
targetlist = []
# These must also be built by default.
# XXX: Sometime in the future these should be built only before running tests.
if targ == 'all':
targetlist.extend(['meson-test-prereq', 'meson-benchmark-prereq'])
for t in deps.values():
# Add the first output of each target to the 'all' target so that
# they are all built
48 changes: 30 additions & 18 deletions mesonbuild/mtest.py
@@ -1825,9 +1825,10 @@ def doit(self) -> int:
raise RuntimeError('Test harness object can only be used once.')
self.is_run = True
tests = self.get_tests()
rebuild_only_tests = tests if self.options.args else []
Member: nitpick: I think this line could go after the `if not tests: return 0`

if not tests:
return 0
if not self.options.no_rebuild and not rebuild_deps(self.ninja, self.options.wd, tests):
if not self.options.no_rebuild and not rebuild_deps(self.ninja, self.options.wd, rebuild_only_tests, self.options.benchmark):
# We return 125 here in case the build failed.
# The reason is that exit code 125 tells `git bisect run` that the current
# commit should be skipped. Thus users can directly use `meson test` to
@@ -2140,7 +2141,7 @@ def list_tests(th: TestHarness) -> bool:
print(th.get_pretty_suite(t))
return not tests

def rebuild_deps(ninja: T.List[str], wd: str, tests: T.List[TestSerialisation]) -> bool:
def rebuild_deps(ninja: T.List[str], wd: str, tests: T.List[TestSerialisation], benchmark: bool) -> bool:
def convert_path_to_target(path: str) -> str:
path = os.path.relpath(path, wd)
if os.sep != '/':
@@ -2149,23 +2150,34 @@ def convert_path_to_target(path: str) -> str:

assert len(ninja) > 0

targets_file = os.path.join(wd, 'meson-info/intro-targets.json')
with open(targets_file, encoding='utf-8') as fp:
targets_info = json.load(fp)

depends: T.Set[str] = set()
targets: T.Set[str] = set()
intro_targets: T.Dict[str, T.List[str]] = {}
for target in targets_info:
intro_targets[target['id']] = [
convert_path_to_target(f)
for f in target['filename']]
for t in tests:
for d in t.depends:
if d in depends:
continue
depends.update(d)
targets.update(intro_targets[d])
if tests:
targets_file = os.path.join(wd, 'meson-info/intro-targets.json')
with open(targets_file, encoding='utf-8') as fp:
targets_info = json.load(fp)

depends: T.Set[str] = set()
intro_targets: T.Dict[str, T.List[str]] = {}
for target in targets_info:
intro_targets[target['id']] = [
convert_path_to_target(f)
for f in target['filename']]
for t in tests:
for d in t.depends:
if d in depends:
continue
depends.update(d)
targets.update(intro_targets[d])
else:
if benchmark:
targets.add('meson-benchmark-prereq')
else:
targets.add('meson-test-prereq')

if not targets:
# We want to build minimal deps, but if the subset of targets have no
# deps then ninja falls back to 'all'.
return True

ret = subprocess.run(ninja + ['-C', wd] + sorted(targets)).returncode
if ret != 0:
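
Condensed, the new `rebuild_deps()` behaves roughly as in the sketch below. This
is a simplified restatement of the diff, not the actual implementation: error
handling, Windows path-separator conversion, and the `TestSerialisation` type
are omitted, and each entry of `named_tests` is only assumed to expose a
`depends` list of target ids.

```python
import json
import os
import subprocess
import typing as T

def rebuild_targets(ninja: T.List[str], wd: str,
                    named_tests: T.List[T.Any], benchmark: bool) -> bool:
    targets: T.Set[str] = set()
    if named_tests:
        # Specific tests were requested: build only their introspected dependencies.
        targets_file = os.path.join(wd, 'meson-info/intro-targets.json')
        with open(targets_file, encoding='utf-8') as fp:
            intro = {t['id']: t['filename'] for t in json.load(fp)}
        for test in named_tests:
            for dep in test.depends:
                targets.update(os.path.relpath(f, wd) for f in intro[dep])
    else:
        # No explicit selection: build the phony target covering every test (or benchmark).
        targets.add('meson-benchmark-prereq' if benchmark else 'meson-test-prereq')

    if not targets:
        # Nothing to build; passing no targets at all would make ninja rebuild 'all'.
        return True
    return subprocess.run(ninja + ['-C', wd] + sorted(targets)).returncode == 0
```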
9 changes: 8 additions & 1 deletion run_project_tests.py
@@ -712,7 +712,14 @@ def _run_test(test: TestDef,
# Build with subprocess
def build_step() -> None:
build_start = time.time()
pc, o, _ = Popen_safe(compile_commands + dir_args, cwd=test_build_dir, stderr=subprocess.STDOUT)

if backend is Backend.ninja:
# FIXME: meson test inprocess does not handle running ninja via StringIO
targets = ['all', 'meson-test-prereq', 'meson-benchmark-prereq']
else:
targets = []

pc, o, _ = Popen_safe(compile_commands + dir_args + targets, cwd=test_build_dir, stderr=subprocess.STDOUT)
testresult.add_step(BuildStep.build, o, '', '', time.time() - build_start)
if should_fail == 'build':
if pc.returncode != 0:
1 change: 1 addition & 0 deletions test cases/unit/106 underspecified mtest/main.c
@@ -0,0 +1 @@
int main(void) { return 0 ; }
8 changes: 8 additions & 0 deletions test cases/unit/106 underspecified mtest/meson.build
@@ -0,0 +1,8 @@
project('underspecified deps', 'c')

runner = find_program('runner.py')
exe1 = executable('main1', 'main.c')
exe2 = executable('main2', 'main.c')
Member: For completeness one of these two should have `build_by_default: false`.

Member (Author): That's not really what I'm testing here, I'm testing that `meson test` needs to build "all". I'm not testing what is included in "all".

Member: But it is relevant for this. Rebuilding needs to work even if the end user has tagged a test executable as `build_by_default: false`, so we should test it.

test('runner-with-exedep', runner, args: exe1)
test('runner-without-dep', runner, args: exe2.full_path())
5 changes: 5 additions & 0 deletions test cases/unit/106 underspecified mtest/runner.py
@@ -0,0 +1,5 @@
#!/usr/bin/env python3

import sys, subprocess

subprocess.run(sys.argv[1:], check=True)
27 changes: 21 additions & 6 deletions unittests/platformagnostictests.py
@@ -6,6 +6,7 @@
import json
import os
import pickle
import subprocess
import tempfile
import subprocess
import textwrap
@@ -77,15 +78,15 @@ def write_file(code: str):
with tempfile.NamedTemporaryFile('w', dir=self.builddir, encoding='utf-8', delete=False) as f:
f.write(code)
return f.name

fname = write_file("option('intminmax', type: 'integer', value: 10, min: 0, max: 5)")
self.assertRaisesRegex(MesonException, 'Value 10 for option "intminmax" is more than maximum value 5.',
interp.process, fname)

fname = write_file("option('array', type: 'array', choices : ['one', 'two', 'three'], value : ['one', 'four'])")
self.assertRaisesRegex(MesonException, 'Value "four" for option "array" is not in allowed choices: "one, two, three"',
interp.process, fname)

fname = write_file("option('array', type: 'array', choices : ['one', 'two', 'three'], value : ['four', 'five', 'six'])")
self.assertRaisesRegex(MesonException, 'Values "four, five, six" for option "array" are not in allowed choices: "one, two, three"',
interp.process, fname)
@@ -325,7 +326,7 @@ def test_editorconfig_match_path(self):
('a.txt', '{a,b,c}.txt', True),
('a.txt', '*.{txt,tex,cpp}', True),
('a.hpp', '*.{txt,tex,cpp}', False),

('a1.txt', 'a{0..9}.txt', True),
('a001.txt', 'a{0..9}.txt', True),
('a-1.txt', 'a{-10..10}.txt', True),
@@ -375,22 +376,22 @@ def test_format_empty_file(self) -> None:
for code in ('', '\n'):
formatted = formatter.format(code, Path())
self.assertEqual('\n', formatted)

def test_format_indent_comment_in_brackets(self) -> None:
"""Ensure comments in arrays and dicts are correctly indented"""
formatter = Formatter(None, use_editor_config=False, fetch_subdirs=False)
code = 'a = [\n # comment\n]\n'
formatted = formatter.format(code, Path())
self.assertEqual(code, formatted)

code = 'a = [\n # comment\n 1,\n]\n'
formatted = formatter.format(code, Path())
self.assertEqual(code, formatted)

code = 'a = {\n # comment\n}\n'
formatted = formatter.format(code, Path())
self.assertEqual(code, formatted)

def test_error_configuring_subdir(self):
testdir = os.path.join(self.common_test_dir, '152 index customtarget')
out = self.init(os.path.join(testdir, 'subdir'), allow_fail=True)
@@ -508,3 +509,17 @@ def test_configure_new_option_subproject(self) -> None:
f.write("option('new_option', type : 'boolean', value : false)")
self.setconf('-Dsubproject:new_option=true')
self.assertEqual(self.getconf('subproject:new_option'), True)

def test_mtest_rebuild_deps(self):
testdir = os.path.join(self.unit_test_dir, '106 underspecified mtest')
self.init(testdir)

with self.assertRaises(subprocess.CalledProcessError):
self._run(self.mtest_command)
self.clean()

with self.assertRaises(subprocess.CalledProcessError):
self._run(self.mtest_command + ['runner-without-dep'])
self.clean()

self._run(self.mtest_command + ['runner-with-exedep'])
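
To make the intent of the new unit test concrete, here is a rough manual
reproduction of the `runner-without-dep` failure; the `build` directory name is
an assumption:

```python
import subprocess

srcdir = 'test cases/unit/106 underspecified mtest'
subprocess.run(['meson', 'setup', 'build', srcdir], check=True)

# runner-without-dep passes main2 by path only, so nothing declares a build
# dependency on it; since `meson test` no longer builds 'all', main2 is never
# built and the test run fails.
res = subprocess.run(['meson', 'test', '-C', 'build', 'runner-without-dep'])
assert res.returncode != 0
```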