diff --git a/tests/conftest.py b/tests/conftest.py
index 807e0203..6f20dc20 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -93,10 +93,6 @@ def config():
     }
 
 
-# Global string literal used so that it is accessible in tests
-DEFAULT_STDOUT = "mock standard output"
-
-
 @pytest.fixture()
 def mock_subprocess_handler():
     """Returns a mock implementation of `SubprocessWrapperInterface`."""
@@ -106,7 +102,7 @@ class MockSubprocessWrapper(SubprocessWrapperInterface):
 
         def __init__(self) -> None:
             self.commands: list[str] = []
-            self.stdout = DEFAULT_STDOUT
+            self.stdout = "mock standard output"
             self.error_on_call = False
             self.env = {}
 
diff --git a/tests/test_comparison.py b/tests/test_comparison.py
index 6dd212e9..ef0bdcfc 100644
--- a/tests/test_comparison.py
+++ b/tests/test_comparison.py
@@ -5,8 +5,6 @@
 pytest autouse fixture.
 """
 
-import contextlib
-import io
 from pathlib import Path
 
 import pytest
@@ -47,26 +45,6 @@ def test_nccmp_execution(self, comparison_task, files, mock_subprocess_handler):
         comparison_task.run()
         assert f"nccmp -df {file_a} {file_b}" in mock_subprocess_handler.commands
 
-    @pytest.mark.parametrize(
-        ("verbosity", "expected"),
-        [
-            (
-                False,
-                f"Success: files {FILE_NAME_A} {FILE_NAME_B} are identical\n",
-            ),
-            (
-                True,
-                f"Comparing files {FILE_NAME_A} and {FILE_NAME_B} bitwise...\n"
-                f"Success: files {FILE_NAME_A} {FILE_NAME_B} are identical\n",
-            ),
-        ],
-    )
-    def test_standard_output(self, comparison_task, verbosity, expected):
-        """Success case: test standard output."""
-        with contextlib.redirect_stdout(io.StringIO()) as buf:
-            comparison_task.run(verbose=verbosity)
-        assert buf.getvalue() == expected
-
     def test_failed_comparison_check(
         self, comparison_task, mock_subprocess_handler, bitwise_cmp_dir
     ):
@@ -76,34 +54,3 @@ def test_failed_comparison_check(
         comparison_task.run()
         with stdout_file.open("r", encoding="utf-8") as file:
             assert file.read() == mock_subprocess_handler.stdout
-
-    @pytest.mark.parametrize(
-        ("verbosity", "expected"),
-        [
-            (
-                False,
-                f"Failure: files {FILE_NAME_A} {FILE_NAME_B} differ. Results of "
-                "diff have been written to "
-                f"{internal.FLUXSITE_DIRS['BITWISE_CMP']}/{TASK_NAME}.txt\n",
-            ),
-            (
-                True,
-                f"Comparing files {FILE_NAME_A} and {FILE_NAME_B} bitwise...\n"
-                f"Failure: files {FILE_NAME_A} {FILE_NAME_B} differ. Results of "
-                "diff have been written to "
-                f"{internal.FLUXSITE_DIRS['BITWISE_CMP']}/{TASK_NAME}.txt\n",
-            ),
-        ],
-    )
-    def test_standard_output_on_failure(
-        self,
-        comparison_task,
-        mock_subprocess_handler,
-        verbosity,
-        expected,
-    ):
-        """Failure case: test standard output on failure."""
-        mock_subprocess_handler.error_on_call = True
-        with contextlib.redirect_stdout(io.StringIO()) as buf:
-            comparison_task.run(verbose=verbosity)
-        assert buf.getvalue() == expected
diff --git a/tests/test_fluxsite.py b/tests/test_fluxsite.py
index e186641d..606b7d09 100644
--- a/tests/test_fluxsite.py
+++ b/tests/test_fluxsite.py
@@ -5,8 +5,6 @@
 pytest autouse fixture.
""" -import contextlib -import io import math from pathlib import Path @@ -288,37 +286,6 @@ def test_all_settings_are_patched_into_namelist_file(self, task): "some_branch_specific_setting": True, } - @pytest.mark.parametrize( - ("verbosity", "expected"), - [ - ( - False, - "", - ), - ( - True, - "Setting up task: forcing-file_R1_S0\n" - "Creating runs/fluxsite/tasks/forcing-file_R1_S0 directory\n" - " Cleaning task\n" - " Copying namelist files from namelists to " - "runs/fluxsite/tasks/forcing-file_R1_S0\n" - " Copying CABLE executable from src/test-branch/" - "offline/cable to runs/fluxsite/tasks/forcing-file_R1_S0/cable\n" - " Adding base configurations to CABLE namelist file " - "runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" - " Adding science configurations to CABLE namelist file " - "runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" - " Adding branch specific configurations to CABLE namelist file " - "runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n", - ), - ], - ) - def test_standard_output(self, task, verbosity, expected): - """Success case: test standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.setup_task(verbose=verbosity) - assert buf.getvalue() == expected - class TestRunCable: """Tests for `Task.run_cable()`.""" @@ -339,13 +306,6 @@ def test_cable_execution(self, task, mock_subprocess_handler): ) assert (task_dir / internal.CABLE_STDOUT_FILENAME).exists() - @pytest.mark.parametrize(("verbosity", "expected"), [(False, ""), (True, "")]) - def test_standard_output(self, task, verbosity, expected): - """Success case: test standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.run_cable(verbose=verbosity) - assert buf.getvalue() == expected - def test_cable_error_exception(self, task, mock_subprocess_handler): """Failure case: raise CableError on subprocess non-zero exit code.""" mock_subprocess_handler.error_on_call = True @@ -399,26 +359,6 @@ def test_netcdf_global_attributes(self, task, nc_output_path, mock_repo, nml): assert atts[r"filename%foo"] == nml["cable"]["filename"]["foo"] assert atts[r"bar"] == ".true." - @pytest.mark.parametrize( - ("verbosity", "expected"), - [ - ( - False, - "", - ), - ( - True, - "Adding attributes to output file: " - "runs/fluxsite/outputs/forcing-file_R1_S0_out.nc\n", - ), - ], - ) - def test_standard_output(self, task, verbosity, expected): - """Success case: test standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.add_provenance_info(verbose=verbosity) - assert buf.getvalue() == expected - class TestGetFluxsiteTasks: """Tests for `get_fluxsite_tasks()`.""" diff --git a/tests/test_fs.py b/tests/test_fs.py index ebe6d9bf..ada175c0 100644 --- a/tests/test_fs.py +++ b/tests/test_fs.py @@ -5,8 +5,6 @@ pytest autouse fixture. """ -import contextlib -import io from pathlib import Path import pytest @@ -48,12 +46,3 @@ def test_mkdir(self, test_path, kwargs): mkdir(test_path, **kwargs) assert test_path.exists() test_path.rmdir() - - @pytest.mark.parametrize( - ("verbosity", "expected"), [(False, ""), (True, "Creating test1 directory\n")] - ) - def test_standard_output(self, verbosity, expected): - """Success case: test standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - mkdir(Path("test1"), verbose=verbosity) - assert buf.getvalue() == expected diff --git a/tests/test_model.py b/tests/test_model.py index 86153478..df002ac0 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -5,8 +5,6 @@ pytest autouse fixture. 
""" -import contextlib -import io import os from pathlib import Path @@ -16,8 +14,6 @@ from benchcab.model import Model, remove_module_lines from benchcab.utils.repo import Repo -from .conftest import DEFAULT_STDOUT - @pytest.fixture() def mock_repo(): @@ -106,19 +102,6 @@ def test_checkout_command_execution_with_revision_number( in mock_subprocess_handler.commands ) - @pytest.mark.parametrize( - ("verbosity", "expected"), - [ - (False, f"Successfully checked out trunk at revision {DEFAULT_STDOUT}\n"), - (True, f"Successfully checked out trunk at revision {DEFAULT_STDOUT}\n"), - ], - ) - def test_standard_output(self, model, verbosity, expected): - """Success case: test standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - model.checkout(verbose=verbosity) - assert buf.getvalue() == expected - # TODO(Sean) remove for issue https://github.com/CABLE-LSM/benchcab/issues/211 @pytest.mark.skip( @@ -170,29 +153,6 @@ def test_source_files_and_scripts_are_copied_to_tmp_dir(self, model): assert (tmp_dir / "serial_cable").exists() assert (tmp_dir / "foo.f90").exists() - @pytest.mark.parametrize( - ("verbosity", "expected"), - [ - ( - False, - "", - ), - ( - True, - "mkdir src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/foo.f90 src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/Makefile src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/parallel_cable src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/serial_cable src/trunk/offline/.tmp\n", - ), - ], - ) - def test_standard_output(self, model, verbosity, expected): - """Success case: test standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - model.pre_build(verbose=verbosity) - assert buf.getvalue() == expected - class TestRunBuild: """Tests for `Model.run_build()`.""" @@ -260,19 +220,6 @@ def test_commands_are_run_with_environment_variables( for kv in env.items(): assert kv in mock_subprocess_handler.env.items() - @pytest.mark.parametrize( - ("verbosity", "expected"), - [ - (False, ""), - (True, "Loading modules: foo bar\nUnloading modules: foo bar\n"), - ], - ) - def test_standard_output(self, model, modules, verbosity, expected): - """Success case: test standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - model.run_build(modules, verbose=verbosity) - assert buf.getvalue() == expected - class TestPostBuild: """Tests for `Model.post_build()`.""" @@ -293,19 +240,6 @@ def test_exe_moved_to_offline_dir(self, model): offline_dir = internal.SRC_DIR / model.name / "offline" assert (offline_dir / internal.CABLE_EXE).exists() - @pytest.mark.parametrize( - ("verbosity", "expected"), - [ - (False, ""), - (True, "mv src/trunk/offline/.tmp/cable src/trunk/offline/cable\n"), - ], - ) - def test_standard_output(self, model, verbosity, expected): - """Success case: test non-verbose standard output.""" - with contextlib.redirect_stdout(io.StringIO()) as buf: - model.post_build(verbose=verbosity) - assert buf.getvalue() == expected - class TestCustomBuild: """Tests for `Model.custom_build()`.""" @@ -347,31 +281,6 @@ def test_modules_loaded_at_runtime( "module unload " + " ".join(modules) ) in mock_environment_modules_handler.commands - @pytest.mark.parametrize( - ("verbosity", "expected"), - [ - ( - False, - "", - ), - ( - True, - "Copying src/trunk/my-custom-build.sh to src/trunk/tmp-build.sh\n" - "chmod +x src/trunk/tmp-build.sh\n" - "Modifying tmp-build.sh: remove lines that call environment " - "modules\n" - "Loading modules: foo bar\n" - "Unloading modules: foo bar\n", - ), - 
-        ],
-    )
-    def test_standard_output(self, model, build_script, modules, verbosity, expected):
-        """Success case: test non-verbose standard output for a custom build script."""
-        model.build_script = str(build_script)
-        with contextlib.redirect_stdout(io.StringIO()) as buf:
-            model.custom_build(modules, verbose=verbosity)
-        assert buf.getvalue() == expected
-
     def test_file_not_found_exception(self, model, build_script, modules):
         """Failure case: cannot find custom build script."""
         build_script_path = internal.SRC_DIR / model.name / build_script
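
Every test deleted above follows the same pattern: redirect `sys.stdout` into an `io.StringIO` buffer via `contextlib.redirect_stdout` and compare the captured text against an expected transcript. If equivalent checks are ever reinstated, pytest's built-in `capsys` fixture performs the same capture without the manual plumbing, and `caplog` covers output that goes through `logging` instead of `print`. Below is a minimal sketch of both patterns; `make_noise` is a hypothetical stand-in for the removed `verbose=` methods and is not part of this changeset.

```python
import logging

import pytest


def make_noise(verbose: bool = False) -> None:
    """Hypothetical stand-in for the removed `verbose=` methods."""
    if verbose:
        print("Creating test1 directory")


@pytest.mark.parametrize(
    ("verbosity", "expected"), [(False, ""), (True, "Creating test1 directory\n")]
)
def test_standard_output(capsys, verbosity, expected):
    """Same assertion as the deleted tests, without redirect_stdout plumbing."""
    make_noise(verbose=verbosity)
    # capsys captures everything written to stdout during the test.
    assert capsys.readouterr().out == expected


def test_log_output(caplog):
    """The caplog equivalent, if the messages move to `logging`."""
    with caplog.at_level(logging.INFO):
        logging.getLogger(__name__).info("Creating test1 directory")
    # caplog.text concatenates the formatted records captured at INFO and above.
    assert "Creating test1 directory" in caplog.text
```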