Remove tests on standard output #222

Status: Merged (1 commit, Dec 20, 2023)
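All of the removed tests follow the same stdout-capture pattern, summarised in the sketch below; the printed text is a stand-in for the real calls such as `comparison_task.run(verbose=True)` or `mkdir(Path("test1"), verbose=True)`:

```python
import contextlib
import io

# Pattern shared by every removed test: redirect stdout into a buffer and
# compare the captured text against an expected string.
with contextlib.redirect_stdout(io.StringIO()) as buf:
    print("some command output")  # stand-in for task.run(verbose=True) etc.
assert buf.getvalue() == "some command output\n"
```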
tests/conftest.py (6 changes: 1 addition & 5 deletions)
@@ -93,10 +93,6 @@ def config():
}


# Global string literal used so that it is accessible in tests
DEFAULT_STDOUT = "mock standard output"


@pytest.fixture()
def mock_subprocess_handler():
"""Returns a mock implementation of `SubprocessWrapperInterface`."""
@@ -106,7 +102,7 @@ class MockSubprocessWrapper(SubprocessWrapperInterface):

def __init__(self) -> None:
self.commands: list[str] = []
self.stdout = DEFAULT_STDOUT
self.stdout = "mock standard output"
self.error_on_call = False
self.env = {}

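With the module-level `DEFAULT_STDOUT` constant removed from `tests/conftest.py`, tests reference the mock's output through the fixture instance itself (as `test_failed_comparison_check` below already does). A minimal sketch of the pattern, assuming only the `mock_subprocess_handler` fixture above and pytest's built-in `tmp_path`; the test name and file path are illustrative:

```python
def test_stdout_written_to_file(tmp_path, mock_subprocess_handler):
    """Sketch only: assert against the fixture's stdout attribute, not a shared constant."""
    out_file = tmp_path / "stdout.txt"
    # Stand-in for production code that persists the captured subprocess output.
    out_file.write_text(mock_subprocess_handler.stdout, encoding="utf-8")
    # The fixture instance carries the expected text, so no module-level constant is needed.
    assert out_file.read_text(encoding="utf-8") == mock_subprocess_handler.stdout
```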
tests/test_comparison.py (53 changes: 0 additions & 53 deletions)
@@ -5,8 +5,6 @@
pytest autouse fixture.
"""

import contextlib
import io
from pathlib import Path

import pytest
@@ -47,26 +45,6 @@ def test_nccmp_execution(self, comparison_task, files, mock_subprocess_handler):
comparison_task.run()
assert f"nccmp -df {file_a} {file_b}" in mock_subprocess_handler.commands

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(
False,
f"Success: files {FILE_NAME_A} {FILE_NAME_B} are identical\n",
),
(
True,
f"Comparing files {FILE_NAME_A} and {FILE_NAME_B} bitwise...\n"
f"Success: files {FILE_NAME_A} {FILE_NAME_B} are identical\n",
),
],
)
def test_standard_output(self, comparison_task, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
comparison_task.run(verbose=verbosity)
assert buf.getvalue() == expected

def test_failed_comparison_check(
self, comparison_task, mock_subprocess_handler, bitwise_cmp_dir
):
@@ -76,34 +54,3 @@ def test_failed_comparison_check(
comparison_task.run()
with stdout_file.open("r", encoding="utf-8") as file:
assert file.read() == mock_subprocess_handler.stdout

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(
False,
f"Failure: files {FILE_NAME_A} {FILE_NAME_B} differ. Results of "
"diff have been written to "
f"{internal.FLUXSITE_DIRS['BITWISE_CMP']}/{TASK_NAME}.txt\n",
),
(
True,
f"Comparing files {FILE_NAME_A} and {FILE_NAME_B} bitwise...\n"
f"Failure: files {FILE_NAME_A} {FILE_NAME_B} differ. Results of "
"diff have been written to "
f"{internal.FLUXSITE_DIRS['BITWISE_CMP']}/{TASK_NAME}.txt\n",
),
],
)
def test_standard_output_on_failure(
self,
comparison_task,
mock_subprocess_handler,
verbosity,
expected,
):
"""Failure case: test standard output on failure."""
mock_subprocess_handler.error_on_call = True
with contextlib.redirect_stdout(io.StringIO()) as buf:
comparison_task.run(verbose=verbosity)
assert buf.getvalue() == expected
tests/test_fluxsite.py (60 changes: 0 additions & 60 deletions)
@@ -5,8 +5,6 @@
pytest autouse fixture.
"""

import contextlib
import io
import math
from pathlib import Path

@@ -288,37 +286,6 @@ def test_all_settings_are_patched_into_namelist_file(self, task):
"some_branch_specific_setting": True,
}

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(
False,
"",
),
(
True,
"Setting up task: forcing-file_R1_S0\n"
"Creating runs/fluxsite/tasks/forcing-file_R1_S0 directory\n"
" Cleaning task\n"
" Copying namelist files from namelists to "
"runs/fluxsite/tasks/forcing-file_R1_S0\n"
" Copying CABLE executable from src/test-branch/"
"offline/cable to runs/fluxsite/tasks/forcing-file_R1_S0/cable\n"
" Adding base configurations to CABLE namelist file "
"runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n"
" Adding science configurations to CABLE namelist file "
"runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n"
" Adding branch specific configurations to CABLE namelist file "
"runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n",
),
],
)
def test_standard_output(self, task, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
task.setup_task(verbose=verbosity)
assert buf.getvalue() == expected


class TestRunCable:
"""Tests for `Task.run_cable()`."""
@@ -339,13 +306,6 @@ def test_cable_execution(self, task, mock_subprocess_handler):
)
assert (task_dir / internal.CABLE_STDOUT_FILENAME).exists()

@pytest.mark.parametrize(("verbosity", "expected"), [(False, ""), (True, "")])
def test_standard_output(self, task, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
task.run_cable(verbose=verbosity)
assert buf.getvalue() == expected

def test_cable_error_exception(self, task, mock_subprocess_handler):
"""Failure case: raise CableError on subprocess non-zero exit code."""
mock_subprocess_handler.error_on_call = True
@@ -399,26 +359,6 @@ def test_netcdf_global_attributes(self, task, nc_output_path, mock_repo, nml):
assert atts[r"filename%foo"] == nml["cable"]["filename"]["foo"]
assert atts[r"bar"] == ".true."

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(
False,
"",
),
(
True,
"Adding attributes to output file: "
"runs/fluxsite/outputs/forcing-file_R1_S0_out.nc\n",
),
],
)
def test_standard_output(self, task, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
task.add_provenance_info(verbose=verbosity)
assert buf.getvalue() == expected


class TestGetFluxsiteTasks:
"""Tests for `get_fluxsite_tasks()`."""
tests/test_fs.py (11 changes: 0 additions & 11 deletions)
@@ -5,8 +5,6 @@
pytest autouse fixture.
"""

import contextlib
import io
from pathlib import Path

import pytest
@@ -48,12 +46,3 @@ def test_mkdir(self, test_path, kwargs):
mkdir(test_path, **kwargs)
assert test_path.exists()
test_path.rmdir()

@pytest.mark.parametrize(
("verbosity", "expected"), [(False, ""), (True, "Creating test1 directory\n")]
)
def test_standard_output(self, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
mkdir(Path("test1"), verbose=verbosity)
assert buf.getvalue() == expected
tests/test_model.py (91 changes: 0 additions & 91 deletions)
@@ -5,8 +5,6 @@
pytest autouse fixture.
"""

import contextlib
import io
import os
from pathlib import Path

@@ -16,8 +14,6 @@
from benchcab.model import Model, remove_module_lines
from benchcab.utils.repo import Repo

from .conftest import DEFAULT_STDOUT


@pytest.fixture()
def mock_repo():
@@ -106,19 +102,6 @@ def test_checkout_command_execution_with_revision_number(
in mock_subprocess_handler.commands
)

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(False, f"Successfully checked out trunk at revision {DEFAULT_STDOUT}\n"),
(True, f"Successfully checked out trunk at revision {DEFAULT_STDOUT}\n"),
],
)
def test_standard_output(self, model, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
model.checkout(verbose=verbosity)
assert buf.getvalue() == expected


# TODO(Sean) remove for issue https://github.com/CABLE-LSM/benchcab/issues/211
@pytest.mark.skip(
@@ -170,29 +153,6 @@ def test_source_files_and_scripts_are_copied_to_tmp_dir(self, model):
assert (tmp_dir / "serial_cable").exists()
assert (tmp_dir / "foo.f90").exists()

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(
False,
"",
),
(
True,
"mkdir src/trunk/offline/.tmp\n"
"cp -p src/trunk/offline/foo.f90 src/trunk/offline/.tmp\n"
"cp -p src/trunk/offline/Makefile src/trunk/offline/.tmp\n"
"cp -p src/trunk/offline/parallel_cable src/trunk/offline/.tmp\n"
"cp -p src/trunk/offline/serial_cable src/trunk/offline/.tmp\n",
),
],
)
def test_standard_output(self, model, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
model.pre_build(verbose=verbosity)
assert buf.getvalue() == expected


class TestRunBuild:
"""Tests for `Model.run_build()`."""
@@ -260,19 +220,6 @@ def test_commands_are_run_with_environment_variables(
for kv in env.items():
assert kv in mock_subprocess_handler.env.items()

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(False, ""),
(True, "Loading modules: foo bar\nUnloading modules: foo bar\n"),
],
)
def test_standard_output(self, model, modules, verbosity, expected):
"""Success case: test standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
model.run_build(modules, verbose=verbosity)
assert buf.getvalue() == expected


class TestPostBuild:
"""Tests for `Model.post_build()`."""
@@ -293,19 +240,6 @@ def test_exe_moved_to_offline_dir(self, model):
offline_dir = internal.SRC_DIR / model.name / "offline"
assert (offline_dir / internal.CABLE_EXE).exists()

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(False, ""),
(True, "mv src/trunk/offline/.tmp/cable src/trunk/offline/cable\n"),
],
)
def test_standard_output(self, model, verbosity, expected):
"""Success case: test non-verbose standard output."""
with contextlib.redirect_stdout(io.StringIO()) as buf:
model.post_build(verbose=verbosity)
assert buf.getvalue() == expected


class TestCustomBuild:
"""Tests for `Model.custom_build()`."""
@@ -347,31 +281,6 @@ def test_modules_loaded_at_runtime(
"module unload " + " ".join(modules)
) in mock_environment_modules_handler.commands

@pytest.mark.parametrize(
("verbosity", "expected"),
[
(
False,
"",
),
(
True,
"Copying src/trunk/my-custom-build.sh to src/trunk/tmp-build.sh\n"
"chmod +x src/trunk/tmp-build.sh\n"
"Modifying tmp-build.sh: remove lines that call environment "
"modules\n"
"Loading modules: foo bar\n"
"Unloading modules: foo bar\n",
),
],
)
def test_standard_output(self, model, build_script, modules, verbosity, expected):
"""Success case: test non-verbose standard output for a custom build script."""
model.build_script = str(build_script)
with contextlib.redirect_stdout(io.StringIO()) as buf:
model.custom_build(modules, verbose=verbosity)
assert buf.getvalue() == expected

def test_file_not_found_exception(self, model, build_script, modules):
"""Failure case: cannot find custom build script."""
build_script_path = internal.SRC_DIR / model.name / build_script
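For reference only: if stdout assertions are ever reintroduced, pytest's built-in `capsys` fixture captures output without the `contextlib.redirect_stdout(io.StringIO())` boilerplate used in the removed tests. A minimal sketch; the printed message mirrors the removed `mkdir` test and is purely illustrative:

```python
def test_stdout_with_capsys(capsys):
    """Sketch only: capture and assert on stdout via pytest's capsys fixture."""
    print("Creating test1 directory")
    captured = capsys.readouterr()
    assert captured.out == "Creating test1 directory\n"
```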