diff --git a/tests/common.py b/tests/common.py deleted file mode 100644 index a2111545..00000000 --- a/tests/common.py +++ /dev/null @@ -1,118 +0,0 @@ -"""Helper functions for `pytest`.""" - -import tempfile -from pathlib import Path -from subprocess import CalledProcessError, CompletedProcess -from typing import Optional - -from benchcab.environment_modules import EnvironmentModulesInterface -from benchcab.utils.subprocess import SubprocessWrapperInterface - -MOCK_CWD = TMP_DIR = Path(tempfile.mkdtemp(prefix="benchcab_tests")) - - -def get_mock_config() -> dict: - """Returns a valid mock config.""" - config = { - "project": "bar", - "experiment": "five-site-test", - "modules": [ - "intel-compiler/2021.1.1", - "openmpi/4.1.0", - "netcdf/4.7.4", - ], - "realisations": [ - { - "name": "trunk", - "revision": 9000, - "path": "trunk", - "patch": {}, - "patch_remove": {}, - "build_script": "", - }, - { - "name": "v3.0-YP-changes", - "revision": -1, - "path": "branches/Users/sean/my-branch", - "patch": {"cable": {"cable_user": {"ENABLE_SOME_FEATURE": False}}}, - "patch_remove": {}, - "build_script": "", - }, - ], - "science_configurations": [ - { - "cable": { - "cable_user": { - "GS_SWITCH": "medlyn", - "FWSOIL_SWITCH": "Haverd2013", - } - } - }, - { - "cable": { - "cable_user": { - "GS_SWITCH": "leuning", - "FWSOIL_SWITCH": "Haverd2013", - } - } - }, - ], - "fluxsite": { - "pbs": { - "ncpus": 16, - "mem": "64G", - "walltime": "01:00:00", - "storage": ["gdata/foo123"], - }, - "multiprocessing": True, - }, - } - return config - - -class MockSubprocessWrapper(SubprocessWrapperInterface): - """A mock implementation of `SubprocessWrapperInterface` used for testing.""" - - def __init__(self) -> None: - self.commands: list[str] = [] - self.stdout = "mock standard output" - self.error_on_call = False - self.env = {} - - def run_cmd( - self, - cmd: str, - capture_output: bool = False, - output_file: Optional[Path] = None, - verbose: bool = False, - env: Optional[dict] = None, - ) -> CompletedProcess: - self.commands.append(cmd) - if self.error_on_call: - raise CalledProcessError(returncode=1, cmd=cmd, output=self.stdout) - if output_file: - output_file.touch() - if env: - self.env = env - return CompletedProcess(cmd, returncode=0, stdout=self.stdout) - - -class MockEnvironmentModules(EnvironmentModulesInterface): - """A mock implementation of `EnvironmentModulesInterface` used for testing.""" - - def __init__(self) -> None: - self.commands: list[str] = [] - - def module_is_avail(self, *args: str) -> bool: - self.commands.append("module is-avail " + " ".join(args)) - return True - - def module_is_loaded(self, *args: str) -> bool: - self.commands.append("module is-loaded " + " ".join(args)) - return True - - def module_load(self, *args: str) -> None: - self.commands.append("module load " + " ".join(args)) - - def module_unload(self, *args: str) -> None: - self.commands.append("module unload " + " ".join(args)) diff --git a/tests/conftest.py b/tests/conftest.py index e84e2a05..807e0203 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,26 +2,156 @@ import os import shutil +import tempfile from pathlib import Path +from subprocess import CalledProcessError, CompletedProcess +from typing import Optional import pytest -from .common import MOCK_CWD +from benchcab.environment_modules import EnvironmentModulesInterface +from benchcab.utils.subprocess import SubprocessWrapperInterface + + +@pytest.fixture() +def mock_cwd(): + """Create and return a unique temporary directory to use as the CWD. 
+ + The return value is the path of the directory. + """ + return Path(tempfile.mkdtemp(prefix="benchcab_tests")) @pytest.fixture(autouse=True) -def _run_around_tests(): - """`pytest` autouse fixture that runs around each test.""" - # Setup: +def _run_around_tests(mock_cwd): + """Change into the `mock_cwd` directory.""" prevdir = Path.cwd() - if MOCK_CWD.exists(): - shutil.rmtree(MOCK_CWD) - MOCK_CWD.mkdir() - os.chdir(MOCK_CWD.expanduser()) + os.chdir(mock_cwd.expanduser()) - # Run the test: yield - # Teardown: os.chdir(prevdir) - shutil.rmtree(MOCK_CWD) + shutil.rmtree(mock_cwd) + + +@pytest.fixture() +def config(): + """Returns a valid mock config.""" + return { + "project": "bar", + "experiment": "five-site-test", + "modules": [ + "intel-compiler/2021.1.1", + "openmpi/4.1.0", + "netcdf/4.7.4", + ], + "realisations": [ + { + "name": "trunk", + "revision": 9000, + "path": "trunk", + "patch": {}, + "patch_remove": {}, + "build_script": "", + }, + { + "name": "v3.0-YP-changes", + "revision": -1, + "path": "branches/Users/sean/my-branch", + "patch": {"cable": {"cable_user": {"ENABLE_SOME_FEATURE": False}}}, + "patch_remove": {}, + "build_script": "", + }, + ], + "science_configurations": [ + { + "cable": { + "cable_user": { + "GS_SWITCH": "medlyn", + "FWSOIL_SWITCH": "Haverd2013", + } + } + }, + { + "cable": { + "cable_user": { + "GS_SWITCH": "leuning", + "FWSOIL_SWITCH": "Haverd2013", + } + } + }, + ], + "fluxsite": { + "pbs": { + "ncpus": 16, + "mem": "64G", + "walltime": "01:00:00", + "storage": ["gdata/foo123"], + }, + "multiprocessing": True, + }, + } + + +# Global string literal used so that it is accessible in tests +DEFAULT_STDOUT = "mock standard output" + + +@pytest.fixture() +def mock_subprocess_handler(): + """Returns a mock implementation of `SubprocessWrapperInterface`.""" + + class MockSubprocessWrapper(SubprocessWrapperInterface): + """A mock implementation of `SubprocessWrapperInterface` used for testing.""" + + def __init__(self) -> None: + self.commands: list[str] = [] + self.stdout = DEFAULT_STDOUT + self.error_on_call = False + self.env = {} + + def run_cmd( + self, + cmd: str, + capture_output: bool = False, + output_file: Optional[Path] = None, + verbose: bool = False, + env: Optional[dict] = None, + ) -> CompletedProcess: + self.commands.append(cmd) + if self.error_on_call: + raise CalledProcessError(returncode=1, cmd=cmd, output=self.stdout) + if output_file: + output_file.touch() + if env: + self.env = env + return CompletedProcess(cmd, returncode=0, stdout=self.stdout) + + return MockSubprocessWrapper() + + +@pytest.fixture() +def mock_environment_modules_handler(): + """Returns a mock implementation of `EnvironmentModulesInterface`.""" + + class MockEnvironmentModules(EnvironmentModulesInterface): + """A mock implementation of `EnvironmentModulesInterface` used for testing.""" + + def __init__(self) -> None: + self.commands: list[str] = [] + + def module_is_avail(self, *args: str) -> bool: + self.commands.append("module is-avail " + " ".join(args)) + return True + + def module_is_loaded(self, *args: str) -> bool: + self.commands.append("module is-loaded " + " ".join(args)) + return True + + def module_load(self, *args: str) -> None: + self.commands.append("module load " + " ".join(args)) + + def module_unload(self, *args: str) -> None: + self.commands.append("module unload " + " ".join(args)) + + return MockEnvironmentModules() diff --git a/tests/test_benchcab.py b/tests/test_benchcab.py index cd6e1f85..80446dd8 100644 --- a/tests/test_benchcab.py +++ 
b/tests/test_benchcab.py @@ -1,4 +1,9 @@ -"""`pytest` tests for `benchcab.py`.""" +"""`pytest` tests for `benchcab.py`. + +Note: explicit teardown for generated files and directories is not required as +the working directory used for testing is cleaned up in the `_run_around_tests` +pytest autouse fixture. +""" import contextlib import io @@ -9,66 +14,64 @@ from benchcab import internal from benchcab.benchcab import Benchcab -from benchcab.utils.subprocess import SubprocessWrapperInterface - -from .common import MOCK_CWD, MockSubprocessWrapper, get_mock_config -def get_mock_app( - subprocess_handler: SubprocessWrapperInterface = MockSubprocessWrapper(), -) -> Benchcab: +@pytest.fixture() +def app(config, mock_cwd, mock_subprocess_handler): """Returns a mock `Benchcab` instance for testing against.""" - config = get_mock_config() - app = Benchcab( + _app = Benchcab( argv=["benchcab", "fluxsite"], benchcab_exe_path=Path("/path/to/benchcab"), config=config, validate_env=False, ) - app.subprocess_handler = subprocess_handler - app.root_dir = MOCK_CWD - return app + _app.subprocess_handler = mock_subprocess_handler + _app.root_dir = mock_cwd + return _app -def test_fluxsite_submit_job(): +class TestFluxsiteSubmitJob: """Tests for `Benchcab.fluxsite_submit_job()`.""" - # Success case: test qsub command is executed - mock_subprocess = MockSubprocessWrapper() - app = get_mock_app(mock_subprocess) - app.fluxsite_submit_job() - assert f"qsub {MOCK_CWD / internal.QSUB_FNAME}" in mock_subprocess.commands - # Success case: test non-verbose output - app = get_mock_app() - with contextlib.redirect_stdout(io.StringIO()) as buf: + def test_qsub_execution(self, app, mock_subprocess_handler, mock_cwd): + """Success case: test qsub command is executed.""" app.fluxsite_submit_job() - assert buf.getvalue() == ( - "Creating PBS job script to run fluxsite tasks on compute " - f"nodes: {internal.QSUB_FNAME}\n" - f"PBS job submitted: {mock_subprocess.stdout}\n" - "The CABLE log file for each task is written to " - f"{internal.FLUXSITE_DIRS['LOG']}/_log.txt\n" - "The CABLE standard output for each task is written to " - f"{internal.FLUXSITE_DIRS['TASKS']}//out.txt\n" - "The NetCDF output for each task is written to " - f"{internal.FLUXSITE_DIRS['OUTPUT']}/_out.nc\n" - ) + assert ( + f"qsub {mock_cwd / internal.QSUB_FNAME}" in mock_subprocess_handler.commands + ) - # Failure case: qsub non-zero exit code prints an error message - mock_subprocess = MockSubprocessWrapper() - mock_subprocess.error_on_call = True - app = get_mock_app(subprocess_handler=mock_subprocess) - with contextlib.redirect_stdout(io.StringIO()) as buf: - with pytest.raises(CalledProcessError): + def test_default_standard_output(self, app, mock_subprocess_handler): + """Success case: test default standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: app.fluxsite_submit_job() - assert buf.getvalue() == ( - "Creating PBS job script to run fluxsite tasks on compute " - f"nodes: {internal.QSUB_FNAME}\n" - f"Error when submitting job to NCI queue\n{mock_subprocess.stdout}\n" - ) + assert buf.getvalue() == ( + "Creating PBS job script to run fluxsite tasks on compute " + f"nodes: {internal.QSUB_FNAME}\n" + f"PBS job submitted: {mock_subprocess_handler.stdout}\n" + "The CABLE log file for each task is written to " + f"{internal.FLUXSITE_DIRS['LOG']}/_log.txt\n" + "The CABLE standard output for each task is written to " + f"{internal.FLUXSITE_DIRS['TASKS']}//out.txt\n" + "The NetCDF output for each task is written to " +
f"{internal.FLUXSITE_DIRS['OUTPUT']}/_out.nc\n" + ) - # Failure case: test exception is raised when benchcab_exe_path is None - app = get_mock_app() - app.benchcab_exe_path = None - with pytest.raises(RuntimeError, match="Path to benchcab executable is undefined."): - app.fluxsite_submit_job() + def test_qsub_non_zero_exit_code_prints_error(self, app, mock_subprocess_handler): + """Failure case: qsub non-zero exit code prints an error message.""" + mock_subprocess_handler.error_on_call = True + with contextlib.redirect_stdout(io.StringIO()) as buf: + with pytest.raises(CalledProcessError): + app.fluxsite_submit_job() + assert buf.getvalue() == ( + "Creating PBS job script to run fluxsite tasks on compute " + f"nodes: {internal.QSUB_FNAME}\n" + f"Error when submitting job to NCI queue\n{mock_subprocess_handler.stdout}\n" + ) + + def test_benchcab_exe_path_exception(self, app): + """Failure case: test exception is raised when benchcab_exe_path is None.""" + app.benchcab_exe_path = None + with pytest.raises( + RuntimeError, match="Path to benchcab executable is undefined." + ): + app.fluxsite_submit_job() diff --git a/tests/test_comparison.py b/tests/test_comparison.py index 704bcd90..d5ebba47 100644 --- a/tests/test_comparison.py +++ b/tests/test_comparison.py @@ -1,88 +1,115 @@ -"""`pytest` tests for `comparison.py`.""" +"""`pytest` tests for `comparison.py`. + +Note: explicit teardown for generated files and directories are not required as +the working directory used for testing is cleaned up in the `_run_around_tests` +pytest autouse fixture. +""" import contextlib import io +from pathlib import Path + +import pytest from benchcab import internal from benchcab.comparison import ComparisonTask -from benchcab.utils.subprocess import SubprocessWrapperInterface -from .common import MOCK_CWD, MockSubprocessWrapper +FILE_NAME_A, FILE_NAME_B = "file_a.nc", "file_b.nc" +TASK_NAME = "mock_comparison_task_name" + +@pytest.fixture() +def files(): + """Return mock file paths used for comparison.""" + return Path(FILE_NAME_A), Path(FILE_NAME_B) -def get_mock_comparison_task( - subprocess_handler: SubprocessWrapperInterface = MockSubprocessWrapper(), -) -> ComparisonTask: + +@pytest.fixture() +def comparison_task(files, mock_cwd, mock_subprocess_handler): """Returns a mock `ComparisonTask` instance for testing against.""" - comparison_task = ComparisonTask( - files=(MOCK_CWD / "file_a.nc", MOCK_CWD / "file_b.nc"), - task_name="mock_comparison_task_name", - ) - comparison_task.subprocess_handler = subprocess_handler - comparison_task.root_dir = MOCK_CWD - return comparison_task - - -def test_run_comparison(): - """Tests for `run_comparison()`.""" - file_a = MOCK_CWD / "file_a.nc" - file_b = MOCK_CWD / "file_b.nc" - bitwise_cmp_dir = MOCK_CWD / internal.FLUXSITE_DIRS["BITWISE_CMP"] - bitwise_cmp_dir.mkdir(parents=True) - - # Success case: run comparison - mock_subprocess = MockSubprocessWrapper() - task = get_mock_comparison_task(subprocess_handler=mock_subprocess) - task.run() - assert f"nccmp -df {file_a} {file_b}" in mock_subprocess.commands - - # Success case: test non-verbose output - task = get_mock_comparison_task() - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.run() - assert ( - buf.getvalue() == f"Success: files {file_a.name} {file_b.name} are identical\n" - ) + _comparison_task = ComparisonTask(files=files, task_name=TASK_NAME) + _comparison_task.subprocess_handler = mock_subprocess_handler + _comparison_task.root_dir = mock_cwd + return _comparison_task - # Success case: test 
verbose output - task = get_mock_comparison_task() - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.run(verbose=True) - assert buf.getvalue() == ( - f"Comparing files {file_a.name} and {file_b.name} bitwise...\n" - f"Success: files {file_a.name} {file_b.name} are identical\n" - ) - # Failure case: test failed comparison check (files differ) - mock_subprocess = MockSubprocessWrapper() - mock_subprocess.error_on_call = True - task = get_mock_comparison_task(subprocess_handler=mock_subprocess) - stdout_file = bitwise_cmp_dir / f"{task.task_name}.txt" - task.run() - with stdout_file.open("r", encoding="utf-8") as file: - assert file.read() == mock_subprocess.stdout - - # Failure case: test non-verbose standard output on failure - mock_subprocess = MockSubprocessWrapper() - mock_subprocess.error_on_call = True - task = get_mock_comparison_task(subprocess_handler=mock_subprocess) - stdout_file = bitwise_cmp_dir / f"{task.task_name}.txt" - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.run() - assert buf.getvalue() == ( - f"Failure: files {file_a.name} {file_b.name} differ. Results of diff " - f"have been written to {stdout_file}\n" +class TestRun: + """Tests for `ComparisonTask.run()`.""" + + @pytest.fixture() + def bitwise_cmp_dir(self): + """Create and return the fluxsite bitwise comparison directory.""" + internal.FLUXSITE_DIRS["BITWISE_CMP"].mkdir(parents=True) + return internal.FLUXSITE_DIRS["BITWISE_CMP"] + + def test_nccmp_execution(self, comparison_task, files, mock_subprocess_handler): + """Success case: test nccmp is executed.""" + file_a, file_b = files + comparison_task.run() + assert f"nccmp -df {file_a} {file_b}" in mock_subprocess_handler.commands + + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + ( + False, + f"Success: files {FILE_NAME_A} {FILE_NAME_B} are identical\n", + ), + ( + True, + f"Comparing files {FILE_NAME_A} and {FILE_NAME_B} bitwise...\n" + f"Success: files {FILE_NAME_A} {FILE_NAME_B} are identical\n", + ), + ], ) + def test_standard_output(self, comparison_task, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + comparison_task.run(verbose=verbosity) + assert buf.getvalue() == expected + + def test_failed_comparison_check( + self, comparison_task, mock_subprocess_handler, bitwise_cmp_dir + ): + """Failure case: test failed comparison check (files differ).""" + stdout_file = bitwise_cmp_dir / f"{comparison_task.task_name}.txt" + mock_subprocess_handler.error_on_call = True + comparison_task.run() + with stdout_file.open("r", encoding="utf-8") as file: + assert file.read() == mock_subprocess_handler.stdout - # Failure case: test verbose standard output on failure - mock_subprocess = MockSubprocessWrapper() - mock_subprocess.error_on_call = True - task = get_mock_comparison_task(subprocess_handler=mock_subprocess) - stdout_file = bitwise_cmp_dir / f"{task.task_name}.txt" - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.run(verbose=True) - assert buf.getvalue() == ( - f"Comparing files {file_a.name} and {file_b.name} bitwise...\n" - f"Failure: files {file_a.name} {file_b.name} differ. 
Results of diff " - f"have been written to {stdout_file}\n" + # TODO(Sean) fix for issue https://github.com/CABLE-LSM/benchcab/issues/162 + @pytest.mark.skip( + reason="""This will always fail since `parametrize()` parameters are + dependent on the `mock_cwd` fixture.""" + ) + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + ( + False, + f"Failure: files {FILE_NAME_A} {FILE_NAME_B} differ. Results of " + "diff have been written to " + f"{internal.FLUXSITE_DIRS['BITWISE_CMP']}/{TASK_NAME}\n", + ), + ( + True, + f"Comparing files {FILE_NAME_A} and {FILE_NAME_B} bitwise...\n" + f"Failure: files {FILE_NAME_A} {FILE_NAME_B} differ. Results of " + "diff have been written to " + f"{internal.FLUXSITE_DIRS['BITWISE_CMP']}/{TASK_NAME}\n", + ), + ], ) + def test_standard_output_on_failure( + self, + comparison_task, + mock_subprocess_handler, + verbosity, + expected, + ): + """Failure case: test standard output on failure.""" + mock_subprocess_handler.error_on_call = True + with contextlib.redirect_stdout(io.StringIO()) as buf: + comparison_task.run(verbose=verbosity) + assert buf.getvalue() == expected diff --git a/tests/test_config.py b/tests/test_config.py index a45b1b28..140fb721 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,315 +1,330 @@ -"""`pytest` tests for `config.py`.""" +"""`pytest` tests for `config.py`. + +Note: explicit teardown for generated files and directories are not required as +the working directory used for testing is cleaned up in the `_run_around_tests` +pytest autouse fixture. +""" + +from pathlib import Path import pytest import yaml from benchcab import internal from benchcab.config import check_config, read_config -from tests.common import TMP_DIR, get_mock_config -def test_check_config(): +class TestCheckConfig: """Tests for `check_config()`.""" - # Success case: test barebones config is valid - config = get_mock_config() - check_config(config) - - # Success case: branch configuration with missing name key - config = get_mock_config() - config["realisations"][0].pop("name") - check_config(config) - - # Success case: branch configuration with missing revision key - config = get_mock_config() - config["realisations"][0].pop("revision") - check_config(config) - - # Success case: branch configuration with missing patch key - config = get_mock_config() - config["realisations"][0].pop("patch") - check_config(config) - - # Success case: branch configuration with missing patch_remove key - config = get_mock_config() - config["realisations"][0].pop("patch_remove") - check_config(config) - - # Success case: test config when realisations contains more than two keys - config = get_mock_config() - config["realisations"].append({"path": "path/to/my_new_branch"}) - check_config(config) - - # Success case: test config when realisations contains less than two keys - config = get_mock_config() - config["realisations"].pop() - check_config(config) - - # Success case: test experiment with site id from the - # five-site-test is valid - config = get_mock_config() - config["experiment"] = "AU-Tum" - check_config(config) - - # Success case: test config without science_configurations is valid - config = get_mock_config() - config.pop("science_configurations") - check_config(config) - - # Success case: test config without fluxsite key is valid - config = get_mock_config() - config.pop("fluxsite") - check_config(config) - - # Success case: test config without multiprocessing key is valid - config = get_mock_config() - config["fluxsite"].pop("multiprocessing") - 
check_config(config) - - # Success case: test config without pbs key is valid - config = get_mock_config() - config["fluxsite"].pop("pbs") - check_config(config) - - # Success case: test config without ncpus key is valid - config = get_mock_config() - config["fluxsite"]["pbs"].pop("ncpus") - check_config(config) - - # Success case: test config without mem key is valid - config = get_mock_config() - config["fluxsite"]["pbs"].pop("mem") - check_config(config) - - # Success case: test config without walltime key is valid - config = get_mock_config() - config["fluxsite"]["pbs"].pop("walltime") - check_config(config) - - # Success case: test config without storage key is valid - config = get_mock_config() - config["fluxsite"]["pbs"].pop("storage") - check_config(config) - - # Failure case: test missing required keys raises an exception - config = get_mock_config() - config.pop("project") - config.pop("experiment") - with pytest.raises( - ValueError, - match="Keys are missing from the config file: project, experiment", - ): - check_config(config) - - # Failure case: test config with empty realisations key raises an exception - config = get_mock_config() - config["realisations"] = [] - with pytest.raises(ValueError, match="The 'realisations' key cannot be empty."): - check_config(config) - - # Failure case: test config with invalid experiment key raises an exception - config = get_mock_config() - config["experiment"] = "foo" - with pytest.raises( - ValueError, - match="The 'experiment' key is invalid.\n" - "Valid experiments are: " - + ", ".join( - list(internal.MEORG_EXPERIMENTS) - + internal.MEORG_EXPERIMENTS["five-site-test"] - ), - ): - check_config(config) - - # Failure case: test config with invalid experiment key (not a subset of - # five-site-test) raises an exception - config = get_mock_config() - config["experiment"] = "CH-Dav" - with pytest.raises( - ValueError, - match="The 'experiment' key is invalid.\n" - "Valid experiments are: " - + ", ".join( - list(internal.MEORG_EXPERIMENTS) - + internal.MEORG_EXPERIMENTS["five-site-test"] - ), - ): - check_config(config) - - # Failure case: 'path' key is missing in branch configuration - config = get_mock_config() - config["realisations"][1].pop("path") - with pytest.raises( - ValueError, match="Realisation '1' must specify the `path` field." - ): - check_config(config) - - # Failure case: test config with empty science_configurations key - # raises an exception - config = get_mock_config() - config["science_configurations"] = [] - with pytest.raises( - ValueError, match="The 'science_configurations' key cannot be empty." 
- ): - check_config(config) - # Failure case: project key is not a string - config = get_mock_config() - config["project"] = 123 - with pytest.raises(TypeError, match="The 'project' key must be a string."): + def test_config_is_valid(self, config): + """Success case: test barebones config is valid.""" check_config(config) - # Failure case: realisations key is not a list - config = get_mock_config() - config["realisations"] = {"foo": "bar"} - with pytest.raises(TypeError, match="The 'realisations' key must be a list."): + def test_branch_configuration_with_missing_name_key(self, config): + """Success case: branch configuration with missing name key.""" + config["realisations"][0].pop("name") check_config(config) - # Failure case: realisations key is not a list of dict - config = get_mock_config() - config["realisations"] = ["foo"] - with pytest.raises(TypeError, match="Realisation '0' must be a dictionary object."): + def test_branch_configuration_with_missing_revision_key(self, config): + """Success case: branch configuration with missing revision key.""" + config["realisations"][0].pop("revision") check_config(config) - # Failure case: type of name is not a string - config = get_mock_config() - config["realisations"][1]["name"] = 1234 - with pytest.raises( - TypeError, match="The 'name' field in realisation '1' must be a string." - ): + def test_branch_configuration_with_missing_patch_key(self, config): + """Success case: branch configuration with missing patch key.""" + config["realisations"][0].pop("patch") check_config(config) - # Failure case: type of path is not a string - config = get_mock_config() - config["realisations"][1]["path"] = 1234 - with pytest.raises( - TypeError, match="The 'path' field in realisation '1' must be a string." - ): + def test_branch_configuration_with_missing_patch_remove_key(self, config): + """Success case: branch configuration with missing patch_remove key.""" + config["realisations"][0].pop("patch_remove") check_config(config) - # Failure case: type of revision key is not an integer - config = get_mock_config() - config["realisations"][1]["revision"] = "-1" - with pytest.raises( - TypeError, match="The 'revision' field in realisation '1' must be an integer." - ): + def test_config_when_realisations_contains_more_than_two_keys(self, config): + """Success case: test config when realisations contains more than two keys.""" + config["realisations"].append({"path": "path/to/my_new_branch"}) check_config(config) - # Failure case: type of patch key is not a dictionary - config = get_mock_config() - config["realisations"][1]["patch"] = r"cable_user%ENABLE_SOME_FEATURE = .FALSE." 
- with pytest.raises( - TypeError, - match="The 'patch' field in realisation '1' must be a dictionary that is " - "compatible with the f90nml python package.", - ): + def test_config_when_realisations_contains_less_than_two_keys(self, config): + """Success case: test config when realisations contains less than two keys.""" + config["realisations"].pop() check_config(config) - # Failure case: type of patch_remove key is not a dictionary - config = get_mock_config() - config["realisations"][1]["patch_remove"] = r"cable_user%ENABLE_SOME_FEATURE" - with pytest.raises( - TypeError, - match="The 'patch_remove' field in realisation '1' must be a dictionary that is " - "compatible with the f90nml python package.", - ): + def test_experiment_from_five_site_test(self, config): + """Success case: test experiment with site id from the five-site-test is valid.""" + config["experiment"] = "AU-Tum" check_config(config) - # Failure case: type of build_script key is not a string - config = get_mock_config() - config["realisations"][1]["build_script"] = ["echo", "hello"] - with pytest.raises( - TypeError, match="The 'build_script' field in realisation '1' must be a string." - ): + def test_config_without_science_configurations_is_valid(self, config): + """Success case: test config without science_configurations is valid.""" + config.pop("science_configurations") check_config(config) - # Failure case: modules key is not a list - config = get_mock_config() - config["modules"] = "netcdf" - with pytest.raises(TypeError, match="The 'modules' key must be a list."): + def test_config_without_fluxsite_key_is_valid(self, config): + """Success case: test config without fluxsite key is valid.""" + config.pop("fluxsite") check_config(config) - # Failure case: experiment key is not a string - config = get_mock_config() - config["experiment"] = 0 - with pytest.raises(TypeError, match="The 'experiment' key must be a string."): + def test_config_without_multiprocessing_key_is_valid(self, config): + """Success case: test config without multiprocessing key is valid.""" + config["fluxsite"].pop("multiprocessing") check_config(config) - # Failure case: type of config["science_configurations"] is not a list - config = get_mock_config() - config["science_configurations"] = r"cable_user%GS_SWITCH = 'medlyn'" - with pytest.raises( - TypeError, match="The 'science_configurations' key must be a list." 
- ): + def test_config_without_pbs_key_is_valid(self, config): + """Success case: test config without pbs key is valid.""" + config["fluxsite"].pop("pbs") check_config(config) - # Failure case: type of config["science_configurations"] is not a list of dict - config = get_mock_config() - config["science_configurations"] = [r"cable_user%GS_SWITCH = 'medlyn'"] - with pytest.raises( - TypeError, - match="Science config settings must be specified using a dictionary " - "that is compatible with the f90nml python package.", - ): + def test_config_without_ncpus_key_is_valid(self, config): + """Success case: test config without ncpus key is valid.""" + config["fluxsite"]["pbs"].pop("ncpus") check_config(config) - # Failure case: type of config["fluxsite"] is not a dict - config = get_mock_config() - config["fluxsite"] = ["ncpus: 16\nmem: 64GB\n"] - with pytest.raises(TypeError, match="The 'fluxsite' key must be a dictionary."): + def test_config_without_mem_key_is_valid(self, config): + """Success case: test config without mem key is valid.""" + config["fluxsite"]["pbs"].pop("mem") check_config(config) - # Failure case: type of config["pbs"] is not a dict - config = get_mock_config() - config["fluxsite"]["pbs"] = "-l ncpus=16" - with pytest.raises(TypeError, match="The 'pbs' key must be a dictionary."): + def test_config_without_walltime_key_is_valid(self, config): + """Success case: test config without walltime key is valid.""" + config["fluxsite"]["pbs"].pop("walltime") check_config(config) - # Failure case: type of config["pbs"]["ncpus"] is not an int - config = get_mock_config() - config["fluxsite"]["pbs"]["ncpus"] = "16" - with pytest.raises(TypeError, match="The 'ncpus' key must be an integer."): + def test_config_without_storage_key_is_valid(self, config): + """Success case: test config without storage key is valid.""" + config["fluxsite"]["pbs"].pop("storage") check_config(config) - # Failure case: type of config["pbs"]["mem"] is not a string - config = get_mock_config() - config["fluxsite"]["pbs"]["mem"] = 64 - with pytest.raises(TypeError, match="The 'mem' key must be a string."): - check_config(config) - - # Failure case: type of config["pbs"]["walltime"] is not a string - config = get_mock_config() - config["fluxsite"]["pbs"]["walltime"] = 60 - with pytest.raises(TypeError, match="The 'walltime' key must be a string."): - check_config(config) - - # Failure case: type of config["pbs"]["storage"] is not a list - config = get_mock_config() - config["fluxsite"]["pbs"]["storage"] = "gdata/foo+gdata/bar" - with pytest.raises(TypeError, match="The 'storage' key must be a list of strings."): - check_config(config) - - # Failure case: type of config["pbs"]["storage"] is not a list of strings - config = get_mock_config() - config["fluxsite"]["pbs"]["storage"] = [1, 2, 3] - with pytest.raises(TypeError, match="The 'storage' key must be a list of strings."): - check_config(config) - - # Failure case: type of config["multiprocessing"] is not a bool - config = get_mock_config() - config["fluxsite"]["multiprocessing"] = 1 - with pytest.raises(TypeError, match="The 'multiprocessing' key must be a boolean."): - check_config(config) - - -def test_read_config(): + def test_missing_required_keys_raises_an_exception(self, config): + """Failure case: test missing required keys raises an exception.""" + config.pop("project") + config.pop("experiment") + with pytest.raises( + ValueError, + match="Keys are missing from the config file: project, experiment", + ): + check_config(config) + + def 
test_config_with_empty_realisations_key_raises_an_exception(self, config): + """Failure case: test config with empty realisations key raises an exception.""" + config["realisations"] = [] + with pytest.raises(ValueError, match="The 'realisations' key cannot be empty."): + check_config(config) + + def test_config_with_invalid_experiment_key_raises_an_exception(self, config): + """Failure case: test config with invalid experiment key raises an exception.""" + config["experiment"] = "foo" + with pytest.raises( + ValueError, + match="The 'experiment' key is invalid.\n" + "Valid experiments are: " + + ", ".join( + list(internal.MEORG_EXPERIMENTS) + + internal.MEORG_EXPERIMENTS["five-site-test"] + ), + ): + check_config(config) + + def test_invalid_experiment_key_raises_exception(self, config): + """Failure case: test invalid experiment key (not a subset of five-site-test).""" + config["experiment"] = "CH-Dav" + with pytest.raises( + ValueError, + match="The 'experiment' key is invalid.\n" + "Valid experiments are: " + + ", ".join( + list(internal.MEORG_EXPERIMENTS) + + internal.MEORG_EXPERIMENTS["five-site-test"] + ), + ): + check_config(config) + + def test_missing_path_key_raises_exception(self, config): + """Failure case: 'path' key is missing in branch configuration.""" + config["realisations"][1].pop("path") + with pytest.raises( + ValueError, match="Realisation '1' must specify the `path` field." + ): + check_config(config) + + def test_empty_science_configurations_raises_exception(self, config): + """Failure case: test empty science_configurations key raises an exception.""" + config["science_configurations"] = [] + with pytest.raises( + ValueError, match="The 'science_configurations' key cannot be empty." + ): + check_config(config) + + def test_project_key_type_error(self, config): + """Failure case: project key is not a string.""" + config["project"] = 123 + with pytest.raises(TypeError, match="The 'project' key must be a string."): + check_config(config) + + def test_realisations_key_type_error(self, config): + """Failure case: realisations key is not a list.""" + config["realisations"] = {"foo": "bar"} + with pytest.raises(TypeError, match="The 'realisations' key must be a list."): + check_config(config) + + def test_realisations_element_type_error(self, config): + """Failure case: realisations key is not a list of dict.""" + config["realisations"] = ["foo"] + with pytest.raises( + TypeError, match="Realisation '0' must be a dictionary object." + ): + check_config(config) + + def test_name_key_type_error(self, config): + """Failure case: type of name is not a string.""" + config["realisations"][1]["name"] = 1234 + with pytest.raises( + TypeError, match="The 'name' field in realisation '1' must be a string." + ): + check_config(config) + + def test_path_key_type_error(self, config): + """Failure case: type of path is not a string.""" + config["realisations"][1]["path"] = 1234 + with pytest.raises( + TypeError, match="The 'path' field in realisation '1' must be a string." + ): + check_config(config) + + def test_revision_key_type_error(self, config): + """Failure case: type of revision key is not an integer.""" + config["realisations"][1]["revision"] = "-1" + with pytest.raises( + TypeError, + match="The 'revision' field in realisation '1' must be an integer.", + ): + check_config(config) + + def test_patch_key_type_error(self, config): + """Failure case: type of patch key is not a dictionary.""" + config["realisations"][1]["patch"] = r"cable_user%ENABLE_SOME_FEATURE = .FALSE." 
+ with pytest.raises( + TypeError, + match="The 'patch' field in realisation '1' must be a dictionary that is " + "compatible with the f90nml python package.", + ): + check_config(config) + + def test_patch_remove_key_type_error(self, config): + """Failure case: type of patch_remove key is not a dictionary.""" + config["realisations"][1]["patch_remove"] = r"cable_user%ENABLE_SOME_FEATURE" + with pytest.raises( + TypeError, + match="The 'patch_remove' field in realisation '1' must be a dictionary that is " + "compatible with the f90nml python package.", + ): + check_config(config) + + def test_build_script_type_error(self, config): + """Failure case: type of build_script key is not a string.""" + config["realisations"][1]["build_script"] = ["echo", "hello"] + with pytest.raises( + TypeError, + match="The 'build_script' field in realisation '1' must be a string.", + ): + check_config(config) + + def test_modules_key_type_error(self, config): + """Failure case: modules key is not a list.""" + config["modules"] = "netcdf" + with pytest.raises(TypeError, match="The 'modules' key must be a list."): + check_config(config) + + def test_experiment_key_type_error(self, config): + """Failure case: experiment key is not a string.""" + config["experiment"] = 0 + with pytest.raises(TypeError, match="The 'experiment' key must be a string."): + check_config(config) + + def test_science_configurations_key_type_error(self, config): + """Failure case: type of config["science_configurations"] is not a list.""" + config["science_configurations"] = r"cable_user%GS_SWITCH = 'medlyn'" + with pytest.raises( + TypeError, match="The 'science_configurations' key must be a list." + ): + check_config(config) + + def test_science_configurations_element_type_error(self, config): + """Failure case: type of config["science_configurations"] is not a list of dict.""" + config["science_configurations"] = [r"cable_user%GS_SWITCH = 'medlyn'"] + with pytest.raises( + TypeError, + match="Science config settings must be specified using a dictionary " + "that is compatible with the f90nml python package.", + ): + check_config(config) + + def test_fluxsite_key_type_error(self, config): + """Failure case: type of config["fluxsite"] is not a dict.""" + config["fluxsite"] = ["ncpus: 16\nmem: 64GB\n"] + with pytest.raises(TypeError, match="The 'fluxsite' key must be a dictionary."): + check_config(config) + + def test_pbs_key_type_error(self, config): + """Failure case: type of config["pbs"] is not a dict.""" + config["fluxsite"]["pbs"] = "-l ncpus=16" + with pytest.raises(TypeError, match="The 'pbs' key must be a dictionary."): + check_config(config) + + def test_ncpus_key_type_error(self, config): + """Failure case: type of config["pbs"]["ncpus"] is not an int.""" + config["fluxsite"]["pbs"]["ncpus"] = "16" + with pytest.raises(TypeError, match="The 'ncpus' key must be an integer."): + check_config(config) + + def test_mem_key_type_error(self, config): + """Failure case: type of config["pbs"]["mem"] is not a string.""" + config["fluxsite"]["pbs"]["mem"] = 64 + with pytest.raises(TypeError, match="The 'mem' key must be a string."): + check_config(config) + + def test_walltime_key_type_error(self, config): + """Failure case: type of config["pbs"]["walltime"] is not a string.""" + config["fluxsite"]["pbs"]["walltime"] = 60 + with pytest.raises(TypeError, match="The 'walltime' key must be a string."): + check_config(config) + + def test_storage_key_type_error(self, config): + """Failure case: type of config["pbs"]["storage"] is not a 
list.""" + config["fluxsite"]["pbs"]["storage"] = "gdata/foo+gdata/bar" + with pytest.raises( + TypeError, match="The 'storage' key must be a list of strings." + ): + check_config(config) + + def test_storage_element_type_error(self, config): + """Failure case: type of config["pbs"]["storage"] is not a list of strings.""" + config["fluxsite"]["pbs"]["storage"] = [1, 2, 3] + with pytest.raises( + TypeError, match="The 'storage' key must be a list of strings." + ): + check_config(config) + + def test_multiprocessing_key_type_error(self, config): + """Failure case: type of config["multiprocessing"] is not a bool.""" + config["fluxsite"]["multiprocessing"] = 1 + with pytest.raises( + TypeError, match="The 'multiprocessing' key must be a boolean." + ): + check_config(config) + + +class TestReadConfig: """Tests for `read_config()`.""" - # Success case: write config to file, then read config from file - config = get_mock_config() - filename = TMP_DIR / "config-barebones.yaml" - with filename.open("w", encoding="utf-8") as file: - yaml.dump(config, file) + def test_read_config(self, config): + """Success case: write config to file, then read config from file.""" + filename = Path("config-barebones.yaml") + + with filename.open("w", encoding="utf-8") as file: + yaml.dump(config, file) - res = read_config(filename) - filename.unlink() - assert config == res + res = read_config(filename) + filename.unlink() + assert config == res diff --git a/tests/test_fluxsite.py b/tests/test_fluxsite.py index d7dc9a34..7c16ab3f 100644 --- a/tests/test_fluxsite.py +++ b/tests/test_fluxsite.py @@ -1,4 +1,9 @@ -"""`pytest` tests for `fluxsite.py`.""" +"""`pytest` tests for `fluxsite.py`. + +Note: explicit teardown for generated files and directories are not required as +the working directory used for testing is cleaned up in the `_run_around_tests` +pytest autouse fixture. 
+""" import contextlib import io @@ -20,482 +25,526 @@ patch_remove_namelist, ) from benchcab.repository import CableRepository -from benchcab.utils.subprocess import SubprocessWrapperInterface - -from .common import MOCK_CWD, MockSubprocessWrapper, get_mock_config -def get_mock_task( - subprocess_handler: SubprocessWrapperInterface = MockSubprocessWrapper(), -) -> Task: - """Returns a mock `Task` instance.""" - repo = CableRepository( +@pytest.fixture() +def repo(mock_cwd, mock_subprocess_handler): + """Returns a `CableRepository` instance.""" + _repo = CableRepository( repo_id=1, path="path/to/test-branch", patch={"cable": {"some_branch_specific_setting": True}}, ) - repo.subprocess_handler = subprocess_handler - repo.root_dir = MOCK_CWD + _repo.subprocess_handler = mock_subprocess_handler + _repo.root_dir = mock_cwd + return _repo - task = Task( + +@pytest.fixture() +def task(repo, mock_cwd, mock_subprocess_handler): + """Returns a mock `Task` instance.""" + _task = Task( repo=repo, met_forcing_file="forcing-file.nc", sci_conf_id=0, sci_config={"cable": {"some_setting": True}}, ) - task.subprocess_handler = subprocess_handler - task.root_dir = MOCK_CWD + _task.subprocess_handler = mock_subprocess_handler + _task.root_dir = mock_cwd + return _task - return task +class TestGetTaskName: + """tests for `Task.get_task_name()`.""" -def setup_mock_namelists_directory(): - """Setup a mock namelists directory in MOCK_CWD.""" - Path(MOCK_CWD, internal.NAMELIST_DIR).mkdir() + def test_task_name_convention(self, task): + """Success case: check task name convention.""" + assert task.get_task_name() == "forcing-file_R1_S0" - cable_nml_path = Path(MOCK_CWD, internal.NAMELIST_DIR, internal.CABLE_NML) - cable_nml_path.touch() - assert cable_nml_path.exists() - cable_soil_nml_path = Path(MOCK_CWD, internal.NAMELIST_DIR, internal.CABLE_SOIL_NML) - cable_soil_nml_path.touch() - assert cable_soil_nml_path.exists() +class TestGetLogFilename: + """Tests for `Task.get_log_filename()`.""" - cable_vegetation_nml_path = Path( - MOCK_CWD, internal.NAMELIST_DIR, internal.CABLE_VEGETATION_NML - ) - cable_vegetation_nml_path.touch() - assert cable_vegetation_nml_path.exists() + def test_log_filename_convention(self, task): + """Success case: check log file name convention.""" + assert task.get_log_filename() == "forcing-file_R1_S0_log.txt" -def setup_mock_run_directory(task: Task): - """Setup mock run directory for a single task.""" - task_dir = MOCK_CWD / internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() - task_dir.mkdir(parents=True) - output_dir = MOCK_CWD / internal.FLUXSITE_DIRS["OUTPUT"] - output_dir.mkdir(parents=True) - log_dir = MOCK_CWD / internal.FLUXSITE_DIRS["LOG"] - log_dir.mkdir(parents=True) +class TestGetOutputFilename: + """Tests for `Task.get_output_filename()`.""" + def test_output_filename_convention(self, task): + """Success case: check output file name convention.""" + assert task.get_output_filename() == "forcing-file_R1_S0_out.nc" -def do_mock_checkout_and_build(): - """Setup mock repository that has been checked out and built.""" - Path(MOCK_CWD, internal.SRC_DIR, "test-branch", "offline").mkdir(parents=True) - cable_exe_path = Path( - MOCK_CWD, internal.SRC_DIR, "test-branch", "offline", internal.CABLE_EXE - ) - cable_exe_path.touch() - assert cable_exe_path.exists() +class TestFetchFiles: + """Tests for `Task.fetch_files()`.""" + @pytest.fixture(autouse=True) + def _setup(self, task): + """Setup precondition for `Task.fetch_files()`.""" + internal.NAMELIST_DIR.mkdir() + 
(internal.NAMELIST_DIR / internal.CABLE_NML).touch() + (internal.NAMELIST_DIR / internal.CABLE_SOIL_NML).touch() + (internal.NAMELIST_DIR / internal.CABLE_VEGETATION_NML).touch() -def do_mock_run(task: Task): - """Make mock log files and output files as if benchcab has just been run.""" - output_path = Path( - MOCK_CWD, internal.FLUXSITE_DIRS["OUTPUT"], task.get_output_filename() - ) - output_path.touch() - assert output_path.exists() - - log_path = Path(MOCK_CWD, internal.FLUXSITE_DIRS["LOG"], task.get_log_filename()) - log_path.touch() - assert log_path.exists() - - -def test_get_task_name(): - """Tests for `get_task_name()`.""" - # Success case: check task name convention - task = get_mock_task() - assert task.get_task_name() == "forcing-file_R1_S0" - - -def test_get_log_filename(): - """Tests for `get_log_filename()`.""" - # Success case: check log file name convention - task = get_mock_task() - assert task.get_log_filename() == "forcing-file_R1_S0_log.txt" - - -def test_get_output_filename(): - """Tests for `get_output_filename()`.""" - # Success case: check output file name convention - task = get_mock_task() - assert task.get_output_filename() == "forcing-file_R1_S0_out.nc" - - -def test_fetch_files(): - """Tests for `fetch_files()`.""" - # Success case: fetch files required to run CABLE - task = get_mock_task() - - setup_mock_namelists_directory() - setup_mock_run_directory(task) - do_mock_checkout_and_build() - - task.fetch_files() - - assert Path( - MOCK_CWD, internal.FLUXSITE_DIRS["TASKS"], task.get_task_name(), internal.CABLE_NML - ).exists() - assert Path( - MOCK_CWD, - internal.FLUXSITE_DIRS["TASKS"], - task.get_task_name(), - internal.CABLE_VEGETATION_NML, - ).exists() - assert Path( - MOCK_CWD, - internal.FLUXSITE_DIRS["TASKS"], - task.get_task_name(), - internal.CABLE_SOIL_NML, - ).exists() - assert Path( - MOCK_CWD, internal.FLUXSITE_DIRS["TASKS"], task.get_task_name(), internal.CABLE_EXE - ).exists() - - -def test_clean_task(): - """Tests for `clean_task()`.""" - # Success case: fetch then clean files - task = get_mock_task() - - setup_mock_namelists_directory() - setup_mock_run_directory(task) - do_mock_checkout_and_build() - - task.fetch_files() - - do_mock_run(task) - - task.clean_task() - - assert not Path( - MOCK_CWD, internal.FLUXSITE_DIRS["TASKS"], task.get_task_name(), internal.CABLE_NML - ).exists() - assert not Path( - MOCK_CWD, - internal.FLUXSITE_DIRS["TASKS"], - task.get_task_name(), - internal.CABLE_VEGETATION_NML, - ).exists() - assert not Path( - MOCK_CWD, - internal.FLUXSITE_DIRS["TASKS"], - task.get_task_name(), - internal.CABLE_SOIL_NML, - ).exists() - assert not Path( - MOCK_CWD, internal.FLUXSITE_DIRS["TASKS"], task.get_task_name(), internal.CABLE_EXE - ).exists() - assert not Path( - MOCK_CWD, internal.FLUXSITE_DIRS["OUTPUT"], task.get_output_filename() - ).exists() - assert not Path( - MOCK_CWD, internal.FLUXSITE_DIRS["LOG"], task.get_log_filename() - ).exists() - - -def test_patch_namelist(): + task_name = task.get_task_name() + (internal.FLUXSITE_DIRS["TASKS"] / task_name).mkdir(parents=True) + (internal.FLUXSITE_DIRS["OUTPUT"]).mkdir(parents=True) + (internal.FLUXSITE_DIRS["LOG"]).mkdir(parents=True) + + exe_build_dir = internal.SRC_DIR / "test-branch" / "offline" + exe_build_dir.mkdir(parents=True) + (exe_build_dir / internal.CABLE_EXE).touch() + + def test_required_files_are_copied_to_task_dir(self, task): + """Success case: test required files are copied to task directory.""" + task.fetch_files() + task_dir = internal.FLUXSITE_DIRS["TASKS"] / 
task.get_task_name() + assert (task_dir / internal.CABLE_NML).exists() + assert (task_dir / internal.CABLE_VEGETATION_NML).exists() + assert (task_dir / internal.CABLE_SOIL_NML).exists() + assert (task_dir / internal.CABLE_EXE).exists() + + +class TestCleanTask: + """Tests for `Task.clean_task()`.""" + + @pytest.fixture(autouse=True) + def _setup(self, task): + """Setup precondition for `Task.clean_task()`.""" + task_dir = internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() + task_dir.mkdir(parents=True) + (task_dir / internal.CABLE_NML).touch() + (task_dir / internal.CABLE_VEGETATION_NML).touch() + (task_dir / internal.CABLE_SOIL_NML).touch() + (task_dir / internal.CABLE_EXE).touch() + + internal.FLUXSITE_DIRS["OUTPUT"].mkdir(parents=True) + (internal.FLUXSITE_DIRS["OUTPUT"] / task.get_output_filename()).touch() + + internal.FLUXSITE_DIRS["LOG"].mkdir(parents=True) + (internal.FLUXSITE_DIRS["LOG"] / task.get_log_filename()).touch() + + def test_clean_files(self, task): + """Success case: clean files produced from run.""" + task_dir = internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() + task.clean_task() + assert not (task_dir / internal.CABLE_NML).exists() + assert not (task_dir / internal.CABLE_VEGETATION_NML).exists() + assert not (task_dir / internal.CABLE_SOIL_NML).exists() + assert not (task_dir / internal.CABLE_EXE).exists() + assert not ( + internal.FLUXSITE_DIRS["OUTPUT"] / task.get_output_filename() + ).exists() + assert not (internal.FLUXSITE_DIRS["LOG"] / task.get_log_filename()).exists() + + +class TestPatchNamelist: """Tests for `patch_namelist()`.""" - nml_path = MOCK_CWD / "test.nml" - - # Success case: patch non-existing namelist file - assert not nml_path.exists() - patch = {"cable": {"file": "/path/to/file", "bar": 123}} - patch_namelist(nml_path, patch) - assert f90nml.read(nml_path) == patch - - # Success case: patch non-empty namelist file - patch_namelist(nml_path, {"cable": {"some": {"parameter": True}, "bar": 456}}) - assert f90nml.read(nml_path) == { - "cable": { - "file": "/path/to/file", - "bar": 456, - "some": {"parameter": True}, + + @pytest.fixture() + def nml_path(self): + """Return a path to a namelist file used for testing.""" + return Path("test.nml") + + def test_patch_on_non_existing_namelist_file(self, nml_path): + """Success case: patch non-existing namelist file.""" + patch = {"cable": {"file": "/path/to/file", "bar": 123}} + patch_namelist(nml_path, patch) + assert f90nml.read(nml_path) == patch + + def test_patch_on_non_empty_namelist_file(self, nml_path): + """Success case: patch non-empty namelist file.""" + f90nml.write({"cable": {"file": "/path/to/file", "bar": 123}}, nml_path) + patch_namelist(nml_path, {"cable": {"some": {"parameter": True}, "bar": 456}}) + assert f90nml.read(nml_path) == { + "cable": { + "file": "/path/to/file", + "bar": 456, + "some": {"parameter": True}, + } } - } - # Success case: empty patch does nothing - prev = f90nml.read(nml_path) - patch_namelist(nml_path, {}) - assert f90nml.read(nml_path) == prev + def test_empty_patch_does_nothing(self, nml_path): + """Success case: empty patch does nothing.""" + f90nml.write({"cable": {"file": "/path/to/file", "bar": 123}}, nml_path) + prev = f90nml.read(nml_path) + patch_namelist(nml_path, {}) + assert f90nml.read(nml_path) == prev -def test_patch_remove_namelist(): +class TestPatchRemoveNamelist: """Tests for `patch_remove_namelist()`.""" - nml_path = MOCK_CWD / "test.nml" - - # Success case: remove a namelist parameter from derrived type - nml = {"cable": 
{"cable_user": {"some_parameter": True}}} - f90nml.write(nml, nml_path) - patch_remove_namelist(nml_path, nml) - assert not f90nml.read(nml_path)["cable"] - nml_path.unlink() - - # Success case: test existing namelist parameters are preserved - # when removing a namelist parameter - to_remove = {"cable": {"cable_user": {"new_feature": True}}} - nml = {"cable": {"cable_user": {"some_parameter": True, "new_feature": True}}} - f90nml.write(nml, nml_path) - patch_remove_namelist(nml_path, to_remove) - assert f90nml.read(nml_path) == {"cable": {"cable_user": {"some_parameter": True}}} - nml_path.unlink() - - # Success case: empty patch_remove does nothing - nml = {"cable": {"cable_user": {"some_parameter": True}}} - f90nml.write(nml, nml_path) - patch_remove_namelist(nml_path, {}) - assert f90nml.read(nml_path) == nml - nml_path.unlink() - - # Failure case: patch_remove should raise KeyError when namelist parameters don't exist in - # nml_path - to_remove = {"cable": {"foo": {"bar": True}}} - nml = {"cable": {"cable_user": {"some_parameter": True, "new_feature": True}}} - f90nml.write(nml, nml_path) - with pytest.raises( - KeyError, - match=f"Namelist parameters specified in `patch_remove` do not exist in {nml_path.name}.", - ): - patch_remove_namelist(nml_path, to_remove) - nml_path.unlink(missing_ok=True) - - -def test_setup_task(): - """Tests for `setup_task()`.""" - task = get_mock_task() - task_dir = Path(MOCK_CWD, internal.FLUXSITE_DIRS["TASKS"], task.get_task_name()) - - setup_mock_namelists_directory() - setup_mock_run_directory(task) - do_mock_checkout_and_build() - - # Success case: test all settings are patched into task namelist file - task.setup_task() - res_nml = f90nml.read(str(task_dir / internal.CABLE_NML)) - assert res_nml["cable"] == { - "filename": { - "met": str(internal.MET_DIR / "forcing-file.nc"), - "out": str( - MOCK_CWD / internal.FLUXSITE_DIRS["OUTPUT"] / task.get_output_filename() - ), - "log": str(MOCK_CWD / internal.FLUXSITE_DIRS["LOG"] / task.get_log_filename()), - "restart_out": " ", - "type": str(MOCK_CWD / internal.GRID_FILE), - }, - "output": {"restart": False}, - "fixedco2": internal.CABLE_FIXED_CO2_CONC, - "casafile": { - "phen": str(MOCK_CWD / internal.PHEN_FILE), - "cnpbiome": str(MOCK_CWD / internal.CNPBIOME_FILE), - }, - "spinup": False, - "some_setting": True, - "some_branch_specific_setting": True, - } - - # Success case: test non-verbose output - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.setup_task() - assert not buf.getvalue() - - # Success case: test verbose output - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.setup_task(verbose=True) - assert buf.getvalue() == ( - "Setting up task: forcing-file_R1_S0\n" - "Creating runs/fluxsite/tasks/forcing-file_R1_S0 directory\n" - " Cleaning task\n" - f" Copying namelist files from {MOCK_CWD}/namelists to " - f"{MOCK_CWD / 'runs/fluxsite/tasks/forcing-file_R1_S0'}\n" - f" Copying CABLE executable from {MOCK_CWD}/src/test-branch/" - f"offline/cable to {MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable\n" - " Adding base configurations to CABLE namelist file " - f"{MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" - " Adding science configurations to CABLE namelist file " - f"{MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" - " Adding branch specific configurations to CABLE namelist file " - f"{MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" - ) + @pytest.fixture() + def nml(self): + """Return a namelist dictionary used for 
testing.""" + return { + "cable": { + "cable_user": { + "some_parameter": True, + "new_feature": True, + }, + }, + } -def test_run_cable(): - """Tests for `run_cable()`.""" - mock_subprocess = MockSubprocessWrapper() - task = get_mock_task(subprocess_handler=mock_subprocess) - task_dir = MOCK_CWD / internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() - task_dir.mkdir(parents=True) + @pytest.fixture() + def nml_path(self, nml): + """Create a namelist file and return its path.""" + _nml_path = Path("test.nml") + f90nml.write(nml, _nml_path) + return _nml_path + + def test_remove_namelist_parameter_from_derived_type(self, nml_path): + """Success case: remove a namelist parameter from derrived type.""" + patch_remove_namelist( + nml_path, {"cable": {"cable_user": {"new_feature": True}}} + ) + assert f90nml.read(nml_path) == { + "cable": {"cable_user": {"some_parameter": True}} + } - # Success case: run CABLE executable in subprocess - task.run_cable() - assert f"./{internal.CABLE_EXE} {internal.CABLE_NML}" in mock_subprocess.commands - stdout_file = task_dir / internal.CABLE_STDOUT_FILENAME - assert stdout_file.exists() + def test_empty_patch_remove_does_nothing(self, nml_path, nml): + """Success case: empty patch_remove does nothing.""" + patch_remove_namelist(nml_path, {}) + assert f90nml.read(nml_path) == nml + + def test_key_error_raised_for_non_existent_namelist_parameter(self, nml_path): + """Failure case: test patch_remove KeyError exeption.""" + with pytest.raises( + KeyError, + match=f"Namelist parameters specified in `patch_remove` do not exist in {nml_path.name}.", + ): + patch_remove_namelist(nml_path, {"cable": {"foo": {"bar": True}}}) + + +class TestSetupTask: + """Tests for `Task.setup_task()`.""" + + @pytest.fixture(autouse=True) + def _setup(self, task): + """Setup precondition for `Task.setup_task()`.""" + (internal.NAMELIST_DIR).mkdir() + (internal.NAMELIST_DIR / internal.CABLE_NML).touch() + (internal.NAMELIST_DIR / internal.CABLE_SOIL_NML).touch() + (internal.NAMELIST_DIR / internal.CABLE_VEGETATION_NML).touch() + + task_name = task.get_task_name() + (internal.FLUXSITE_DIRS["TASKS"] / task_name).mkdir(parents=True) + (internal.FLUXSITE_DIRS["OUTPUT"]).mkdir(parents=True) + (internal.FLUXSITE_DIRS["LOG"]).mkdir(parents=True) + + exe_build_dir = internal.SRC_DIR / "test-branch" / "offline" + exe_build_dir.mkdir(parents=True) + (exe_build_dir / internal.CABLE_EXE).touch() + + def test_all_settings_are_patched_into_namelist_file(self, task, mock_cwd): + """Success case: test all settings are patched into task namelist file.""" + task.setup_task() + task_dir = internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() + res_nml = f90nml.read(str(task_dir / internal.CABLE_NML)) + assert res_nml["cable"] == { + "filename": { + "met": str(internal.MET_DIR / "forcing-file.nc"), + "out": str( + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / task.get_output_filename() + ), + "log": str( + mock_cwd / internal.FLUXSITE_DIRS["LOG"] / task.get_log_filename() + ), + "restart_out": " ", + "type": str(mock_cwd / internal.GRID_FILE), + }, + "output": {"restart": False}, + "fixedco2": internal.CABLE_FIXED_CO2_CONC, + "casafile": { + "phen": str(mock_cwd / internal.PHEN_FILE), + "cnpbiome": str(mock_cwd / internal.CNPBIOME_FILE), + }, + "spinup": False, + "some_setting": True, + "some_branch_specific_setting": True, + } - # Success case: test non-verbose output - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.run_cable() - assert not buf.getvalue() + # TODO(Sean) fix for issue 
https://github.com/CABLE-LSM/benchcab/issues/162 + @pytest.mark.skip( + reason="""This will always fail since `parametrize()` parameters are + dependent on the `mock_cwd` fixture.""" + ) + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + ( + False, + "", + ), + ( + True, + "Setting up task: forcing-file_R1_S0\n" + "Creating runs/fluxsite/tasks/forcing-file_R1_S0 directory\n" + " Cleaning task\n" + " Copying namelist files from namelists to " + "runs/fluxsite/tasks/forcing-file_R1_S0\n" + " Copying CABLE executable from src/test-branch/" + "offline/cable to runs/fluxsite/tasks/forcing-file_R1_S0/cable\n" + " Adding base configurations to CABLE namelist file " + "runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" + " Adding science configurations to CABLE namelist file " + "runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" + " Adding branch specific configurations to CABLE namelist file " + "runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n", + ), + ], + ) + def test_standard_output(self, task, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + task.setup_task(verbose=verbosity) + assert buf.getvalue() == expected - # Success case: test verbose output - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.run_cable(verbose=True) - assert not buf.getvalue() - # Failure case: raise CableError on subprocess non-zero exit code - mock_subprocess.error_on_call = True - with pytest.raises(CableError): - task.run_cable() +class TestRunCable: + """Tests for `Task.run_cable()`.""" + @pytest.fixture(autouse=True) + def _setup(self, task): + """Setup precondition for `Task.run_cable()`.""" + task_dir = internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() + task_dir.mkdir(parents=True) -def test_add_provenance_info(): - """Tests for `add_provenance_info()`.""" - mock_subprocess = MockSubprocessWrapper() - task = get_mock_task(subprocess_handler=mock_subprocess) - task_dir = MOCK_CWD / internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() - task_dir.mkdir(parents=True) - FLUXSITE_OUTPUT_DIR = MOCK_CWD / internal.FLUXSITE_DIRS["OUTPUT"] - FLUXSITE_OUTPUT_DIR.mkdir() - - # Create mock namelist file in task directory: - mock_namelist = { - "cable": {"filename": {"met": "/path/to/met/file", "foo": 123}, "bar": True} - } - f90nml.write(mock_namelist, task_dir / internal.CABLE_NML) - - # Create mock netcdf output file as if CABLE had just been run: - nc_output_path = FLUXSITE_OUTPUT_DIR / task.get_output_filename() - netCDF4.Dataset(nc_output_path, "w") - - # Success case: add global attributes to netcdf file - task.add_provenance_info() - with netCDF4.Dataset(str(nc_output_path), "r") as nc_output: - atts = vars(nc_output) - assert atts["cable_branch"] == mock_subprocess.stdout - assert atts["svn_revision_number"] == mock_subprocess.stdout - assert atts["benchcab_version"] == __version__ - assert atts[r"filename%met"] == mock_namelist["cable"]["filename"]["met"] - assert atts[r"filename%foo"] == mock_namelist["cable"]["filename"]["foo"] - assert atts[r"bar"] == ".true." 
- - # Success case: test non-verbose output - with contextlib.redirect_stdout(io.StringIO()) as buf: + def test_cable_execution(self, task, mock_subprocess_handler): + """Success case: run CABLE executable in subprocess.""" + task_dir = internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() + task.run_cable() + assert ( + f"./{internal.CABLE_EXE} {internal.CABLE_NML}" + in mock_subprocess_handler.commands + ) + assert (task_dir / internal.CABLE_STDOUT_FILENAME).exists() + + @pytest.mark.parametrize(("verbosity", "expected"), [(False, ""), (True, "")]) + def test_standard_output(self, task, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + task.run_cable(verbose=verbosity) + assert buf.getvalue() == expected + + def test_cable_error_exception(self, task, mock_subprocess_handler): + """Failure case: raise CableError on subprocess non-zero exit code.""" + mock_subprocess_handler.error_on_call = True + with pytest.raises(CableError): + task.run_cable() + + +class TestAddProvenanceInfo: + """Tests for `Task.add_provenance_info()`.""" + + @pytest.fixture() + def nml(self): + """Return a namelist dictionary used for testing.""" + return { + "cable": { + "filename": {"met": "/path/to/met/file", "foo": 123}, + "bar": True, + } + } + + @pytest.fixture() + def nc_output_path(self, task): + """Create and return a netcdf output file as if CABLE had just been run. + + Return value is the path to the file. + """ + _nc_output_path = internal.FLUXSITE_DIRS["OUTPUT"] / task.get_output_filename() + netCDF4.Dataset(_nc_output_path, "w") + return _nc_output_path + + @pytest.fixture(autouse=True) + def _setup(self, task, nml): + """Setup precondition for `Task.add_provenance_info()`.""" + task_dir = internal.FLUXSITE_DIRS["TASKS"] / task.get_task_name() + task_dir.mkdir(parents=True) + fluxsite_output_dir = internal.FLUXSITE_DIRS["OUTPUT"] + fluxsite_output_dir.mkdir() + + # Create mock namelist file in task directory: + f90nml.write(nml, task_dir / internal.CABLE_NML) + + def test_netcdf_global_attributes( + self, task, nc_output_path, mock_subprocess_handler, nml + ): + """Success case: add global attributes to netcdf file.""" task.add_provenance_info() - assert not buf.getvalue() - - # Success case: test verbose output - with contextlib.redirect_stdout(io.StringIO()) as buf: - task.add_provenance_info(verbose=True) - assert buf.getvalue() == ( - "Adding attributes to output file: " - f"{MOCK_CWD}/runs/fluxsite/outputs/forcing-file_R1_S0_out.nc\n" + with netCDF4.Dataset(str(nc_output_path), "r") as nc_output: + atts = vars(nc_output) + assert atts["cable_branch"] == mock_subprocess_handler.stdout + assert atts["svn_revision_number"] == mock_subprocess_handler.stdout + assert atts["benchcab_version"] == __version__ + assert atts[r"filename%met"] == nml["cable"]["filename"]["met"] + assert atts[r"filename%foo"] == nml["cable"]["filename"]["foo"] + assert atts[r"bar"] == ".true." 
+ + # TODO(Sean) fix for issue https://github.com/CABLE-LSM/benchcab/issues/162 + @pytest.mark.skip( + reason="""This will always fail since `parametrize()` parameters are + dependent on the `mock_cwd` fixture.""" ) + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + ( + False, + "", + ), + ( + True, + "Adding attributes to output file: " + "runs/fluxsite/outputs/forcing-file_R1_S0_out.nc\n", + ), + ], + ) + def test_standard_output(self, task, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + task.add_provenance_info(verbose=verbosity) + assert buf.getvalue() == expected -def test_get_fluxsite_tasks(): +class TestGetFluxsiteTasks: """Tests for `get_fluxsite_tasks()`.""" - # Success case: get task list for two branches, two fluxsite met - # forcing files and two science configurations - config = get_mock_config() - repos = [ - CableRepository(**branch_config, repo_id=id) - for id, branch_config in enumerate(config["realisations"]) - ] - met_forcing_file_a, met_forcing_file_b = "foo", "bar" - sci_a, sci_b = config["science_configurations"] - tasks = get_fluxsite_tasks( - repos, - config["science_configurations"], - [met_forcing_file_a, met_forcing_file_b], - ) - assert [(task.repo, task.met_forcing_file, task.sci_config) for task in tasks] == [ - (repos[0], met_forcing_file_a, sci_a), - (repos[0], met_forcing_file_a, sci_b), - (repos[0], met_forcing_file_b, sci_a), - (repos[0], met_forcing_file_b, sci_b), - (repos[1], met_forcing_file_a, sci_a), - (repos[1], met_forcing_file_a, sci_b), - (repos[1], met_forcing_file_b, sci_a), - (repos[1], met_forcing_file_b, sci_b), - ] - - -def test_get_fluxsite_comparisons(): + + @pytest.fixture() + def repos(self, config): + """Return a list of `CableRepository` instances used for testing.""" + return [ + CableRepository(**branch_config, repo_id=id) + for id, branch_config in enumerate(config["realisations"]) + ] + + @pytest.fixture() + def met_forcings(self): + """Return a list of forcing file names used for testing.""" + return ["foo", "bar"] + + @pytest.fixture() + def science_configurations(self, config): + """Return a list of science configurations used for testing.""" + return config["science_configurations"] + + def test_task_product_across_branches_forcings_and_configurations( + self, repos, met_forcings, science_configurations + ): + """Success case: test task product across branches, forcings and configurations.""" + tasks = get_fluxsite_tasks( + repos=repos, + science_configurations=science_configurations, + fluxsite_forcing_file_names=met_forcings, + ) + assert [ + (task.repo, task.met_forcing_file, task.sci_config) for task in tasks + ] == [ + (repos[0], met_forcings[0], science_configurations[0]), + (repos[0], met_forcings[0], science_configurations[1]), + (repos[0], met_forcings[1], science_configurations[0]), + (repos[0], met_forcings[1], science_configurations[1]), + (repos[1], met_forcings[0], science_configurations[0]), + (repos[1], met_forcings[0], science_configurations[1]), + (repos[1], met_forcings[1], science_configurations[0]), + (repos[1], met_forcings[1], science_configurations[1]), + ] + + +class TestGetFluxsiteComparisons: """Tests for `get_fluxsite_comparisons()`.""" - output_dir = MOCK_CWD / internal.FLUXSITE_DIRS["OUTPUT"] - - # Success case: comparisons for two branches with two tasks - # met0_S0_R0 met0_S0_R1 - task_a = Task( - repo=CableRepository("path/to/repo_a", repo_id=0), - met_forcing_file="foo.nc", - sci_config={"foo": "bar"}, - 
sci_conf_id=0, - ) - task_b = Task( - repo=CableRepository("path/to/repo_b", repo_id=1), - met_forcing_file="foo.nc", - sci_config={"foo": "bar"}, - sci_conf_id=0, - ) - tasks = [task_a, task_b] - comparisons = get_fluxsite_comparisons(tasks, root_dir=MOCK_CWD) - assert len(comparisons) == math.comb(len(tasks), 2) - assert comparisons[0].files == ( - output_dir / task_a.get_output_filename(), - output_dir / task_b.get_output_filename(), - ) - assert comparisons[0].task_name == "foo_S0_R0_R1" - - # Success case: comparisons for three branches with three tasks - # met0_S0_R0 met0_S0_R1 met0_S0_R2 - task_a = Task( - repo=CableRepository("path/to/repo_a", repo_id=0), - met_forcing_file="foo.nc", - sci_config={"foo": "bar"}, - sci_conf_id=0, - ) - task_b = Task( - repo=CableRepository("path/to/repo_b", repo_id=1), - met_forcing_file="foo.nc", - sci_config={"foo": "bar"}, - sci_conf_id=0, - ) - task_c = Task( - repo=CableRepository("path/to/repo_b", repo_id=2), - met_forcing_file="foo.nc", - sci_config={"foo": "bar"}, - sci_conf_id=0, - ) - tasks = [task_a, task_b, task_c] - comparisons = get_fluxsite_comparisons(tasks, root_dir=MOCK_CWD) - assert len(comparisons) == math.comb(len(tasks), 2) - assert comparisons[0].files == ( - output_dir / task_a.get_output_filename(), - output_dir / task_b.get_output_filename(), - ) - assert comparisons[1].files == ( - output_dir / task_a.get_output_filename(), - output_dir / task_c.get_output_filename(), - ) - assert comparisons[2].files == ( - output_dir / task_b.get_output_filename(), - output_dir / task_c.get_output_filename(), - ) - assert comparisons[0].task_name == "foo_S0_R0_R1" - assert comparisons[1].task_name == "foo_S0_R0_R2" - assert comparisons[2].task_name == "foo_S0_R1_R2" + + def test_comparisons_for_two_branches_with_two_tasks(self, mock_cwd): + """Success case: comparisons for two branches with two tasks.""" + tasks = [ + Task( + repo=CableRepository("path/to/repo", repo_id=repo_id), + met_forcing_file="foo.nc", + sci_config={"foo": "bar"}, + sci_conf_id=0, + ) + for repo_id in range(2) + ] + comparisons = get_fluxsite_comparisons(tasks, root_dir=mock_cwd) + n_repos, n_science_configurations, n_met_forcings = 2, 1, 1 + assert ( + len(comparisons) + == math.comb(n_repos, 2) * n_science_configurations * n_met_forcings + ) + assert comparisons[0].files == ( + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[0].get_output_filename(), + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[1].get_output_filename(), + ) + assert comparisons[0].task_name == "foo_S0_R0_R1" + + def test_comparisons_for_three_branches_with_three_tasks(self, mock_cwd): + """Success case: comparisons for three branches with three tasks.""" + tasks = [ + Task( + repo=CableRepository("path/to/repo", repo_id=repo_id), + met_forcing_file="foo.nc", + sci_config={"foo": "bar"}, + sci_conf_id=0, + ) + for repo_id in range(3) + ] + comparisons = get_fluxsite_comparisons(tasks, root_dir=mock_cwd) + n_repos, n_science_configurations, n_met_forcings = 3, 1, 1 + assert ( + len(comparisons) + == math.comb(n_repos, 2) * n_science_configurations * n_met_forcings + ) + assert comparisons[0].files == ( + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[0].get_output_filename(), + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[1].get_output_filename(), + ) + assert comparisons[1].files == ( + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[0].get_output_filename(), + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[2].get_output_filename(), + ) + 
assert comparisons[2].files == ( + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[1].get_output_filename(), + mock_cwd + / internal.FLUXSITE_DIRS["OUTPUT"] + / tasks[2].get_output_filename(), + ) + assert comparisons[0].task_name == "foo_S0_R0_R1" + assert comparisons[1].task_name == "foo_S0_R0_R2" + assert comparisons[2].task_name == "foo_S0_R1_R2" -def test_get_comparison_name(): +class TestGetComparisonName: """Tests for `get_comparison_name()`.""" - # Success case: check comparison name convention - assert ( - get_comparison_name( - CableRepository("path/to/repo", repo_id=0), - CableRepository("path/to/repo", repo_id=1), - met_forcing_file="foo.nc", - sci_conf_id=0, + + def test_comparison_name_convention(self): + """Success case: check comparison name convention.""" + assert ( + get_comparison_name( + CableRepository("path/to/repo", repo_id=0), + CableRepository("path/to/repo", repo_id=1), + met_forcing_file="foo.nc", + sci_conf_id=0, + ) + == "foo_S0_R0_R1" ) - == "foo_S0_R0_R1" - ) diff --git a/tests/test_fs.py b/tests/test_fs.py index fa10a3d1..be55cf19 100644 --- a/tests/test_fs.py +++ b/tests/test_fs.py @@ -1,54 +1,59 @@ -"""`pytest` tests for `utils/fs.py`.""" +"""`pytest` tests for `utils/fs.py`. + +Note: explicit teardown for generated files and directories is not required as +the working directory used for testing is cleaned up in the `_run_around_tests` +pytest autouse fixture. +""" -import pytest -import io import contextlib +import io from pathlib import Path -from benchcab.utils.fs import next_path, mkdir +import pytest -from .common import MOCK_CWD +from benchcab.utils.fs import mkdir, next_path -def test_next_path(): +class TestNextPath: """Tests for `next_path()`.""" - pattern = "rev_number-*.log" - - # Success case: get next path in 'empty' CWD - assert len(list(MOCK_CWD.glob(pattern))) == 0 - ret = next_path(MOCK_CWD, pattern) - assert ret == "rev_number-1.log" - - # Success case: get next path in 'non-empty' CWD - ret_path = MOCK_CWD / ret - ret_path.touch() - assert len(list(MOCK_CWD.glob(pattern))) == 1 - ret = next_path(MOCK_CWD, pattern) - assert ret == "rev_number-2.log" - - -@pytest.mark.parametrize("test_path,kwargs", [ - (Path(MOCK_CWD, "test1"), {}), - (Path(MOCK_CWD, "test1/test2"), dict(parents=True)), - (Path(MOCK_CWD, "test1/test2"), dict(parents=True, exist_ok=True)), -]) -def test_mkdir(test_path, kwargs): - """Tests for `mkdir()`.""" - # Success case: create a test directory - mkdir(test_path, **kwargs) - assert test_path.exists() - test_path.rmdir() + @pytest.fixture() + def pattern(self): + """Return a file pattern for testing against.""" + return "rev_number-*.log" + + def test_next_path_in_empty_cwd(self, pattern, mock_cwd): + """Success case: get next path in 'empty' CWD.""" + assert next_path(mock_cwd, pattern) == "rev_number-1.log" + def test_next_path_in_non_empty_cwd(self, pattern, mock_cwd): + """Success case: get next path in 'non-empty' CWD.""" + (mock_cwd / next_path(mock_cwd, pattern)).touch() + assert next_path(mock_cwd, pattern) == "rev_number-2.log" -def test_mkdir_verbose(): - """Tests for verbose output of `mkdir()`.""" - # Success case: verbose output - test_path = Path(MOCK_CWD, "test1") - with contextlib.redirect_stdout(io.StringIO()) as buf: - mkdir(test_path, verbose=True) - assert buf.getvalue() == ( - f"Creating {test_path} directory\n" +class TestMkdir: + """Tests for `mkdir()`.""" + + @pytest.mark.parametrize( + ("test_path", "kwargs"), + [ + (Path("test1"), {}), + (Path("test1/test2"), dict(parents=True)), + 
(Path("test1/test2"), dict(parents=True, exist_ok=True)), + ], + ) + def test_mkdir(self, test_path, kwargs): + """Success case: create a test directory.""" + mkdir(test_path, **kwargs) + assert test_path.exists() + test_path.rmdir() + + @pytest.mark.parametrize( + ("verbosity", "expected"), [(False, ""), (True, "Creating test1 directory\n")] ) - test_path.rmdir() + def test_standard_output(self, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + mkdir(Path("test1"), verbose=verbosity) + assert buf.getvalue() == expected diff --git a/tests/test_pbs.py b/tests/test_pbs.py index a3fb9a02..c12e3e0d 100644 --- a/tests/test_pbs.py +++ b/tests/test_pbs.py @@ -4,16 +4,18 @@ from benchcab.utils.pbs import render_job_script -def test_render_job_script(): +class TestRenderJobScript: """Tests for `render_job_script()`.""" - # Success case: test default job script generated is correct - assert render_job_script( - project="tm70", - config_path="/path/to/config.yaml", - modules=["foo", "bar", "baz"], - benchcab_path="/absolute/path/to/benchcab", - ) == ( - f"""#!/bin/bash + + def test_default_job_script(self): + """Success case: test default job script generated is correct.""" + assert render_job_script( + project="tm70", + config_path="/path/to/config.yaml", + modules=["foo", "bar", "baz"], + benchcab_path="/absolute/path/to/benchcab", + ) == ( + f"""#!/bin/bash #PBS -l wd #PBS -l ncpus={internal.FLUXSITE_DEFAULT_PBS["ncpus"]} #PBS -l mem={internal.FLUXSITE_DEFAULT_PBS["mem"]} @@ -36,17 +38,18 @@ def test_render_job_script(): /absolute/path/to/benchcab fluxsite-bitwise-cmp --config=/path/to/config.yaml """ - ) - - # Success case: test verbose flag is added to command line arguments - assert render_job_script( - project="tm70", - config_path="/path/to/config.yaml", - modules=["foo", "bar", "baz"], - verbose=True, - benchcab_path="/absolute/path/to/benchcab", - ) == ( - f"""#!/bin/bash + ) + + def test_verbose_flag_added_to_command_line_arguments(self): + """Success case: test verbose flag is added to command line arguments.""" + assert render_job_script( + project="tm70", + config_path="/path/to/config.yaml", + modules=["foo", "bar", "baz"], + verbose=True, + benchcab_path="/absolute/path/to/benchcab", + ) == ( + f"""#!/bin/bash #PBS -l wd #PBS -l ncpus={internal.FLUXSITE_DEFAULT_PBS["ncpus"]} #PBS -l mem={internal.FLUXSITE_DEFAULT_PBS["mem"]} @@ -69,17 +72,18 @@ def test_render_job_script(): /absolute/path/to/benchcab fluxsite-bitwise-cmp --config=/path/to/config.yaml -v """ - ) - - # Success case: skip fluxsite-bitwise-cmp step - assert render_job_script( - project="tm70", - config_path="/path/to/config.yaml", - modules=["foo", "bar", "baz"], - skip_bitwise_cmp=True, - benchcab_path="/absolute/path/to/benchcab", - ) == ( - f"""#!/bin/bash + ) + + def test_skip_bitwise_comparison_step(self): + """Success case: skip fluxsite-bitwise-cmp step.""" + assert render_job_script( + project="tm70", + config_path="/path/to/config.yaml", + modules=["foo", "bar", "baz"], + skip_bitwise_cmp=True, + benchcab_path="/absolute/path/to/benchcab", + ) == ( + f"""#!/bin/bash #PBS -l wd #PBS -l ncpus={internal.FLUXSITE_DEFAULT_PBS["ncpus"]} #PBS -l mem={internal.FLUXSITE_DEFAULT_PBS["mem"]} @@ -100,23 +104,24 @@ def test_render_job_script(): /absolute/path/to/benchcab fluxsite-run-tasks --config=/path/to/config.yaml """ - ) - - # Success case: specify parameters in pbs_config - assert render_job_script( - project="tm70", - 
config_path="/path/to/config.yaml", - modules=["foo", "bar", "baz"], - skip_bitwise_cmp=True, - benchcab_path="/absolute/path/to/benchcab", - pbs_config={ - "ncpus": 4, - "mem": "16GB", - "walltime": "00:00:30", - "storage": ["gdata/foo"], - }, - ) == ( - """#!/bin/bash + ) + + def test_pbs_config_parameters(self): + """Success case: specify parameters in pbs_config.""" + assert render_job_script( + project="tm70", + config_path="/path/to/config.yaml", + modules=["foo", "bar", "baz"], + skip_bitwise_cmp=True, + benchcab_path="/absolute/path/to/benchcab", + pbs_config={ + "ncpus": 4, + "mem": "16GB", + "walltime": "00:00:30", + "storage": ["gdata/foo"], + }, + ) == ( + """#!/bin/bash #PBS -l wd #PBS -l ncpus=4 #PBS -l mem=16GB @@ -137,18 +142,19 @@ def test_render_job_script(): /absolute/path/to/benchcab fluxsite-run-tasks --config=/path/to/config.yaml """ - ) - - # Success case: if the pbs_config is empty, use the default values - assert render_job_script( - project="tm70", - config_path="/path/to/config.yaml", - modules=["foo", "bar", "baz"], - skip_bitwise_cmp=True, - benchcab_path="/absolute/path/to/benchcab", - pbs_config={}, - ) == ( - f"""#!/bin/bash + ) + + def test_default_pbs_config(self): + """Success case: if the pbs_config is empty, use the default values.""" + assert render_job_script( + project="tm70", + config_path="/path/to/config.yaml", + modules=["foo", "bar", "baz"], + skip_bitwise_cmp=True, + benchcab_path="/absolute/path/to/benchcab", + pbs_config={}, + ) == ( + f"""#!/bin/bash #PBS -l wd #PBS -l ncpus={internal.FLUXSITE_DEFAULT_PBS["ncpus"]} #PBS -l mem={internal.FLUXSITE_DEFAULT_PBS["mem"]} @@ -169,4 +175,4 @@ def test_render_job_script(): /absolute/path/to/benchcab fluxsite-run-tasks --config=/path/to/config.yaml """ - ) + ) diff --git a/tests/test_repository.py b/tests/test_repository.py index b5cddcee..06bef617 100644 --- a/tests/test_repository.py +++ b/tests/test_repository.py @@ -1,315 +1,360 @@ -"""`pytest` tests for `repository.py`.""" +"""`pytest` tests for `repository.py`. + +Note: explicit teardown for generated files and directories are not required as +the working directory used for testing is cleaned up in the `_run_around_tests` +pytest autouse fixture. 
+""" import contextlib import io import os -import shutil +from pathlib import Path import pytest from benchcab import internal -from benchcab.environment_modules import EnvironmentModulesInterface from benchcab.repository import CableRepository, remove_module_lines -from benchcab.utils.subprocess import SubprocessWrapperInterface -from .common import MOCK_CWD, MockEnvironmentModules, MockSubprocessWrapper +from .conftest import DEFAULT_STDOUT -def get_mock_repo( - subprocess_handler: SubprocessWrapperInterface = MockSubprocessWrapper(), - modules_handler: EnvironmentModulesInterface = MockEnvironmentModules(), -) -> CableRepository: - """Returns a mock `CableRepository` instance for testing against.""" - repo = CableRepository(path="trunk") - repo.root_dir = MOCK_CWD - repo.subprocess_handler = subprocess_handler - repo.modules_handler = modules_handler - return repo +@pytest.fixture() +def repo(mock_cwd, mock_subprocess_handler, mock_environment_modules_handler): + """Return a mock `CableRepository` instance for testing against.""" + _repo = CableRepository(path="trunk") + _repo.root_dir = mock_cwd + _repo.subprocess_handler = mock_subprocess_handler + _repo.modules_handler = mock_environment_modules_handler + return _repo -def test_repo_id(): +class TestRepoID: """Tests for `CableRepository.repo_id`.""" - mock_repo_id = 123 - - # Success case: get repository ID - repo = CableRepository("path/to/repo", repo_id=mock_repo_id) - assert repo.repo_id == mock_repo_id - # Success case: set repository ID - new_repo_id = 456 - repo = CableRepository("path/to/repo", repo_id=mock_repo_id) - repo.repo_id = new_repo_id - assert repo.repo_id == new_repo_id + def test_set_and_get_repo_id(self, repo): + """Success case: set and get repository ID.""" + val = 456 + repo.repo_id = val + assert repo.repo_id == val - # Failure case: access undefined repository ID - repo = CableRepository("path/to/repo") - with pytest.raises(RuntimeError, match="Attempting to access undefined repo ID"): - _ = repo.repo_id + def test_undefined_repo_id(self, repo): + """Failure case: access undefined repository ID.""" + repo.repo_id = None + with pytest.raises( + RuntimeError, match="Attempting to access undefined repo ID" + ): + _ = repo.repo_id -def test_checkout(): +class TestCheckout: """Tests for `CableRepository.checkout()`.""" - # Success case: checkout mock repository - mock_subprocess = MockSubprocessWrapper() - repo = get_mock_repo(mock_subprocess) - repo.checkout() - assert ( - f"svn checkout https://trac.nci.org.au/svn/cable/trunk {MOCK_CWD}/src/trunk" - in mock_subprocess.commands - ) - # Success case: checkout mock repository with specified revision number - mock_subprocess = MockSubprocessWrapper() - repo = get_mock_repo(mock_subprocess) - repo.revision = 9000 - repo.checkout() - assert ( - f"svn checkout -r 9000 https://trac.nci.org.au/svn/cable/trunk {MOCK_CWD}/src/trunk" - in mock_subprocess.commands - ) + def test_checkout_command_execution(self, repo, mock_cwd, mock_subprocess_handler): + """Success case: `svn checkout` command is executed.""" + repo.checkout() + assert ( + f"svn checkout https://trac.nci.org.au/svn/cable/trunk {mock_cwd}/src/trunk" + in mock_subprocess_handler.commands + ) - # Success case: test non-verbose standard output - mock_subprocess = MockSubprocessWrapper() - repo = get_mock_repo(mock_subprocess) - with contextlib.redirect_stdout(io.StringIO()) as buf: + def test_checkout_command_execution_with_revision_number( + self, repo, mock_cwd, mock_subprocess_handler + ): + """Success case: 
`svn checkout` command is executed with specified revision number.""" + repo.revision = 9000 repo.checkout() - assert ( - buf.getvalue() - == f"Successfully checked out trunk at revision {mock_subprocess.stdout}\n" - ) + assert ( + f"svn checkout -r 9000 https://trac.nci.org.au/svn/cable/trunk {mock_cwd}/src/trunk" + in mock_subprocess_handler.commands + ) - # Success case: test verbose standard output - mock_subprocess = MockSubprocessWrapper() - repo = get_mock_repo(mock_subprocess) - with contextlib.redirect_stdout(io.StringIO()) as buf: - repo.checkout(verbose=True) - assert ( - buf.getvalue() - == f"Successfully checked out trunk at revision {mock_subprocess.stdout}\n" + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + (False, f"Successfully checked out trunk at revision {DEFAULT_STDOUT}\n"), + (True, f"Successfully checked out trunk at revision {DEFAULT_STDOUT}\n"), + ], ) + def test_standard_output(self, repo, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + repo.checkout(verbose=verbosity) + assert buf.getvalue() == expected -def test_svn_info_show_item(): +class TestSVNInfoShowItem: """Tests for `CableRepository.svn_info_show_item()`.""" - # Success case: call svn info command and get result - mock_subprocess = MockSubprocessWrapper() - mock_subprocess.stdout = "mock standard output" - repo = get_mock_repo(mock_subprocess) - assert repo.svn_info_show_item("some-mock-item") == mock_subprocess.stdout - assert ( - f"svn info --show-item some-mock-item {MOCK_CWD}/src/trunk" - in mock_subprocess.commands - ) - # Success case: test leading and trailing white space is removed from standard output - mock_subprocess = MockSubprocessWrapper() - mock_subprocess.stdout = " \n\n mock standard output \n\n" - repo = get_mock_repo(mock_subprocess) - assert repo.svn_info_show_item("some-mock-item") == mock_subprocess.stdout.strip() - assert ( - f"svn info --show-item some-mock-item {MOCK_CWD}/src/trunk" - in mock_subprocess.commands - ) + def test_svn_info_command_execution(self, repo, mock_subprocess_handler, mock_cwd): + """Success case: call svn info command and get result.""" + assert ( + repo.svn_info_show_item("some-mock-item") == mock_subprocess_handler.stdout + ) + assert ( + f"svn info --show-item some-mock-item {mock_cwd}/src/trunk" + in mock_subprocess_handler.commands + ) + def test_white_space_removed_from_standard_output( + self, repo, mock_subprocess_handler + ): + """Success case: test leading and trailing white space is removed from standard output.""" + mock_subprocess_handler.stdout = " \n\n mock standard output \n\n" + assert ( + repo.svn_info_show_item("some-mock-item") + == mock_subprocess_handler.stdout.strip() + ) -def test_pre_build(): + +class TestPreBuild: """Tests for `CableRepository.pre_build()`.""" - repo_dir = MOCK_CWD / internal.SRC_DIR / "trunk" - offline_dir = repo_dir / "offline" - offline_dir.mkdir(parents=True) - (offline_dir / "Makefile").touch() - (offline_dir / "parallel_cable").touch() - (offline_dir / "serial_cable").touch() - (offline_dir / "foo.f90").touch() - - # Success case: test source files and scripts are copied to .tmp - repo = get_mock_repo() - repo.pre_build() - assert (offline_dir / ".tmp" / "Makefile").exists() - assert (offline_dir / ".tmp" / "parallel_cable").exists() - assert (offline_dir / ".tmp" / "serial_cable").exists() - assert (offline_dir / ".tmp" / "foo.f90").exists() - shutil.rmtree(offline_dir / ".tmp") - - # Success case: test non-verbose 
standard output - repo = get_mock_repo() - with contextlib.redirect_stdout(io.StringIO()) as buf: + + @pytest.fixture(autouse=True) + def _setup(self, repo): + """Setup precondition for `CableRepository.pre_build()`.""" + (internal.SRC_DIR / repo.name / "offline").mkdir(parents=True) + (internal.SRC_DIR / repo.name / "offline" / "Makefile").touch() + (internal.SRC_DIR / repo.name / "offline" / "parallel_cable").touch() + (internal.SRC_DIR / repo.name / "offline" / "serial_cable").touch() + (internal.SRC_DIR / repo.name / "offline" / "foo.f90").touch() + + def test_source_files_and_scripts_are_copied_to_tmp_dir(self, repo): + """Success case: test source files and scripts are copied to .tmp.""" repo.pre_build() - assert not buf.getvalue() - shutil.rmtree(offline_dir / ".tmp") - - # Success case: test verbose standard output - repo = get_mock_repo() - with contextlib.redirect_stdout(io.StringIO()) as buf: - repo.pre_build(verbose=True) - assert buf.getvalue() == ( - "mkdir src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/foo.f90 src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/Makefile src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/parallel_cable src/trunk/offline/.tmp\n" - "cp -p src/trunk/offline/serial_cable src/trunk/offline/.tmp\n" + tmp_dir = internal.SRC_DIR / repo.name / "offline" / ".tmp" + assert (tmp_dir / "Makefile").exists() + assert (tmp_dir / "parallel_cable").exists() + assert (tmp_dir / "serial_cable").exists() + assert (tmp_dir / "foo.f90").exists() + + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + ( + False, + "", + ), + ( + True, + "mkdir src/trunk/offline/.tmp\n" + "cp -p src/trunk/offline/foo.f90 src/trunk/offline/.tmp\n" + "cp -p src/trunk/offline/Makefile src/trunk/offline/.tmp\n" + "cp -p src/trunk/offline/parallel_cable src/trunk/offline/.tmp\n" + "cp -p src/trunk/offline/serial_cable src/trunk/offline/.tmp\n", + ), + ], ) - shutil.rmtree(offline_dir / ".tmp") + def test_standard_output(self, repo, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + repo.pre_build(verbose=verbosity) + assert buf.getvalue() == expected -def test_run_build(): +class TestRunBuild: """Tests for `CableRepository.run_build()`.""" - mock_netcdf_root = "/mock/path/to/root" - mock_modules = ["foo", "bar"] - (MOCK_CWD / internal.SRC_DIR / "trunk" / "offline" / ".tmp").mkdir(parents=True) - - environment_vars = { - "NCDIR": f"{mock_netcdf_root}/lib/Intel", - "NCMOD": f"{mock_netcdf_root}/include/Intel", - "CFLAGS": "-O2 -fp-model precise", - "LDFLAGS": f"-L{mock_netcdf_root}/lib/Intel -O0", - "LD": "-lnetcdf -lnetcdff", - "FC": "ifort", - } - - # This is required so that we can use the NETCDF_ROOT environment variable - # when running `make`, and `serial_cable` and `parallel_cable` scripts: - os.environ["NETCDF_ROOT"] = mock_netcdf_root - - # Success case: test build commands are run - mock_subprocess = MockSubprocessWrapper() - repo = get_mock_repo(subprocess_handler=mock_subprocess) - repo.run_build(mock_modules) - assert mock_subprocess.commands == [ - "make -f Makefile", - './serial_cable "ifort" "-O2 -fp-model precise"' - f' "-L{mock_netcdf_root}/lib/Intel -O0" "-lnetcdf -lnetcdff" ' - f'"{mock_netcdf_root}/include/Intel"', - ] - - # Success case: test modules are loaded at runtime - mock_environment_modules = MockEnvironmentModules() - repo = get_mock_repo(modules_handler=mock_environment_modules) - repo.run_build(mock_modules) - assert ( - "module load " + " ".join(mock_modules) - ) in 
mock_environment_modules.commands - assert ( - "module unload " + " ".join(mock_modules) - ) in mock_environment_modules.commands - - # Success case: test commands are run with the correct environment variables - mock_subprocess = MockSubprocessWrapper() - repo = get_mock_repo(subprocess_handler=mock_subprocess) - repo.run_build(mock_modules) - for kv in environment_vars.items(): - assert (kv in mock_subprocess.env.items()) - - # Success case: test non-verbose standard output - repo = get_mock_repo() - with contextlib.redirect_stdout(io.StringIO()) as buf: - repo.run_build(mock_modules) - assert not buf.getvalue() - - # Success case: test verbose standard output - repo = get_mock_repo() - with contextlib.redirect_stdout(io.StringIO()) as buf: - repo.run_build(mock_modules, verbose=True) - assert buf.getvalue() == ( - f"Loading modules: {' '.join(mock_modules)}\n" - f"Unloading modules: {' '.join(mock_modules)}\n" + + @pytest.fixture() + def netcdf_root(self): + """Return an absolute path to use as the NETCDF_ROOT environment variable.""" + return "/mock/path/to/root" + + @pytest.fixture() + def modules(self): + """Return a list of modules for testing.""" + return ["foo", "bar"] + + @pytest.fixture() + def env(self, netcdf_root): + """Return a dictionary containing the required environment variables.""" + return { + "NCDIR": f"{netcdf_root}/lib/Intel", + "NCMOD": f"{netcdf_root}/include/Intel", + "CFLAGS": "-O2 -fp-model precise", + "LDFLAGS": f"-L{netcdf_root}/lib/Intel -O0", + "LD": "-lnetcdf -lnetcdff", + "FC": "ifort", + } + + @pytest.fixture(autouse=True) + def _setup(self, repo, netcdf_root): + """Setup precondition for `CableRepository.run_build()`.""" + (internal.SRC_DIR / repo.name / "offline" / ".tmp").mkdir(parents=True) + + # This is required so that we can use the NETCDF_ROOT environment variable + # when running `make`, and `serial_cable` and `parallel_cable` scripts: + os.environ["NETCDF_ROOT"] = netcdf_root + + def test_build_command_execution( + self, repo, mock_subprocess_handler, modules, netcdf_root + ): + """Success case: test build commands are run.""" + repo.run_build(modules) + assert mock_subprocess_handler.commands == [ + "make -f Makefile", + './serial_cable "ifort" "-O2 -fp-model precise"' + f' "-L{netcdf_root}/lib/Intel -O0" "-lnetcdf -lnetcdff" ' + f'"{netcdf_root}/include/Intel"', + ] + + def test_modules_loaded_at_runtime( + self, repo, mock_environment_modules_handler, modules + ): + """Success case: test modules are loaded at runtime.""" + repo.run_build(modules) + assert ( + "module load " + " ".join(modules) + ) in mock_environment_modules_handler.commands + assert ( + "module unload " + " ".join(modules) + ) in mock_environment_modules_handler.commands + + def test_commands_are_run_with_environment_variables( + self, repo, mock_subprocess_handler, modules, env + ): + """Success case: test commands are run with the correct environment variables.""" + repo.run_build(modules) + for kv in env.items(): + assert kv in mock_subprocess_handler.env.items() + + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + (False, ""), + (True, "Loading modules: foo bar\nUnloading modules: foo bar\n"), + ], ) + def test_standard_output(self, repo, modules, verbosity, expected): + """Success case: test standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + repo.run_build(modules, verbose=verbosity) + assert buf.getvalue() == expected -def test_post_build(): +class TestPostBuild: """Tests for `CableRepository.post_build()`.""" - repo_dir = 
MOCK_CWD / internal.SRC_DIR / "trunk" - offline_dir = repo_dir / "offline" - tmp_dir = offline_dir / ".tmp" - - # Success case: test executable is moved to offline directory - tmp_dir.mkdir(parents=True) - (tmp_dir / internal.CABLE_EXE).touch() - repo = get_mock_repo() - repo.post_build() - assert not (offline_dir / ".tmp" / internal.CABLE_EXE).exists() - assert (offline_dir / internal.CABLE_EXE).exists() - - # Success case: test non-verbose standard output - (tmp_dir / internal.CABLE_EXE).touch() - repo = get_mock_repo() - with contextlib.redirect_stdout(io.StringIO()) as buf: + + @pytest.fixture(autouse=True) + def _setup(self, repo): + """Setup precondition for `CableRepository.post_build()`.""" + (internal.SRC_DIR / repo.name / "offline" / ".tmp").mkdir(parents=True) + (internal.SRC_DIR / repo.name / "offline" / ".tmp" / internal.CABLE_EXE).touch() + + def test_exe_moved_to_offline_dir(self, repo): + """Success case: test executable is moved to offline directory.""" repo.post_build() - assert not buf.getvalue() - - # Success case: test verbose standard output - (tmp_dir / internal.CABLE_EXE).touch() - repo = get_mock_repo() - with contextlib.redirect_stdout(io.StringIO()) as buf: - repo.post_build(verbose=True) - assert buf.getvalue() == ( - "mv src/trunk/offline/.tmp/cable src/trunk/offline/cable\n" + tmp_dir = internal.SRC_DIR / repo.name / "offline" / ".tmp" + assert not (tmp_dir / internal.CABLE_EXE).exists() + offline_dir = internal.SRC_DIR / repo.name / "offline" + assert (offline_dir / internal.CABLE_EXE).exists() + + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + (False, ""), + (True, "mv src/trunk/offline/.tmp/cable src/trunk/offline/cable\n"), + ], ) + def test_standard_output(self, repo, verbosity, expected): + """Success case: test non-verbose standard output.""" + with contextlib.redirect_stdout(io.StringIO()) as buf: + repo.post_build(verbose=verbosity) + assert buf.getvalue() == expected -def test_custom_build(): +class TestCustomBuild: """Tests for `CableRepository.custom_build()`.""" - repo_dir = MOCK_CWD / internal.SRC_DIR / "trunk" - custom_build_script_path = repo_dir / "my-custom-build.sh" - custom_build_script_path.parent.mkdir(parents=True) - custom_build_script_path.touch() - mock_modules = ["foo", "bar"] - - # Success case: execute the build command for a custom build script - mock_subprocess = MockSubprocessWrapper() - mock_environment_modules = MockEnvironmentModules() - repo = get_mock_repo(mock_subprocess, mock_environment_modules) - repo.build_script = str(custom_build_script_path.relative_to(repo_dir)) - repo.custom_build(mock_modules) - assert "./tmp-build.sh" in mock_subprocess.commands - assert ( - "module load " + " ".join(mock_modules) - ) in mock_environment_modules.commands - assert ( - "module unload " + " ".join(mock_modules) - ) in mock_environment_modules.commands - - # Success case: test non-verbose standard output for a custom build script - repo = get_mock_repo() - repo.build_script = str(custom_build_script_path.relative_to(repo_dir)) - with contextlib.redirect_stdout(io.StringIO()) as buf: - repo.custom_build(mock_modules) - assert not buf.getvalue() - - # Success case: test verbose standard output for a custom build script - repo = get_mock_repo() - repo.build_script = str(custom_build_script_path.relative_to(repo_dir)) - with contextlib.redirect_stdout(io.StringIO()) as buf: - repo.custom_build(mock_modules, verbose=True) - assert buf.getvalue() == ( - f"Copying {custom_build_script_path} to 
{custom_build_script_path.parent}/tmp-build.sh\n" - f"chmod +x {custom_build_script_path.parent}/tmp-build.sh\n" - "Modifying tmp-build.sh: remove lines that call environment " - "modules\n" - f"Loading modules: {' '.join(mock_modules)}\n" - f"Unloading modules: {' '.join(mock_modules)}\n" - ) - # Failure case: cannot find custom build script - custom_build_script_path.unlink() - repo = get_mock_repo() - repo.build_script = str(custom_build_script_path.relative_to(repo_dir)) - with pytest.raises( - FileNotFoundError, - match=f"The build script, {custom_build_script_path}, could not be " - "found. Do you need to specify a different build script with the 'build_script' " - "option in config.yaml?", - ): - repo.custom_build(mock_modules) + @pytest.fixture() + def build_script(self, repo): + """Create a custom build script and return its path. + + The return value is the path relative to the root directory of the repository. + """ + _build_script = internal.SRC_DIR / repo.name / "my-custom-build.sh" + _build_script.parent.mkdir(parents=True) + _build_script.touch() + return _build_script.relative_to(internal.SRC_DIR / repo.name) + @pytest.fixture() + def modules(self): + """Return a list of modules for testing.""" + return ["foo", "bar"] -def test_remove_module_lines(): + def test_build_command_execution( + self, repo, mock_subprocess_handler, build_script, modules + ): + """Success case: execute the build command for a custom build script.""" + repo.build_script = str(build_script) + repo.custom_build(modules) + assert "./tmp-build.sh" in mock_subprocess_handler.commands + + def test_modules_loaded_at_runtime( + self, repo, mock_environment_modules_handler, build_script, modules + ): + """Success case: test modules are loaded at runtime.""" + repo.build_script = str(build_script) + repo.custom_build(modules) + assert ( + "module load " + " ".join(modules) + ) in mock_environment_modules_handler.commands + assert ( + "module unload " + " ".join(modules) + ) in mock_environment_modules_handler.commands + + # TODO(Sean) fix for issue https://github.com/CABLE-LSM/benchcab/issues/162 + @pytest.mark.skip( + reason="""This will always fail since `parametrize()` parameters are + dependent on the `mock_cwd` fixture.""" + ) + @pytest.mark.parametrize( + ("verbosity", "expected"), + [ + ( + False, + "", + ), + ( + True, + "Copying src/trunk/my-custom-build.sh to src/trunk/tmp-build.sh\n" + "chmod +x src/trunk/tmp-build.sh\n" + "Modifying tmp-build.sh: remove lines that call environment " + "modules\n" + "Loading modules: foo bar\n" + "Unloading modules: foo bar\n", + ), + ], + ) + def test_standard_output(self, repo, build_script, modules, verbosity, expected): + """Success case: test standard output for a custom build script.""" + repo.build_script = str(build_script) + with contextlib.redirect_stdout(io.StringIO()) as buf: + repo.custom_build(modules, verbose=verbosity) + assert buf.getvalue() == expected + + def test_file_not_found_exception(self, repo, build_script, modules, mock_cwd): + """Failure case: cannot find custom build script.""" + build_script_path = mock_cwd / internal.SRC_DIR / repo.name / build_script + build_script_path.unlink() + repo.build_script = str(build_script) + with pytest.raises( + FileNotFoundError, + match=f"The build script, {build_script_path}, could not be " + "found. 
Do you need to specify a different build script with the 'build_script' " + "option in config.yaml?", + ): + repo.custom_build(modules) + + +class TestRemoveModuleLines: """Tests for `remove_module_lines()`.""" - # Success case: test 'module' lines are removed from mock shell script - file_path = MOCK_CWD / "test-build.sh" - with file_path.open("w", encoding="utf-8") as file: - file.write( - """#!/bin/bash + + def test_module_lines_removed_from_shell_script(self): + """Success case: test 'module' lines are removed from mock shell script.""" + file_path = Path("test-build.sh") + with file_path.open("w", encoding="utf-8") as file: + file.write( + """#!/bin/bash module add bar module purge @@ -330,13 +375,13 @@ def test_remove_module_lines(): fi } """ - ) + ) - remove_module_lines(file_path) + remove_module_lines(file_path) - with file_path.open("r", encoding="utf-8") as file: - assert file.read() == ( - """#!/bin/bash + with file_path.open("r", encoding="utf-8") as file: + assert file.read() == ( + """#!/bin/bash host_gadi() { @@ -349,4 +394,4 @@ def test_remove_module_lines(): fi } """ - ) + ) diff --git a/tests/test_subprocess.py b/tests/test_subprocess.py index d63788e2..868d12d2 100644 --- a/tests/test_subprocess.py +++ b/tests/test_subprocess.py @@ -2,98 +2,123 @@ import os import subprocess +from pathlib import Path import pytest from benchcab.utils.subprocess import SubprocessWrapper -from .common import TMP_DIR - -def test_run_cmd(capfd): +class TestRunCmd: """Tests for `run_cmd()`.""" - subprocess_handler = SubprocessWrapper() - - # Success case: test stdout is suppressed in non-verbose mode - subprocess_handler.run_cmd("echo foo") - captured = capfd.readouterr() - assert not captured.out - assert not captured.err - - # Success case: test stderr is suppressed in non-verbose mode - subprocess_handler.run_cmd("echo foo 1>&2") - captured = capfd.readouterr() - assert not captured.out - assert not captured.err - - # Success case: test command and stdout is printed in verbose mode - subprocess_handler.run_cmd("echo foo", verbose=True) - captured = capfd.readouterr() - assert captured.out == "echo foo\nfoo\n" - assert not captured.err - - # Success case: test command and stderr is redirected to stdout in verbose mode - subprocess_handler.run_cmd("echo foo 1>&2", verbose=True) - captured = capfd.readouterr() - assert captured.out == "echo foo 1>&2\nfoo\n" - assert not captured.err - - # Success case: test output is captured with capture_output enabled - proc = subprocess_handler.run_cmd("echo foo", capture_output=True) - captured = capfd.readouterr() - assert not captured.out - assert not captured.err - assert proc.stdout == "foo\n" - assert not proc.stderr - - # Success case: test stderr is captured and redirected to stdout with - # capture_output enabled - proc = subprocess_handler.run_cmd("echo foo 1>&2", capture_output=True) - captured = capfd.readouterr() - assert not captured.out - assert not captured.err - assert proc.stdout == "foo\n" - assert not proc.stderr - - # Success case: test command is printed and stdout is captured in verbose mode - proc = subprocess_handler.run_cmd("echo foo", capture_output=True, verbose=True) - captured = capfd.readouterr() - assert captured.out == "echo foo\n" - assert not captured.err - assert proc.stdout == "foo\n" - assert not proc.stderr - - # Success case: test stdout is redirected to file - file_path = TMP_DIR / "out.txt" - subprocess_handler.run_cmd("echo foo", output_file=file_path) - with file_path.open("r", encoding="utf-8") as file: - 
assert file.read() == "foo\n" - captured = capfd.readouterr() - assert not captured.out - assert not captured.err - - # Success case: test command is printed and stdout is redirected to file in verbose mode - file_path = TMP_DIR / "out.txt" - subprocess_handler.run_cmd("echo foo", output_file=file_path, verbose=True) - with file_path.open("r", encoding="utf-8") as file: - assert file.read() == "foo\n" - captured = capfd.readouterr() - assert captured.out == "echo foo\n" - assert not captured.err - - # Success case: test command is run with environment - proc = subprocess_handler.run_cmd( - "echo $FOO", capture_output=True, env={"FOO": "bar", **os.environ} - ) - assert proc.stdout == "bar\n" - - # Failure case: check non-zero return code throws an exception - with pytest.raises(subprocess.CalledProcessError): - subprocess_handler.run_cmd("exit 1") - - # Failure case: check stderr is redirected to stdout on non-zero - # return code - with pytest.raises(subprocess.CalledProcessError) as exc: - subprocess_handler.run_cmd("echo foo 1>&2; exit 1", capture_output=True) - assert exc.value.stdout == "foo\n" - assert not exc.value.stderr + + @pytest.fixture() + def subprocess_handler(self): + """Return an instance of `SubprocessWrapper` for testing.""" + return SubprocessWrapper() + + def test_stdout_is_suppressed_in_non_verbose_mode(self, subprocess_handler, capfd): + """Success case: test stdout is suppressed in non-verbose mode.""" + subprocess_handler.run_cmd("echo foo") + captured = capfd.readouterr() + assert not captured.out + assert not captured.err + + def test_stderr_is_suppressed_in_non_verbose_mode(self, subprocess_handler, capfd): + """Success case: test stderr is suppressed in non-verbose mode.""" + subprocess_handler.run_cmd("echo foo 1>&2") + captured = capfd.readouterr() + assert not captured.out + assert not captured.err + + def test_command_and_stdout_is_printed_in_verbose_mode( + self, subprocess_handler, capfd + ): + """Success case: test command and stdout is printed in verbose mode.""" + subprocess_handler.run_cmd("echo foo", verbose=True) + captured = capfd.readouterr() + assert captured.out == "echo foo\nfoo\n" + assert not captured.err + + def test_command_and_stderr_is_redirected_to_stdout_in_verbose_mode( + self, subprocess_handler, capfd + ): + """Success case: test command and stderr is redirected to stdout in verbose mode.""" + subprocess_handler.run_cmd("echo foo 1>&2", verbose=True) + captured = capfd.readouterr() + assert captured.out == "echo foo 1>&2\nfoo\n" + assert not captured.err + + def test_output_is_captured_with_capture_output_enabled( + self, subprocess_handler, capfd + ): + """Success case: test output is captured with capture_output enabled.""" + proc = subprocess_handler.run_cmd("echo foo", capture_output=True) + captured = capfd.readouterr() + assert not captured.out + assert not captured.err + assert proc.stdout == "foo\n" + assert not proc.stderr + + def test_stderr_captured_to_stdout(self, subprocess_handler, capfd): + """Success case: test stderr is captured to stdout with capture_output enabled.""" + proc = subprocess_handler.run_cmd("echo foo 1>&2", capture_output=True) + captured = capfd.readouterr() + assert not captured.out + assert not captured.err + assert proc.stdout == "foo\n" + assert not proc.stderr + + def test_command_is_printed_and_stdout_is_captured_in_verbose_mode( + self, subprocess_handler, capfd + ): + """Success case: test command is printed and stdout is captured in verbose mode.""" + proc = subprocess_handler.run_cmd("echo 
foo", capture_output=True, verbose=True) + captured = capfd.readouterr() + assert captured.out == "echo foo\n" + assert not captured.err + assert proc.stdout == "foo\n" + assert not proc.stderr + + def test_stdout_is_redirected_to_file(self, subprocess_handler, capfd): + """Success case: test stdout is redirected to file.""" + file_path = Path("out.txt") + subprocess_handler.run_cmd("echo foo", output_file=file_path) + with file_path.open("r", encoding="utf-8") as file: + assert file.read() == "foo\n" + captured = capfd.readouterr() + assert not captured.out + assert not captured.err + + def test_command_is_printed_and_stdout_is_redirected_to_file_in_verbose_mode( + self, subprocess_handler, capfd + ): + """Success case: test command is printed and stdout is redirected to file in verbose mode.""" + file_path = Path("out.txt") + subprocess_handler.run_cmd("echo foo", output_file=file_path, verbose=True) + with file_path.open("r", encoding="utf-8") as file: + assert file.read() == "foo\n" + captured = capfd.readouterr() + assert captured.out == "echo foo\n" + assert not captured.err + + def test_command_is_run_with_environment(self, subprocess_handler): + """Success case: test command is run with environment.""" + proc = subprocess_handler.run_cmd( + "echo $FOO", capture_output=True, env={"FOO": "bar", **os.environ} + ) + assert proc.stdout == "bar\n" + + def test_check_non_zero_return_code_throws_an_exception(self, subprocess_handler): + """Failure case: check non-zero return code throws an exception.""" + with pytest.raises(subprocess.CalledProcessError): + subprocess_handler.run_cmd("exit 1") + + def test_stderr_is_redirected_to_stdout_on_non_zero_return_code( + self, subprocess_handler + ): + """Failure case: check stderr is redirected to stdout on non-zero return code.""" + with pytest.raises(subprocess.CalledProcessError) as exc: + subprocess_handler.run_cmd("echo foo 1>&2; exit 1", capture_output=True) + assert exc.value.stdout == "foo\n" + assert not exc.value.stderr diff --git a/tests/test_workdir.py b/tests/test_workdir.py index af925b6c..62dd710c 100644 --- a/tests/test_workdir.py +++ b/tests/test_workdir.py @@ -1,4 +1,9 @@ -"""`pytest` tests for `workdir.py`.""" +"""`pytest` tests for `workdir.py`. + +Note: explicit teardown for generated files and directories are not required as +the working directory used for testing is cleaned up in the `_run_around_tests` +pytest autouse fixture. 
+""" from pathlib import Path @@ -10,33 +15,34 @@ ) -def setup_mock_fluxsite_directory_list(): - """Return the list of work directories we want benchcab to create.""" - fluxsite_directory_list = [ - Path("runs", "fluxsite"), - Path("runs", "fluxsite", "logs"), - Path("runs", "fluxsite", "outputs"), - Path("runs", "fluxsite", "tasks"), - Path("runs", "fluxsite", "analysis"), - Path("runs", "fluxsite", "analysis", "bitwise-comparisons"), - ] - - return fluxsite_directory_list - - -def test_setup_directory_tree(): +class TestSetupFluxsiteDirectoryTree: """Tests for `setup_fluxsite_directory_tree()`.""" - # Success case: generate the full fluxsite directory structure - setup_fluxsite_directory_tree() - for path in setup_mock_fluxsite_directory_list(): - assert path.exists() - - -@pytest.mark.parametrize("test_path", [Path("runs"), Path("src")]) -def test_clean_directory_tree(test_path): + @pytest.fixture(autouse=True) + def fluxsite_directory_list(self): + """Return the list of work directories we want benchcab to create.""" + return [ + Path("runs", "fluxsite"), + Path("runs", "fluxsite", "logs"), + Path("runs", "fluxsite", "outputs"), + Path("runs", "fluxsite", "tasks"), + Path("runs", "fluxsite", "analysis"), + Path("runs", "fluxsite", "analysis", "bitwise-comparisons"), + ] + + def test_directory_structure_generated(self, fluxsite_directory_list): + """Success case: generate the full fluxsite directory structure.""" + setup_fluxsite_directory_tree() + for path in fluxsite_directory_list: + assert path.exists() + + +class TestCleanDirectoryTree: """Tests for `clean_directory_tree()`.""" - # Success case: directory tree does not exist after clean - test_path.mkdir() - clean_directory_tree() - assert not test_path.exists() + + @pytest.mark.parametrize("test_path", [Path("runs"), Path("src")]) + def test_clean_directory_tree(self, test_path): + """Success case: directory tree does not exist after clean.""" + test_path.mkdir() + clean_directory_tree() + assert not test_path.exists()