From b829293fb453b02ccbd932655355b1492bdb210a Mon Sep 17 00:00:00 2001 From: Sean Bryan Date: Mon, 17 Jul 2023 14:48:06 +1000 Subject: [PATCH] Remove synonymous names for 'fluxsite' Currently, there is inconsistent naming conventions being used to describe fluxsite tests which can be confusing for developers and users. These include names such as 'site', 'single site', 'fluxsite', 'fluxnet', etc. This change replaces all occurrences of these names in the code base and documentation with 'fluxsite'. We choose 'fluxsite' as this describes the nature of the tests (as opposed to 'fluxnet' which describes a product/network). Fixes #62 --- benchcab/benchcab.py | 88 +++++++++++----------- benchcab/benchtree.py | 57 ++++++++------- benchcab/cli.py | 38 +++++----- benchcab/comparison.py | 4 +- benchcab/internal.py | 30 ++++---- benchcab/task.py | 52 ++++++------- benchcab/utils/pbs.py | 8 +- docs/user_guide/expected_output.md | 26 +++---- docs/user_guide/index.md | 16 ++-- tests/test_benchcab.py | 22 +++--- tests/test_benchtree.py | 114 ++++++++++++++++------------- tests/test_cli.py | 26 +++---- tests/test_comparison.py | 2 +- tests/test_pbs.py | 26 +++---- tests/test_task.py | 112 +++++++++++++++------------- 15 files changed, 323 insertions(+), 298 deletions(-) diff --git a/benchcab/benchcab.py b/benchcab/benchcab.py index fdfbc4b2..f7ab1fb8 100644 --- a/benchcab/benchcab.py +++ b/benchcab/benchcab.py @@ -8,13 +8,13 @@ from subprocess import CalledProcessError from benchcab import internal -from benchcab.internal import get_met_sites +from benchcab.internal import get_met_forcing_file_names from benchcab.bench_config import read_config -from benchcab.benchtree import setup_fluxnet_directory_tree, setup_src_dir +from benchcab.benchtree import setup_fluxsite_directory_tree, setup_src_dir from benchcab.repository import CableRepository from benchcab.task import ( - get_fluxnet_tasks, - get_fluxnet_comparisons, + get_fluxsite_tasks, + get_fluxsite_comparisons, run_tasks, run_tasks_in_parallel, Task, @@ -46,7 +46,7 @@ def __init__( CableRepository(**config, repo_id=id) for id, config in enumerate(self.config["realisations"]) ] - self.tasks: list[Task] = [] # initialise fluxnet tasks lazily + self.tasks: list[Task] = [] # initialise fluxsite tasks lazily if validate_env: self._validate_environment( @@ -102,23 +102,25 @@ def _validate_environment(self, project: str, modules: list): def _initialise_tasks(self) -> list[Task]: """A helper method that initialises and returns the `tasks` attribute.""" - self.tasks = get_fluxnet_tasks( + self.tasks = get_fluxsite_tasks( repos=self.repos, science_configurations=self.config.get( "science_configurations", internal.DEFAULT_SCIENCE_CONFIGURATIONS ), - met_sites=get_met_sites(self.config["experiment"]), + fluxsite_forcing_file_names=get_met_forcing_file_names( + self.config["experiment"] + ), ) return self.tasks - # TODO(Sean) this method should be the endpoint for the `fluxnet-submit-job` + # TODO(Sean) this method should be the endpoint for the `fluxsite-submit-job` # command line argument. 
- def fluxnet_submit_job(self) -> None: - """Submits the PBS job script step in the fluxnet test workflow.""" + def fluxsite_submit_job(self) -> None: + """Submits the PBS job script step in the fluxsite test workflow.""" job_script_path = self.root_dir / internal.QSUB_FNAME print( - "Creating PBS job script to run FLUXNET tasks on compute " + "Creating PBS job script to run fluxsite tasks on compute " f"nodes: {job_script_path.relative_to(self.root_dir)}" ) with job_script_path.open("w", encoding="utf-8") as file: @@ -128,7 +130,7 @@ def fluxnet_submit_job(self) -> None: modules=self.config["modules"], storage_flags=[], # TODO(Sean) add storage flags option to config verbose=self.args.verbose, - skip_bitwise_cmp="fluxnet-bitwise-cmp" in self.args.skip, + skip_bitwise_cmp="fluxsite-bitwise-cmp" in self.args.skip, ) file.write(contents) @@ -146,11 +148,11 @@ def fluxnet_submit_job(self) -> None: print( f"PBS job submitted: {proc.stdout.strip()}\n" "The CABLE log file for each task is written to " - f"{internal.SITE_LOG_DIR}/_log.txt\n" + f"{internal.FLUXSITE_LOG_DIR}/_log.txt\n" "The CABLE standard output for each task is written to " - f"{internal.SITE_TASKS_DIR}//out.txt\n" + f"{internal.FLUXSITE_TASKS_DIR}//out.txt\n" "The NetCDF output for each task is written to " - f"{internal.SITE_OUTPUT_DIR}/_out.nc" + f"{internal.FLUXSITE_OUTPUT_DIR}/_out.nc" ) def checkout(self): @@ -192,30 +194,30 @@ def build(self): print(f"Successfully compiled CABLE for realisation {repo.name}") print("") - def fluxnet_setup_work_directory(self): - """Endpoint for `benchcab fluxnet-setup-work-dir`.""" + def fluxsite_setup_work_directory(self): + """Endpoint for `benchcab fluxsite-setup-work-dir`.""" tasks = self.tasks if self.tasks else self._initialise_tasks() - print("Setting up run directory tree for FLUXNET tests...") - setup_fluxnet_directory_tree(fluxnet_tasks=tasks, verbose=self.args.verbose) + print("Setting up run directory tree for fluxsite tests...") + setup_fluxsite_directory_tree(fluxsite_tasks=tasks, verbose=self.args.verbose) print("Setting up tasks...") for task in tasks: task.setup_task(verbose=self.args.verbose) - print("Successfully setup FLUXNET tasks") + print("Successfully setup fluxsite tasks") print("") - def fluxnet_run_tasks(self): - """Endpoint for `benchcab fluxnet-run-tasks`.""" + def fluxsite_run_tasks(self): + """Endpoint for `benchcab fluxsite-run-tasks`.""" tasks = self.tasks if self.tasks else self._initialise_tasks() - print("Running FLUXNET tasks...") + print("Running fluxsite tasks...") if internal.MULTIPROCESS: run_tasks_in_parallel(tasks, verbose=self.args.verbose) else: run_tasks(tasks, verbose=self.args.verbose) - print("Successfully ran FLUXNET tasks") + print("Successfully ran fluxsite tasks") print("") - def fluxnet_bitwise_cmp(self): - """Endpoint for `benchcab fluxnet-bitwise-cmp`.""" + def fluxsite_bitwise_cmp(self): + """Endpoint for `benchcab fluxsite-bitwise-cmp`.""" if not self.modules_handler.module_is_loaded("nccmp/1.8.5.0"): self.modules_handler.module_load( @@ -223,7 +225,7 @@ def fluxnet_bitwise_cmp(self): ) # use `nccmp -df` for bitwise comparisons tasks = self.tasks if self.tasks else self._initialise_tasks() - comparisons = get_fluxnet_comparisons(tasks) + comparisons = get_fluxsite_comparisons(tasks) print("Running comparison tasks...") if internal.MULTIPROCESS: @@ -232,24 +234,24 @@ def fluxnet_bitwise_cmp(self): run_comparisons(comparisons, verbose=self.args.verbose) print("Successfully ran comparison tasks") - def fluxnet(self): - """Endpoint for 
`benchcab fluxnet`.""" + def fluxsite(self): + """Endpoint for `benchcab fluxsite`.""" self.checkout() self.build() - self.fluxnet_setup_work_directory() + self.fluxsite_setup_work_directory() if self.args.no_submit: - self.fluxnet_run_tasks() - if "fluxnet-bitwise-cmp" not in self.args.skip: - self.fluxnet_bitwise_cmp() + self.fluxsite_run_tasks() + if "fluxsite-bitwise-cmp" not in self.args.skip: + self.fluxsite_bitwise_cmp() else: - self.fluxnet_submit_job() + self.fluxsite_submit_job() def spatial(self): """Endpoint for `benchcab spatial`.""" def run(self): """Endpoint for `benchcab run`.""" - self.fluxnet() + self.fluxsite() self.spatial() def main(self): @@ -264,17 +266,17 @@ def main(self): if self.args.subcommand == "build": self.build() - if self.args.subcommand == "fluxnet": - self.fluxnet() + if self.args.subcommand == "fluxsite": + self.fluxsite() - if self.args.subcommand == "fluxnet-setup-work-dir": - self.fluxnet_setup_work_directory() + if self.args.subcommand == "fluxsite-setup-work-dir": + self.fluxsite_setup_work_directory() - if self.args.subcommand == "fluxnet-run-tasks": - self.fluxnet_run_tasks() + if self.args.subcommand == "fluxsite-run-tasks": + self.fluxsite_run_tasks() - if self.args.subcommand == "fluxnet-bitwise-cmp": - self.fluxnet_bitwise_cmp() + if self.args.subcommand == "fluxsite-bitwise-cmp": + self.fluxsite_bitwise_cmp() if self.args.subcommand == "spatial": self.spatial() diff --git a/benchcab/benchtree.py b/benchcab/benchtree.py index ef51cd07..bfce6964 100644 --- a/benchcab/benchtree.py +++ b/benchcab/benchtree.py @@ -28,8 +28,8 @@ def setup_src_dir(root_dir=internal.CWD): os.makedirs(src_dir) -def setup_fluxnet_directory_tree( - fluxnet_tasks: list[Task], root_dir=internal.CWD, verbose=False +def setup_fluxsite_directory_tree( + fluxsite_tasks: list[Task], root_dir=internal.CWD, verbose=False ): """Generate the directory structure used of `benchcab`.""" @@ -37,49 +37,50 @@ def setup_fluxnet_directory_tree( if not run_dir.exists(): os.makedirs(run_dir) - site_run_dir = Path(root_dir, internal.SITE_RUN_DIR) - if not site_run_dir.exists(): - os.makedirs(site_run_dir) + fluxsite_run_dir = Path(root_dir, internal.FLUXSITE_RUN_DIR) + if not fluxsite_run_dir.exists(): + os.makedirs(fluxsite_run_dir) - site_log_dir = Path(root_dir, internal.SITE_LOG_DIR) - if not site_log_dir.exists(): + fluxsite_log_dir = Path(root_dir, internal.FLUXSITE_LOG_DIR) + if not fluxsite_log_dir.exists(): print( - f"Creating {site_log_dir.relative_to(root_dir)} directory: {site_log_dir}" + f"Creating {fluxsite_log_dir.relative_to(root_dir)} directory: {fluxsite_log_dir}" ) - os.makedirs(site_log_dir) + os.makedirs(fluxsite_log_dir) - site_output_dir = Path(root_dir, internal.SITE_OUTPUT_DIR) - if not site_output_dir.exists(): + fluxsite_output_dir = Path(root_dir, internal.FLUXSITE_OUTPUT_DIR) + if not fluxsite_output_dir.exists(): print( - f"Creating {site_output_dir.relative_to(root_dir)} directory: {site_output_dir}" + f"Creating {fluxsite_output_dir.relative_to(root_dir)} directory: {fluxsite_output_dir}" ) - os.makedirs(site_output_dir) + os.makedirs(fluxsite_output_dir) - site_tasks_dir = Path(root_dir, internal.SITE_TASKS_DIR) - if not site_tasks_dir.exists(): + fluxsite_tasks_dir = Path(root_dir, internal.FLUXSITE_TASKS_DIR) + if not fluxsite_tasks_dir.exists(): print( - f"Creating {site_tasks_dir.relative_to(root_dir)} directory: {site_tasks_dir}" + f"Creating {fluxsite_tasks_dir.relative_to(root_dir)} directory: {fluxsite_tasks_dir}" ) - os.makedirs(site_tasks_dir) + 
os.makedirs(fluxsite_tasks_dir) - site_analysis_dir = Path(root_dir, internal.SITE_ANALYSIS_DIR) - if not site_analysis_dir.exists(): + fluxsite_analysis_dir = Path(root_dir, internal.FLUXSITE_ANALYSIS_DIR) + if not fluxsite_analysis_dir.exists(): print( - f"Creating {site_analysis_dir.relative_to(root_dir)} directory: {site_analysis_dir}" + f"Creating {fluxsite_analysis_dir.relative_to(root_dir)} directory: " + f"{fluxsite_analysis_dir}" ) - os.makedirs(site_analysis_dir) + os.makedirs(fluxsite_analysis_dir) - site_bitwise_cmp_dir = Path(root_dir, internal.SITE_BITWISE_CMP_DIR) - if not site_bitwise_cmp_dir.exists(): + fluxsite_bitwise_cmp_dir = Path(root_dir, internal.FLUXSITE_BITWISE_CMP_DIR) + if not fluxsite_bitwise_cmp_dir.exists(): print( - f"Creating {site_bitwise_cmp_dir.relative_to(root_dir)} directory: " - f"{site_bitwise_cmp_dir}" + f"Creating {fluxsite_bitwise_cmp_dir.relative_to(root_dir)} directory: " + f"{fluxsite_bitwise_cmp_dir}" ) - os.makedirs(site_bitwise_cmp_dir) + os.makedirs(fluxsite_bitwise_cmp_dir) print("Creating task directories...") - for task in fluxnet_tasks: - task_dir = Path(root_dir, internal.SITE_TASKS_DIR, task.get_task_name()) + for task in fluxsite_tasks: + task_dir = Path(root_dir, internal.FLUXSITE_TASKS_DIR, task.get_task_name()) if not task_dir.exists(): if verbose: print(f"Creating {task_dir.relative_to(root_dir)}: " f"{task_dir}") diff --git a/benchcab/cli.py b/benchcab/cli.py index 9a750ada..413946f9 100644 --- a/benchcab/cli.py +++ b/benchcab/cli.py @@ -70,19 +70,19 @@ def generate_parser() -> argparse.ArgumentParser: "run", parents=[args_help, args_subcommand, args_run_subcommand], help="Run all test suites for CABLE.", - description="""Runs all test suites for CABLE: fluxnet sites and spatial test suites. This + description="""Runs all test suites for CABLE: fluxsite and spatial test suites. This command runs the full default set of tests for CABLE.""", add_help=False, ) - # subcommand: 'benchcab fluxnet' + # subcommand: 'benchcab fluxsite' subparsers.add_parser( - "fluxnet", + "fluxsite", parents=[args_help, args_subcommand, args_run_subcommand], - help="Run the fluxnet test suite for CABLE.", - description="""Runs the default fluxnet test suite for CABLE. This command is the + help="Run the fluxsite test suite for CABLE.", + description="""Runs the default fluxsite test suite for CABLE. 
This command is the equivalent of running 'benchcab checkout', 'benchcab build', 'benchcab - fluxnet-setup-work-dir', and 'benchcab fluxnet-run-tasks' sequentially.""", + fluxsite-setup-work-dir', and 'benchcab fluxsite-run-tasks' sequentially.""", add_help=False, ) @@ -106,33 +106,33 @@ def generate_parser() -> argparse.ArgumentParser: add_help=False, ) - # subcommand: 'benchcab fluxnet-setup-work-dir' + # subcommand: 'benchcab fluxsite-setup-work-dir' subparsers.add_parser( - "fluxnet-setup-work-dir", + "fluxsite-setup-work-dir", parents=[args_help, args_subcommand], - help="Run the work directory setup step of the fluxnet command.", - description="""Generates the benchcab site/run directory tree in the current working - directory so that tasks can be run.""", + help="Run the work directory setup step of the fluxsite command.", + description="""Generates the fluxsite run directory tree in the current working + directory so that fluxsite tasks can be run.""", add_help=False, ) - # subcommand: 'benchcab fluxnet-run-tasks' + # subcommand: 'benchcab fluxsite-run-tasks' subparsers.add_parser( - "fluxnet-run-tasks", + "fluxsite-run-tasks", parents=[args_help, args_subcommand], - help="Run the fluxnet tasks of the main fluxnet command.", - description="""Runs the fluxnet tasks for the fluxnet test suite. Note, this command should + help="Run the fluxsite tasks of the main fluxsite command.", + description="""Runs the fluxsite tasks for the fluxsite test suite. Note, this command should ideally be run inside a PBS job. This command is invoked by the PBS job script generated by `benchcab run`.""", add_help=False, ) - # subcommand: 'benchcab fluxnet-bitwise-cmp' + # subcommand: 'benchcab fluxsite-bitwise-cmp' subparsers.add_parser( - "fluxnet-bitwise-cmp", + "fluxsite-bitwise-cmp", parents=[args_help, args_subcommand], - help="Run the bitwise comparison step of the main fluxnet command.", - description="""Runs the bitwise comparison step for the fluxnet test suite. Bitwise + help="Run the bitwise comparison step of the main fluxsite command.", + description="""Runs the bitwise comparison step for the fluxsite test suite. Bitwise comparisons are done on NetCDF output files using the `nccmp -df` command. Comparisons are made between outputs that differ in their realisation and are matching in all other configurations. Note, this command should ideally be run inside a PBS job. 
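The subcommand rename above is a pure rename of the command strings: the parsed argument names and defaults are unchanged (see `tests/test_cli.py` further down). A minimal sketch of exercising the renamed entry points, mirroring the assertions in that test file — it assumes `generate_parser` is importable from `benchcab.cli`, as defined in the hunk above:

```python
# Sketch: parse the renamed subcommands and check the unchanged defaults.
from benchcab.cli import generate_parser

parser = generate_parser()

# 'fluxnet' is now 'fluxsite'; the parsed defaults are the same as before.
args = vars(parser.parse_args(["fluxsite"]))
assert args == {
    "subcommand": "fluxsite",
    "config": "config.yaml",
    "no_submit": False,
    "verbose": False,
    "skip": [],
}

# The step-wise subcommands follow the same rename.
for name in ("fluxsite-setup-work-dir", "fluxsite-run-tasks", "fluxsite-bitwise-cmp"):
    assert vars(parser.parse_args([name]))["subcommand"] == name
```
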
diff --git a/benchcab/comparison.py b/benchcab/comparison.py index a536c03a..4a2b08f8 100644 --- a/benchcab/comparison.py +++ b/benchcab/comparison.py @@ -40,7 +40,9 @@ def run(self, verbose=False) -> None: print(f"Success: files {file_a.name} {file_b.name} are identical") except CalledProcessError as exc: output_file = ( - self.root_dir / internal.SITE_BITWISE_CMP_DIR / f"{self.task_name}.txt" + self.root_dir + / internal.FLUXSITE_BITWISE_CMP_DIR + / f"{self.task_name}.txt" ) with open(output_file, "w", encoding="utf-8") as file: file.write(exc.stdout) diff --git a/benchcab/internal.py b/benchcab/internal.py index 83fdad71..f24e061d 100644 --- a/benchcab/internal.py +++ b/benchcab/internal.py @@ -54,23 +54,23 @@ CABLE_AUX_DIR / "core" / "biogeochem" / "pftlookup_csiro_v16_17tiles.csv" ) -# Relative path to root directory for CABLE site runs -SITE_RUN_DIR = RUN_DIR / "site" +# Relative path to root directory for CABLE fluxsite runs +FLUXSITE_RUN_DIR = RUN_DIR / "fluxsite" # Relative path to directory that stores CABLE log files -SITE_LOG_DIR = SITE_RUN_DIR / "logs" +FLUXSITE_LOG_DIR = FLUXSITE_RUN_DIR / "logs" # Relative path to directory that stores CABLE output files -SITE_OUTPUT_DIR = SITE_RUN_DIR / "outputs" +FLUXSITE_OUTPUT_DIR = FLUXSITE_RUN_DIR / "outputs" # Relative path to tasks directory where cable executables are run from -SITE_TASKS_DIR = SITE_RUN_DIR / "tasks" +FLUXSITE_TASKS_DIR = FLUXSITE_RUN_DIR / "tasks" # Relative path to directory that stores results of analysis on model output -SITE_ANALYSIS_DIR = SITE_RUN_DIR / "analysis" +FLUXSITE_ANALYSIS_DIR = FLUXSITE_RUN_DIR / "analysis" # Relative path to directory that stores bitwise comparison results -SITE_BITWISE_CMP_DIR = SITE_ANALYSIS_DIR / "bitwise-comparisons" +FLUXSITE_BITWISE_CMP_DIR = FLUXSITE_ANALYSIS_DIR / "bitwise-comparisons" # Path to met files: MET_DIR = Path("/g/data/ks32/CLEX_Data/PLUMBER2/v1-0/Met/") @@ -133,10 +133,10 @@ }, ] -# Contains the site ids for each met forcing file associated with an experiment +# Contains the FLUXNET site ids for each met forcing file associated with an experiment # on modelevaluation.org MEORG_EXPERIMENTS = { - # List of site ids associated with the 'Five site test' + # List of FLUXNET site ids associated with the 'Five site test' # experiment (workspace: NRI Land testing), see: # https://modelevaluation.org/experiment/display/xNZx2hSvn4PMKAa9R "five-site-test": [ @@ -146,7 +146,7 @@ "US-Var", "US-Whs", ], - # List of site ids associated with the 'Forty two site test' + # List of FLUXNET site ids associated with the 'Forty two site test' # experiment (workspace: NRI Land testing), see: # https://modelevaluation.org/experiment/display/urTKSXEsojdvEPwdR "forty-two-site-test": [ @@ -195,11 +195,11 @@ ], } -OPTIONAL_COMMANDS = ["fluxnet-bitwise-cmp"] +OPTIONAL_COMMANDS = ["fluxsite-bitwise-cmp"] -def get_met_sites(experiment: str) -> list[str]: - """Get a list of met forcing file basenames specified by an experiment +def get_met_forcing_file_names(experiment: str) -> list[str]: + """Get a list of meteorological forcing file basenames specified by an experiment The `experiment` argument either specifies a key in `MEORG_EXPERIMENTS` or a site id within the five-site-test experiment. 
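The rename from `get_met_sites` to `get_met_forcing_file_names` (and `met_sites` to `file_names` in the hunk below) is name-only; the lookup behaviour is unchanged. A small usage sketch of the two accepted argument forms — illustrative only, since resolving the glob requires the PLUMBER2 met forcing directory (`MET_DIR`) to be readable on Gadi:

```python
# Sketch: the two ways an 'experiment' resolves to met forcing file basenames.
from benchcab.internal import MEORG_EXPERIMENTS, get_met_forcing_file_names

# 1. A key in MEORG_EXPERIMENTS returns one basename per FLUXNET site id.
names = get_met_forcing_file_names("five-site-test")
assert len(names) == len(MEORG_EXPERIMENTS["five-site-test"])

# 2. A single site id from the five-site-test experiment returns just that file.
assert len(get_met_forcing_file_names("AU-Tum")) == 1
```
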
@@ -211,9 +211,9 @@ def get_met_sites(experiment: str) -> list[str]: # the user is specifying a single met site return [next(MET_DIR.glob(f"{experiment}*")).name] - met_sites = [ + file_names = [ next(MET_DIR.glob(f"{site_id}*")).name for site_id in MEORG_EXPERIMENTS[experiment] ] - return met_sites + return file_names diff --git a/benchcab/task.py b/benchcab/task.py index 6336adcf..bdf29a27 100644 --- a/benchcab/task.py +++ b/benchcab/task.py @@ -1,4 +1,4 @@ -"""A module containing functions and data structures for running fluxnet tasks.""" +"""A module containing functions and data structures for running fluxsite tasks.""" import shutil @@ -66,7 +66,7 @@ class CableError(Exception): class Task: - """A class used to represent a single fluxnet task.""" + """A class used to represent a single fluxsite task.""" root_dir: Path = internal.CWD subprocess_handler: SubprocessWrapperInterface = SubprocessWrapper() @@ -102,7 +102,7 @@ def setup_task(self, verbose=False): These include: 1. cleaning output, namelist, log files and cable executables if they exist 2. copying namelist files (cable.nml, pft_params.nml and cable_soil_parm.nml) - into the `runs/site/tasks/` directory. + into the `runs/fluxsite/tasks/` directory. 3. copying the cable executable from the source directory 4. make appropriate adjustments to namelist files 5. apply a branch patch if specified @@ -116,7 +116,7 @@ def setup_task(self, verbose=False): nml_path = ( self.root_dir - / internal.SITE_TASKS_DIR + / internal.FLUXSITE_TASKS_DIR / self.get_task_name() / internal.CABLE_NML ) @@ -131,12 +131,12 @@ def setup_task(self, verbose=False): "met": str(internal.MET_DIR / self.met_forcing_file), "out": str( self.root_dir - / internal.SITE_OUTPUT_DIR + / internal.FLUXSITE_OUTPUT_DIR / self.get_output_filename() ), "log": str( self.root_dir - / internal.SITE_LOG_DIR + / internal.FLUXSITE_LOG_DIR / self.get_log_filename() ), "restart_out": " ", @@ -172,7 +172,7 @@ def clean_task(self, verbose=False): if verbose: print(" Cleaning task") - task_dir = self.root_dir / internal.SITE_TASKS_DIR / self.get_task_name() + task_dir = self.root_dir / internal.FLUXSITE_TASKS_DIR / self.get_task_name() cable_exe = task_dir / internal.CABLE_EXE if cable_exe.exists(): @@ -191,12 +191,12 @@ def clean_task(self, verbose=False): cable_soil_nml.unlink() output_file = ( - self.root_dir / internal.SITE_OUTPUT_DIR / self.get_output_filename() + self.root_dir / internal.FLUXSITE_OUTPUT_DIR / self.get_output_filename() ) if output_file.exists(): output_file.unlink() - log_file = self.root_dir / internal.SITE_LOG_DIR / self.get_log_filename() + log_file = self.root_dir / internal.FLUXSITE_LOG_DIR / self.get_log_filename() if log_file.exists(): log_file.unlink() @@ -206,11 +206,11 @@ def fetch_files(self, verbose=False): """Retrieves all files necessary to run cable in the task directory. Namely: - - copies contents of 'namelists' directory to 'runs/site/tasks/' directory. - - copies cable executable from source to 'runs/site/tasks/' directory. + - copies contents of 'namelists' directory to 'runs/fluxsite/tasks/' directory. + - copies cable executable from source to 'runs/fluxsite/tasks/' directory. 
""" - task_dir = self.root_dir / internal.SITE_TASKS_DIR / self.get_task_name() + task_dir = self.root_dir / internal.FLUXSITE_TASKS_DIR / self.get_task_name() if verbose: print( @@ -239,9 +239,9 @@ def fetch_files(self, verbose=False): return self def run(self, verbose=False): - """Runs a single fluxnet task.""" + """Runs a single fluxsite task.""" task_name = self.get_task_name() - task_dir = self.root_dir / internal.SITE_TASKS_DIR / task_name + task_dir = self.root_dir / internal.FLUXSITE_TASKS_DIR / task_name if verbose: print( f"Running task {task_name}... CABLE standard output " @@ -259,7 +259,7 @@ def run_cable(self, verbose=False): Raises `CableError` when CABLE returns a non-zero exit code. """ task_name = self.get_task_name() - task_dir = self.root_dir / internal.SITE_TASKS_DIR / task_name + task_dir = self.root_dir / internal.FLUXSITE_TASKS_DIR / task_name exe_path = task_dir / internal.CABLE_EXE nml_path = task_dir / internal.CABLE_NML stdout_path = task_dir / internal.CABLE_STDOUT_FILENAME @@ -279,11 +279,11 @@ def add_provenance_info(self, verbose=False): the namelist file used to run cable. """ nc_output_path = ( - self.root_dir / internal.SITE_OUTPUT_DIR / self.get_output_filename() + self.root_dir / internal.FLUXSITE_OUTPUT_DIR / self.get_output_filename() ) nml = f90nml.read( self.root_dir - / internal.SITE_TASKS_DIR + / internal.FLUXSITE_TASKS_DIR / self.get_task_name() / internal.CABLE_NML ) @@ -306,21 +306,21 @@ def add_provenance_info(self, verbose=False): ) -def get_fluxnet_tasks( +def get_fluxsite_tasks( repos: list[CableRepository], science_configurations: list[dict], - met_sites: list[str], + fluxsite_forcing_file_names: list[str], ) -> list[Task]: - """Returns a list of fluxnet tasks to run.""" + """Returns a list of fluxsite tasks to run.""" tasks = [ Task( repo=repo, - met_forcing_file=site, + met_forcing_file=file_name, sci_conf_id=sci_conf_id, sci_config=sci_config, ) for repo in repos - for site in met_sites + for file_name in fluxsite_forcing_file_names for sci_conf_id, sci_config in enumerate(science_configurations) ] return tasks @@ -359,16 +359,16 @@ def worker_run(task_queue: multiprocessing.Queue, verbose=False): task.run(verbose=verbose) -def get_fluxnet_comparisons( +def get_fluxsite_comparisons( tasks: list[Task], root_dir=internal.CWD ) -> list[ComparisonTask]: - """Returns a list of pairs of fluxnet tasks to run comparisons with. + """Returns a list of `ComparisonTask` objects to run comparisons with. Pairs should be matching in science configurations and meteorological forcing, but differ in realisations. When multiple realisations are specified, return all pair wise combinations between all realisations. """ - output_dir = root_dir / internal.SITE_OUTPUT_DIR + output_dir = root_dir / internal.FLUXSITE_OUTPUT_DIR return [ ComparisonTask( files=( @@ -389,7 +389,7 @@ def get_fluxnet_comparisons( # to re-initialize task instances to get access to the output file path # for each task. There is probably a better way but should be fine for # now... 
- # for site in met_sites + # for file_name in fluxsite_forcing_file_names # for sci_conf_id in range(len(science_configurations)) # for branch_id_first, branch_id_second in itertools.combinations( # range(len(realisations)), 2 diff --git a/benchcab/utils/pbs.py b/benchcab/utils/pbs.py index 1cc4f7fc..3b9b5903 100644 --- a/benchcab/utils/pbs.py +++ b/benchcab/utils/pbs.py @@ -38,15 +38,15 @@ def render_job_script( module load conda/analysis3-unstable {module_load_lines} -benchcab fluxnet-run-tasks --config={config_path} {verbose_flag} +benchcab fluxsite-run-tasks --config={config_path} {verbose_flag} if [ $? -ne 0 ]; then - echo 'Error: benchcab fluxnet-run-tasks failed. Exiting...' + echo 'Error: benchcab fluxsite-run-tasks failed. Exiting...' exit 1 fi {'' if skip_bitwise_cmp else f''' -benchcab fluxnet-bitwise-cmp --config={config_path} {verbose_flag} +benchcab fluxsite-bitwise-cmp --config={config_path} {verbose_flag} if [ $? -ne 0 ]; then - echo 'Error: benchcab fluxnet-bitwise-cmp failed. Exiting...' + echo 'Error: benchcab fluxsite-bitwise-cmp failed. Exiting...' exit 1 fi''' } """ diff --git a/docs/user_guide/expected_output.md b/docs/user_guide/expected_output.md index 502e0285..a126fb69 100644 --- a/docs/user_guide/expected_output.md +++ b/docs/user_guide/expected_output.md @@ -18,27 +18,27 @@ Successfully compiled CABLE for realisation trunk Compiling CABLE serially for realisation test-branch... Successfully compiled CABLE for realisation test-branch -Setting up run directory tree for FLUXNET tests... -Creating runs/site/logs directory: /scratch/tm70/sb8430/bench_example/runs/site/logs -Creating runs/site/outputs directory: /scratch/tm70/sb8430/bench_example/runs/site/outputs -Creating runs/site/tasks directory: /scratch/tm70/sb8430/bench_example/runs/site/tasks -Creating runs/site/analysis directory: /scratch/tm70/sb8430/bench_example/runs/site/analysis -Creating runs/site/analysis/bitwise-comparisons directory: /scratch/tm70/sb8430/bench_example/runs/site/analysis/bitwise-comparisons +Setting up run directory tree for fluxsite tests... +Creating runs/fluxsite/logs directory: /scratch/tm70/sb8430/bench_example/runs/fluxsite/logs +Creating runs/fluxsite/outputs directory: /scratch/tm70/sb8430/bench_example/runs/fluxsite/outputs +Creating runs/fluxsite/tasks directory: /scratch/tm70/sb8430/bench_example/runs/fluxsite/tasks +Creating runs/fluxsite/analysis directory: /scratch/tm70/sb8430/bench_example/runs/fluxsite/analysis +Creating runs/fluxsite/analysis/bitwise-comparisons directory: /scratch/tm70/sb8430/bench_example/runs/fluxsite/analysis/bitwise-comparisons Creating task directories... Setting up tasks... -Successfully setup FLUXNET tasks +Successfully setup fluxsite tasks -Creating PBS job script to run FLUXNET tasks on compute nodes: benchmark_cable_qsub.sh +Creating PBS job script to run fluxsite tasks on compute nodes: benchmark_cable_qsub.sh PBS job submitted: 82479088.gadi-pbs -The CABLE log file for each task is written to runs/site/logs/_log.txt -The CABLE standard output for each task is written to runs/site/tasks//out.txt -The NetCDF output for each task is written to runs/site/outputs/_out.nc +The CABLE log file for each task is written to runs/fluxsite/logs/_log.txt +The CABLE standard output for each task is written to runs/fluxsite/tasks//out.txt +The NetCDF output for each task is written to runs/fluxsite/outputs/_out.nc ``` The PBS schedule job should print out the following to the job log file: ``` -Running FLUXNET tasks... 
-Successfully ran FLUXNET tasks +Running fluxsite tasks... +Successfully ran fluxsite tasks Running comparison tasks... Success: files AU-Tum_2002-2017_OzFlux_Met_R0_S0_out.nc AU-Tum_2002-2017_OzFlux_Met_R1_S0_out.nc are identical diff --git a/docs/user_guide/index.md b/docs/user_guide/index.md index 83859a1d..942890e4 100644 --- a/docs/user_guide/index.md +++ b/docs/user_guide/index.md @@ -119,7 +119,7 @@ The following files and directories are created when `benchcab run` executes suc ├── benchmark_cable_qsub.sh.o ├── rev_number-1.log ├── runs -│   └── site +│   └── fluxsite │   ├── logs │ │ ├── _log.txt │ │ └── ... @@ -153,7 +153,7 @@ The following files and directories are created when `benchcab run` executes suc : directory that contains the source code checked out from SVN for each branch specified in the config file (labelled `realisation-*` above) and the CABLE-AUX branch. -`runs/site/` +`runs/fluxsite/` : directory that contains the log files, output files, and tasks for running CABLE. @@ -167,21 +167,21 @@ The following files and directories are created when `benchcab run` executes suc : where `met_file_base_name` is the base file name of the meteorological forcing file in the FLUXNET dataset, `realisation_key` is the branch key specified in the config file, and `science_config_key` identifies the science configuration used. -`runs/site/tasks//` +`runs/fluxsite/tasks//` : directory that contains the executable, the input files for each task and the recorded standard output from the CABLE model run. -`runs/site/outputs/` +`runs/fluxsite/outputs/` : directory that contains the netCDF output files for all tasks -`runs/site/logs/` +`runs/fluxsite/logs/` : directory that contains the log files produced by all tasks -`runs/site/analysis/bitwise-comparisons` +`runs/fluxsite/analysis/bitwise-comparisons` -: directory that contains the standard output produced by the bitwise comparison command: `benchcab fluxnet-bitwise-cmp`. Standard output is only saved when the netcdf files being compared differ from each other +: directory that contains the standard output produced by the bitwise comparison command: `benchcab fluxsite-bitwise-cmp`. Standard output is only saved when the netcdf files being compared differ from each other !!! warning "Re-running `benchcab` multiple times in the same working directory" We recommend the user to manually delete the generated files when re-running `benchcab`. Re-running `benchcab` multiple times in the same working directory is currently not yet supported (see issue [CABLE-LSM/benchcab#20](https://github.com/CABLE-LSM/benchcab/issues/20)). To clean the current working directory, run the following command in the working directory @@ -216,7 +216,7 @@ Once the benchmarking has finished running all the simulations, you need to uplo The model profile should describe the versions of CABLE used to generate the model outputs and the URLs to the repository pointing to the code versions. You are free to set the name as you like. 4. Upload model outputs created by `benchcab` by doing the following: - 1. Transfer model outputs from the `runs/site/outputs/` directory to your local computer so that they can be uploaded via the web interface. + 1. Transfer model outputs from the `runs/fluxsite/outputs/` directory to your local computer so that they can be uploaded via the web interface. 2. Create a new model output form. You can see [this example][model_output_eg] to get started. 
To create your own, select the **Model Outputs** tab on [modelevaluation.org][meorg] and click **Upload Model Output**.
![Model output](../assets/model_evaluation/New%20model%20output.png){ width="500" } diff --git a/tests/test_benchcab.py b/tests/test_benchcab.py index fbc0799c..2a86c077 100644 --- a/tests/test_benchcab.py +++ b/tests/test_benchcab.py @@ -16,35 +16,35 @@ def get_mock_app( ) -> Benchcab: """Returns a mock `Benchcab` instance for testing against.""" config = get_mock_config() - app = Benchcab(argv=["benchcab", "fluxnet"], config=config, validate_env=False) + app = Benchcab(argv=["benchcab", "fluxsite"], config=config, validate_env=False) app.subprocess_handler = subprocess_handler app.root_dir = MOCK_CWD return app -def test_fluxnet_submit_job(): - """Tests for `Benchcab.fluxnet_submit_job()`.""" +def test_fluxsite_submit_job(): + """Tests for `Benchcab.fluxsite_submit_job()`.""" # Success case: test qsub command is executed mock_subprocess = MockSubprocessWrapper() app = get_mock_app(mock_subprocess) - app.fluxnet_submit_job() + app.fluxsite_submit_job() assert f"qsub {MOCK_CWD / internal.QSUB_FNAME}" in mock_subprocess.commands # Success case: test non-verbose output app = get_mock_app() with contextlib.redirect_stdout(io.StringIO()) as buf: - app.fluxnet_submit_job() + app.fluxsite_submit_job() assert buf.getvalue() == ( - "Creating PBS job script to run FLUXNET tasks on compute " + "Creating PBS job script to run fluxsite tasks on compute " f"nodes: {internal.QSUB_FNAME}\n" f"PBS job submitted: {mock_subprocess.stdout}\n" "The CABLE log file for each task is written to " - f"{internal.SITE_LOG_DIR}/_log.txt\n" + f"{internal.FLUXSITE_LOG_DIR}/_log.txt\n" "The CABLE standard output for each task is written to " - f"{internal.SITE_TASKS_DIR}//out.txt\n" + f"{internal.FLUXSITE_TASKS_DIR}//out.txt\n" "The NetCDF output for each task is written to " - f"{internal.SITE_OUTPUT_DIR}/_out.nc\n" + f"{internal.FLUXSITE_OUTPUT_DIR}/_out.nc\n" ) # Failure case: qsub non-zero exit code prints an error message @@ -53,9 +53,9 @@ def test_fluxnet_submit_job(): app = get_mock_app(subprocess_handler=mock_subprocess) with contextlib.redirect_stdout(io.StringIO()) as buf: with pytest.raises(CalledProcessError): - app.fluxnet_submit_job() + app.fluxsite_submit_job() assert buf.getvalue() == ( - "Creating PBS job script to run FLUXNET tasks on compute " + "Creating PBS job script to run fluxsite tasks on compute " f"nodes: {internal.QSUB_FNAME}\n" f"Error when submitting job to NCI queue\n{mock_subprocess.stdout}\n" ) diff --git a/tests/test_benchtree.py b/tests/test_benchtree.py index d4b7a39a..6c26e00d 100644 --- a/tests/test_benchtree.py +++ b/tests/test_benchtree.py @@ -11,73 +11,75 @@ from benchcab.task import Task from benchcab.repository import CableRepository from benchcab.benchtree import ( - setup_fluxnet_directory_tree, + setup_fluxsite_directory_tree, clean_directory_tree, setup_src_dir, ) def setup_mock_tasks() -> list[Task]: - """Return a mock list of fluxnet tasks.""" + """Return a mock list of fluxsite tasks.""" config = get_mock_config() repo_a = CableRepository("trunk", repo_id=0) repo_b = CableRepository("path/to/my-branch", repo_id=1) - met_site_a, met_site_b = "site_foo", "site_bar" + met_forcing_file_a, met_forcing_file_b = "site_foo", "site_bar" (sci_id_a, sci_config_a), (sci_id_b, sci_config_b) = enumerate( config["science_configurations"] ) tasks = [ - Task(repo_a, met_site_a, sci_id_a, sci_config_a), - Task(repo_a, met_site_a, sci_id_b, sci_config_b), - Task(repo_a, met_site_b, sci_id_a, sci_config_a), - Task(repo_a, met_site_b, sci_id_b, sci_config_b), - Task(repo_b, met_site_a, 
sci_id_a, sci_config_a), - Task(repo_b, met_site_a, sci_id_b, sci_config_b), - Task(repo_b, met_site_b, sci_id_a, sci_config_a), - Task(repo_b, met_site_b, sci_id_b, sci_config_b), + Task(repo_a, met_forcing_file_a, sci_id_a, sci_config_a), + Task(repo_a, met_forcing_file_a, sci_id_b, sci_config_b), + Task(repo_a, met_forcing_file_b, sci_id_a, sci_config_a), + Task(repo_a, met_forcing_file_b, sci_id_b, sci_config_b), + Task(repo_b, met_forcing_file_a, sci_id_a, sci_config_a), + Task(repo_b, met_forcing_file_a, sci_id_b, sci_config_b), + Task(repo_b, met_forcing_file_b, sci_id_a, sci_config_a), + Task(repo_b, met_forcing_file_b, sci_id_b, sci_config_b), ] return tasks def test_setup_directory_tree(): - """Tests for `setup_fluxnet_directory_tree()`.""" + """Tests for `setup_fluxsite_directory_tree()`.""" - # Success case: generate fluxnet directory structure + # Success case: generate fluxsite directory structure tasks = setup_mock_tasks() - setup_fluxnet_directory_tree(fluxnet_tasks=tasks, root_dir=MOCK_CWD) + setup_fluxsite_directory_tree(fluxsite_tasks=tasks, root_dir=MOCK_CWD) assert len(list(MOCK_CWD.glob("*"))) == 1 assert Path(MOCK_CWD, "runs").exists() - assert Path(MOCK_CWD, "runs", "site").exists() - assert Path(MOCK_CWD, "runs", "site", "logs").exists() - assert Path(MOCK_CWD, "runs", "site", "outputs").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks").exists() - assert Path(MOCK_CWD, "runs", "site", "analysis", "bitwise-comparisons").exists() - - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_foo_R0_S0").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_foo_R0_S1").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_bar_R0_S0").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_bar_R0_S1").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_foo_R1_S0").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_foo_R1_S1").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_bar_R1_S0").exists() - assert Path(MOCK_CWD, "runs", "site", "tasks", "site_bar_R1_S1").exists() + assert Path(MOCK_CWD, "runs", "fluxsite").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "logs").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "outputs").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks").exists() + assert Path( + MOCK_CWD, "runs", "fluxsite", "analysis", "bitwise-comparisons" + ).exists() + + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_foo_R0_S0").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_foo_R0_S1").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_bar_R0_S0").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_bar_R0_S1").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_foo_R1_S0").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_foo_R1_S1").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_bar_R1_S0").exists() + assert Path(MOCK_CWD, "runs", "fluxsite", "tasks", "site_bar_R1_S1").exists() shutil.rmtree(MOCK_CWD / "runs") # Success case: test non-verbose output with contextlib.redirect_stdout(io.StringIO()) as buf: - setup_fluxnet_directory_tree(fluxnet_tasks=tasks, root_dir=MOCK_CWD) + setup_fluxsite_directory_tree(fluxsite_tasks=tasks, root_dir=MOCK_CWD) assert buf.getvalue() == ( - f"Creating runs/site/logs directory: {MOCK_CWD}/runs/site/logs\n" - f"Creating runs/site/outputs directory: {MOCK_CWD}/runs/site/outputs\n" - f"Creating runs/site/tasks directory: 
{MOCK_CWD}/runs/site/tasks\n" - f"Creating runs/site/analysis directory: {MOCK_CWD}/runs/site/analysis\n" - f"Creating runs/site/analysis/bitwise-comparisons directory: {MOCK_CWD}" - "/runs/site/analysis/bitwise-comparisons\n" + f"Creating runs/fluxsite/logs directory: {MOCK_CWD}/runs/fluxsite/logs\n" + f"Creating runs/fluxsite/outputs directory: {MOCK_CWD}/runs/fluxsite/outputs\n" + f"Creating runs/fluxsite/tasks directory: {MOCK_CWD}/runs/fluxsite/tasks\n" + f"Creating runs/fluxsite/analysis directory: {MOCK_CWD}/runs/fluxsite/analysis\n" + f"Creating runs/fluxsite/analysis/bitwise-comparisons directory: {MOCK_CWD}" + "/runs/fluxsite/analysis/bitwise-comparisons\n" f"Creating task directories...\n" ) @@ -85,25 +87,33 @@ def test_setup_directory_tree(): # Success case: test verbose output with contextlib.redirect_stdout(io.StringIO()) as buf: - setup_fluxnet_directory_tree( - fluxnet_tasks=tasks, verbose=True, root_dir=MOCK_CWD + setup_fluxsite_directory_tree( + fluxsite_tasks=tasks, verbose=True, root_dir=MOCK_CWD ) assert buf.getvalue() == ( - f"Creating runs/site/logs directory: {MOCK_CWD}/runs/site/logs\n" - f"Creating runs/site/outputs directory: {MOCK_CWD}/runs/site/outputs\n" - f"Creating runs/site/tasks directory: {MOCK_CWD}/runs/site/tasks\n" - f"Creating runs/site/analysis directory: {MOCK_CWD}/runs/site/analysis\n" - f"Creating runs/site/analysis/bitwise-comparisons directory: {MOCK_CWD}" - "/runs/site/analysis/bitwise-comparisons\n" + f"Creating runs/fluxsite/logs directory: {MOCK_CWD}/runs/fluxsite/logs\n" + f"Creating runs/fluxsite/outputs directory: {MOCK_CWD}/runs/fluxsite/outputs\n" + f"Creating runs/fluxsite/tasks directory: {MOCK_CWD}/runs/fluxsite/tasks\n" + f"Creating runs/fluxsite/analysis directory: {MOCK_CWD}/runs/fluxsite/analysis\n" + f"Creating runs/fluxsite/analysis/bitwise-comparisons directory: {MOCK_CWD}" + "/runs/fluxsite/analysis/bitwise-comparisons\n" f"Creating task directories...\n" - f"Creating runs/site/tasks/site_foo_R0_S0: {MOCK_CWD}/runs/site/tasks/site_foo_R0_S0\n" - f"Creating runs/site/tasks/site_foo_R0_S1: {MOCK_CWD}/runs/site/tasks/site_foo_R0_S1\n" - f"Creating runs/site/tasks/site_bar_R0_S0: {MOCK_CWD}/runs/site/tasks/site_bar_R0_S0\n" - f"Creating runs/site/tasks/site_bar_R0_S1: {MOCK_CWD}/runs/site/tasks/site_bar_R0_S1\n" - f"Creating runs/site/tasks/site_foo_R1_S0: {MOCK_CWD}/runs/site/tasks/site_foo_R1_S0\n" - f"Creating runs/site/tasks/site_foo_R1_S1: {MOCK_CWD}/runs/site/tasks/site_foo_R1_S1\n" - f"Creating runs/site/tasks/site_bar_R1_S0: {MOCK_CWD}/runs/site/tasks/site_bar_R1_S0\n" - f"Creating runs/site/tasks/site_bar_R1_S1: {MOCK_CWD}/runs/site/tasks/site_bar_R1_S1\n" + f"Creating runs/fluxsite/tasks/site_foo_R0_S0: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_foo_R0_S0\n" + f"Creating runs/fluxsite/tasks/site_foo_R0_S1: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_foo_R0_S1\n" + f"Creating runs/fluxsite/tasks/site_bar_R0_S0: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_bar_R0_S0\n" + f"Creating runs/fluxsite/tasks/site_bar_R0_S1: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_bar_R0_S1\n" + f"Creating runs/fluxsite/tasks/site_foo_R1_S0: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_foo_R1_S0\n" + f"Creating runs/fluxsite/tasks/site_foo_R1_S1: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_foo_R1_S1\n" + f"Creating runs/fluxsite/tasks/site_bar_R1_S0: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_bar_R1_S0\n" + f"Creating runs/fluxsite/tasks/site_bar_R1_S1: " + f"{MOCK_CWD}/runs/fluxsite/tasks/site_bar_R1_S1\n" ) shutil.rmtree(MOCK_CWD / "runs") @@ -114,7 
+124,7 @@ def test_clean_directory_tree(): # Success case: directory tree does not exist after clean tasks = setup_mock_tasks() - setup_fluxnet_directory_tree(fluxnet_tasks=tasks, root_dir=MOCK_CWD) + setup_fluxsite_directory_tree(fluxsite_tasks=tasks, root_dir=MOCK_CWD) clean_directory_tree(root_dir=MOCK_CWD) assert not Path(MOCK_CWD, "runs").exists() diff --git a/tests/test_cli.py b/tests/test_cli.py index d1deab7d..96023bd8 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -27,36 +27,36 @@ def test_cli_parser(): res = vars(parser.parse_args(["build"])) assert res == {"subcommand": "build", "config": "config.yaml", "verbose": False} - # Success case: default fluxnet command - res = vars(parser.parse_args(["fluxnet"])) + # Success case: default fluxsite command + res = vars(parser.parse_args(["fluxsite"])) assert res == { - "subcommand": "fluxnet", + "subcommand": "fluxsite", "config": "config.yaml", "no_submit": False, "verbose": False, "skip": [], } - # Success case: default fluxnet-setup-work-dir command - res = vars(parser.parse_args(["fluxnet-setup-work-dir"])) + # Success case: default fluxsite-setup-work-dir command + res = vars(parser.parse_args(["fluxsite-setup-work-dir"])) assert res == { - "subcommand": "fluxnet-setup-work-dir", + "subcommand": "fluxsite-setup-work-dir", "config": "config.yaml", "verbose": False, } - # Success case: default fluxnet run-tasks command - res = vars(parser.parse_args(["fluxnet-run-tasks"])) + # Success case: default fluxsite run-tasks command + res = vars(parser.parse_args(["fluxsite-run-tasks"])) assert res == { - "subcommand": "fluxnet-run-tasks", + "subcommand": "fluxsite-run-tasks", "config": "config.yaml", "verbose": False, } - # Success case: default fluxnet-bitwise-cmp command - res = vars(parser.parse_args(["fluxnet-bitwise-cmp"])) + # Success case: default fluxsite-bitwise-cmp command + res = vars(parser.parse_args(["fluxsite-bitwise-cmp"])) assert res == { - "subcommand": "fluxnet-bitwise-cmp", + "subcommand": "fluxsite-bitwise-cmp", "config": "config.yaml", "verbose": False, } @@ -71,7 +71,7 @@ def test_cli_parser(): # Failure case: pass --no-submit to a non 'run' command with pytest.raises(SystemExit): - parser.parse_args(["fluxnet-setup-work-dir", "--no-submit"]) + parser.parse_args(["fluxsite-setup-work-dir", "--no-submit"]) # Failure case: pass non-optional command to --skip with pytest.raises(SystemExit): diff --git a/tests/test_comparison.py b/tests/test_comparison.py index fe3e99f1..068e338e 100644 --- a/tests/test_comparison.py +++ b/tests/test_comparison.py @@ -27,7 +27,7 @@ def test_run_comparison(): file_a = MOCK_CWD / "file_a.nc" file_b = MOCK_CWD / "file_b.nc" - bitwise_cmp_dir = MOCK_CWD / internal.SITE_BITWISE_CMP_DIR + bitwise_cmp_dir = MOCK_CWD / internal.FLUXSITE_BITWISE_CMP_DIR bitwise_cmp_dir.mkdir(parents=True) # Success case: run comparison diff --git a/tests/test_pbs.py b/tests/test_pbs.py index e3625df1..5ee49d47 100644 --- a/tests/test_pbs.py +++ b/tests/test_pbs.py @@ -32,15 +32,15 @@ def test_render_job_script(): module load bar module load baz -benchcab fluxnet-run-tasks --config=/path/to/config.yaml +benchcab fluxsite-run-tasks --config=/path/to/config.yaml if [ $? -ne 0 ]; then - echo 'Error: benchcab fluxnet-run-tasks failed. Exiting...' + echo 'Error: benchcab fluxsite-run-tasks failed. Exiting...' exit 1 fi -benchcab fluxnet-bitwise-cmp --config=/path/to/config.yaml +benchcab fluxsite-bitwise-cmp --config=/path/to/config.yaml if [ $? 
-ne 0 ]; then - echo 'Error: benchcab fluxnet-bitwise-cmp failed. Exiting...' + echo 'Error: benchcab fluxsite-bitwise-cmp failed. Exiting...' exit 1 fi """ @@ -72,21 +72,21 @@ def test_render_job_script(): module load bar module load baz -benchcab fluxnet-run-tasks --config=/path/to/config.yaml -v +benchcab fluxsite-run-tasks --config=/path/to/config.yaml -v if [ $? -ne 0 ]; then - echo 'Error: benchcab fluxnet-run-tasks failed. Exiting...' + echo 'Error: benchcab fluxsite-run-tasks failed. Exiting...' exit 1 fi -benchcab fluxnet-bitwise-cmp --config=/path/to/config.yaml -v +benchcab fluxsite-bitwise-cmp --config=/path/to/config.yaml -v if [ $? -ne 0 ]; then - echo 'Error: benchcab fluxnet-bitwise-cmp failed. Exiting...' + echo 'Error: benchcab fluxsite-bitwise-cmp failed. Exiting...' exit 1 fi """ ) - # Success case: skip fluxnet-bitwise-cmp step + # Success case: skip fluxsite-bitwise-cmp step assert render_job_script( project="tm70", config_path="/path/to/config.yaml", @@ -112,9 +112,9 @@ def test_render_job_script(): module load bar module load baz -benchcab fluxnet-run-tasks --config=/path/to/config.yaml +benchcab fluxsite-run-tasks --config=/path/to/config.yaml if [ $? -ne 0 ]; then - echo 'Error: benchcab fluxnet-run-tasks failed. Exiting...' + echo 'Error: benchcab fluxsite-run-tasks failed. Exiting...' exit 1 fi @@ -147,9 +147,9 @@ def test_render_job_script(): module load bar module load baz -benchcab fluxnet-run-tasks --config=/path/to/config.yaml +benchcab fluxsite-run-tasks --config=/path/to/config.yaml if [ $? -ne 0 ]; then - echo 'Error: benchcab fluxnet-run-tasks failed. Exiting...' + echo 'Error: benchcab fluxsite-run-tasks failed. Exiting...' exit 1 fi diff --git a/tests/test_task.py b/tests/test_task.py index 8186d262..7d0e89d2 100644 --- a/tests/test_task.py +++ b/tests/test_task.py @@ -9,8 +9,8 @@ from benchcab.task import ( patch_namelist, - get_fluxnet_tasks, - get_fluxnet_comparisons, + get_fluxsite_tasks, + get_fluxsite_comparisons, get_comparison_name, Task, CableError, @@ -67,11 +67,11 @@ def setup_mock_namelists_directory(): def setup_mock_run_directory(task: Task): """Setup mock run directory for a single task.""" - task_dir = MOCK_CWD / internal.SITE_TASKS_DIR / task.get_task_name() + task_dir = MOCK_CWD / internal.FLUXSITE_TASKS_DIR / task.get_task_name() task_dir.mkdir(parents=True) - output_dir = MOCK_CWD / internal.SITE_OUTPUT_DIR + output_dir = MOCK_CWD / internal.FLUXSITE_OUTPUT_DIR output_dir.mkdir(parents=True) - log_dir = MOCK_CWD / internal.SITE_LOG_DIR + log_dir = MOCK_CWD / internal.FLUXSITE_LOG_DIR log_dir.mkdir(parents=True) @@ -88,11 +88,13 @@ def do_mock_checkout_and_build(): def do_mock_run(task: Task): """Make mock log files and output files as if benchcab has just been run.""" - output_path = Path(MOCK_CWD, internal.SITE_OUTPUT_DIR, task.get_output_filename()) + output_path = Path( + MOCK_CWD, internal.FLUXSITE_OUTPUT_DIR, task.get_output_filename() + ) output_path.touch() assert output_path.exists() - log_path = Path(MOCK_CWD, internal.SITE_LOG_DIR, task.get_log_filename()) + log_path = Path(MOCK_CWD, internal.FLUXSITE_LOG_DIR, task.get_log_filename()) log_path.touch() assert log_path.exists() @@ -131,19 +133,22 @@ def test_fetch_files(): task.fetch_files() assert Path( - MOCK_CWD, internal.SITE_TASKS_DIR, task.get_task_name(), internal.CABLE_NML + MOCK_CWD, internal.FLUXSITE_TASKS_DIR, task.get_task_name(), internal.CABLE_NML ).exists() assert Path( MOCK_CWD, - internal.SITE_TASKS_DIR, + internal.FLUXSITE_TASKS_DIR, task.get_task_name(), 
internal.CABLE_VEGETATION_NML, ).exists() assert Path( - MOCK_CWD, internal.SITE_TASKS_DIR, task.get_task_name(), internal.CABLE_SOIL_NML + MOCK_CWD, + internal.FLUXSITE_TASKS_DIR, + task.get_task_name(), + internal.CABLE_SOIL_NML, ).exists() assert Path( - MOCK_CWD, internal.SITE_TASKS_DIR, task.get_task_name(), internal.CABLE_EXE + MOCK_CWD, internal.FLUXSITE_TASKS_DIR, task.get_task_name(), internal.CABLE_EXE ).exists() @@ -164,24 +169,29 @@ def test_clean_task(): task.clean_task() assert not Path( - MOCK_CWD, internal.SITE_TASKS_DIR, task.get_task_name(), internal.CABLE_NML + MOCK_CWD, internal.FLUXSITE_TASKS_DIR, task.get_task_name(), internal.CABLE_NML ).exists() assert not Path( MOCK_CWD, - internal.SITE_TASKS_DIR, + internal.FLUXSITE_TASKS_DIR, task.get_task_name(), internal.CABLE_VEGETATION_NML, ).exists() assert not Path( - MOCK_CWD, internal.SITE_TASKS_DIR, task.get_task_name(), internal.CABLE_SOIL_NML + MOCK_CWD, + internal.FLUXSITE_TASKS_DIR, + task.get_task_name(), + internal.CABLE_SOIL_NML, + ).exists() + assert not Path( + MOCK_CWD, internal.FLUXSITE_TASKS_DIR, task.get_task_name(), internal.CABLE_EXE ).exists() assert not Path( - MOCK_CWD, internal.SITE_TASKS_DIR, task.get_task_name(), internal.CABLE_EXE + MOCK_CWD, internal.FLUXSITE_OUTPUT_DIR, task.get_output_filename() ).exists() assert not Path( - MOCK_CWD, internal.SITE_OUTPUT_DIR, task.get_output_filename() + MOCK_CWD, internal.FLUXSITE_LOG_DIR, task.get_log_filename() ).exists() - assert not Path(MOCK_CWD, internal.SITE_LOG_DIR, task.get_log_filename()).exists() def test_patch_namelist(): @@ -215,7 +225,7 @@ def test_setup_task(): """Tests for `setup_task()`.""" task = get_mock_task() - task_dir = Path(MOCK_CWD, internal.SITE_TASKS_DIR, task.get_task_name()) + task_dir = Path(MOCK_CWD, internal.FLUXSITE_TASKS_DIR, task.get_task_name()) setup_mock_namelists_directory() setup_mock_run_directory(task) @@ -228,9 +238,9 @@ def test_setup_task(): "filename": { "met": str(internal.MET_DIR / "forcing-file.nc"), "out": str( - MOCK_CWD / internal.SITE_OUTPUT_DIR / task.get_output_filename() + MOCK_CWD / internal.FLUXSITE_OUTPUT_DIR / task.get_output_filename() ), - "log": str(MOCK_CWD / internal.SITE_LOG_DIR / task.get_log_filename()), + "log": str(MOCK_CWD / internal.FLUXSITE_LOG_DIR / task.get_log_filename()), "restart_out": " ", "type": str(MOCK_CWD / internal.GRID_FILE), }, @@ -257,15 +267,15 @@ def test_setup_task(): "Setting up task: forcing-file_R1_S0\n" " Cleaning task\n" f" Copying namelist files from {MOCK_CWD}/namelists to " - f"{MOCK_CWD / 'runs/site/tasks/forcing-file_R1_S0'}\n" + f"{MOCK_CWD / 'runs/fluxsite/tasks/forcing-file_R1_S0'}\n" f" Copying CABLE executable from {MOCK_CWD}/src/test-branch/" - f"offline/cable to {MOCK_CWD}/runs/site/tasks/forcing-file_R1_S0/cable\n" + f"offline/cable to {MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable\n" " Adding base configurations to CABLE namelist file " - f"{MOCK_CWD}/runs/site/tasks/forcing-file_R1_S0/cable.nml\n" + f"{MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" " Adding science configurations to CABLE namelist file " - f"{MOCK_CWD}/runs/site/tasks/forcing-file_R1_S0/cable.nml\n" + f"{MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" " Adding branch specific configurations to CABLE namelist file " - f"{MOCK_CWD}/runs/site/tasks/forcing-file_R1_S0/cable.nml\n" + f"{MOCK_CWD}/runs/fluxsite/tasks/forcing-file_R1_S0/cable.nml\n" ) @@ -274,7 +284,7 @@ def test_run_cable(): mock_subprocess = MockSubprocessWrapper() task = 
get_mock_task(subprocess_handler=mock_subprocess) - task_dir = MOCK_CWD / internal.SITE_TASKS_DIR / task.get_task_name() + task_dir = MOCK_CWD / internal.FLUXSITE_TASKS_DIR / task.get_task_name() task_dir.mkdir(parents=True) exe_path = task_dir / internal.CABLE_EXE exe_path.touch() @@ -309,10 +319,10 @@ def test_add_provenance_info(): mock_subprocess = MockSubprocessWrapper() task = get_mock_task(subprocess_handler=mock_subprocess) - task_dir = MOCK_CWD / internal.SITE_TASKS_DIR / task.get_task_name() + task_dir = MOCK_CWD / internal.FLUXSITE_TASKS_DIR / task.get_task_name() task_dir.mkdir(parents=True) - site_output_dir = MOCK_CWD / internal.SITE_OUTPUT_DIR - site_output_dir.mkdir() + fluxsite_output_dir = MOCK_CWD / internal.FLUXSITE_OUTPUT_DIR + fluxsite_output_dir.mkdir() # Create mock namelist file in task directory: f90nml.write( @@ -321,7 +331,7 @@ def test_add_provenance_info(): ) # Create mock netcdf output file as if CABLE had just been run: - nc_output_path = site_output_dir / task.get_output_filename() + nc_output_path = fluxsite_output_dir / task.get_output_filename() netCDF4.Dataset(nc_output_path, "w") # Success case: add global attributes to netcdf file @@ -344,43 +354,43 @@ def test_add_provenance_info(): task.add_provenance_info(verbose=True) assert buf.getvalue() == ( "Adding attributes to output file: " - f"{MOCK_CWD}/runs/site/outputs/forcing-file_R1_S0_out.nc\n" + f"{MOCK_CWD}/runs/fluxsite/outputs/forcing-file_R1_S0_out.nc\n" ) -def test_get_fluxnet_tasks(): - """Tests for `get_fluxnet_tasks()`.""" +def test_get_fluxsite_tasks(): + """Tests for `get_fluxsite_tasks()`.""" - # Success case: get task list for two branches, two met - # sites and two science configurations + # Success case: get task list for two branches, two fluxsite met + # forcing files and two science configurations config = get_mock_config() repos = [ CableRepository(**branch_config, repo_id=id) for id, branch_config in enumerate(config["realisations"]) ] - met_site_a, met_site_b = "foo", "bar" + met_forcing_file_a, met_forcing_file_b = "foo", "bar" sci_a, sci_b = config["science_configurations"] - tasks = get_fluxnet_tasks( + tasks = get_fluxsite_tasks( repos, config["science_configurations"], - [met_site_a, met_site_b], + [met_forcing_file_a, met_forcing_file_b], ) assert [(task.repo, task.met_forcing_file, task.sci_config) for task in tasks] == [ - (repos[0], met_site_a, sci_a), - (repos[0], met_site_a, sci_b), - (repos[0], met_site_b, sci_a), - (repos[0], met_site_b, sci_b), - (repos[1], met_site_a, sci_a), - (repos[1], met_site_a, sci_b), - (repos[1], met_site_b, sci_a), - (repos[1], met_site_b, sci_b), + (repos[0], met_forcing_file_a, sci_a), + (repos[0], met_forcing_file_a, sci_b), + (repos[0], met_forcing_file_b, sci_a), + (repos[0], met_forcing_file_b, sci_b), + (repos[1], met_forcing_file_a, sci_a), + (repos[1], met_forcing_file_a, sci_b), + (repos[1], met_forcing_file_b, sci_a), + (repos[1], met_forcing_file_b, sci_b), ] -def test_get_fluxnet_comparisons(): - """Tests for `get_fluxnet_comparisons()`.""" +def test_get_fluxsite_comparisons(): + """Tests for `get_fluxsite_comparisons()`.""" - output_dir = MOCK_CWD / internal.SITE_OUTPUT_DIR + output_dir = MOCK_CWD / internal.FLUXSITE_OUTPUT_DIR # Success case: comparisons for two branches with two tasks # met0_S0_R0 met0_S0_R1 @@ -396,7 +406,7 @@ def test_get_fluxnet_comparisons(): sci_config={"foo": "bar"}, sci_conf_id=0, ) - comparisons = get_fluxnet_comparisons([task_a, task_b], root_dir=MOCK_CWD) + comparisons = 
get_fluxsite_comparisons([task_a, task_b], root_dir=MOCK_CWD) assert len(comparisons) == 1 assert comparisons[0].files == ( output_dir / task_a.get_output_filename(), @@ -424,7 +434,7 @@ def test_get_fluxnet_comparisons(): sci_config={"foo": "bar"}, sci_conf_id=0, ) - comparisons = get_fluxnet_comparisons([task_a, task_b, task_c], root_dir=MOCK_CWD) + comparisons = get_fluxsite_comparisons([task_a, task_b, task_c], root_dir=MOCK_CWD) assert len(comparisons) == 3 assert comparisons[0].files == ( output_dir / task_a.get_output_filename(),