From 1cf8b448af562dbb7af198399c78c585977e81da Mon Sep 17 00:00:00 2001
From: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com>
Date: Tue, 30 Jul 2024 10:38:49 -0400
Subject: [PATCH] Simplify resource-related variables, remove CDUMP where
unneeded (#2727)
This overhauls the resource-related variables so that all jobs draw on a
common set of names exported by config.resources. In the process, it also
removes the use of CDUMP in most cases.
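
The old per-job names (npe_<job>, npe_node_<job>, nth_<job>, npe_node_max)
collapse into ntasks, tasks_per_node, threads_per_task, and
max_tasks_per_node, and each machine env file derives its launch settings
from them in one shared preamble. A condensed sketch of that block (see
env/HERA.env and the other env files in this patch for the real thing):

    # config.resources is expected to export ntasks, tasks_per_node,
    # max_tasks_per_node, and (optionally) threads_per_task beforehand.
    if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
      max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
      NTHREADSmax=${threads_per_task:-${max_threads_per_task}}  # threaded executables
      NTHREADS1=${threads_per_task:-1}                          # single-threaded executables
      # Clamp both to what the node layout allows
      [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
      [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
      APRUN="${launcher} -n ${ntasks}"                          # common MPI launch command
    else
      echo "ERROR config.resources must be sourced first"
      exit 2
    fi
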
Resolves #1299 and #2693
---
ci/scripts/tests/test_rocotostat.py | 19 +-
docs/doxygen/mainpage.h | 4 +-
docs/source/init.rst | 2 +-
docs/source/setup.rst | 4 +-
.../analysis/create/jenkfgdas_diag.ecf | 1 -
.../analysis/create/jenkfgdas_select_obs.ecf | 1 -
.../analysis/create/jenkfgdas_update.ecf | 1 -
.../analysis/recenter/ecen/jenkfgdas_ecen.ecf | 1 -
.../analysis/recenter/jenkfgdas_sfc.ecf | 1 -
.../enkfgdas/forecast/jenkfgdas_fcst.ecf | 1 -
.../enkfgdas/post/jenkfgdas_post_master.ecf | 1 -
.../atmos/analysis/jgdas_atmos_analysis.ecf | 1 -
.../analysis/jgdas_atmos_analysis_calc.ecf | 1 -
.../analysis/jgdas_atmos_analysis_diag.ecf | 1 -
.../gdas/atmos/gempak/jgdas_atmos_gempak.ecf | 1 -
.../gempak/jgdas_atmos_gempak_meta_ncdc.ecf | 1 -
.../dump/jgdas_atmos_tropcy_qc_reloc.ecf | 1 -
.../prep/jgdas_atmos_emcsfc_sfc_prep.ecf | 1 -
.../atmos/post/jgdas_atmos_post_manager.ecf | 1 -
.../atmos/post/jgdas_atmos_post_master.ecf | 1 -
.../jgdas_atmos_chgres_forenkf.ecf | 1 -
.../gdas/atmos/verf/jgdas_atmos_verfozn.ecf | 1 -
.../gdas/atmos/verf/jgdas_atmos_verfrad.ecf | 1 -
.../gdas/atmos/verf/jgdas_atmos_vminmon.ecf | 1 -
ecf/scripts/gdas/jgdas_forecast.ecf | 1 -
.../gdas/wave/init/jgdas_wave_init.ecf | 1 -
.../gdas/wave/post/jgdas_wave_postpnt.ecf | 1 -
.../gdas/wave/post/jgdas_wave_postsbs.ecf | 1 -
.../gdas/wave/prep/jgdas_wave_prep.ecf | 1 -
.../atmos/analysis/jgfs_atmos_analysis.ecf | 1 -
.../analysis/jgfs_atmos_analysis_calc.ecf | 1 -
.../gfs/atmos/gempak/jgfs_atmos_gempak.ecf | 1 -
.../atmos/gempak/jgfs_atmos_gempak_meta.ecf | 1 -
.../gempak/jgfs_atmos_gempak_ncdc_upapgif.ecf | 1 -
.../gempak/jgfs_atmos_npoess_pgrb2_0p5deg.ecf | 1 -
.../gempak/jgfs_atmos_pgrb2_spec_gempak.ecf | 1 -
.../dump/jgfs_atmos_tropcy_qc_reloc.ecf | 1 -
.../prep/jgfs_atmos_emcsfc_sfc_prep.ecf | 1 -
.../atmos/post/jgfs_atmos_post_manager.ecf | 1 -
.../gfs/atmos/post/jgfs_atmos_post_master.ecf | 1 -
.../jgfs_atmos_awips_master.ecf | 1 -
.../bufr_sounding/jgfs_atmos_postsnd.ecf | 1 -
.../bulletins/jgfs_atmos_fbwind.ecf | 1 -
.../gfs/atmos/verf/jgfs_atmos_vminmon.ecf | 1 -
ecf/scripts/gfs/jgfs_forecast.ecf | 1 -
.../gfs/wave/gempak/jgfs_wave_gempak.ecf | 1 -
ecf/scripts/gfs/wave/init/jgfs_wave_init.ecf | 1 -
.../gfs/wave/post/jgfs_wave_post_bndpnt.ecf | 1 -
.../wave/post/jgfs_wave_post_bndpntbll.ecf | 1 -
.../gfs/wave/post/jgfs_wave_postpnt.ecf | 1 -
.../gfs/wave/post/jgfs_wave_postsbs.ecf | 1 -
.../gfs/wave/post/jgfs_wave_prdgen_bulls.ecf | 1 -
.../wave/post/jgfs_wave_prdgen_gridded.ecf | 1 -
ecf/scripts/gfs/wave/prep/jgfs_wave_prep.ecf | 1 -
env/AWSPW.env | 107 +-
env/GAEA.env | 43 +-
env/HERA.env | 232 +--
env/HERCULES.env | 223 ++-
env/JET.env | 205 +--
env/ORION.env | 233 +--
env/S4.env | 204 +--
env/WCOSS2.env | 231 ++-
jobs/JGDAS_ATMOS_ANALYSIS_DIAG | 3 +-
jobs/JGDAS_ATMOS_CHGRES_FORENKF | 5 +-
jobs/JGDAS_ENKF_ARCHIVE | 2 -
jobs/JGDAS_ENKF_DIAG | 5 +-
jobs/JGDAS_ENKF_ECEN | 7 +-
jobs/JGDAS_ENKF_POST | 1 -
jobs/JGDAS_ENKF_SELECT_OBS | 9 +-
jobs/JGDAS_ENKF_SFC | 11 +-
jobs/JGDAS_ENKF_UPDATE | 1 -
jobs/JGDAS_FIT2OBS | 4 +-
jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_CHKPT | 2 +-
jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_POST | 3 +
jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP | 5 +-
jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_VRFY | 1 -
jobs/JGFS_ATMOS_CYCLONE_TRACKER | 3 +-
jobs/JGFS_ATMOS_FSU_GENESIS | 1 -
jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF | 2 +-
jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS | 2 +-
jobs/JGFS_ATMOS_POSTSND | 1 -
jobs/JGFS_ATMOS_VERIFICATION | 2 +-
jobs/JGLOBAL_ARCHIVE | 2 -
jobs/JGLOBAL_ATMOS_ANALYSIS | 6 +-
jobs/JGLOBAL_ATMOS_ANALYSIS_CALC | 6 +-
jobs/JGLOBAL_ATMOS_TROPCY_QC_RELOC | 1 -
jobs/JGLOBAL_STAGE_IC | 5 +-
jobs/JGLOBAL_WAVE_PREP | 2 -
jobs/rocoto/awips_20km_1p0deg.sh | 2 +-
jobs/rocoto/prep.sh | 21 +-
parm/archive/arcdir.yaml.j2 | 2 +-
parm/archive/gdas.yaml.j2 | 2 +-
parm/archive/gfsa.yaml.j2 | 2 +-
parm/archive/master_gfs.yaml.j2 | 2 +-
parm/config/gefs/config.base | 24 +-
parm/config/gefs/config.fcst | 4 +-
parm/config/gefs/config.resources | 386 +++--
parm/config/gefs/config.wave | 4 +-
parm/config/gfs/config.anal | 5 +-
parm/config/gfs/config.base | 28 +-
parm/config/gfs/config.eobs | 3 +-
parm/config/gfs/config.eupd | 2 +-
parm/config/gfs/config.fcst | 8 +-
parm/config/gfs/config.metp | 10 +-
parm/config/gfs/config.prep | 2 +-
parm/config/gfs/config.resources | 1424 ++++++++---------
parm/config/gfs/config.resources.GAEA | 4 +-
parm/config/gfs/config.resources.HERA | 17 +-
parm/config/gfs/config.resources.HERCULES | 3 +-
parm/config/gfs/config.resources.JET | 23 +-
parm/config/gfs/config.resources.S4 | 32 +-
parm/config/gfs/config.resources.WCOSS2 | 28 +-
parm/config/gfs/config.wave | 4 +-
parm/config/gfs/config.waveprep | 2 +-
scripts/exgdas_atmos_chgres_forenkf.sh | 3 +-
scripts/exgdas_enkf_update.sh | 2 +-
scripts/exgfs_aero_init_aerosol.py | 18 +-
scripts/exgfs_wave_post_pnt.sh | 2 +-
scripts/exglobal_archive.py | 2 +-
scripts/exglobal_atmos_analysis.sh | 10 +-
scripts/exglobal_atmos_analysis_calc.sh | 4 +-
scripts/exglobal_atmos_products.sh | 4 +-
scripts/exglobal_diag.sh | 3 +-
scripts/exglobal_forecast.sh | 8 +-
sorc/wxflow | 2 +-
ush/calcanl_gfs.py | 12 +-
ush/forecast_predet.sh | 4 +-
ush/getdump.sh | 8 +-
workflow/applications/applications.py | 69 +-
workflow/applications/gefs.py | 6 +-
workflow/applications/gfs_cycled.py | 32 +-
workflow/applications/gfs_forecast_only.py | 10 +-
workflow/create_experiment.py | 9 +-
workflow/rocoto/gefs_tasks.py | 12 +-
workflow/rocoto/gfs_tasks.py | 754 ++++-----
workflow/rocoto/tasks.py | 68 +-
workflow/rocoto/workflow_tasks.py | 8 +-
workflow/setup_expt.py | 26 +-
138 files changed, 2100 insertions(+), 2669 deletions(-)
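
For the fcst and efcs steps, the env files now round the task count up to
whole nodes before building APRUN_UFS, since with ESMF threading the model
wants the full node. A worked example with assumed values ntasks=100 and
tasks_per_node=24:

    (( nnodes = (ntasks + tasks_per_node - 1) / tasks_per_node ))  # (100 + 23) / 24 = 5 nodes
    (( ufs_ntasks = nnodes * tasks_per_node ))                     # 5 * 24 = 120 tasks
    export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
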
diff --git a/ci/scripts/tests/test_rocotostat.py b/ci/scripts/tests/test_rocotostat.py
index f43f8df2f8..a3c56df988 100755
--- a/ci/scripts/tests/test_rocotostat.py
+++ b/ci/scripts/tests/test_rocotostat.py
@@ -25,17 +25,16 @@
database_destination = os.path.join(testdata_full_path, 'database.db')
wget.download(database_url, database_destination)
-try:
- rocotostat = which('rocotostat')
-except CommandNotFoundError:
+rocotostat_cmd = which('rocotostat')
+if not rocotostat_cmd:
raise CommandNotFoundError("rocotostat not found in PATH")
-rocotostat.add_default_arg(['-w', os.path.join(testdata_path, 'workflow.xml'), '-d', os.path.join(testdata_path, 'database.db')])
+rocotostat_cmd.add_default_arg(['-w', os.path.join(testdata_path, 'workflow.xml'), '-d', os.path.join(testdata_path, 'database.db')])
def test_rocoto_statcount():
- result = rocoto_statcount(rocotostat)
+ result = rocoto_statcount(rocotostat_cmd)
assert result['SUCCEEDED'] == 20
assert result['FAIL'] == 0
@@ -47,7 +46,7 @@ def test_rocoto_statcount():
def test_rocoto_summary():
- result = rocotostat_summary(rocotostat)
+ result = rocotostat_summary(rocotostat_cmd)
assert result['CYCLES_TOTAL'] == 1
assert result['CYCLES_DONE'] == 1
@@ -55,7 +54,7 @@ def test_rocoto_summary():
def test_rocoto_done():
- result = rocotostat_summary(rocotostat)
+ result = rocotostat_summary(rocotostat_cmd)
assert is_done(result)
@@ -79,10 +78,10 @@ def test_rocoto_stalled():
database_destination = os.path.join(testdata_full_path, 'stalled.db')
wget.download(database_url, database_destination)
- rocotostat = which('rocotostat')
- rocotostat.add_default_arg(['-w', xml, '-d', db])
+ rocotostat_cmd = which('rocotostat')
+ rocotostat_cmd.add_default_arg(['-w', xml, '-d', db])
- result = rocoto_statcount(rocotostat)
+ result = rocoto_statcount(rocotostat_cmd)
assert result['SUCCEEDED'] == 11
assert is_stalled(result)
diff --git a/docs/doxygen/mainpage.h b/docs/doxygen/mainpage.h
index 19a51be272..92d602aa82 100644
--- a/docs/doxygen/mainpage.h
+++ b/docs/doxygen/mainpage.h
@@ -24,7 +24,7 @@ To setup an experiment, a python script \c setup_expt.py (located in \
usage: setup_expt.py [-h] --pslot PSLOT
[--configdir CONFIGDIR] [--idate IDATE] [--icsdir ICSDIR]
[--resdetatmos RESDET] [--resensatmos RESENS] [--comroot COMROOT]
- [--expdir EXPDIR] [--nens NENS] [--cdump CDUMP]
+ [--expdir EXPDIR] [--nens NENS] [--run RUN]
Setup files and directories to start a GFS parallel. Create EXPDIR, copy
config files Create ROTDIR experiment directory structure, link initial
@@ -52,7 +52,7 @@ To setup an experiment, a python script \c setup_expt.py (located in \
(default: None)
--nens number of ensemble members
(default: 80)
- --cdump CDUMP to start the experiment
+ --run RUN to start the experiment
(default: gdas)
The above script creates directories \c EXPDIR and \c ROTDIR. It will make links for initial conditions from a location provided via the \c --icsdir argument for a chosen resolution for the control \c --resdetatmos and the ensemble \c --resensatmos. Experiment name is controlled by the input argument \c --pslot. The script will ask user input in case any of the directories already exist. It will copy experiment configuration files into the \c EXPDIR from \c CONFIGDIR.
diff --git a/docs/source/init.rst b/docs/source/init.rst
index ac900e1be2..69e43f9bb0 100644
--- a/docs/source/init.rst
+++ b/docs/source/init.rst
@@ -384,7 +384,7 @@ The warm starts and other output from production are at C768 deterministic and C
What files should you pull for starting a new experiment with warm starts from production?
------------------------------------------------------------------------------------------
-That depends on what mode you want to run -- forecast-only or cycled. Whichever mode, navigate to the top of your ``ROTDIR`` and pull the entirety of the tarball(s) listed below for your mode. The files within the tarball are already in the ``$CDUMP.$PDY/$CYC/$ATMOS`` folder format expected by the system.
+That depends on what mode you want to run -- forecast-only or cycled. Whichever mode, navigate to the top of your ``ROTDIR`` and pull the entirety of the tarball(s) listed below for your mode. The files within the tarball are already in the ``$RUN.$PDY/$CYC/$ATMOS`` folder format expected by the system.
For forecast-only there are two tarballs to pull
diff --git a/docs/source/setup.rst b/docs/source/setup.rst
index de5cfa099a..1715899927 100644
--- a/docs/source/setup.rst
+++ b/docs/source/setup.rst
@@ -145,7 +145,7 @@ The following command examples include variables for reference but users should
cd workflow
./setup_expt.py gfs cycled --idate $IDATE --edate $EDATE [--app $APP] [--start $START] [--gfs_cyc $GFS_CYC]
- [--resdetatmos $RESDETATMOS] [--resdetocean $RESDETOCEAN] [--resensatmos $RESENSATMOS] [--nens $NENS] [--cdump $CDUMP]
+ [--resdetatmos $RESDETATMOS] [--resdetocean $RESDETOCEAN] [--resensatmos $RESENSATMOS] [--nens $NENS] [--run $RUN]
[--pslot $PSLOT] [--configdir $CONFIGDIR] [--comroot $COMROOT] [--expdir $EXPDIR] [--icsdir $ICSDIR]
where:
@@ -170,7 +170,7 @@ where:
* ``$RESDETOCEAN`` is the resolution of the ocean component of the deterministic forecast [default: 0.; determined based on atmosphere resolution]
* ``$RESENSATMOS`` is the resolution of the atmosphere component of the ensemble forecast [default: 192]
* ``$NENS`` is the number of ensemble members [default: 20]
- * ``$CDUMP`` is the starting phase [default: gdas]
+ * ``$RUN`` is the starting phase [default: gdas]
* ``$PSLOT`` is the name of your experiment [default: test]
* ``$CONFIGDIR`` is the path to the config folder under the copy of the system you're using [default: $TOP_OF_CLONE/parm/config/]
* ``$COMROOT`` is the path to your experiment output directory. Your ``ROTDIR`` (rotating com directory) will be created using ``COMROOT`` and ``PSLOT``. [default: $HOME]
diff --git a/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_diag.ecf b/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_diag.ecf
index 08d0185399..03835172b9 100755
--- a/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_diag.ecf
+++ b/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_diag.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_select_obs.ecf b/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_select_obs.ecf
index b94e3a18e3..bc289b8006 100755
--- a/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_select_obs.ecf
+++ b/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_select_obs.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_update.ecf b/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_update.ecf
index 6611afff52..91eef7fb74 100755
--- a/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_update.ecf
+++ b/ecf/scripts/enkfgdas/analysis/create/jenkfgdas_update.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/enkfgdas/analysis/recenter/ecen/jenkfgdas_ecen.ecf b/ecf/scripts/enkfgdas/analysis/recenter/ecen/jenkfgdas_ecen.ecf
index 1ed2568d61..044a65c843 100755
--- a/ecf/scripts/enkfgdas/analysis/recenter/ecen/jenkfgdas_ecen.ecf
+++ b/ecf/scripts/enkfgdas/analysis/recenter/ecen/jenkfgdas_ecen.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/enkfgdas/analysis/recenter/jenkfgdas_sfc.ecf b/ecf/scripts/enkfgdas/analysis/recenter/jenkfgdas_sfc.ecf
index d095742193..954ca49533 100755
--- a/ecf/scripts/enkfgdas/analysis/recenter/jenkfgdas_sfc.ecf
+++ b/ecf/scripts/enkfgdas/analysis/recenter/jenkfgdas_sfc.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/enkfgdas/forecast/jenkfgdas_fcst.ecf b/ecf/scripts/enkfgdas/forecast/jenkfgdas_fcst.ecf
index 923d208350..2fd692d1df 100755
--- a/ecf/scripts/enkfgdas/forecast/jenkfgdas_fcst.ecf
+++ b/ecf/scripts/enkfgdas/forecast/jenkfgdas_fcst.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/enkfgdas/post/jenkfgdas_post_master.ecf b/ecf/scripts/enkfgdas/post/jenkfgdas_post_master.ecf
index 6627b97c10..4f682a7a0a 100755
--- a/ecf/scripts/enkfgdas/post/jenkfgdas_post_master.ecf
+++ b/ecf/scripts/enkfgdas/post/jenkfgdas_post_master.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis.ecf b/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis.ecf
index 36b9272204..cc6eee326d 100755
--- a/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis.ecf
+++ b/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_calc.ecf b/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_calc.ecf
index 41601c4de8..92c8c0551e 100755
--- a/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_calc.ecf
+++ b/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_calc.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_diag.ecf b/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_diag.ecf
index f766333272..53d9daf734 100755
--- a/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_diag.ecf
+++ b/ecf/scripts/gdas/atmos/analysis/jgdas_atmos_analysis_diag.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf b/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf
index 754d921f95..b3bb579ca3 100755
--- a/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf
+++ b/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak_meta_ncdc.ecf b/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak_meta_ncdc.ecf
index 9d66f4bda1..312d3dcdaa 100755
--- a/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak_meta_ncdc.ecf
+++ b/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak_meta_ncdc.ecf
@@ -17,7 +17,6 @@ set -x
export model=%model:gdas%
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/obsproc/dump/jgdas_atmos_tropcy_qc_reloc.ecf b/ecf/scripts/gdas/atmos/obsproc/dump/jgdas_atmos_tropcy_qc_reloc.ecf
index 2dd0bdf06c..c5f838fb5f 100755
--- a/ecf/scripts/gdas/atmos/obsproc/dump/jgdas_atmos_tropcy_qc_reloc.ecf
+++ b/ecf/scripts/gdas/atmos/obsproc/dump/jgdas_atmos_tropcy_qc_reloc.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/obsproc/prep/jgdas_atmos_emcsfc_sfc_prep.ecf b/ecf/scripts/gdas/atmos/obsproc/prep/jgdas_atmos_emcsfc_sfc_prep.ecf
index 7e3282bc95..6ebae60924 100755
--- a/ecf/scripts/gdas/atmos/obsproc/prep/jgdas_atmos_emcsfc_sfc_prep.ecf
+++ b/ecf/scripts/gdas/atmos/obsproc/prep/jgdas_atmos_emcsfc_sfc_prep.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_manager.ecf b/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_manager.ecf
index 1da24c0d46..9792253ec8 100755
--- a/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_manager.ecf
+++ b/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_manager.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_master.ecf b/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_master.ecf
index f88fdcdaf9..b65be6586e 100755
--- a/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_master.ecf
+++ b/ecf/scripts/gdas/atmos/post/jgdas_atmos_post_master.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/post_processing/jgdas_atmos_chgres_forenkf.ecf b/ecf/scripts/gdas/atmos/post_processing/jgdas_atmos_chgres_forenkf.ecf
index 33fa481a29..32b024f663 100755
--- a/ecf/scripts/gdas/atmos/post_processing/jgdas_atmos_chgres_forenkf.ecf
+++ b/ecf/scripts/gdas/atmos/post_processing/jgdas_atmos_chgres_forenkf.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfozn.ecf b/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfozn.ecf
index 9c7a1609e7..938611b4bc 100755
--- a/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfozn.ecf
+++ b/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfozn.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfrad.ecf b/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfrad.ecf
index e2d3bb3463..dd0c19d6f0 100755
--- a/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfrad.ecf
+++ b/ecf/scripts/gdas/atmos/verf/jgdas_atmos_verfrad.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/atmos/verf/jgdas_atmos_vminmon.ecf b/ecf/scripts/gdas/atmos/verf/jgdas_atmos_vminmon.ecf
index 9afd0b5083..b538a18a3d 100755
--- a/ecf/scripts/gdas/atmos/verf/jgdas_atmos_vminmon.ecf
+++ b/ecf/scripts/gdas/atmos/verf/jgdas_atmos_vminmon.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/jgdas_forecast.ecf b/ecf/scripts/gdas/jgdas_forecast.ecf
index 69c8e17801..392d5f362f 100755
--- a/ecf/scripts/gdas/jgdas_forecast.ecf
+++ b/ecf/scripts/gdas/jgdas_forecast.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gdas/wave/init/jgdas_wave_init.ecf b/ecf/scripts/gdas/wave/init/jgdas_wave_init.ecf
index 208ed2cc52..1f73e43eb1 100755
--- a/ecf/scripts/gdas/wave/init/jgdas_wave_init.ecf
+++ b/ecf/scripts/gdas/wave/init/jgdas_wave_init.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gdas/wave/post/jgdas_wave_postpnt.ecf b/ecf/scripts/gdas/wave/post/jgdas_wave_postpnt.ecf
index 1899dc152f..fb45d8fda5 100755
--- a/ecf/scripts/gdas/wave/post/jgdas_wave_postpnt.ecf
+++ b/ecf/scripts/gdas/wave/post/jgdas_wave_postpnt.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gdas/wave/post/jgdas_wave_postsbs.ecf b/ecf/scripts/gdas/wave/post/jgdas_wave_postsbs.ecf
index 31cca40bed..5212a026d9 100755
--- a/ecf/scripts/gdas/wave/post/jgdas_wave_postsbs.ecf
+++ b/ecf/scripts/gdas/wave/post/jgdas_wave_postsbs.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gdas/wave/prep/jgdas_wave_prep.ecf b/ecf/scripts/gdas/wave/prep/jgdas_wave_prep.ecf
index 77b44634a9..b1fd9fe32e 100755
--- a/ecf/scripts/gdas/wave/prep/jgdas_wave_prep.ecf
+++ b/ecf/scripts/gdas/wave/prep/jgdas_wave_prep.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis.ecf b/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis.ecf
index a30eceae57..12653d0e95 100755
--- a/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis.ecf
+++ b/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis_calc.ecf b/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis_calc.ecf
index 41601c4de8..92c8c0551e 100755
--- a/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis_calc.ecf
+++ b/ecf/scripts/gfs/atmos/analysis/jgfs_atmos_analysis_calc.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf
index e01fa35e57..5f56e7ac17 100755
--- a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf
+++ b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_meta.ecf b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_meta.ecf
index e9833baa41..4798e2a06a 100755
--- a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_meta.ecf
+++ b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_meta.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_ncdc_upapgif.ecf b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_ncdc_upapgif.ecf
index 08686dbca3..25659058f8 100755
--- a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_ncdc_upapgif.ecf
+++ b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak_ncdc_upapgif.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_npoess_pgrb2_0p5deg.ecf b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_npoess_pgrb2_0p5deg.ecf
index 1ff597411a..da66dfe7f6 100755
--- a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_npoess_pgrb2_0p5deg.ecf
+++ b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_npoess_pgrb2_0p5deg.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf
index df53868b05..df0f9f90f1 100755
--- a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf
+++ b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/obsproc/dump/jgfs_atmos_tropcy_qc_reloc.ecf b/ecf/scripts/gfs/atmos/obsproc/dump/jgfs_atmos_tropcy_qc_reloc.ecf
index 2dd0bdf06c..c5f838fb5f 100755
--- a/ecf/scripts/gfs/atmos/obsproc/dump/jgfs_atmos_tropcy_qc_reloc.ecf
+++ b/ecf/scripts/gfs/atmos/obsproc/dump/jgfs_atmos_tropcy_qc_reloc.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/obsproc/prep/jgfs_atmos_emcsfc_sfc_prep.ecf b/ecf/scripts/gfs/atmos/obsproc/prep/jgfs_atmos_emcsfc_sfc_prep.ecf
index bb0bcf8db7..f2b21cb168 100755
--- a/ecf/scripts/gfs/atmos/obsproc/prep/jgfs_atmos_emcsfc_sfc_prep.ecf
+++ b/ecf/scripts/gfs/atmos/obsproc/prep/jgfs_atmos_emcsfc_sfc_prep.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_manager.ecf b/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_manager.ecf
index d2e315bcef..50a71a44ba 100755
--- a/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_manager.ecf
+++ b/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_manager.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_master.ecf b/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_master.ecf
index ad717147fc..d8b1e2b531 100755
--- a/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_master.ecf
+++ b/ecf/scripts/gfs/atmos/post/jgfs_atmos_post_master.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
export FHRGRP=%FHRGRP%
export FHRLST=%FHRLST%
diff --git a/ecf/scripts/gfs/atmos/post_processing/awips_20km_1p0/jgfs_atmos_awips_master.ecf b/ecf/scripts/gfs/atmos/post_processing/awips_20km_1p0/jgfs_atmos_awips_master.ecf
index e146f8df32..9108b879a5 100755
--- a/ecf/scripts/gfs/atmos/post_processing/awips_20km_1p0/jgfs_atmos_awips_master.ecf
+++ b/ecf/scripts/gfs/atmos/post_processing/awips_20km_1p0/jgfs_atmos_awips_master.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/post_processing/bufr_sounding/jgfs_atmos_postsnd.ecf b/ecf/scripts/gfs/atmos/post_processing/bufr_sounding/jgfs_atmos_postsnd.ecf
index e2ddf7a1e5..3322aceeb1 100755
--- a/ecf/scripts/gfs/atmos/post_processing/bufr_sounding/jgfs_atmos_postsnd.ecf
+++ b/ecf/scripts/gfs/atmos/post_processing/bufr_sounding/jgfs_atmos_postsnd.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/post_processing/bulletins/jgfs_atmos_fbwind.ecf b/ecf/scripts/gfs/atmos/post_processing/bulletins/jgfs_atmos_fbwind.ecf
index 7443002a0b..4afac0d273 100755
--- a/ecf/scripts/gfs/atmos/post_processing/bulletins/jgfs_atmos_fbwind.ecf
+++ b/ecf/scripts/gfs/atmos/post_processing/bulletins/jgfs_atmos_fbwind.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/atmos/verf/jgfs_atmos_vminmon.ecf b/ecf/scripts/gfs/atmos/verf/jgfs_atmos_vminmon.ecf
index e9922e0751..2d9e8814ab 100755
--- a/ecf/scripts/gfs/atmos/verf/jgfs_atmos_vminmon.ecf
+++ b/ecf/scripts/gfs/atmos/verf/jgfs_atmos_vminmon.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/jgfs_forecast.ecf b/ecf/scripts/gfs/jgfs_forecast.ecf
index 370cd9342d..26d0c3b80d 100755
--- a/ecf/scripts/gfs/jgfs_forecast.ecf
+++ b/ecf/scripts/gfs/jgfs_forecast.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
# Load modules
diff --git a/ecf/scripts/gfs/wave/gempak/jgfs_wave_gempak.ecf b/ecf/scripts/gfs/wave/gempak/jgfs_wave_gempak.ecf
index 2cb7f75949..8406f0449c 100755
--- a/ecf/scripts/gfs/wave/gempak/jgfs_wave_gempak.ecf
+++ b/ecf/scripts/gfs/wave/gempak/jgfs_wave_gempak.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/init/jgfs_wave_init.ecf b/ecf/scripts/gfs/wave/init/jgfs_wave_init.ecf
index 208ed2cc52..1f73e43eb1 100755
--- a/ecf/scripts/gfs/wave/init/jgfs_wave_init.ecf
+++ b/ecf/scripts/gfs/wave/init/jgfs_wave_init.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpnt.ecf b/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpnt.ecf
index 2871a0f1a1..d4de0a9725 100755
--- a/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpnt.ecf
+++ b/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpnt.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpntbll.ecf b/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpntbll.ecf
index 73015f869f..528068f057 100755
--- a/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpntbll.ecf
+++ b/ecf/scripts/gfs/wave/post/jgfs_wave_post_bndpntbll.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/post/jgfs_wave_postpnt.ecf b/ecf/scripts/gfs/wave/post/jgfs_wave_postpnt.ecf
index 39e58f01c3..d09204cb2d 100755
--- a/ecf/scripts/gfs/wave/post/jgfs_wave_postpnt.ecf
+++ b/ecf/scripts/gfs/wave/post/jgfs_wave_postpnt.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/post/jgfs_wave_postsbs.ecf b/ecf/scripts/gfs/wave/post/jgfs_wave_postsbs.ecf
index 0b0e516bc2..52179a56e2 100755
--- a/ecf/scripts/gfs/wave/post/jgfs_wave_postsbs.ecf
+++ b/ecf/scripts/gfs/wave/post/jgfs_wave_postsbs.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_bulls.ecf b/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_bulls.ecf
index 00f005a877..f7d0ea1be7 100755
--- a/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_bulls.ecf
+++ b/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_bulls.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_gridded.ecf b/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_gridded.ecf
index 8197d58020..1c6ba47c93 100755
--- a/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_gridded.ecf
+++ b/ecf/scripts/gfs/wave/post/jgfs_wave_prdgen_gridded.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/ecf/scripts/gfs/wave/prep/jgfs_wave_prep.ecf b/ecf/scripts/gfs/wave/prep/jgfs_wave_prep.ecf
index 8f93f6d098..171e737692 100755
--- a/ecf/scripts/gfs/wave/prep/jgfs_wave_prep.ecf
+++ b/ecf/scripts/gfs/wave/prep/jgfs_wave_prep.ecf
@@ -16,7 +16,6 @@ set -x
export NET=%NET:gfs%
export RUN=%RUN%
-export CDUMP=%RUN%
############################################################
## Load modules
diff --git a/env/AWSPW.env b/env/AWSPW.env
index a4f598d3d7..867b9220ba 100755
--- a/env/AWSPW.env
+++ b/env/AWSPW.env
@@ -19,96 +19,67 @@ export NTHSTACK=1024000000
ulimit -s unlimited
ulimit -a
-if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
+# Calculate common variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing AWSPW.env"
+ exit 2
+fi
- ppn="npe_node_${step}_${RUN}"
- [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
- nprocs="npe_${step}_${RUN}"
- [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
- unset nprocs ppn nnodes ntasks
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
+ unset nnodes ufs_ntasks
elif [[ "${step}" = "post" ]]; then
- nth_max=$((npe_node_max / npe_node_post))
+ export NTHREADS_NP=${NTHREADS1}
+ export APRUN_NP="${APRUN}"
- export NTHREADS_NP=${nth_np:-1}
- [[ ${NTHREADS_NP} -gt ${nth_max} ]] && export NTHREADS_NP=${nth_max}
- export APRUN_NP="${launcher} -n ${npe_post}"
-
- export NTHREADS_DWN=${nth_dwn:-1}
- [[ ${NTHREADS_DWN} -gt ${nth_max} ]] && export NTHREADS_DWN=${nth_max}
- export APRUN_DWN="${launcher} -n ${npe_dwn}"
+ export NTHREADS_DWN=${threads_per_task_dwn:-1}
+ [[ ${NTHREADS_DWN} -gt ${max_threads_per_task} ]] && export NTHREADS_DWN=${max_threads_per_task}
+ export APRUN_DWN="${launcher} -n ${ntasks_dwn}"
elif [[ "${step}" = "ecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ecen))
-
- export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
- [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
- export APRUN_ECEN="${launcher} -n ${npe_ecen}"
+ export NTHREADS_ECEN=${NTHREADSmax}
+ export APRUN_ECEN="${APRUN}"
- export NTHREADS_CHGRES=${nth_chgres:-12}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES="time"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
- export APRUN_CALCINC="${launcher} -n ${npe_ecen}"
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
+ export APRUN_CALCINC="${APRUN}"
elif [[ "${step}" = "esfc" ]]; then
- nth_max=$((npe_node_max / npe_node_esfc))
+ export NTHREADS_ESFC=${NTHREADSmax}
+ export APRUN_ESFC="${APRUN}"
- export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
- [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
- export APRUN_ESFC="${launcher} -n ${npe_esfc}"
-
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_esfc}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN}"
elif [[ "${step}" = "epos" ]]; then
- nth_max=$((npe_node_max / npe_node_epos))
-
- export NTHREADS_EPOS=${nth_epos:-${nth_max}}
- [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
- export APRUN_EPOS="${launcher} -n ${npe_epos}"
-
-elif [[ "${step}" = "postsnd" ]]; then
-
- export CFP_MP="YES"
-
- nth_max=$((npe_node_max / npe_node_postsnd))
-
- export NTHREADS_POSTSND=${nth_postsnd:-1}
- [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
- export APRUN_POSTSND="${launcher} -n ${npe_postsnd}"
-
- export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
- [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
- export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
-
-elif [[ "${step}" = "awips" ]]; then
-
- nth_max=$((npe_node_max / npe_node_awips))
-
- export NTHREADS_AWIPS=${nth_awips:-2}
- [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
- export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
-
+ export NTHREADS_EPOS=${NTHREADSmax}
+ export APRUN_EPOS="${APRUN}"
elif [[ "${step}" = "fit2obs" ]]; then
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -n ${npe_fit2obs}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${APRUN}"
fi
diff --git a/env/GAEA.env b/env/GAEA.env
index d72be6ba22..6809a9b186 100755
--- a/env/GAEA.env
+++ b/env/GAEA.env
@@ -18,6 +18,22 @@ export NTHSTACK=1024000000
ulimit -s unlimited
ulimit -a
+# Calculate common variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ # This may be useful when Gaea is fully ported, so ignore SC warning
+ # shellcheck disable=SC2034
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing GAEA.env"
+ exit 2
+fi
+
if [[ "${step}" = "waveinit" ]]; then
export CFP_MP="YES"
@@ -27,19 +43,11 @@ if [[ "${step}" = "waveinit" ]]; then
elif [[ "${step}" = "fcst" ]]; then
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
- unset nprocs ppn nnodes ntasks
-
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
+ unset nnodes ufs_ntasks
elif [[ "${step}" = "atmos_products" ]]; then
@@ -47,17 +55,12 @@ elif [[ "${step}" = "atmos_products" ]]; then
elif [[ "${step}" = "oceanice_products" ]]; then
- nth_max=$((npe_node_max / npe_node_oceanice_products))
-
- export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export NTHREADS_OCNICEPOST=${NTHREADS1}
export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
elif [[ "${step}" = "fit2obs" ]]; then
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${APRUN} --cpus-per-task=${NTHREADS_FIT2OBS}"
fi
diff --git a/env/HERA.env b/env/HERA.env
index 98ac04ced3..3f0e7c9f36 100755
--- a/env/HERA.env
+++ b/env/HERA.env
@@ -31,9 +31,21 @@ if [[ -n "${SLURM_JOB_ID:-}" ]]; then
ulimit -a
fi
-if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
+# Calculate common variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing HERA.env"
+ exit 2
+fi
- nth_max=$((npe_node_max / npe_node_prep))
+if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
export POE="NO"
export BACK="NO"
@@ -42,11 +54,11 @@ if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
elif [[ "${step}" = "prepsnowobs" ]]; then
- export APRUN_CALCFIMS="${launcher} -n 1"
+ export APRUN_CALCFIMS="${APRUN}"
elif [[ "${step}" = "prep_emissions" ]]; then
- export APRUN="${launcher} -n 1"
+ export APRUN="${APRUN}"
elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then
@@ -57,102 +69,74 @@ elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}
elif [[ "${step}" = "atmanlvar" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlvar))
-
- export NTHREADS_ATMANLVAR=${nth_atmanlvar:-${nth_max}}
- [[ ${NTHREADS_ATMANLVAR} -gt ${nth_max} ]] && export NTHREADS_ATMANLVAR=${nth_max}
- export APRUN_ATMANLVAR="${launcher} -n ${npe_atmanlvar} --cpus-per-task=${NTHREADS_ATMANLVAR}"
+ export NTHREADS_ATMANLVAR=${NTHREADSmax}
+ export APRUN_ATMANLVAR="${APRUN} --cpus-per-task=${NTHREADS_ATMANLVAR}"
elif [[ "${step}" = "atmensanlletkf" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlletkf))
-
- export NTHREADS_ATMENSANLLETKF=${nth_atmensanlletkf:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLLETKF} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLLETKF=${nth_max}
- export APRUN_ATMENSANLLETKF="${launcher} -n ${npe_atmensanlletkf} --cpus-per-task=${NTHREADS_ATMENSANLLETKF}"
+ export NTHREADS_ATMENSANLLETKF=${NTHREADSmax}
+ export APRUN_ATMENSANLLETKF="${APRUN} --cpus-per-task=${NTHREADS_ATMENSANLLETKF}"
elif [[ "${step}" = "atmensanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlfv3inc))
-
- export NTHREADS_ATMENSANLFV3INC=${nth_atmensanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLFV3INC=${nth_max}
- export APRUN_ATMENSANLFV3INC="${launcher} -n ${npe_atmensanlfv3inc} --cpus-per-task=${NTHREADS_ATMENSANLFV3INC}"
+ export NTHREADS_ATMENSANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMENSANLFV3INC="${APRUN} --cpus-per-task=${NTHREADS_ATMENSANLFV3INC}"
elif [[ "${step}" = "aeroanlrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_aeroanlrun))
-
- export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
- [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
- export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun} --cpus-per-task=${NTHREADS_AEROANL}"
+ export NTHREADS_AEROANL=${NTHREADSmax}
+ export APRUN_AEROANL="${APRUN} --cpus-per-task=${NTHREADS_AEROANL}"
elif [[ "${step}" = "atmanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlfv3inc))
-
- export NTHREADS_ATMANLFV3INC=${nth_atmanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMANLFV3INC=${nth_max}
- export APRUN_ATMANLFV3INC="${launcher} -n ${npe_atmanlfv3inc} --cpus-per-task=${NTHREADS_ATMANLFV3INC}"
+ export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMANLFV3INC="${APRUN} --cpus-per-task=${NTHREADS_ATMANLFV3INC}"
elif [[ "${step}" = "prepobsaero" ]]; then
- nth_max=$((npe_node_max / npe_node_prepobsaero))
-
- export NTHREADS_PREPOBSAERO=${nth_prepobsaero:-1}
- export APRUN_PREPOBSAERO="${launcher} -n ${npe_prepobsaero} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
+ export NTHREADS_PREPOBSAERO=${NTHREADS1}
+ export APRUN_PREPOBSAERO="${APRUN} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
elif [[ "${step}" = "snowanl" ]]; then
- nth_max=$((npe_node_max / npe_node_snowanl))
-
- export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
- [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
- export APRUN_SNOWANL="${launcher} -n ${npe_snowanl} --cpus-per-task=${NTHREADS_SNOWANL}"
+ export NTHREADS_SNOWANL=${NTHREADSmax}
+ export APRUN_SNOWANL="${APRUN} --cpus-per-task=${NTHREADS_SNOWANL}"
export APRUN_APPLY_INCR="${launcher} -n 6"
elif [[ "${step}" = "marinebmat" ]]; then
export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
-
- export APRUN_MARINEBMAT="${launcher} -n ${npe_marinebmat}"
+ export APRUN_MARINEBMAT="${APRUN}"
elif [[ "${step}" = "marinebmat" ]]; then
export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
-
- export APRUN_OCNANAL="${launcher} -n ${npe_marinebmat}"
+ export APRUN_MARINEBMAT="${APRUN}"
elif [[ "${step}" = "ocnanalrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun}"
+ export APRUN_OCNANAL="${APRUN}"
elif [[ "${step}" = "ocnanalchkpt" ]]; then
export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt}"
+ export APRUN_OCNANAL="${APRUN}"
elif [[ "${step}" = "ocnanalecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ocnanalecen))
-
- export NTHREADS_OCNANALECEN=${nth_ocnanalecen:-${nth_max}}
- [[ ${NTHREADS_OCNANALECEN} -gt ${nth_max} ]] && export NTHREADS_OCNANALECEN=${nth_max}
- export APRUN_OCNANALECEN="${launcher} -n ${npe_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}"
+ export NTHREADS_OCNANALECEN=${NTHREADSmax}
+ export APRUN_OCNANALECEN="${APRUN} --cpus-per-task=${NTHREADS_OCNANALECEN}"
elif [[ "${step}" = "marineanalletkf" ]]; then
- nth_max=$((npe_node_max / npe_node_marineanalletkf))
-
- export NTHREADS_MARINEANALLETKF=${nth_marineanalletkf:-${nth_max}}
- [[ ${NTHREADS_MARINEANALLETKF} -gt ${nth_max} ]] && export NTHREADS_MARINEANALLETKF=${nth_max}
- export APRUN_MARINEANALLETKF="${launcher} -n ${npe_marineanalletkf} --cpus-per-task=${NTHREADS_MARINEANALLETKF}"
+ export NTHREADS_MARINEANALLETKF=${NTHREADSmax}
+ export APRUN_MARINEANALLETKF="${APRUN} --cpus-per-task=${NTHREADS_MARINEANALLETKF}"
elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
@@ -163,44 +147,34 @@ elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_anal))
-
- export NTHREADS_GSI=${nth_anal:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} --cpus-per-task=${NTHREADS_GSI}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN} --cpus-per-task=${NTHREADS_GSI}"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
export APRUN_CALCINC="${launcher} \$ncmd --cpus-per-task=${NTHREADS_CALCINC}"
- export NTHREADS_CYCLE=${nth_cycle:-12}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_cycle=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-12}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ ntasks_cycle=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${ntasks_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
export NTHREADS_GAUSFCANL=1
- npe_gausfcanl=${npe_gausfcanl:-1}
- export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
+ ntasks_gausfcanl=${ntasks_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${ntasks_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
elif [[ "${step}" = "sfcanl" ]]; then
- nth_max=$((npe_node_max / npe_node_sfcanl))
-
- export NTHREADS_CYCLE=${nth_sfcanl:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_sfcanl=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_sfcanl} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task:-14}
+ export APRUN_CYCLE="${APRUN} --cpus-per-task=${NTHREADS_CYCLE}"
elif [[ "${step}" = "eobs" ]]; then
export MKL_NUM_THREADS=4
export MKL_CBWR=AUTO
- nth_max=$((npe_node_max / npe_node_eobs))
-
- export NTHREADS_GSI=${nth_eobs:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} --cpus-per-task=${NTHREADS_GSI}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN} --cpus-per-task=${NTHREADS_GSI}"
export CFP_MP=${CFP_MP:-"YES"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -208,11 +182,8 @@ elif [[ "${step}" = "eobs" ]]; then
elif [[ "${step}" = "eupd" ]]; then
- nth_max=$((npe_node_max / npe_node_eupd))
-
- export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
- [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
- export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} --cpus-per-task=${NTHREADS_ENKF}"
+ export NTHREADS_ENKF=${NTHREADSmax}
+ export APRUN_ENKF="${launcher} -n ${ntasks_enkf:-${ntasks}} --cpus-per-task=${NTHREADS_ENKF}"
export CFP_MP=${CFP_MP:-"YES"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -220,25 +191,16 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- ppn="npe_node_${step}_${RUN}"
- [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
- nprocs="npe_${step}_${RUN}"
- [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
-
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
- unset nprocs ppn nnodes ntasks
-
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
+ unset nnodes ufs_ntasks
elif [[ "${step}" = "upp" ]]; then
- nth_max=$((npe_node_max / npe_node_upp))
-
- export NTHREADS_UPP=${nth_upp:-1}
- [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
- export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}"
+ export NTHREADS_UPP=${NTHREADS1}
+ export APRUN_UPP="${APRUN} --cpus-per-task=${NTHREADS_UPP}"
elif [[ "${step}" = "atmos_products" ]]; then
@@ -246,84 +208,62 @@ elif [[ "${step}" = "atmos_products" ]]; then
elif [[ "${step}" = "oceanice_products" ]]; then
- nth_max=$((npe_node_max / npe_node_oceanice_products))
-
- export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export NTHREADS_OCNICEPOST=${NTHREADS1}
export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
elif [[ "${step}" = "ecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ecen))
+ export NTHREADS_ECEN=${NTHREADSmax}
+ export APRUN_ECEN="${APRUN} --cpus-per-task=${NTHREADS_ECEN}"
- export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
- [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
- export APRUN_ECEN="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_ECEN}"
-
- export NTHREADS_CHGRES=${nth_chgres:-12}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES="time"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
- export APRUN_CALCINC="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_CALCINC}"
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
+ export APRUN_CALCINC="${APRUN} --cpus-per-task=${NTHREADS_CALCINC}"
elif [[ "${step}" = "esfc" ]]; then
- nth_max=$((npe_node_max / npe_node_esfc))
-
- export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
- [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
- export APRUN_ESFC="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_ESFC}"
+ export NTHREADS_ESFC=${threads_per_task_esfc:-${max_threads_per_task}}
+ export APRUN_ESFC="${APRUN} --cpus-per-task=${NTHREADS_ESFC}"
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN} --cpus-per-task=${NTHREADS_CYCLE}"
elif [[ "${step}" = "epos" ]]; then
- nth_max=$((npe_node_max / npe_node_epos))
-
- export NTHREADS_EPOS=${nth_epos:-${nth_max}}
- [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
- export APRUN_EPOS="${launcher} -n ${npe_epos} --cpus-per-task=${NTHREADS_EPOS}"
+ export NTHREADS_EPOS=${NTHREADSmax}
+ export APRUN_EPOS="${APRUN} --cpus-per-task=${NTHREADS_EPOS}"
elif [[ "${step}" = "postsnd" ]]; then
export CFP_MP="YES"
- nth_max=$((npe_node_max / npe_node_postsnd))
-
- export NTHREADS_POSTSND=${nth_postsnd:-1}
- [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
- export APRUN_POSTSND="${launcher} -n ${npe_postsnd} --cpus-per-task=${NTHREADS_POSTSND}"
+ export NTHREADS_POSTSND=${NTHREADS1}
+ export APRUN_POSTSND="${APRUN} --cpus-per-task=${NTHREADS_POSTSND}"
- export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
- [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
- export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
+ export NTHREADS_POSTSNDCFP=${threads_per_task_postsndcfp:-1}
+ [[ ${NTHREADS_POSTSNDCFP} -gt ${max_threads_per_task} ]] && export NTHREADS_POSTSNDCFP=${max_threads_per_task}
+ export APRUN_POSTSNDCFP="${launcher} -n ${ntasks_postsndcfp} ${mpmd_opt}"
elif [[ "${step}" = "awips" ]]; then
- nth_max=$((npe_node_max / npe_node_awips))
-
- export NTHREADS_AWIPS=${nth_awips:-2}
- [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
- export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
+ export NTHREADS_AWIPS=${NTHREADS1}
+ export APRUN_AWIPSCFP="${APRUN} ${mpmd_opt}"
elif [[ "${step}" = "gempak" ]]; then
export CFP_MP="YES"
- nth_max=$((npe_node_max / npe_node_gempak))
-
- export NTHREADS_GEMPAK=${nth_gempak:-1}
- [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
+ export NTHREADS_GEMPAK=${NTHREADS1}
+ [[ ${NTHREADS_GEMPAK} -gt ${max_threads_per_task} ]] && export NTHREADS_GEMPAK=${max_threads_per_task}
elif [[ "${step}" = "fit2obs" ]]; then
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${APRUN} --cpus-per-task=${NTHREADS_FIT2OBS}"
fi
diff --git a/env/HERCULES.env b/env/HERCULES.env
index f94bae73cc..83fa1aadd1 100755
--- a/env/HERCULES.env
+++ b/env/HERCULES.env
@@ -28,11 +28,23 @@ export I_MPI_EXTRA_FILESYSTEM_LIST=lustre
ulimit -s unlimited
ulimit -a
+# Calculate common variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing HERCULES.env"
+ exit 2
+fi
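+# Worked example (hypothetical numbers): with max_tasks_per_node=80,
+# tasks_per_node=40, and threads_per_task unset, the block above gives
+# max_threads_per_task=2, NTHREADSmax=2, NTHREADS1=1, and
+# APRUN="${launcher} -n ${ntasks}". Setting threads_per_task=4 would
+# cap both NTHREADSmax and NTHREADS1 back down to 2.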
+
case ${step} in
"prep" | "prepbufr")
- nth_max=$((npe_node_max / npe_node_prep))
-
export POE="NO"
export BACK=${BACK:-"YES"}
export sys_tp="HERCULES"
@@ -40,11 +52,11 @@ case ${step} in
;;
"prepsnowobs")
- export APRUN_CALCFIMS="${launcher} -n 1"
+ export APRUN_CALCFIMS="${APRUN}"
;;
"prep_emissions")
- export APRUN="${launcher} -n 1"
+ export APRUN="${APRUN}"
;;
"waveinit" | "waveprep" | "wavepostsbs" | "wavepostbndpnt" | "wavepostpnt" | "wavepostbndpntbll")
@@ -56,91 +68,69 @@ case ${step} in
;;
"atmanlvar")
- nth_max=$((npe_node_max / npe_node_atmanlvar))
-
- export NTHREADS_ATMANLVAR=${nth_atmanlvar:-${nth_max}}
- [[ ${NTHREADS_ATMANLVAR} -gt ${nth_max} ]] && export NTHREADS_ATMANLVAR=${nth_max}
- export APRUN_ATMANLVAR="${launcher} -n ${npe_atmanlvar} --cpus-per-task=${NTHREADS_ATMANLVAR}"
+ export NTHREADS_ATMANLVAR=${NTHREADSmax}
+ export APRUN_ATMANLVAR="${APRUN} --cpus-per-task=${NTHREADS_ATMANLVAR}"
;;
"atmanlfv3inc")
- nth_max=$((npe_node_max / npe_node_atmanlfv3inc))
-
- export NTHREADS_ATMANLFV3INC=${nth_atmanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMANLFV3INC=${nth_max}
- export APRUN_ATMANLFV3INC="${launcher} -n ${npe_atmanlfv3inc} --cpus-per-task=${NTHREADS_ATMANLFV3INC}"
+ export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMANLFV3INC="${APRUN} --cpus-per-task=${NTHREADS_ATMANLFV3INC}"
;;
"atmensanlletkf")
- nth_max=$((npe_node_max / npe_node_atmensanlletkf))
-
- export NTHREADS_ATMENSANLLETKF=${nth_atmensanlletkf:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLLETKF} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLLETKF=${nth_max}
- export APRUN_ATMENSANLLETKF="${launcher} -n ${npe_atmensanlletkf} --cpus-per-task=${NTHREADS_ATMENSANLLETKF}"
+ export NTHREADS_ATMENSANLLETKF=${NTHREADSmax}
+ export APRUN_ATMENSANLLETKF="${APRUN} --cpus-per-task=${NTHREADS_ATMENSANLLETKF}"
;;
"atmensanlfv3inc")
- nth_max=$((npe_node_max / npe_node_atmensanlfv3inc))
-
- export NTHREADS_ATMENSANLFV3INC=${nth_atmensanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLFV3INC=${nth_max}
- export APRUN_ATMENSANLFV3INC="${launcher} -n ${npe_atmensanlfv3inc} --cpus-per-task=${NTHREADS_ATMENSANLFV3INC}"
+ export NTHREADS_ATMENSANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMENSANLFV3INC="${APRUN} --cpus-per-task=${NTHREADS_ATMENSANLFV3INC}"
;;
"aeroanlrun")
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_aeroanlrun))
-
- export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
- [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
- export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun} --cpus-per-task=${NTHREADS_AEROANL}"
+ export NTHREADS_AEROANL=${NTHREADSmax}
+ export APRUN_AEROANL="${APRUN} --cpus-per-task=${NTHREADS_AEROANL}"
;;
"prepobsaero")
- nth_max=$((npe_node_max / npe_node_prepobsaero))
- export NTHREADS_PREPOBSAERO=${nth_prepobsaero:-1}
- export APRUN_PREPOBSAERO="${launcher} -n ${npe_prepobsaero} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
+ export NTHREADS_PREPOBSAERO=${NTHREADS1}
+ export APRUN_PREPOBSAERO="${APRUN} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
;;
"snowanl")
- nth_max=$((npe_node_max / npe_node_snowanl))
-
- export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
- [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
- export APRUN_SNOWANL="${launcher} -n ${npe_snowanl} --cpus-per-task=${NTHREADS_SNOWANL}"
+ export NTHREADS_SNOWANL=${NTHREADSmax}
+ export APRUN_SNOWANL="${APRUN} --cpus-per-task=${NTHREADS_SNOWANL}"
export APRUN_APPLY_INCR="${launcher} -n 6"
;;
"marinebmat")
- export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
- export APRUN_MARINEBMAT="${launcher} -n ${npe_marinebmat}"
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+ export APRUN_MARINEBMAT="${APRUN}"
;;
"ocnanalrun")
-
- export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun}"
+
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+ export APRUN_OCNANAL="${APRUN}"
;;
"ocnanalecen")
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_ocnanalecen))
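+ # ocnanalecen sets its own tasks_per_node_ocnanalecen, so recompute
+ # the thread cap here instead of using the common max_threads_per_task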
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node_ocnanalecen))
- export NTHREADS_OCNANALECEN=${nth_ocnanalecen:-${nth_max}}
- [[ ${NTHREADS_OCNANALECEN} -gt ${nth_max} ]] && export NTHREADS_OCNANALECEN=${nth_max}
- export APRUN_OCNANALECEN="${launcher} -n ${npe_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}"
+ export NTHREADS_OCNANALECEN=${threads_per_task_ocnanalecen:-${max_threads_per_task}}
+ [[ ${NTHREADS_OCNANALECEN} -gt ${max_threads_per_task} ]] && export NTHREADS_OCNANALECEN=${max_threads_per_task}
+ export APRUN_OCNANALECEN="${launcher} -n ${ntasks_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}"
;;
"ocnanalchkpt")
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_ocnanalchkpt))
-
- export NTHREADS_OCNANAL=${nth_ocnanalchkpt:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt} --cpus-per-task=${NTHREADS_OCNANAL}"
+ export NTHREADS_OCNANAL=${NTHREADSmax}
+ export APRUN_OCNANAL="${APRUN} --cpus-per-task=${NTHREADS_OCNANAL}"
;;
"anal" | "analcalc")
@@ -151,32 +141,28 @@ case ${step} in
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_anal))
- export NTHREADS_GSI=${nth_anal:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} --cpus-per-task=${NTHREADS_GSI}"
+ export NTHREADS_GSI=${threads_per_task_anal:-${max_threads_per_task}}
+ export APRUN_GSI="${APRUN} --cpus-per-task=${NTHREADS_GSI}"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
export APRUN_CALCINC="${launcher} \$ncmd --cpus-per-task=${NTHREADS_CALCINC}"
- export NTHREADS_CYCLE=${nth_cycle:-12}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_cycle=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-12}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ ntasks_cycle=${ntiles:-6}
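+ # one task per tile: ntiles defaults to the 6 faces of the FV3 cube-sphere grid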
+ export APRUN_CYCLE="${launcher} -n ${ntasks_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
export NTHREADS_GAUSFCANL=1
- npe_gausfcanl=${npe_gausfcanl:-1}
- export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
+ ntasks_gausfcanl=${ntasks_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${ntasks_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
;;
"sfcanl")
- nth_max=$((npe_node_max / npe_node_sfcanl))
- export NTHREADS_CYCLE=${nth_sfcanl:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_sfcanl=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_sfcanl} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN} --cpus-per-task=${NTHREADS_CYCLE}"
;;
"eobs")
@@ -187,11 +173,10 @@ case ${step} in
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_eobs))
- export NTHREADS_GSI=${nth_eobs:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} --cpus-per-task=${NTHREADS_GSI}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ [[ ${NTHREADS_GSI} -gt ${max_threads_per_task} ]] && export NTHREADS_GSI=${max_threads_per_task}
+ export APRUN_GSI="${APRUN} --cpus-per-task=${NTHREADS_GSI}"
;;
"eupd")
@@ -199,35 +184,25 @@ case ${step} in
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_eupd))
- export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
- [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
- export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} --cpus-per-task=${NTHREADS_ENKF}"
+ export NTHREADS_ENKF=${NTHREADSmax}
+ export APRUN_ENKF="${launcher} -n ${ntasks_enkf:-${ntasks}} --cpus-per-task=${NTHREADS_ENKF}"
;;
"fcst" | "efcs")
export OMP_STACKSIZE=512M
- ppn="npe_node_${step}_${RUN}"
- [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
- nprocs="npe_${step}_${RUN}"
- [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
-
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
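+ # Ceiling division (hypothetical numbers): ntasks=100, tasks_per_node=40
+ # gives nnodes=3 and ufs_ntasks=120, so the job requests whole nodes.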
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
- unset nprocs ppn nnodes ntasks
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
+ unset nnodes ufs_ntasks
;;
"upp")
- nth_max=$((npe_node_max / npe_node_upp))
-
- export NTHREADS_UPP=${nth_upp:-1}
- [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
- export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}"
+ export NTHREADS_UPP=${NTHREADS1}
+ export APRUN_UPP="${APRUN} --cpus-per-task=${NTHREADS_UPP}"
;;
"atmos_products")
@@ -237,73 +212,56 @@ case ${step} in
"oceanice_products")
- nth_max=$((npe_node_max / npe_node_oceanice_products))
-
- export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export NTHREADS_OCNICEPOST=${NTHREADS1}
export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
;;
"ecen")
- nth_max=$((npe_node_max / npe_node_ecen))
-
- export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
- [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
- export APRUN_ECEN="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_ECEN}"
+ export NTHREADS_ECEN=${NTHREADSmax}
+ export APRUN_ECEN="${APRUN} --cpus-per-task=${NTHREADS_ECEN}"
- export NTHREADS_CHGRES=${nth_chgres:-12}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES="time"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
- export APRUN_CALCINC="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_CALCINC}"
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
+ export APRUN_CALCINC="${APRUN} --cpus-per-task=${NTHREADS_CALCINC}"
;;
"esfc")
- nth_max=$((npe_node_max / npe_node_esfc))
+ export NTHREADS_ESFC=${NTHREADSmax}
+ export APRUN_ESFC="${APRUN} --cpus-per-task=${NTHREADS_ESFC}"
- export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
- [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
- export APRUN_ESFC="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_ESFC}"
-
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN} --cpus-per-task=${NTHREADS_CYCLE}"
;;
"epos")
- nth_max=$((npe_node_max / npe_node_epos))
-
- export NTHREADS_EPOS=${nth_epos:-${nth_max}}
- [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
- export APRUN_EPOS="${launcher} -n ${npe_epos} --cpus-per-task=${NTHREADS_EPOS}"
+ export NTHREADS_EPOS=${NTHREADSmax}
+ export APRUN_EPOS="${APRUN} --cpus-per-task=${NTHREADS_EPOS}"
;;
"postsnd")
export CFP_MP="YES"
- nth_max=$((npe_node_max / npe_node_postsnd))
-
- export NTHREADS_POSTSND=${nth_postsnd:-1}
- [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
- export APRUN_POSTSND="${launcher} -n ${npe_postsnd} --cpus-per-task=${NTHREADS_POSTSND}"
+ export NTHREADS_POSTSND=${NTHREADS1}
+ export APRUN_POSTSND="${APRUN} --cpus-per-task=${NTHREADS_POSTSND}"
- export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
- [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
- export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
+ export NTHREADS_POSTSNDCFP=${threads_per_task_postsndcfp:-1}
+ [[ ${NTHREADS_POSTSNDCFP} -gt ${max_threads_per_task} ]] && export NTHREADS_POSTSNDCFP=${max_threads_per_task}
+ export APRUN_POSTSNDCFP="${launcher} -n ${ntasks_postsndcfp} ${mpmd_opt}"
;;
"awips")
- nth_max=$((npe_node_max / npe_node_awips))
-
- export NTHREADS_AWIPS=${nth_awips:-2}
- [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
- export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
+ export NTHREADS_AWIPS=${NTHREADS1}
+ export APRUN_AWIPSCFP="${APRUN} ${mpmd_opt}"
;;
"gempak")
@@ -313,11 +271,8 @@ case ${step} in
;;
"fit2obs")
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${APRUN} --cpus-per-task=${NTHREADS_FIT2OBS}"
;;
*)
diff --git a/env/JET.env b/env/JET.env
index 956762a921..810a8cd501 100755
--- a/env/JET.env
+++ b/env/JET.env
@@ -19,9 +19,21 @@ export NTHSTACK=1024000000
ulimit -s unlimited
ulimit -a
-if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
+# Calculate common variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing JET.env"
+ exit 2
+fi
- nth_max=$((npe_node_max / npe_node_prep))
+if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
export POE="NO"
export BACK="NO"
@@ -45,82 +57,52 @@ elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}
elif [[ "${step}" = "atmanlvar" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlvar))
-
- export NTHREADS_ATMANLVAR=${nth_atmanlvar:-${nth_max}}
- [[ ${NTHREADS_ATMANLVAR} -gt ${nth_max} ]] && export NTHREADS_ATMANLVAR=${nth_max}
- export APRUN_ATMANLVAR="${launcher} -n ${npe_atmanlvar}"
+ export NTHREADS_ATMANLVAR=${NTHREADSmax}
+ export APRUN_ATMANLVAR="${APRUN}"
elif [[ "${step}" = "atmensanlletkf" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlletkf))
-
- export NTHREADS_ATMENSANLLETKF=${nth_atmensanlletkf:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLLETKF} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLLETKF=${nth_max}
- export APRUN_ATMENSANLLETKF="${launcher} ${npe_atmensanlletkf}"
+ export NTHREADS_ATMENSANLLETKF=${NTHREADSmax}
+ export APRUN_ATMENSANLLETKF="${APRUN}"
elif [[ "${step}" = "atmensanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlfv3inc))
-
- export NTHREADS_ATMENSANLFV3INC=${nth_atmensanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLFV3INC=${nth_max}
- export APRUN_ATMENSANLFV3INC="${launcher} ${npe_atmensanlfv3inc}"
+ export NTHREADS_ATMENSANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMENSANLFV3INC="${APRUN}"
elif [[ "${step}" = "aeroanlrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_aeroanlrun))
-
- export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
- [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
- export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun}"
+ export NTHREADS_AEROANL=${NTHREADSmax}
+ export APRUN_AEROANL="${APRUN}"
elif [[ "${step}" = "prepobsaero" ]]; then
- nth_max=$((npe_node_max / npe_node_prepobsaero))
-
- export NTHREADS_PREPOBSAERO=${nth_prepobsaero:-1}
- export APRUN_PREPOBSAERO="${launcher} -n ${npe_prepobsaero} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
+ export NTHREADS_PREPOBSAERO=${NTHREADS1}
+ export APRUN_PREPOBSAERO="${APRUN} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
elif [[ "${step}" = "snowanl" ]]; then
- nth_max=$((npe_node_max / npe_node_snowanl))
-
- export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
- [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
- export APRUN_SNOWANL="${launcher} -n ${npe_snowanl}"
+ export NTHREADS_SNOWANL=${NTHREADSmax}
+ export APRUN_SNOWANL="${APRUN}"
export APRUN_APPLY_INCR="${launcher} -n 6"
elif [[ "${step}" = "atmanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlfv3inc))
-
- export NTHREADS_ATMANLFV3INC=${nth_atmanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMANLFV3INC=${nth_max}
- export APRUN_ATMANLFV3INC="${launcher} -n ${npe_atmanlfv3inc}"
+ export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMANLFV3INC="${APRUN}"
elif [[ "${step}" = "marinebmat" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
-
- nth_max=$((npe_node_max / npe_node_marinebmat))
-
- export NTHREADS_OCNANAL=${nth_marinebmat:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_marinebmat}"
+ export APRUN_MARINEBMAT="${APRUN}"
elif [[ "${step}" = "ocnanalrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
-
- nth_max=$((npe_node_max / npe_node_ocnanalrun))
-
- export NTHREADS_OCNANAL=${nth_ocnanalrun:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun}"
+ export APRUN_OCNANAL="${APRUN}"
elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
@@ -131,43 +113,34 @@ elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_anal))
-
- export NTHREADS_GSI=${nth_anal:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}}"
+ export NTHREADS_GSI=${threads_per_task_anal:-${max_threads_per_task}}
+ export APRUN_GSI="${APRUN}"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
export APRUN_CALCINC="${launcher} \$ncmd"
- export NTHREADS_CYCLE=${nth_cycle:-12}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_cycle=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_cycle}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-12}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ ntasks_cycle=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${ntasks_cycle}"
export NTHREADS_GAUSFCANL=1
- npe_gausfcanl=${npe_gausfcanl:-1}
- export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl}"
+ ntasks_gausfcanl=${ntasks_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${ntasks_gausfcanl}"
elif [[ "${step}" = "sfcanl" ]]; then
- nth_max=$((npe_node_max / npe_node_sfcanl))
-
- export NTHREADS_CYCLE=${nth_sfcanl:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_sfcanl=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_sfcanl}"
+ export NTHREADS_CYCLE=${threads_per_task:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN}"
elif [[ "${step}" = "eobs" ]]; then
export MKL_NUM_THREADS=4
export MKL_CBWR=AUTO
- nth_max=$((npe_node_max / npe_node_eobs))
-
- export NTHREADS_GSI=${nth_eobs:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN}"
export CFP_MP=${CFP_MP:-"YES"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -175,11 +148,8 @@ elif [[ "${step}" = "eobs" ]]; then
elif [[ "${step}" = "eupd" ]]; then
- nth_max=$((npe_node_max / npe_node_eupd))
-
- export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
- [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
- export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}}"
+ export NTHREADS_ENKF=${NTHREADSmax}
+ export APRUN_ENKF="${launcher} -n ${ntasks_enkf:-${ntasks}}"
export CFP_MP=${CFP_MP:-"YES"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -187,24 +157,16 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- ppn="npe_node_${step}_${RUN}"
- [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
- nprocs="npe_${step}_${RUN}"
- [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
-
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
- unset nprocs ppn nnodes ntasks
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
+ unset nnodes ufs_ntasks
elif [[ "${step}" = "upp" ]]; then
- nth_max=$((npe_node_max / npe_node_upp))
-
- export NTHREADS_UPP=${nth_upp:-1}
- [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
- export APRUN_UPP="${launcher} -n ${npe_upp}"
+ export NTHREADS_UPP=${NTHREADS1}
+ export APRUN_UPP="${APRUN}"
elif [[ "${step}" = "atmos_products" ]]; then
@@ -212,60 +174,46 @@ elif [[ "${step}" = "atmos_products" ]]; then
elif [[ "${step}" = "oceanice_products" ]]; then
- nth_max=$((npe_node_max / npe_node_oceanice_products))
-
- export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export NTHREADS_OCNICEPOST=${NTHREADS1}
export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
elif [[ "${step}" = "ecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ecen))
-
- export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
- [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
- export APRUN_ECEN="${launcher} -n ${npe_ecen}"
+ export NTHREADS_ECEN=${NTHREADSmax}
+ export APRUN_ECEN="${APRUN}"
- export NTHREADS_CHGRES=${nth_chgres:-12}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES="time"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
- export APRUN_CALCINC="${launcher} -n ${npe_ecen}"
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
+ export APRUN_CALCINC="${APRUN}"
elif [[ "${step}" = "esfc" ]]; then
- nth_max=$((npe_node_max / npe_node_esfc))
-
- export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
- [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
- export APRUN_ESFC="${launcher} -n ${npe_esfc}"
+ export NTHREADS_ESFC=${NTHREADSmax}
+ export APRUN_ESFC="${APRUN}"
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_esfc}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN}"
elif [[ "${step}" = "epos" ]]; then
- nth_max=$((npe_node_max / npe_node_epos))
-
- export NTHREADS_EPOS=${nth_epos:-${nth_max}}
- [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
- export APRUN_EPOS="${launcher} -n ${npe_epos}"
+ export NTHREADS_EPOS=${NTHREADSmax}
+ export APRUN_EPOS="${APRUN}"
elif [[ "${step}" = "postsnd" ]]; then
export CFP_MP="YES"
- nth_max=$((npe_node_max / npe_node_postsnd))
-
- export NTHREADS_POSTSND=${nth_postsnd:-1}
- [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
- export APRUN_POSTSND="${launcher} -n ${npe_postsnd}"
+ export NTHREADS_POSTSND=${NTHREADS1}
+ export APRUN_POSTSND="${APRUN}"
- export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
- [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
- export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
+ export NTHREADS_POSTSNDCFP=${threads_per_task_postsndcfp:-1}
+ [[ ${NTHREADS_POSTSNDCFP} -gt ${max_threads_per_task} ]] && export NTHREADS_POSTSNDCFP=${max_threads_per_task}
+ export APRUN_POSTSNDCFP="${launcher} -n ${ntasks_postsndcfp} ${mpmd_opt}"
elif [[ "${step}" = "awips" ]]; then
@@ -277,10 +225,7 @@ elif [[ "${step}" = "gempak" ]]; then
elif [[ "${step}" = "fit2obs" ]]; then
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -n ${npe_fit2obs}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${APRUN}"
fi
diff --git a/env/ORION.env b/env/ORION.env
index 71ba7bbda3..bbbfb59182 100755
--- a/env/ORION.env
+++ b/env/ORION.env
@@ -26,9 +26,21 @@ export NTHSTACK=1024000000
ulimit -s unlimited
ulimit -a
-if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
+# Calculate common variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing ORION.env"
+ exit 2
+fi
- nth_max=$((npe_node_max / npe_node_prep))
+if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
export POE="NO"
export BACK=${BACK:-"YES"}
@@ -53,108 +65,72 @@ elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}
elif [[ "${step}" = "atmanlvar" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlvar))
-
- export NTHREADS_ATMANLVAR=${nth_atmanlvar:-${nth_max}}
- [[ ${NTHREADS_ATMANLVAR} -gt ${nth_max} ]] && export NTHREADS_ATMANLVAR=${nth_max}
- export APRUN_ATMANLVAR="${launcher} -n ${npe_atmanlvar} --cpus-per-task=${NTHREADS_ATMANLVAR}"
+ export NTHREADS_ATMANLVAR=${NTHREADSmax}
+ export APRUN_ATMANLVAR="${APRUN} --cpus-per-task=${NTHREADS_ATMANLVAR}"
elif [[ "${step}" = "atmensanlletkf" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlletkf))
-
- export NTHREADS_ATMENSANLLETKF=${nth_atmensanlletkf:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLLETKF} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLLETKF=${nth_max}
- export APRUN_ATMENSANLLETKF="${launcher} -n ${npe_atmensanlletkf} --cpus-per-task=${NTHREADS_ATMENSANLLETKF}"
+ export NTHREADS_ATMENSANLLETKF=${NTHREADSmax}
+ export APRUN_ATMENSANLLETKF="${APRUN} --cpus-per-task=${NTHREADS_ATMENSANLLETKF}"
elif [[ "${step}" = "atmensanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlfv3inc))
-
- export NTHREADS_ATMENSANLFV3INC=${nth_atmensanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLFV3INC=${nth_max}
- export APRUN_ATMENSANLFV3INC="${launcher} -n ${npe_atmensanlfv3inc} --cpus-per-task=${NTHREADS_ATMENSANLFV3INC}"
+ export NTHREADS_ATMENSANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMENSANLFV3INC="${APRUN} --cpus-per-task=${NTHREADS_ATMENSANLFV3INC}"
elif [[ "${step}" = "aeroanlrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_aeroanlrun))
-
- export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
- [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
- export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun} --cpus-per-task=${NTHREADS_AEROANL}"
+ export NTHREADS_AEROANL=${NTHREADSmax}
+ export APRUN_AEROANL="${APRUN} --cpus-per-task=${NTHREADS_AEROANL}"
elif [[ "${step}" = "prepobsaero" ]]; then
- nth_max=$((npe_node_max / npe_node_prepobsaero))
-
- export NTHREADS_PREPOBSAERO=${nth_prepobsaero:-1}
- export APRUN_PREPOBSAERO="${launcher} -n ${npe_prepobsaero} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
+ export NTHREADS_PREPOBSAERO=${NTHREADS1}
+ export APRUN_PREPOBSAERO="${APRUN} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
elif [[ "${step}" = "snowanl" ]]; then
- nth_max=$((npe_node_max / npe_node_snowanl))
-
- export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
- [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
- export APRUN_SNOWANL="${launcher} -n ${npe_snowanl} --cpus-per-task=${NTHREADS_SNOWANL}"
+ export NTHREADS_SNOWANL=${NTHREADSmax}
+ export APRUN_SNOWANL="${APRUN} --cpus-per-task=${NTHREADS_SNOWANL}"
export APRUN_APPLY_INCR="${launcher} -n 6"
elif [[ "${step}" = "atmanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlfv3inc))
-
- export NTHREADS_ATMANLFV3INC=${nth_atmanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMANLFV3INC=${nth_max}
- export APRUN_ATMANLFV3INC="${launcher} -n ${npe_atmanlfv3inc} --cpus-per-task=${NTHREADS_ATMANLFV3INC}"
+ export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMANLFV3INC="${APRUN} --cpus-per-task=${NTHREADS_ATMANLFV3INC}"
elif [[ "${step}" = "marinebmat" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_marinebmat))
-
- export NTHREADS_OCNANAL=${nth_marinebmat:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_marinebmat} --cpus-per-task=${NTHREADS_OCNANAL}"
+ export NTHREADS_MARINEBMAT=${NTHREADSmax}
+ export APRUN_MARINEBMAT="${APRUN}"
elif [[ "${step}" = "ocnanalrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_ocnanalrun))
-
- export NTHREADS_OCNANAL=${nth_ocnanalrun:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun} --cpus-per-task=${NTHREADS_OCNANAL}"
+ export APRUN_OCNANAL="${APRUN}"
elif [[ "${step}" = "ocnanalchkpt" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_ocnanalchkpt))
-
- export NTHREADS_OCNANAL=${nth_ocnanalchkpt:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt} --cpus-per-task=${NTHREADS_OCNANAL}"
+ export NTHREADS_OCNANAL=${NTHREADSmax}
+ export APRUN_OCNANAL="${APRUN} --cpus-per-task=${NTHREADS_OCNANAL}"
elif [[ "${step}" = "ocnanalecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ocnanalecen))
-
- export NTHREADS_OCNANALECEN=${nth_ocnanalecen:-${nth_max}}
- [[ ${NTHREADS_OCNANALECEN} -gt ${nth_max} ]] && export NTHREADS_OCNANALECEN=${nth_max}
- export APRUN_OCNANALECEN="${launcher} -n ${npe_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}"
+ export NTHREADS_OCNANALECEN=${NTHREADSmax}
+ export APRUN_OCNANALECEN="${APRUN} --cpus-per-task=${NTHREADS_OCNANALECEN}"
elif [[ "${step}" = "marineanalletkf" ]]; then
- nth_max=$((npe_node_max / npe_node_marineanalletkf))
-
- export NTHREADS_MARINEANALLETKF=${nth_marineanalletkf:-${nth_max}}
- [[ ${NTHREADS_MARINEANALLETKF} -gt ${nth_max} ]] && export NTHREADS_MARINEANALLETKF=${nth_max}
- export APRUN_MARINEANALLETKF="${launcher} -n ${npe_marineanalletkf} --cpus-per-task=${NTHREADS_MARINEANALLETKF}"
+ export NTHREADS_MARINEANALLETKF=${NTHREADSmax}
+ export APRUN_MARINEANALLETKF="${APRUN} --cpus-per-task=${NTHREADS_MARINEANALLETKF}"
elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
@@ -165,32 +141,26 @@ elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_anal))
-
- export NTHREADS_GSI=${nth_anal:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} --cpus-per-task=${NTHREADS_GSI}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN} --cpus-per-task=${NTHREADS_GSI}"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
export APRUN_CALCINC="${launcher} \$ncmd --cpus-per-task=${NTHREADS_CALCINC}"
- export NTHREADS_CYCLE=${nth_cycle:-12}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_cycle=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-12}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ ntasks_cycle=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${ntasks_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
export NTHREADS_GAUSFCANL=1
- npe_gausfcanl=${npe_gausfcanl:-1}
- export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
+ ntasks_gausfcanl=${ntasks_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${ntasks_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
elif [[ "${step}" = "sfcanl" ]]; then
- nth_max=$((npe_node_max / npe_node_sfcanl))
-
- export NTHREADS_CYCLE=${nth_sfcanl:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_sfcanl=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_sfcanl} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN} --cpus-per-task=${NTHREADS_CYCLE}"
elif [[ "${step}" = "eobs" ]]; then
@@ -201,11 +171,9 @@ elif [[ "${step}" = "eobs" ]]; then
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_eobs))
-
- export NTHREADS_GSI=${nth_eobs:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} --cpus-per-task=${NTHREADS_GSI}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ [[ ${NTHREADS_GSI} -gt ${max_threads_per_task} ]] && export NTHREADS_GSI=${max_threads_per_task}
+ export APRUN_GSI="${APRUN} --cpus-per-task=${NTHREADS_GSI}"
elif [[ "${step}" = "eupd" ]]; then
@@ -213,32 +181,21 @@ elif [[ "${step}" = "eupd" ]]; then
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_eupd))
-
- export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
- [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
- export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} --cpus-per-task=${NTHREADS_ENKF}"
+ export NTHREADS_ENKF=${NTHREADSmax}
+ export APRUN_ENKF="${launcher} -n ${ntasks_enkf:-${ntasks}} --cpus-per-task=${NTHREADS_ENKF}"
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- ppn="npe_node_${step}_${RUN}"
- [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
- nprocs="npe_${step}_${RUN}"
- [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
-
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
- unset nprocs ppn nnodes ntasks
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
+ unset nnodes ufs_ntasks
elif [[ "${step}" = "upp" ]]; then
- nth_max=$((npe_node_max / npe_node_upp))
-
- export NTHREADS_UPP=${nth_upp:-1}
- [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
- export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}"
+ export NTHREADS_UPP=${NTHREADS1}
+ export APRUN_UPP="${APRUN} --cpus-per-task=${NTHREADS_UPP}"
elif [[ "${step}" = "atmos_products" ]]; then
@@ -246,68 +203,51 @@ elif [[ "${step}" = "atmos_products" ]]; then
elif [[ "${step}" = "oceanice_products" ]]; then
- nth_max=$((npe_node_max / npe_node_oceanice_products))
-
- export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export NTHREADS_OCNICEPOST=${NTHREADS1}
export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
elif [[ "${step}" = "ecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ecen))
-
- export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
- [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
- export APRUN_ECEN="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_ECEN}"
+ export NTHREADS_ECEN=${NTHREADSmax}
+ export APRUN_ECEN="${APRUN} --cpus-per-task=${NTHREADS_ECEN}"
- export NTHREADS_CHGRES=${nth_chgres:-12}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES="time"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
- export APRUN_CALCINC="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_CALCINC}"
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
+ export APRUN_CALCINC="${APRUN} --cpus-per-task=${NTHREADS_CALCINC}"
elif [[ "${step}" = "esfc" ]]; then
- nth_max=$((npe_node_max / npe_node_esfc))
+ export NTHREADS_ESFC=${NTHREADSmax}
+ export APRUN_ESFC="${APRUN} --cpus-per-task=${NTHREADS_ESFC}"
- export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
- [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
- export APRUN_ESFC="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_ESFC}"
-
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN} --cpus-per-task=${NTHREADS_CYCLE}"
elif [[ "${step}" = "epos" ]]; then
- nth_max=$((npe_node_max / npe_node_epos))
-
- export NTHREADS_EPOS=${nth_epos:-${nth_max}}
- [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
- export APRUN_EPOS="${launcher} -n ${npe_epos} --cpus-per-task=${NTHREADS_EPOS}"
+ export NTHREADS_EPOS=${NTHREADSmax}
+ export APRUN_EPOS="${APRUN} --cpus-per-task=${NTHREADS_EPOS}"
elif [[ "${step}" = "postsnd" ]]; then
export CFP_MP="YES"
- nth_max=$((npe_node_max / npe_node_postsnd))
+ export NTHREADS_POSTSND=${NTHREADS1}
+ export APRUN_POSTSND="${APRUN} --cpus-per-task=${NTHREADS_POSTSND}"
- export NTHREADS_POSTSND=${nth_postsnd:-1}
- [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
- export APRUN_POSTSND="${launcher} -n ${npe_postsnd} --cpus-per-task=${NTHREADS_POSTSND}"
-
- export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
- [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
- export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
+ export NTHREADS_POSTSNDCFP=${threads_per_task_postsndcfp:-1}
+ [[ ${NTHREADS_POSTSNDCFP} -gt ${max_threads_per_task} ]] && export NTHREADS_POSTSNDCFP=${max_threads_per_task}
+ export APRUN_POSTSNDCFP="${launcher} -n ${ntasks_postsndcfp} ${mpmd_opt}"
elif [[ "${step}" = "awips" ]]; then
- nth_max=$((npe_node_max / npe_node_awips))
-
- export NTHREADS_AWIPS=${nth_awips:-2}
- [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
- export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
+ export NTHREADS_AWIPS=${NTHREADS1}
+ export APRUN_AWIPSCFP="${APRUN} ${mpmd_opt}"
elif [[ "${step}" = "gempak" ]]; then
@@ -315,10 +255,7 @@ elif [[ "${step}" = "gempak" ]]; then
elif [[ "${step}" = "fit2obs" ]]; then
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${APRUN} --cpus-per-task=${NTHREADS_FIT2OBS}"
fi
diff --git a/env/S4.env b/env/S4.env
index 5e768d889d..840ca65898 100755
--- a/env/S4.env
+++ b/env/S4.env
@@ -19,9 +19,21 @@ export NTHSTACK=1024000000
ulimit -s unlimited
ulimit -a
-if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
+# Calculate common variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing S4.env"
+ exit 2
+fi
- nth_max=$((npe_node_max / npe_node_prep))
+if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
export POE="NO"
export BACK="NO"
@@ -30,11 +42,11 @@ if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
elif [[ "${step}" = "prepsnowobs" ]]; then
- export APRUN_CALCFIMS="${launcher} -n 1"
+ export APRUN_CALCFIMS="${APRUN}"
elif [[ "${step}" = "prep_emissions" ]]; then
- export APRUN="${launcher} -n 1"
+ export APRUN="${APRUN}"
elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then
@@ -45,62 +57,42 @@ elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}
elif [[ "${step}" = "atmanlvar" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlvar))
-
- export NTHREADS_ATMANLVAR=${nth_atmanlvar:-${nth_max}}
- [[ ${NTHREADS_ATMANLVAR} -gt ${nth_max} ]] && export NTHREADS_ATMANLVAR=${nth_max}
- export APRUN_ATMANLVAR="${launcher} -n ${npe_atmanlvar}"
+ export NTHREADS_ATMANLVAR=${NTHREADSmax}
+ export APRUN_ATMANLVAR="${APRUN}"
elif [[ "${step}" = "atmensanlletkf" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlletkf))
-
- export NTHREADS_ATMENSANLLETKF=${nth_atmensanlletkf:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLLETKF} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLLETKF=${nth_max}
- export APRUN_ATMENSANLLETKF="${launcher} -n ${npe_atmensanlletkf}"
+ export NTHREADS_ATMENSANLLETKF=${NTHREADSmax}
+ export APRUN_ATMENSANLLETKF="${APRUN}"
elif [[ "${step}" = "atmensanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlfv3inc))
-
- export NTHREADS_ATMENSANLFV3INC=${nth_atmensanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLFV3INC=${nth_max}
- export APRUN_ATMENSANLFV3INC="${launcher} -n ${npe_atmensanlfv3inc}"
+ export NTHREADS_ATMENSANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMENSANLFV3INC="${APRUN}"
elif [[ "${step}" = "aeroanlrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_aeroanlrun))
-
- export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
- [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
- export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun}"
+ export NTHREADS_AEROANL=${NTHREADSmax}
+ export APRUN_AEROANL="${APRUN}"
elif [[ "${step}" = "prepobsaero" ]]; then
- nth_max=$((npe_node_max / npe_node_prepobsaero))
-
- export NTHREADS_PREPOBSAERO=${nth_prepobsaero:-1}
- export APRUN_PREPOBSAERO="${launcher} -n ${npe_prepobsaero} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
+ export NTHREADS_PREPOBSAERO=${NTHREADS1}
+ export APRUN_PREPOBSAERO="${APRUN} --cpus-per-task=${NTHREADS_PREPOBSAERO}"
elif [[ "${step}" = "snowanl" ]]; then
- nth_max=$((npe_node_max / npe_node_snowanl))
-
- export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
- [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
- export APRUN_SNOWANL="${launcher} -n ${npe_snowanl}"
+ export NTHREADS_SNOWANL=${NTHREADSmax}
+ export APRUN_SNOWANL="${APRUN}"
export APRUN_APPLY_INCR="${launcher} -n 6"
elif [[ "${step}" = "atmanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlfv3inc))
-
- export NTHREADS_ATMANLFV3INC=${nth_atmanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMANLFV3INC=${nth_max}
- export APRUN_ATMANLFV3INC="${launcher} -n ${npe_atmanlfv3inc}"
+ export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMANLFV3INC="${APRUN}"
elif [[ "${step}" = "marinebmat" ]]; then
echo "WARNING: ${step} is not enabled on S4!"
@@ -117,44 +109,35 @@ elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
export USE_CFP=${USE_CFP:-"YES"}
export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_anal))
-
- export NTHREADS_GSI=${nth_anal:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN}"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
export APRUN_CALCINC="${launcher} \$ncmd"
- export NTHREADS_CYCLE=${nth_cycle:-12}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_cycle=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_cycle}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-12}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ ntasks_cycle=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${ntasks_cycle}"
export NTHREADS_GAUSFCANL=1
- npe_gausfcanl=${npe_gausfcanl:-1}
- export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl}"
+ ntasks_gausfcanl=${ntasks_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${ntasks_gausfcanl}"
elif [[ "${step}" = "sfcanl" ]]; then
- nth_max=$((npe_node_max / npe_node_sfcanl))
-
- export NTHREADS_CYCLE=${nth_sfcanl:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_sfcanl=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_sfcanl}"
+ export NTHREADS_CYCLE=${threads_per_task:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN}"
elif [[ "${step}" = "eobs" ]]; then
export MKL_NUM_THREADS=4
export MKL_CBWR=AUTO
- nth_max=$((npe_node_max / npe_node_eobs))
-
- export NTHREADS_GSI=${nth_eobs:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN}"
export CFP_MP=${CFP_MP:-"YES"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -162,11 +145,8 @@ elif [[ "${step}" = "eobs" ]]; then
elif [[ "${step}" = "eupd" ]]; then
- nth_max=$((npe_node_max / npe_node_eupd))
-
- export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
- [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
- export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}}"
+ export NTHREADS_ENKF=${NTHREADSmax}
+ export APRUN_ENKF="${launcher} -n ${ntasks_enkf:-${ntasks}}"
export CFP_MP=${CFP_MP:-"YES"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -174,25 +154,17 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- ppn="npe_node_${step}_${RUN}"
- [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
- nprocs="npe_${step}_${RUN}"
- [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
-
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
- unset nprocs ppn nnodes ntasks
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks}"
+ unset nnodes ufs_ntasks
elif [[ "${step}" = "upp" ]]; then
- nth_max=$((npe_node_max / npe_node_upp))
-
- export NTHREADS_UPP=${nth_upp:-1}
- [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
+ export NTHREADS_UPP=${NTHREADS1}
export OMP_NUM_THREADS="${NTHREADS_UPP}"
- export APRUN_UPP="${launcher} -n ${npe_upp}"
+ export APRUN_UPP="${APRUN}"
elif [[ "${step}" = "atmos_products" ]]; then
@@ -200,75 +172,39 @@ elif [[ "${step}" = "atmos_products" ]]; then
elif [[ "${step}" = "oceanice_products" ]]; then
- nth_max=$((npe_node_max / npe_node_oceanice_products))
-
- export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export NTHREADS_OCNICEPOST=${NTHREADS1}
export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
elif [[ "${step}" = "ecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ecen))
+ export NTHREADS_ECEN=${NTHREADSmax}
+ export APRUN_ECEN="${APRUN}"
- export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
- [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
- export APRUN_ECEN="${launcher} -n ${npe_ecen}"
-
- export NTHREADS_CHGRES=${nth_chgres:-12}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES="time"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
- export APRUN_CALCINC="${launcher} -n ${npe_ecen}"
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
+ export APRUN_CALCINC="${APRUN}"
elif [[ "${step}" = "esfc" ]]; then
- nth_max=$((npe_node_max / npe_node_esfc))
-
- export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
- [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
- export APRUN_ESFC="${launcher} -n ${npe_esfc}"
+ export NTHREADS_ESFC=${NTHREADSmax}
+ export APRUN_ESFC="${APRUN}"
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_esfc}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN}"
elif [[ "${step}" = "epos" ]]; then
- nth_max=$((npe_node_max / npe_node_epos))
-
- export NTHREADS_EPOS=${nth_epos:-${nth_max}}
- [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
- export APRUN_EPOS="${launcher} -n ${npe_epos}"
-
-elif [[ "${step}" = "postsnd" ]]; then
-
- export CFP_MP="YES"
-
- nth_max=$((npe_node_max / npe_node_postsnd))
-
- export NTHREADS_POSTSND=${nth_postsnd:-1}
- [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
- export APRUN_POSTSND="${launcher} -n ${npe_postsnd}"
-
- export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
- [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
- export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
-
-elif [[ "${step}" = "awips" ]]; then
-
- echo "WARNING: ${step} is not enabled on S4!"
-
-elif [[ "${step}" = "gempak" ]]; then
-
- echo "WARNING: ${step} is not enabled on S4!"
+ export NTHREADS_EPOS=${NTHREADSmax}
+ export APRUN_EPOS="${APRUN}"
elif [[ "${step}" = "fit2obs" ]]; then
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -n ${npe_fit2obs}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${APRUN}"
fi
diff --git a/env/WCOSS2.env b/env/WCOSS2.env
index befca81d26..18caf1bc03 100755
--- a/env/WCOSS2.env
+++ b/env/WCOSS2.env
@@ -13,9 +13,21 @@ step=$1
export launcher="mpiexec -l"
export mpmd_opt="--cpu-bind verbose,core cfp"
-if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
+# Calculate common resource variables
+# Check first if the dependent variables are set
+if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then
+ max_threads_per_task=$((max_tasks_per_node / tasks_per_node))
+ NTHREADSmax=${threads_per_task:-${max_threads_per_task}}
+ NTHREADS1=${threads_per_task:-1}
+ [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
+ [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
+ APRUN="${launcher} -n ${ntasks}"
+else
+ echo "ERROR config.resources must be sourced before sourcing WCOSS2.env"
+ exit 2
+fi
- nth_max=$((npe_node_max / npe_node_prep))
+if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
export POE=${POE:-"YES"}
export BACK=${BACK:-"off"}
@@ -24,11 +36,11 @@ if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
elif [[ "${step}" = "prepsnowobs" ]]; then
- export APRUN_CALCFIMS="${launcher} -n 1"
+ export APRUN_CALCFIMS="${APRUN}"
elif [[ "${step}" = "prep_emissions" ]]; then
- export APRUN="${launcher} -n 1"
+ export APRUN="${APRUN}"
elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then
@@ -38,62 +50,42 @@ elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}
elif [[ "${step}" = "atmanlvar" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlvar))
-
- export NTHREADS_ATMANLVAR=${nth_atmanlvar:-${nth_max}}
- [[ ${NTHREADS_ATMANLVAR} -gt ${nth_max} ]] && export NTHREADS_ATMANLVAR=${nth_max}
- export APRUN_ATMANLVAR="${launcher} -n ${npe_atmanlvar}"
+ export NTHREADS_ATMANLVAR=${NTHREADSmax}
+ export APRUN_ATMANLVAR="${APRUN}"
elif [[ "${step}" = "atmensanlletkf" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlletkf))
-
- export NTHREADS_ATMENSANLLETKF=${nth_atmensanlletkf:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLLETKF} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLLETKF=${nth_max}
- export APRUN_ATMENSANLLETKF="${launcher} -n ${npe_atmensanlletkf}"
+ export NTHREADS_ATMENSANLLETKF=${NTHREADSmax}
+ export APRUN_ATMENSANLLETKF="${APRUN}"
elif [[ "${step}" = "atmensanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmensanlfv3inc))
-
- export NTHREADS_ATMENSANLFV3INC=${nth_atmensanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMENSANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMENSANLFV3INC=${nth_max}
- export APRUN_ATMENSANLFV3INC="${launcher} -n ${npe_atmensanlfv3inc}"
+ export NTHREADS_ATMENSANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMENSANLFV3INC="${APRUN}"
elif [[ "${step}" = "aeroanlrun" ]]; then
export APRUNCFP="${launcher} -np \$ncmd ${mpmd_opt}"
- nth_max=$((npe_node_max / npe_node_aeroanlrun))
-
- export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
- [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
- export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun}"
+ export NTHREADS_AEROANL=${NTHREADSmax}
+ export APRUN_AEROANL="${APRUN}"
elif [[ "${step}" = "prepobsaero" ]]; then
- nth_max=$((npe_node_max / npe_node_prepaeroobs))
-
- export NTHREADS_PREPOBSAERO=${nth_prepobsaero:-1}
- export APRUN_PREPOBSAERO="${launcher} -n ${npe_prepobsaero} --ppn ${npe_node_prepobsaero}--cpu-bind depth --depth=${NTHREADS_PREPOBSAERO}"
+ export NTHREADS_PREPOBSAERO=${NTHREADS1}
+ export APRUN_PREPOBSAERO="${APRUN} --ppn ${tasks_per_node} --cpu-bind depth --depth=${NTHREADS_PREPOBSAERO}"
elif [[ "${step}" = "snowanl" ]]; then
- nth_max=$((npe_node_max / npe_node_snowanl))
-
- export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
- [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
- export APRUN_SNOWANL="${launcher} -n ${npe_snowanl}"
+ export NTHREADS_SNOWANL=${NTHREADSmax}
+ export APRUN_SNOWANL="${APRUN}"
export APRUN_APPLY_INCR="${launcher} -n 6"
elif [[ "${step}" = "atmanlfv3inc" ]]; then
- nth_max=$((npe_node_max / npe_node_atmanlfv3inc))
-
- export NTHREADS_ATMANLFV3INC=${nth_atmanlfv3inc:-${nth_max}}
- [[ ${NTHREADS_ATMANLFV3INC} -gt ${nth_max} ]] && export NTHREADS_ATMANLFV3INC=${nth_max}
- export APRUN_ATMANLFV3INC="${launcher} -n ${npe_atmanlfv3inc}"
+ export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
+ export APRUN_ATMANLFV3INC="${APRUN}"
elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
@@ -105,27 +97,24 @@ elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
export MPICH_MPIIO_HINTS="*:romio_cb_write=disable"
fi
- nth_max=$((npe_node_max / npe_node_anal))
-
- export NTHREADS_GSI=${nth_anal:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} -ppn ${npe_node_anal} --cpu-bind depth --depth ${NTHREADS_GSI}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN} -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_GSI}"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
export APRUN_CALCINC="${launcher} \$ncmd"
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_cycle=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_cycle} -ppn ${npe_node_cycle} --cpu-bind depth --depth ${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ ntasks_cycle=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${ntasks_cycle} -ppn ${tasks_per_node_cycle} --cpu-bind depth --depth ${NTHREADS_CYCLE}"
export NTHREADS_GAUSFCANL=1
- npe_gausfcanl=${npe_gausfcanl:-1}
- export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl}"
+ ntasks_gausfcanl=${ntasks_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${ntasks_gausfcanl}"
- export NTHREADS_CHGRES=${nth_echgres:-14}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_echgres:-14}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES=""
export CFP_MP=${CFP_MP:-"NO"}
@@ -134,12 +123,9 @@ elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
elif [[ "${step}" = "sfcanl" ]]; then
- nth_max=$((npe_node_max / npe_node_sfcanl))
-
- export NTHREADS_CYCLE=${nth_sfcanl:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- npe_sfcanl=${ntiles:-6}
- export APRUN_CYCLE="${launcher} -n ${npe_sfcanl}"
+ export NTHREADS_CYCLE=${threads_per_task:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN}"
elif [[ "${step}" = "eobs" ]]; then
@@ -147,11 +133,8 @@ elif [[ "${step}" = "eobs" ]]; then
export OMP_STACKSIZE=1G
export FI_OFI_RXM_SAR_LIMIT=3145728
- nth_max=$((npe_node_max / npe_node_eobs))
-
- export NTHREADS_GSI=${nth_eobs:-${nth_max}}
- [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
- export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} -ppn ${npe_node_eobs} --cpu-bind depth --depth ${NTHREADS_GSI}"
+ export NTHREADS_GSI=${NTHREADSmax}
+ export APRUN_GSI="${APRUN} -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_GSI}"
export CFP_MP=${CFP_MP:-"NO"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -164,11 +147,8 @@ elif [[ "${step}" = "eupd" ]]; then
export MPICH_COLL_OPT_OFF=1
export FI_OFI_RXM_SAR_LIMIT=3145728
- nth_max=$((npe_node_max / npe_node_eupd))
-
- export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
- [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
- export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} -ppn ${npe_node_eupd} --cpu-bind depth --depth ${NTHREADS_ENKF}"
+ export NTHREADS_ENKF=${NTHREADSmax}
+ export APRUN_ENKF="${launcher} -n ${ntasks_enkf:-${ntasks}} -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_ENKF}"
export CFP_MP=${CFP_MP:-"NO"}
export USE_CFP=${USE_CFP:-"YES"}
@@ -176,36 +156,26 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- ppn="npe_node_${step}_${RUN}"
- [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
- nprocs="npe_${step}_${RUN}"
- [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
-
- (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
- (( ntasks = nnodes*${!ppn} ))
+ (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
+ (( ufs_ntasks = nnodes*tasks_per_node ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks} -ppn ${!ppn} --cpu-bind depth --depth 1"
- unset nprocs ppn nnodes ntasks
+ export APRUN_UFS="${launcher} -n ${ufs_ntasks} -ppn ${tasks_per_node} --cpu-bind depth --depth 1"
+ unset nnodes ufs_ntasks
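The arithmetic above is a ceiling division that pads the UFS task count up to whole nodes; a quick example with invented numbers:

    ntasks=300 tasks_per_node=128
    (( nnodes = (ntasks + tasks_per_node - 1) / tasks_per_node ))  # 3 nodes
    (( ufs_ntasks = nnodes * tasks_per_node ))                     # 384 tasks requested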
# TODO: Why are fcst and efcs so different on WCOSS2?
# TODO: Compare these with the ufs-weather-model regression test job card at:
# https://github.com/ufs-community/ufs-weather-model/blob/develop/tests/fv3_conf/fv3_qsub.IN_wcoss2
export FI_OFI_RXM_RX_SIZE=40000
export FI_OFI_RXM_TX_SIZE=40000
- if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- export OMP_PLACES=cores
- export OMP_STACKSIZE=2048M
- export MPICH_MPIIO_HINTS="*:romio_cb_write=disable"
- export FI_OFI_RXM_SAR_LIMIT=3145728
- fi
+ export OMP_PLACES=cores
+ export OMP_STACKSIZE=2048M
+ export MPICH_MPIIO_HINTS="*:romio_cb_write=disable"
+ export FI_OFI_RXM_SAR_LIMIT=3145728
elif [[ "${step}" = "upp" ]]; then
- nth_max=$((npe_node_max / npe_node_upp))
-
- export NTHREADS_UPP=${nth_upp:-1}
- [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
- export APRUN_UPP="${launcher} -n ${npe_upp} -ppn ${npe_node_upp} --cpu-bind depth --depth ${NTHREADS_UPP}"
+ export NTHREADS_UPP=${NTHREADS1}
+ export APRUN_UPP="${APRUN} -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_UPP}"
elif [[ "${step}" = "atmos_products" ]]; then
@@ -213,89 +183,66 @@ elif [[ "${step}" = "atmos_products" ]]; then
elif [[ "${step}" = "oceanice_products" ]]; then
- nth_max=$((npe_node_max / npe_node_oceanice_products))
-
- export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
- export APRUN_OCNICEPOST="${launcher} -n 1 -ppn ${npe_node_oceanice_products} --cpu-bind depth --depth ${NTHREADS_OCNICEPOST}"
+ export NTHREADS_OCNICEPOST=${NTHREADS1}
+ export APRUN_OCNICEPOST="${launcher} -n 1 -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_OCNICEPOST}"
elif [[ "${step}" = "ecen" ]]; then
- nth_max=$((npe_node_max / npe_node_ecen))
-
- export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
- [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
- export APRUN_ECEN="${launcher} -n ${npe_ecen} -ppn ${npe_node_ecen} --cpu-bind depth --depth ${NTHREADS_ECEN}"
+ export NTHREADS_ECEN=${NTHREADSmax}
+ export APRUN_ECEN="${APRUN} -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_ECEN}"
- export NTHREADS_CHGRES=${nth_chgres:-14}
- [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export NTHREADS_CHGRES=${threads_per_task_chgres:-14}
+ [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node}
export APRUN_CHGRES="time"
- export NTHREADS_CALCINC=${nth_calcinc:-1}
- [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
- export APRUN_CALCINC="${launcher} -n ${npe_ecen}"
+ export NTHREADS_CALCINC=${threads_per_task_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task}
+ export APRUN_CALCINC="${APRUN}"
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_ecen} -ppn ${npe_node_cycle} --cpu-bind depth --depth ${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN} -ppn ${tasks_per_node_cycle} --cpu-bind depth --depth ${NTHREADS_CYCLE}"
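Assuming, purely for illustration, ntasks=80, tasks_per_node_cycle=20, and NTHREADS_CYCLE=4, APRUN_CYCLE above would render roughly as:

    mpiexec -l -n 80 -ppn 20 --cpu-bind depth --depth 4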
elif [[ "${step}" = "esfc" ]]; then
- nth_max=$((npe_node_max / npe_node_esfc))
+ export NTHREADS_ESFC=${NTHREADSmax}
+ export APRUN_ESFC="${APRUN} -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_ESFC}"
- export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
- [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
- export APRUN_ESFC="${launcher} -n ${npe_esfc} -ppn ${npe_node_esfc} --cpu-bind depth --depth ${NTHREADS_ESFC}"
-
- export NTHREADS_CYCLE=${nth_cycle:-14}
- [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
- export APRUN_CYCLE="${launcher} -n ${npe_esfc} -ppn ${npe_node_cycle} --cpu-bind depth --depth ${NTHREADS_CYCLE}"
+ export NTHREADS_CYCLE=${threads_per_task_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node}
+ export APRUN_CYCLE="${APRUN} -ppn ${tasks_per_node_cycle} --cpu-bind depth --depth ${NTHREADS_CYCLE}"
elif [[ "${step}" = "epos" ]]; then
- nth_max=$((npe_node_max / npe_node_epos))
-
- export NTHREADS_EPOS=${nth_epos:-${nth_max}}
- [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
- export APRUN_EPOS="${launcher} -n ${npe_epos} -ppn ${npe_node_epos} --cpu-bind depth --depth ${NTHREADS_EPOS}"
+ export NTHREADS_EPOS=${NTHREADSmax}
+ export APRUN_EPOS="${APRUN} -ppn ${tasks_per_node} --cpu-bind depth --depth ${NTHREADS_EPOS}"
elif [[ "${step}" = "postsnd" ]]; then
export MPICH_MPIIO_HINTS_DISPLAY=1
export OMP_NUM_THREADS=1
- nth_max=$((npe_node_max / npe_node_postsnd))
-
- export NTHREADS_POSTSND=${nth_postsnd:-1}
- [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
- export APRUN_POSTSND="${launcher} -n ${npe_postsnd} --depth=${NTHREADS_POSTSND} --cpu-bind depth"
+ export NTHREADS_POSTSND=${NTHREADS1}
+ export APRUN_POSTSND="${APRUN} --depth=${NTHREADS_POSTSND} --cpu-bind depth"
- export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
- [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
- export APRUN_POSTSNDCFP="${launcher} -np ${npe_postsndcfp} ${mpmd_opt}"
+ export NTHREADS_POSTSNDCFP=${threads_per_task_postsndcfp:-1}
+ [[ ${NTHREADS_POSTSNDCFP} -gt ${max_threads_per_task} ]] && export NTHREADS_POSTSNDCFP=${max_threads_per_task}
+ export APRUN_POSTSNDCFP="${launcher} -np ${ntasks_postsndcfp} ${mpmd_opt}"
elif [[ "${step}" = "awips" ]]; then
- nth_max=$((npe_node_max / npe_node_awips))
-
- export NTHREADS_AWIPS=${nth_awips:-2}
- [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
- export APRUN_AWIPSCFP="${launcher} -np ${npe_awips} ${mpmd_opt}"
+ export NTHREADS_AWIPS=${NTHREADS1}
+ export APRUN_AWIPSCFP="${launcher} -np ${ntasks} ${mpmd_opt}"
elif [[ "${step}" = "gempak" ]]; then
- nth_max=$((npe_node_max / npe_node_gempak))
-
- export NTHREADS_GEMPAK=${nth_gempak:-1}
- [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
- export APRUN_GEMPAKCFP="${launcher} -np ${npe_gempak} ${mpmd_opt}"
+ export NTHREADS_GEMPAK=${NTHREADS1}
+ export APRUN_GEMPAKCFP="${launcher} -np ${ntasks} ${mpmd_opt}"
elif [[ "${step}" = "fit2obs" ]]; then
- nth_max=$((npe_node_max / npe_node_fit2obs))
-
- export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
- [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
- export MPIRUN="${launcher} -np ${npe_fit2obs}"
+ export NTHREADS_FIT2OBS=${NTHREADS1}
+ export MPIRUN="${launcher} -np ${ntasks}"
elif [[ "${step}" = "waveawipsbulls" ]]; then
diff --git a/jobs/JGDAS_ATMOS_ANALYSIS_DIAG b/jobs/JGDAS_ATMOS_ANALYSIS_DIAG
index 04655abd4e..a1e0c9f1d5 100755
--- a/jobs/JGDAS_ATMOS_ANALYSIS_DIAG
+++ b/jobs/JGDAS_ATMOS_ANALYSIS_DIAG
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "anal" -c "base anal analdiag"
##############################################
# Set variables used in the script
##############################################
-export CDUMP="${RUN/enkf}"
export DO_CALC_ANALYSIS=${DO_CALC_ANALYSIS:-"YES"}
@@ -24,7 +23,7 @@ export gcyc=${GDATE:8:2}
export GDUMP="gdas"
export GDUMP_ENS="enkf${GDUMP}"
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN/enkf}.t${cyc}z."
export GPREFIX="${GDUMP}.t${gcyc}z."
export APREFIX="${RUN}.t${cyc}z."
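The ${RUN/enkf} idiom that replaces CDUMP throughout these j-jobs is plain bash pattern substitution, deleting the first occurrence of "enkf"; a quick sketch:

    RUN="enkfgdas"; echo "${RUN/enkf}"  # -> gdas
    RUN="gdas";     echo "${RUN/enkf}"  # -> gdas (unchanged when "enkf" is absent)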
diff --git a/jobs/JGDAS_ATMOS_CHGRES_FORENKF b/jobs/JGDAS_ATMOS_CHGRES_FORENKF
index eb8f073733..5747675fe2 100755
--- a/jobs/JGDAS_ATMOS_CHGRES_FORENKF
+++ b/jobs/JGDAS_ATMOS_CHGRES_FORENKF
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "anal" -c "base anal echgres"
##############################################
# Set variables used in the script
##############################################
-export CDUMP=${RUN/enkf}
export DO_CALC_ANALYSIS=${DO_CALC_ANALYSIS:-"YES"}
@@ -15,10 +14,10 @@ export DO_CALC_ANALYSIS=${DO_CALC_ANALYSIS:-"YES"}
# Begin JOB SPECIFIC work
##############################################
-export APREFIX="${CDUMP}.t${cyc}z."
+export APREFIX="${RUN/enkf}.t${cyc}z."
export APREFIX_ENS="${RUN}.t${cyc}z."
-RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_HISTORY
+RUN=${RUN/enkf} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_HISTORY
MEMDIR="mem001" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_HISTORY_MEM:COM_ATMOS_HISTORY_TMPL
###############################################################
diff --git a/jobs/JGDAS_ENKF_ARCHIVE b/jobs/JGDAS_ENKF_ARCHIVE
index 7496acd8d4..29ef9c1812 100755
--- a/jobs/JGDAS_ENKF_ARCHIVE
+++ b/jobs/JGDAS_ENKF_ARCHIVE
@@ -7,8 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "earc" -c "base earc"
##############################################
# Set variables used in the script
##############################################
-export CDUMP=${RUN/enkf}
-
YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_TOP
MEMDIR="ensstat" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
COMIN_ATMOS_ANALYSIS_ENSSTAT:COM_ATMOS_ANALYSIS_TMPL \
diff --git a/jobs/JGDAS_ENKF_DIAG b/jobs/JGDAS_ENKF_DIAG
index cdf258379f..cc8c933cc8 100755
--- a/jobs/JGDAS_ENKF_DIAG
+++ b/jobs/JGDAS_ENKF_DIAG
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "eobs" -c "base anal eobs analdiag edi
##############################################
# Set variables used in the script
##############################################
-export CDUMP="${RUN/enkf}"
export MAKE_NSSTBUFR=${MAKE_NSSTBUFR:-"NO"}
export MAKE_ACFTBUFR=${MAKE_ACFTBUFR:-"NO"}
@@ -26,12 +25,12 @@ export GDUMP_ENS="enkf${GDUMP}"
export CASE=${CASE_ENS}
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN/enkf}.t${cyc}z."
export APREFIX="${RUN}.t${cyc}z."
export GPREFIX="${GDUMP_ENS}.t${gcyc}z."
GPREFIX_DET="${GDUMP}.t${gcyc}z."
-RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS
+RUN=${RUN/enkf} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS
MEMDIR="ensstat" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_ANALYSIS
RUN=${GDUMP} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
diff --git a/jobs/JGDAS_ENKF_ECEN b/jobs/JGDAS_ENKF_ECEN
index 20a818f6e9..38bf847b38 100755
--- a/jobs/JGDAS_ENKF_ECEN
+++ b/jobs/JGDAS_ENKF_ECEN
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "ecen" -c "base ecen"
##############################################
# Set variables used in the script
##############################################
-export CDUMP="${RUN/enkf}"
##############################################
# Begin JOB SPECIFIC work
@@ -23,13 +22,13 @@ export GDUMP_ENS="enkf${GDUMP}"
export CASE=${CASE_ENS}
-export OPREFIX="${CDUMP}.t${cyc}z."
-export APREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN/enkf}.t${cyc}z."
+export APREFIX="${RUN/enkf}.t${cyc}z."
export APREFIX_ENS="${RUN}.t${cyc}z."
export GPREFIX="${GDUMP}.t${gcyc}z."
export GPREFIX_ENS="${GDUMP_ENS}.t${gcyc}z."
-RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+RUN=${RUN/enkf} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
COM_ATMOS_ANALYSIS_DET:COM_ATMOS_ANALYSIS_TMPL
MEMDIR="ensstat" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
diff --git a/jobs/JGDAS_ENKF_POST b/jobs/JGDAS_ENKF_POST
index 0f7039d614..3a3b5b0c71 100755
--- a/jobs/JGDAS_ENKF_POST
+++ b/jobs/JGDAS_ENKF_POST
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "epos" -c "base epos"
##############################################
# Set variables used in the script
##############################################
-export CDUMP=${RUN/enkf}
##############################################
diff --git a/jobs/JGDAS_ENKF_SELECT_OBS b/jobs/JGDAS_ENKF_SELECT_OBS
index 5f08dadffd..3cfe48bb2b 100755
--- a/jobs/JGDAS_ENKF_SELECT_OBS
+++ b/jobs/JGDAS_ENKF_SELECT_OBS
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "eobs" -c "base anal eobs"
##############################################
# Set variables used in the script
##############################################
-export CDUMP=${RUN/enkf}
export MAKE_NSSTBUFR=${MAKE_NSSTBUFR:-"NO"}
export MAKE_ACFTBUFR=${MAKE_ACFTBUFR:-"NO"}
@@ -24,20 +23,20 @@ export gcyc=${GDATE:8:2}
export GDUMP="gdas"
export GDUMP_ENS="enkf${GDUMP}"
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN/enkf}.t${cyc}z."
export APREFIX="${RUN}.t${cyc}z."
export GPREFIX="${GDUMP_ENS}.t${gcyc}z."
-APREFIX_DET="${CDUMP}.t${cyc}z."
+APREFIX_DET="${RUN/enkf}.t${cyc}z."
GPREFIX_DET="${GDUMP}.t${gcyc}z."
export GSUFFIX=".ensmean.nc"
# Generate COM variables from templates
-RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS
+RUN=${RUN/enkf} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS
MEMDIR='ensstat' YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_ANALYSIS
declare -rx COM_ATMOS_ANALYSIS_ENS="${COM_ATMOS_ANALYSIS}"
-RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -r COM_ATMOS_ANALYSIS_DET:COM_ATMOS_ANALYSIS_TMPL
+RUN=${RUN/enkf} YMD=${PDY} HH=${cyc} declare_from_tmpl -r COM_ATMOS_ANALYSIS_DET:COM_ATMOS_ANALYSIS_TMPL
MEMDIR='ensstat' RUN=${GDUMP_ENS} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
COM_ATMOS_ANALYSIS_PREV:COM_ATMOS_ANALYSIS_TMPL \
diff --git a/jobs/JGDAS_ENKF_SFC b/jobs/JGDAS_ENKF_SFC
index d859abeb57..1ed10f20c0 100755
--- a/jobs/JGDAS_ENKF_SFC
+++ b/jobs/JGDAS_ENKF_SFC
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "esfc" -c "base esfc"
##############################################
# Set variables used in the script
##############################################
-export CDUMP="${RUN/enkf}"
##############################################
# Begin JOB SPECIFIC work
@@ -21,19 +20,19 @@ export gcyc=${GDATE:8:2}
export GDUMP="gdas"
export GDUMP_ENS="enkf${GDUMP}"
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN/enkf}.t${cyc}z."
export GPREFIX="${GDUMP}.t${gcyc}z."
-export APREFIX="${CDUMP}.t${cyc}z."
+export APREFIX="${RUN/enkf}.t${cyc}z."
export CASE=${CASE_ENS}
-export OPREFIX="${CDUMP}.t${cyc}z."
-export APREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN/enkf}.t${cyc}z."
+export APREFIX="${RUN/enkf}.t${cyc}z."
export APREFIX_ENS="${RUN}.t${cyc}z."
export GPREFIX="${GDUMP}.t${gcyc}z."
export GPREFIX_ENS="${GDUMP_ENS}.t${gcyc}z."
-RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS \
+RUN=${RUN/enkf} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS \
COM_ATMOS_ANALYSIS_DET:COM_ATMOS_ANALYSIS_TMPL
RUN=${GDUMP} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
diff --git a/jobs/JGDAS_ENKF_UPDATE b/jobs/JGDAS_ENKF_UPDATE
index 66f9ddf21b..213b49081a 100755
--- a/jobs/JGDAS_ENKF_UPDATE
+++ b/jobs/JGDAS_ENKF_UPDATE
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "eupd" -c "base anal eupd"
##############################################
# Set variables used in the script
##############################################
-export CDUMP="${RUN/enkf}"
##############################################
diff --git a/jobs/JGDAS_FIT2OBS b/jobs/JGDAS_FIT2OBS
index 1b37168093..7e000c95cf 100755
--- a/jobs/JGDAS_FIT2OBS
+++ b/jobs/JGDAS_FIT2OBS
@@ -8,8 +8,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "fit2obs" -c "base fit2obs"
# Set variables used in the script
##############################################
-export CDUMP=${RUN/enkf}
-
# Ignore spelling warning; nothing is misspelled
# shellcheck disable=SC2153
CDATE=$(${NDATE} -"${VBACKUP_FITS}" "${PDY}${cyc}") # set CDATE to lookback cycle for use in fit2obs package
@@ -20,7 +18,7 @@ vcyc=${CDATE:8:2}
# These are used by fit2obs, so we can't change them to the standard COM variable names
# shellcheck disable=SC2153
YMD=${vday} HH=${vcyc} declare_from_tmpl -rx COM_INA:COM_ATMOS_ANALYSIS_TMPL
-RUN=${CDUMP} YMD=${vday} HH=${vcyc} declare_from_tmpl -rx COM_PRP:COM_OBS_TMPL
+RUN=${RUN/enkf} YMD=${vday} HH=${vcyc} declare_from_tmpl -rx COM_PRP:COM_OBS_TMPL
# We want to defer variable expansion, so ignore warning about single quotes
# shellcheck disable=SC2016
diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_CHKPT b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_CHKPT
index 64764e249d..875fe9d0ee 100755
--- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_CHKPT
+++ b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_CHKPT
@@ -19,7 +19,7 @@ export GDUMP=${GDUMP:-"gdas"}
export GPREFIX="${GDUMP}.t${gcyc}z."
# Ignore possible spelling error (nothing is misspelled)
# shellcheck disable=SC2153
-export APREFIX="${CDUMP}.t${cyc}z."
+export APREFIX="${RUN}.t${cyc}z."
# Generate COM variables from templates
YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_ANALYSIS
diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_POST b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_POST
index c87dc6b34f..00597f14f8 100755
--- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_POST
+++ b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_POST
@@ -8,6 +8,9 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalpost" -c "base ocnanalpost"
##############################################
# Set variables used in the script
##############################################
+# TODO remove this CDUMP declaration when the GDAS script
+# exgdas_global_marine_analysis_post.py is updated to look for RUN instead
+# of CDUMP.
export CDUMP=${CDUMP:-${RUN:-"gfs"}}
export CDATE=${CDATE:-${PDY}${cyc}}
export GDUMP=${GDUMP:-"gdas"}
diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP
index bf714939f5..664df3aad6 100755
--- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP
+++ b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalprep" -c "base ocnanal ocnanal
##############################################
# Set variables used in the script
##############################################
-export CDUMP=${CDUMP:-${RUN:-"gfs"}}
# Ignore possible spelling error (nothing is misspelled)
# shellcheck disable=SC2153
GDATE=$(date --utc +%Y%m%d%H -d "${PDY} ${cyc} - ${assim_freq} hours")
@@ -16,9 +15,9 @@ export gPDY=${GDATE:0:8}
export gcyc=${GDATE:8:2}
export GDUMP=${GDUMP:-"gdas"}
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN}.t${cyc}z."
export GPREFIX="${GDUMP}.t${gcyc}z."
-export APREFIX="${CDUMP}.t${cyc}z."
+export APREFIX="${RUN}.t${cyc}z."
# Generate COM variables from templates
YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS
diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_VRFY b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_VRFY
index 96aa9a23a7..0d90c46184 100755
--- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_VRFY
+++ b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_VRFY
@@ -6,7 +6,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalprep" -c "base ocnanal ocnanal
##############################################
# Set variables used in the script
##############################################
-export CDUMP=${CDUMP:-${RUN:-"gfs"}}
export GDUMP=${GDUMP:-"gdas"}
# Ignore possible spelling error (nothing is misspelled)
# shellcheck disable=SC2153
diff --git a/jobs/JGFS_ATMOS_CYCLONE_TRACKER b/jobs/JGFS_ATMOS_CYCLONE_TRACKER
index 16a51d5c33..067de2c4aa 100755
--- a/jobs/JGFS_ATMOS_CYCLONE_TRACKER
+++ b/jobs/JGFS_ATMOS_CYCLONE_TRACKER
@@ -15,7 +15,6 @@ export COMPONENT="atmos"
# Set variables used in the exglobal script
##############################################
export CDATE=${CDATE:-${PDY}${cyc}}
-export CDUMP=${RUN/enkf}
####################################
@@ -70,7 +69,7 @@ fi
#############################################################
# Execute the script
export pert="p01"
-export cmodel=${CDUMP}
+export cmodel=${RUN/enkf}
export loopnum=1
#-----------input data checking -----------------
diff --git a/jobs/JGFS_ATMOS_FSU_GENESIS b/jobs/JGFS_ATMOS_FSU_GENESIS
index 89788e6d23..1b92816b61 100755
--- a/jobs/JGFS_ATMOS_FSU_GENESIS
+++ b/jobs/JGFS_ATMOS_FSU_GENESIS
@@ -15,7 +15,6 @@ export COMPONENT="atmos"
# Set variables used in the exglobal script
##############################################
export CDATE=${CDATE:-${PDY}${cyc}}
-export CDUMP=${CDUMP:-${RUN:-"gfs"}}
####################################
diff --git a/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF b/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF
index cd8c76eadd..d62c3320a1 100755
--- a/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF
+++ b/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF
@@ -4,7 +4,7 @@
# GFS GEMPAK NCDC PRODUCT GENERATION
############################################
source "${HOMEgfs}/ush/preamble.sh"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak_gif" -c "base"
+source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak_gif" -c "base gempak"
export MP_PULSE=0
export MP_TIMEOUT=2000
diff --git a/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS b/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS
index 1b2cfd9f0c..72dba0679d 100755
--- a/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS
+++ b/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS
@@ -6,7 +6,7 @@
# GFS PGRB2_SPECIAL_POST PRODUCT GENERATION
############################################
source "${HOMEgfs}/ush/preamble.sh"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "npoess" -c "base"
+source "${HOMEgfs}/ush/jjob_header.sh" -e "npoess" -c "base npoess"
export OMP_NUM_THREADS=${OMP_NUM_THREADS:-1}
diff --git a/jobs/JGFS_ATMOS_POSTSND b/jobs/JGFS_ATMOS_POSTSND
index a038fca9e6..13adb11d7d 100755
--- a/jobs/JGFS_ATMOS_POSTSND
+++ b/jobs/JGFS_ATMOS_POSTSND
@@ -7,7 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "postsnd" -c "base postsnd"
##############################################
# Set variables used in the exglobal script
##############################################
-export CDUMP=${RUN/enkf}
########################################
# Runs GFS BUFR SOUNDINGS
diff --git a/jobs/JGFS_ATMOS_VERIFICATION b/jobs/JGFS_ATMOS_VERIFICATION
index 85b1c734a8..48133364e5 100755
--- a/jobs/JGFS_ATMOS_VERIFICATION
+++ b/jobs/JGFS_ATMOS_VERIFICATION
@@ -9,8 +9,8 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "metp" -c "base metp"
## HOMEgfs : /full/path/to/workflow
## EXPDIR : /full/path/to/config/files
## CDATE : current analysis date (YYYYMMDDHH)
-## CDUMP : cycle name (gdas / gfs)
## PDY : current date (YYYYMMDD)
+## RUN : cycle name (gdas / gfs)
## cyc : current cycle (HH)
## SDATE_GFS : first date of GFS cycle (YYYYMMDDHHMM)
## METPCASE : METplus verification use case (g2g1 | g2o1 | pcp1)
diff --git a/jobs/JGLOBAL_ARCHIVE b/jobs/JGLOBAL_ARCHIVE
index 6e9d671285..401feba35f 100755
--- a/jobs/JGLOBAL_ARCHIVE
+++ b/jobs/JGLOBAL_ARCHIVE
@@ -7,8 +7,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "arch" -c "base arch"
##############################################
# Set variables used in the script
##############################################
-export CDUMP=${RUN/enkf}
-
YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
COMIN_ATMOS_ANALYSIS:COM_ATMOS_ANALYSIS_TMPL \
COMIN_ATMOS_BUFR:COM_ATMOS_BUFR_TMPL \
diff --git a/jobs/JGLOBAL_ATMOS_ANALYSIS b/jobs/JGLOBAL_ATMOS_ANALYSIS
index 1586109c66..5776aa6d13 100755
--- a/jobs/JGLOBAL_ATMOS_ANALYSIS
+++ b/jobs/JGLOBAL_ATMOS_ANALYSIS
@@ -8,7 +8,7 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "anal" -c "base anal"
# Set variables used in the script
##############################################
export CDATE=${CDATE:-${PDY}${cyc}}
-export CDUMP=${RUN/enkf}
+export rCDUMP=${RUN/enkf}
export COMPONENT="atmos"
export DO_CALC_ANALYSIS=${DO_CALC_ANALYSIS:-"YES"}
export MAKE_NSSTBUFR=${MAKE_NSSTBUFR:-"NO"}
@@ -25,9 +25,9 @@ export gcyc=${GDATE:8:2}
export GDUMP="gdas"
export GDUMP_ENS="enkf${GDUMP}"
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${rCDUMP}.t${cyc}z."
export GPREFIX="${GDUMP}.t${gcyc}z."
-export APREFIX="${CDUMP}.t${cyc}z."
+export APREFIX="${rCDUMP}.t${cyc}z."
export GPREFIX_ENS="${GDUMP_ENS}.t${gcyc}z."
# Generate COM variables from templates
diff --git a/jobs/JGLOBAL_ATMOS_ANALYSIS_CALC b/jobs/JGLOBAL_ATMOS_ANALYSIS_CALC
index 4b34638581..5b6073254a 100755
--- a/jobs/JGLOBAL_ATMOS_ANALYSIS_CALC
+++ b/jobs/JGLOBAL_ATMOS_ANALYSIS_CALC
@@ -7,7 +7,7 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "analcalc" -c "base anal analcalc"
##############################################
# Set variables used in the script
##############################################
-export CDUMP="${RUN/enkf}"
+export rCDUMP="${RUN/enkf}"
export DO_CALC_ANALYSIS=${DO_CALC_ANALYSIS:-"YES"}
@@ -23,12 +23,12 @@ export gcyc=${GDATE:8:2}
export GDUMP="gdas"
export GDUMP_ENS="enkf${GDUMP}"
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${rCDUMP}.t${cyc}z."
export GPREFIX="${GDUMP}.t${gcyc}z."
export APREFIX="${RUN}.t${cyc}z."
export GPREFIX_ENS="${GDUMP_ENS}.t${gcyc}z."
-RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS
+RUN=${rCDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS
YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_ANALYSIS COM_ATMOS_RESTART
diff --git a/jobs/JGLOBAL_ATMOS_TROPCY_QC_RELOC b/jobs/JGLOBAL_ATMOS_TROPCY_QC_RELOC
index 7e30b8ab7f..906c195164 100755
--- a/jobs/JGLOBAL_ATMOS_TROPCY_QC_RELOC
+++ b/jobs/JGLOBAL_ATMOS_TROPCY_QC_RELOC
@@ -8,7 +8,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "prep" -c "base prep"
# Set variables used in the exglobal script
##############################################
export CDATE=${CDATE:-${PDY}${cyc}}
-export CDUMP=${RUN/enkf}
##############################################
diff --git a/jobs/JGLOBAL_STAGE_IC b/jobs/JGLOBAL_STAGE_IC
index c460e91c9e..52225ac9d3 100755
--- a/jobs/JGLOBAL_STAGE_IC
+++ b/jobs/JGLOBAL_STAGE_IC
@@ -5,8 +5,9 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "stage_ic" -c "base stage_ic"
# Restart conditions for GFS cycle come from GDAS
# shellcheck disable=SC2153
-rCDUMP=${CDUMP}
-[[ ${CDUMP} = "gfs" ]] && export rCDUMP="gdas"
+rCDUMP=${RUN}
+# shellcheck disable=SC2153
+[[ ${RUN} = "gfs" ]] && export rCDUMP="gdas"
export rCDUMP
# Execute the Script
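A small illustration of the rCDUMP fallback above (a gfs cycle stages its restart conditions from gdas):

    RUN="gfs"
    rCDUMP=${RUN}
    [[ ${RUN} = "gfs" ]] && rCDUMP="gdas"
    echo "${rCDUMP}"  # -> gdas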
diff --git a/jobs/JGLOBAL_WAVE_PREP b/jobs/JGLOBAL_WAVE_PREP
index 65928b870d..5d4e76dc8a 100755
--- a/jobs/JGLOBAL_WAVE_PREP
+++ b/jobs/JGLOBAL_WAVE_PREP
@@ -6,8 +6,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "waveprep" -c "base wave waveprep"
# Add default errchk = err_chk
export errchk=${errchk:-err_chk}
-export CDUMP=${RUN/enkf}
-
# Set rtofs PDY
export RPDY=${PDY}
diff --git a/jobs/rocoto/awips_20km_1p0deg.sh b/jobs/rocoto/awips_20km_1p0deg.sh
index b2a291e37e..af08b46111 100755
--- a/jobs/rocoto/awips_20km_1p0deg.sh
+++ b/jobs/rocoto/awips_20km_1p0deg.sh
@@ -8,7 +8,7 @@ source "${HOMEgfs}/ush/preamble.sh"
## HOMEgfs : /full/path/to/workflow
## EXPDIR : /full/path/to/config/files
## CDATE : current analysis date (YYYYMMDDHH)
-## CDUMP : cycle name (gdas / gfs)
+## RUN : cycle name (gdas / gfs)
## PDY : current date (YYYYMMDD)
## cyc : current cycle (HH)
###############################################################
diff --git a/jobs/rocoto/prep.sh b/jobs/rocoto/prep.sh
index 4c07157b5d..bbde68377d 100755
--- a/jobs/rocoto/prep.sh
+++ b/jobs/rocoto/prep.sh
@@ -13,7 +13,8 @@ export job="prep"
export jobid="${job}.$$"
source "${HOMEgfs}/ush/jjob_header.sh" -e "prep" -c "base prep"
-export CDUMP="${RUN/enkf}"
+# Strip 'enkf' from RUN for pulling data
+RUN_local="${RUN/enkf}"
###############################################################
# Set script and dependency variables
@@ -25,9 +26,9 @@ gPDY=${GDATE:0:8}
gcyc=${GDATE:8:2}
GDUMP="gdas"
-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN_local}.t${cyc}z."
-YMD=${PDY} HH=${cyc} DUMP=${CDUMP} declare_from_tmpl -rx COM_OBS COM_OBSDMP
+YMD=${PDY} HH=${cyc} DUMP=${RUN_local} declare_from_tmpl -rx COM_OBS COM_OBSDMP
RUN=${GDUMP} DUMP=${GDUMP} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
COM_OBS_PREV:COM_OBS_TMPL \
@@ -39,7 +40,7 @@ if [[ ! -d "${COM_OBS}" ]]; then mkdir -p "${COM_OBS}"; fi
###############################################################
# If ROTDIR_DUMP=YES, copy dump files to rotdir
if [[ ${ROTDIR_DUMP} = "YES" ]]; then
- "${HOMEgfs}/ush/getdump.sh" "${PDY}${cyc}" "${CDUMP}" "${COM_OBSDMP}" "${COM_OBS}"
+ "${HOMEgfs}/ush/getdump.sh" "${PDY}${cyc}" "${RUN_local}" "${COM_OBSDMP}" "${COM_OBS}"
status=$?
[[ ${status} -ne 0 ]] && exit ${status}
@@ -73,14 +74,14 @@ if [[ ${PROCESS_TROPCY} = "YES" ]]; then
done
fi
- if [[ ${ROTDIR_DUMP} = "YES" ]]; then rm "${COM_OBS}/${CDUMP}.t${cyc}z.syndata.tcvitals.tm00"; fi
+ if [[ ${ROTDIR_DUMP} = "YES" ]]; then rm "${COM_OBS}/${RUN_local}.t${cyc}z.syndata.tcvitals.tm00"; fi
"${HOMEgfs}/jobs/JGLOBAL_ATMOS_TROPCY_QC_RELOC"
status=$?
[[ ${status} -ne 0 ]] && exit ${status}
else
- if [[ ${ROTDIR_DUMP} = "NO" ]]; then cp "${COM_OBSDMP}/${CDUMP}.t${cyc}z.syndata.tcvitals.tm00" "${COM_OBS}/"; fi
+ if [[ ${ROTDIR_DUMP} = "NO" ]]; then cp "${COM_OBSDMP}/${RUN_local}.t${cyc}z.syndata.tcvitals.tm00" "${COM_OBS}/"; fi
fi
@@ -93,17 +94,17 @@ if [[ ${MAKE_PREPBUFR} = "YES" ]]; then
rm -f "${COM_OBS}/${OPREFIX}nsstbufr"
fi
- export job="j${CDUMP}_prep_${cyc}"
+ export job="j${RUN_local}_prep_${cyc}"
export COMIN=${COM_OBS}
export COMOUT=${COM_OBS}
RUN="gdas" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COMINgdas:COM_ATMOS_HISTORY_TMPL
RUN="gfs" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COMINgfs:COM_ATMOS_HISTORY_TMPL
if [[ ${ROTDIR_DUMP} = "NO" ]]; then
- export COMSP=${COMSP:-"${COM_OBSDMP}/${CDUMP}.t${cyc}z."}
+ export COMSP=${COMSP:-"${COM_OBSDMP}/${RUN_local}.t${cyc}z."}
else
- export COMSP=${COMSP:-"${COM_OBS}/${CDUMP}.t${cyc}z."}
+ export COMSP=${COMSP:-"${COM_OBS}/${RUN_local}.t${cyc}z."}
fi
- export COMSP=${COMSP:-${COMIN_OBS}/${CDUMP}.t${cyc}z.}
+ export COMSP=${COMSP:-${COMIN_OBS}/${RUN_local}.t${cyc}z.}
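The ${COMSP:-...} form is the standard bash use-default-if-unset expansion; for example, with invented paths:

    unset COMSP
    COM_OBSDMP="/dump"; RUN_local="gfs"; cyc="00"
    export COMSP=${COMSP:-"${COM_OBSDMP}/${RUN_local}.t${cyc}z."}
    echo "${COMSP}"  # -> /dump/gfs.t00z.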
# Disable creating NSSTBUFR if desired, copy from DMPDIR instead
if [[ ${MAKE_NSSTBUFR:-"NO"} = "NO" ]]; then
diff --git a/parm/archive/arcdir.yaml.j2 b/parm/archive/arcdir.yaml.j2
index f845e3c9cb..57dbc78885 100644
--- a/parm/archive/arcdir.yaml.j2
+++ b/parm/archive/arcdir.yaml.j2
@@ -50,7 +50,7 @@
ARCDIR ~ "/snowstat." ~ RUN ~ "." ~ cycle_YMDH ~ ".tgz"]) %}
{% endif %}
- {% if AERO_ANL_CDUMP == RUN or AERO_ANL_CDUMP == "both" %}
+ {% if AERO_ANL_RUN == RUN or AERO_ANL_RUN == "both" %}
{% do det_anl_files.append([COMIN_CHEM_ANALYSIS ~ "/" ~ head ~ "aerostat",
ARCDIR ~ "/aerostat." ~ RUN ~ "." ~ cycle_YMDH ]) %}
{% endif %}
diff --git a/parm/archive/gdas.yaml.j2 b/parm/archive/gdas.yaml.j2
index fe6a794224..ce5054a82f 100644
--- a/parm/archive/gdas.yaml.j2
+++ b/parm/archive/gdas.yaml.j2
@@ -66,7 +66,7 @@ gdas:
- "{{ COMIN_ATMOS_ANALYSIS | relpath(ROTDIR) }}/{{ head }}oznstat"
- "{{ COMIN_ATMOS_ANALYSIS | relpath(ROTDIR) }}/{{ head }}radstat"
{% endif %}
- {% if AERO_ANL_CDUMP == "gdas" or AERO_ANL_CDUMP == "both" %}
+ {% if AERO_ANL_RUN == "gdas" or AERO_ANL_RUN == "both" %}
- "{{ COMIN_CHEM_ANALYSIS | relpath(ROTDIR) }}/{{ head }}aerostat"
{% endif %}
{% if DO_PREP_OBS_AERO %}
diff --git a/parm/archive/gfsa.yaml.j2 b/parm/archive/gfsa.yaml.j2
index e76c26e60e..4a86778e2e 100644
--- a/parm/archive/gfsa.yaml.j2
+++ b/parm/archive/gfsa.yaml.j2
@@ -37,7 +37,7 @@ gfsa:
{% else %}
- "{{ COMIN_ATMOS_ANALYSIS | relpath(ROTDIR) }}/{{ head }}gsistat"
{% endif %}
- {% if AERO_ANL_CDUMP == "gfs" or AERO_ANL_CDUMP == "both" %}
+ {% if AERO_ANL_RUN == "gfs" or AERO_ANL_RUN == "both" %}
- "{{ COMIN_CHEM_ANALYSIS | relpath(ROTDIR) }}/{{ head }}aerostat"
{% endif %}
{% if DO_PREP_OBS_AERO %}
diff --git a/parm/archive/master_gfs.yaml.j2 b/parm/archive/master_gfs.yaml.j2
index 14178f3e7e..b789598fac 100644
--- a/parm/archive/master_gfs.yaml.j2
+++ b/parm/archive/master_gfs.yaml.j2
@@ -45,7 +45,7 @@ datasets:
{% endfilter %}
{% endif %}
-{% if AERO_FCST_CDUMP == "gfs" or AERO_FCST_CDUMP == "both" %}
+{% if AERO_FCST_RUN == "gfs" or AERO_FCST_RUN == "both" %}
# Aerosol forecasts
{% filter indent(width=4) %}
{% include "chem.yaml.j2" %}
diff --git a/parm/config/gefs/config.base b/parm/config/gefs/config.base
index 1f5c9228e2..d43ea09206 100644
--- a/parm/config/gefs/config.base
+++ b/parm/config/gefs/config.base
@@ -13,7 +13,6 @@ export RUN_ENVIR="emc"
# Account, queue, etc.
export ACCOUNT="@ACCOUNT@"
-export ACCOUNT_SERVICE="@ACCOUNT_SERVICE@"
export QUEUE="@QUEUE@"
export QUEUE_SERVICE="@QUEUE_SERVICE@"
export PARTITION_BATCH="@PARTITION_BATCH@"
@@ -105,10 +104,7 @@ export ATARDIR="@ATARDIR@"
# Commonly defined parameters in JJOBS
export envir=${envir:-"prod"}
export NET="gefs" # NET is defined in the job-card (ecf)
-export RUN="gefs" # RUN is defined in the job-card (ecf); CDUMP is used at EMC as a RUN proxy
-# TODO: determine where is RUN actually used in the workflow other than here
-# TODO: is it possible to replace all instances of ${CDUMP} to ${RUN} to be
-# consistent w/ EE2?
+export RUN="gefs" # RUN is defined in the job-card (ecf)
# Get all the COM path templates
source "${EXPDIR}/config.com"
@@ -138,9 +134,9 @@ export DO_OCN="NO"
export DO_ICE="NO"
export DO_AERO="NO"
export DO_EXTRACTVARS="@DO_EXTRACTVARS@" # Option to process and extract a subset of products to save on disk
-export AERO_FCST_CDUMP="" # When to run aerosol forecast: gdas, gfs, or both
-export AERO_ANL_CDUMP="" # When to run aerosol analysis: gdas, gfs, or both
-export WAVE_CDUMP="" # When to include wave suite: gdas, gfs, or both
+export AERO_FCST_RUN="" # When to run aerosol forecast: gdas, gfs, or both
+export AERO_ANL_RUN="" # When to run aerosol analysis: gdas, gfs, or both
+export WAVE_RUN="" # When to include wave suite: gdas, gfs, or both
export DOBNDPNT_WAVE="NO" # The GEFS buoys file does not currently have any boundary points
export DOIBP_WAV="NO" # Option to create point outputs from input boundary points
export FRAC_GRID=".true."
@@ -186,13 +182,13 @@ case "${APP}" in
;;
ATMA)
export DO_AERO="YES"
- export AERO_ANL_CDUMP="both"
- export AERO_FCST_CDUMP="gdas"
+ export AERO_ANL_RUN="both"
+ export AERO_FCST_RUN="gdas"
;;
ATMW)
export DO_COUPLED="YES"
export DO_WAVE="YES"
- export WAVE_CDUMP="both"
+ export WAVE_RUN="both"
;;
NG-GODAS)
export DO_ATM="NO"
@@ -206,13 +202,13 @@ case "${APP}" in
if [[ "${APP}" =~ A$ ]]; then
export DO_AERO="YES"
- export AERO_ANL_CDUMP="both"
- export AERO_FCST_CDUMP="gdas"
+ export AERO_ANL_RUN="both"
+ export AERO_FCST_RUN="gdas"
fi
if [[ "${APP}" =~ ^S2SW ]]; then
export DO_WAVE="YES"
- export WAVE_CDUMP="both"
+ export WAVE_RUN="both"
export cplwav2atm=".true."
fi
;;
diff --git a/parm/config/gefs/config.fcst b/parm/config/gefs/config.fcst
index bf24c4e906..e66fc15f87 100644
--- a/parm/config/gefs/config.fcst
+++ b/parm/config/gefs/config.fcst
@@ -9,13 +9,13 @@ export USE_ESMF_THREADING="YES" # Toggle to use ESMF-managed threading or tradi
export COPY_FINAL_RESTARTS="NO" # Toggle to copy restarts from the end of GFS/GEFS Run (GDAS is handled separately)
# Turn off waves if not used for this RUN
-case ${WAVE_CDUMP} in
+case ${WAVE_RUN} in
both | "${RUN/enkf}" ) ;; # Don't change
*) DO_WAVE="NO" ;; # Turn waves off
esac
# Turn off aerosols if not used for this RUN
-case ${AERO_FCST_CDUMP} in
+case ${AERO_FCST_RUN} in
both | "${RUN/enkf}" ) ;; # Don't change
*) DO_AERO="NO" ;; # Turn aerosols off
esac
diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources
index 3e4f05b4c1..81d2a20635 100644
--- a/parm/config/gefs/config.resources
+++ b/parm/config/gefs/config.resources
@@ -16,15 +16,15 @@ step=$1
echo "BEGIN: config.resources"
case ${machine} in
- "WCOSS2") npe_node_max=128;;
- "HERA") npe_node_max=40;;
- "ORION") npe_node_max=40;;
- "HERCULES") npe_node_max=80;;
+ "WCOSS2") max_tasks_per_node=128;;
+ "HERA") max_tasks_per_node=40;;
+ "ORION") max_tasks_per_node=40;;
+ "HERCULES") max_tasks_per_node=80;;
"JET")
case ${PARTITION_BATCH} in
- "xjet") npe_node_max=24;;
- "vjet" | "sjet") npe_node_max=16;;
- "kjet") npe_node_max=40;;
+ "xjet") max_tasks_per_node=24;;
+ "vjet" | "sjet") max_tasks_per_node=16;;
+ "kjet") max_tasks_per_node=40;;
*)
echo "FATAL ERROR: Unknown partition ${PARTITION_BATCH} specified for ${machine}"
exit 3
@@ -32,8 +32,8 @@ case ${machine} in
;;
"S4")
case ${PARTITION_BATCH} in
- "s4") npe_node_max=32;;
- "ivy") npe_node_max=20;;
+ "s4") max_tasks_per_node=32;;
+ "ivy") max_tasks_per_node=20;;
*)
echo "FATAL ERROR: Unknown partition ${PARTITION_BATCH} specified for ${machine}"
exit 3
@@ -41,165 +41,149 @@ case ${machine} in
;;
"AWSPW")
export PARTITION_BATCH="compute"
- npe_node_max=40
+ max_tasks_per_node=40
;;
*)
echo "FATAL ERROR: Unknown machine encountered by ${BASH_SOURCE[0]}"
exit 2
;;
esac
-export npe_node_max
+export max_tasks_per_node
case ${step} in
"stage_ic")
- export wtime_stage_ic="00:15:00"
- export npe_stage_ic=1
- export npe_node_stage_ic=1
- export nth_stage_ic=1
+ export walltime="00:15:00"
+ export ntasks=1
+ export tasks_per_node=1
+ export threads_per_task=1
export is_exclusive=True
;;
"waveinit")
- export wtime_waveinit="00:10:00"
- export npe_waveinit=12
- export nth_waveinit=1
- export npe_node_waveinit=$(( npe_node_max / nth_waveinit ))
- export NTASKS=${npe_waveinit}
- export memory_waveinit="2GB"
+ export walltime="00:10:00"
+ export ntasks=12
+ export threads_per_task=1
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ export NTASKS=${ntasks}
+ export memory="2GB"
;;
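The tasks_per_node derivation simply divides a node's task capacity by the per-task thread count; e.g., with invented numbers:

    max_tasks_per_node=128 threads_per_task=4
    tasks_per_node=$(( max_tasks_per_node / threads_per_task ))  # 32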
"prep_emissions")
- export wtime_prep_emissions="00:10:00"
- export npe_prep_emissions=1
- export nth_prep_emissions=1
- export npe_node_prep_emissions=$(( npe_node_max / nth_prep_emissions ))
- export memory_prep_emissions="1GB"
+ export walltime="00:10:00"
+ export ntasks=1
+ export threads_per_task=1
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ export memory="1GB"
;;
"fcst" | "efcs")
export is_exclusive=True
- _CDUMP_LIST=${CDUMP:-"gdas gfs"}
-
- # During workflow creation, we need resources for all CDUMPs and CDUMP is undefined
- for _CDUMP in ${_CDUMP_LIST}; do
- if [[ "${_CDUMP}" =~ "gfs" ]]; then
- export layout_x=${layout_x_gfs}
- export layout_y=${layout_y_gfs}
- export WRITE_GROUP=${WRITE_GROUP_GFS}
- export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GFS}
- ntasks_fv3=${ntasks_fv3_gfs}
- ntasks_quilt=${ntasks_quilt_gfs}
- nthreads_fv3=${nthreads_fv3_gfs}
- nthreads_ufs=${nthreads_ufs_gfs}
- fi
-
- # Determine if using ESMF-managed threading or traditional threading
- # If using traditional threading, set them to 1
- if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then
- export UFS_THREADS=1
- else # traditional threading
- export UFS_THREADS=${nthreads_ufs:-1}
- nthreads_fv3=1
- nthreads_mediator=1
- [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1
- [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1
- [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1
- fi
-
- # PETS for the atmosphere dycore
- (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
- echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"
-
- # PETS for quilting
- if [[ "${QUILTING:-}" == ".true." ]]; then
- (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
- (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
- export WRTTASK_PER_GROUP
- else
- QUILTPETS=0
- fi
- echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
-
- # Total PETS for the atmosphere component
- ATMTHREADS=${nthreads_fv3}
- (( ATMPETS = FV3PETS + QUILTPETS ))
- export ATMPETS ATMTHREADS
- echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
-
- # Total PETS for the coupled model (starting w/ the atmosphere)
- NTASKS_TOT=${ATMPETS}
-
- # The mediator PETS can overlap with other components, usually it lands on the atmosphere tasks.
- # However, it is suggested limiting mediator PETS to 300, as it may cause the slow performance.
- # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
- # TODO: Update reference when moved to ufs-weather-model RTD
- MEDTHREADS=${nthreads_mediator:-1}
- MEDPETS=${MEDPETS:-${FV3PETS}}
- (( "${MEDPETS}" > 300 )) && MEDPETS=300
- export MEDPETS MEDTHREADS
- echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
-
- CHMPETS=0; CHMTHREADS=0
- if [[ "${DO_AERO}" == "YES" ]]; then
- # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
- (( CHMTHREADS = ATMTHREADS ))
- (( CHMPETS = FV3PETS ))
- # Do not add to NTASKS_TOT
- echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
- fi
- export CHMPETS CHMTHREADS
-
- WAVPETS=0; WAVTHREADS=0
- if [[ "${DO_WAVE}" == "YES" ]]; then
- (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
- (( WAVTHREADS = nthreads_ww3 ))
- echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
- (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
- fi
- export WAVPETS WAVTHREADS
-
- OCNPETS=0; OCNTHREADS=0
- if [[ "${DO_OCN}" == "YES" ]]; then
- (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
- (( OCNTHREADS = nthreads_mom6 ))
- echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
- (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
- fi
- export OCNPETS OCNTHREADS
-
- ICEPETS=0; ICETHREADS=0
- if [[ "${DO_ICE}" == "YES" ]]; then
- (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
- (( ICETHREADS = nthreads_cice6 ))
- echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
- (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
- fi
- export ICEPETS ICETHREADS
-
- echo "Total PETS for ${_CDUMP} = ${NTASKS_TOT}"
-
- if [[ "${_CDUMP}" =~ "gfs" ]]; then
- declare -x "npe_${step}_gfs"="${NTASKS_TOT}"
- declare -x "nth_${step}_gfs"="${UFS_THREADS}"
- declare -x "npe_node_${step}_gfs"="${npe_node_max}"
- else
- declare -x "npe_${step}"="${NTASKS_TOT}"
- declare -x "nth_${step}"="${UFS_THREADS}"
- declare -x "npe_node_${step}"="${npe_node_max}"
- fi
-
- done
+ export layout_x=${layout_x_gfs}
+ export layout_y=${layout_y_gfs}
+ export WRITE_GROUP=${WRITE_GROUP_GFS}
+ export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GFS}
+ ntasks_fv3=${ntasks_fv3_gfs}
+ ntasks_quilt=${ntasks_quilt_gfs}
+ nthreads_fv3=${nthreads_fv3_gfs}
+ nthreads_ufs=${nthreads_ufs_gfs}
+
+ # Determine if using ESMF-managed threading or traditional threading
+ # If using traditional threading, set them to 1
+ if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then
+ export UFS_THREADS=1
+ else # traditional threading
+ export UFS_THREADS=${nthreads_ufs:-1}
+ nthreads_fv3=1
+ nthreads_mediator=1
+ [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1
+ [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1
+ [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1
+ fi
+
+ # PETS for the atmosphere dycore
+ (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
+ echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"
+
+ # PETS for quilting
+ if [[ "${QUILTING:-}" == ".true." ]]; then
+ (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
+ (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
+ export WRTTASK_PER_GROUP
+ else
+ QUILTPETS=0
+ fi
+ echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
+
+ # Total PETS for the atmosphere component
+ ATMTHREADS=${nthreads_fv3}
+ (( ATMPETS = FV3PETS + QUILTPETS ))
+ export ATMPETS ATMTHREADS
+ echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
+
+ # Total PETS for the coupled model (starting w/ the atmosphere)
+ NTASKS_TOT=${ATMPETS}
+
+ # The mediator PETS can overlap with other components; usually they land on the atmosphere tasks.
+ # However, it is suggested to limit mediator PETS to 300, as larger counts may cause slow performance.
+ # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
+ # TODO: Update reference when moved to ufs-weather-model RTD
+ MEDTHREADS=${nthreads_mediator:-1}
+ MEDPETS=${MEDPETS:-${FV3PETS}}
+ (( "${MEDPETS}" > 300 )) && MEDPETS=300
+ export MEDPETS MEDTHREADS
+ echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
+
+ CHMPETS=0; CHMTHREADS=0
+ if [[ "${DO_AERO}" == "YES" ]]; then
+ # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
+ (( CHMTHREADS = ATMTHREADS ))
+ (( CHMPETS = FV3PETS ))
+ # Do not add to NTASKS_TOT
+ echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
+ fi
+ export CHMPETS CHMTHREADS
+
+ WAVPETS=0; WAVTHREADS=0
+ if [[ "${DO_WAVE}" == "YES" ]]; then
+ (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
+ (( WAVTHREADS = nthreads_ww3 ))
+ echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
+ (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
+ fi
+ export WAVPETS WAVTHREADS
+
+ OCNPETS=0; OCNTHREADS=0
+ if [[ "${DO_OCN}" == "YES" ]]; then
+ (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
+ (( OCNTHREADS = nthreads_mom6 ))
+ echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
+ (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
+ fi
+ export OCNPETS OCNTHREADS
+
+ ICEPETS=0; ICETHREADS=0
+ if [[ "${DO_ICE}" == "YES" ]]; then
+ (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
+ (( ICETHREADS = nthreads_cice6 ))
+ echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
+ (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
+ fi
+ export ICEPETS ICETHREADS
+
+ echo "Total PETS = ${NTASKS_TOT}"
+
+ declare -x "ntasks"="${NTASKS_TOT}"
+ declare -x "threads_per_task"="${UFS_THREADS}"
+ declare -x "tasks_per_node"="${max_tasks_per_node}"
case "${CASE}" in
"C48" | "C96" | "C192")
- declare -x "wtime_${step}"="03:00:00"
- declare -x "wtime_${step}_gfs"="03:00:00"
+ declare -x "walltime"="03:00:00"
;;
"C384" | "C768" | "C1152")
- declare -x "wtime_${step}"="06:00:00"
- declare -x "wtime_${step}_gfs"="06:00:00"
+ declare -x "walltime"="06:00:00"
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
@@ -207,101 +191,92 @@ case ${step} in
;;
esac
- unset _CDUMP _CDUMP_LIST
unset NTASKS_TOT
;;
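To get a feel for the PETS bookkeeping above, a toy calculation with invented task counts:

    ntasks_fv3=96 nthreads_fv3=2 ntasks_quilt=8
    (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))      # 192
    (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))  # 16
    (( ATMPETS = FV3PETS + QUILTPETS ))            # 208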
"atmos_products")
- export wtime_atmos_products="00:15:00"
- export npe_atmos_products=24
- export nth_atmos_products=1
- export npe_node_atmos_products="${npe_atmos_products}"
- export wtime_atmos_products_gfs="${wtime_atmos_products}"
- export npe_atmos_products_gfs="${npe_atmos_products}"
- export nth_atmos_products_gfs="${nth_atmos_products}"
- export npe_node_atmos_products_gfs="${npe_node_atmos_products}"
+ export walltime="00:15:00"
+ export ntasks=24
+ export threads_per_task=1
+ export tasks_per_node="${ntasks}"
export is_exclusive=True
;;
"atmos_ensstat")
- export wtime_atmos_ensstat="00:30:00"
- export npe_atmos_ensstat=6
- export nth_atmos_ensstat=1
- export npe_node_atmos_ensstat="${npe_atmos_ensstat}"
- export wtime_atmos_ensstat_gfs="${wtime_atmos_ensstat}"
- export npe_atmos_ensstat_gfs="${npe_atmos_ensstat}"
- export nth_atmos_ensstat_gfs="${nth_atmos_ensstat}"
- export npe_node_atmos_ensstat_gfs="${npe_node_atmos_ensstat}"
+ export walltime="00:30:00"
+ export ntasks=6
+ export threads_per_task=1
+ export tasks_per_node="${ntasks}"
export is_exclusive=True
;;
"oceanice_products")
- export wtime_oceanice_products="00:15:00"
- export npe_oceanice_products=1
- export npe_node_oceanice_products=1
- export nth_oceanice_products=1
- export memory_oceanice_products="96GB"
+ export walltime="00:15:00"
+ export ntasks=1
+ export tasks_per_node=1
+ export threads_per_task=1
+ export memory="96GB"
;;
"wavepostsbs")
- export wtime_wavepostsbs="03:00:00"
- export npe_wavepostsbs=1
- export nth_wavepostsbs=1
- export npe_node_wavepostsbs=$(( npe_node_max / nth_wavepostsbs ))
- export NTASKS=${npe_wavepostsbs}
- export memory_wavepostsbs="10GB"
+ export walltime="03:00:00"
+ export ntasks=1
+ export threads_per_task=1
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ export NTASKS=${ntasks}
+ export memory="10GB"
;;
# The wavepost*pnt* jobs are I/O heavy and do not scale well to large nodes.
# Limit the number of tasks/node to 40.
"wavepostbndpnt")
- export wtime_wavepostbndpnt="03:00:00"
- export npe_wavepostbndpnt=240
- export nth_wavepostbndpnt=1
- export npe_node_wavepostbndpnt=$(( npe_node_max / nth_wavepostbndpnt ))
+ export walltime="03:00:00"
+ export ntasks=240
+ export threads_per_task=1
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
- if [[ ${npe_node_wavepostbndpnt} -gt 40 ]]; then
- export npe_node_wavepostbndpnt=40
+ if [[ ${tasks_per_node} -gt 40 ]]; then
+ export tasks_per_node=40
export is_exclusive=False
fi
- export NTASKS=${npe_wavepostbndpnt}
+ export NTASKS=${ntasks}
;;
"wavepostbndpntbll")
- export wtime_wavepostbndpntbll="01:00:00"
- export npe_wavepostbndpntbll=448
- export nth_wavepostbndpntbll=1
- export npe_node_wavepostbndpntbll=$(( npe_node_max / nth_wavepostbndpntbll ))
+ export walltime="01:00:00"
+ export ntasks=448
+ export threads_per_task=1
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
- if [[ ${npe_node_wavepostbndpntbll} -gt 40 ]]; then
- export npe_node_wavepostbndpntbll=40
+ if [[ ${tasks_per_node} -gt 40 ]]; then
+ export tasks_per_node=40
export is_exclusive=False
fi
- export NTASKS=${npe_wavepostbndpntbll}
+ export NTASKS=${ntasks}
;;
"wavepostpnt")
- export wtime_wavepostpnt="04:00:00"
- export npe_wavepostpnt=200
- export nth_wavepostpnt=1
- export npe_node_wavepostpnt=$(( npe_node_max / nth_wavepostpnt ))
+ export walltime="04:00:00"
+ export ntasks=200
+ export threads_per_task=1
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
- if [[ ${npe_node_wavepostpnt} -gt 40 ]]; then
- export npe_node_wavepostpnt=40
+ if [[ ${tasks_per_node} -gt 40 ]]; then
+ export tasks_per_node=40
export is_exclusive=False
fi
- export NTASKS=${npe_wavepostpnt}
+ export NTASKS=${ntasks}
;;
"extractvars")
- export wtime_extractvars="00:30:00"
- export npe_extractvars=1
- export nth_extractvars=1
- export npe_node_extractvars="${npe_extractvars}"
- export wtime_extractvars_gfs="${wtime_extractvars}"
- export npe_extractvars_gfs="${npe_extractvars}"
- export nth_extractvars_gfs="${nth_extractvars}"
- export npe_node_extractvars_gfs="${npe_node_extractvars}"
+ export walltime_gefs="00:30:00"
+ export ntasks_gefs=1
+ export threads_per_task_gefs=1
+ export tasks_per_node_gefs="${ntasks_gefs}"
+ export walltime_gfs="${walltime_gefs}"
+ export ntasks_gfs="${ntasks_gefs}"
+ export threads_per_task_gfs="${threads_per_task_gefs}"
+ export tasks_per_node_gfs="${tasks_per_node_gefs}"
export is_exclusive=False
;;
@@ -312,4 +287,19 @@ case ${step} in
esac
+# Get machine-specific resources, overriding/extending the above assignments
+if [[ -f "${EXPDIR}/config.resources.${machine}" ]]; then
+ source "${EXPDIR}/config.resources.${machine}"
+fi
+
+# Check for RUN-specific variables and export them
+for resource_var in threads_per_task ntasks tasks_per_node NTASKS memory walltime; do
+ run_resource_var="${resource_var}_${RUN}"
+ if [[ -n "${!run_resource_var+0}" ]]; then
+ declare -x "${resource_var}"="${!run_resource_var}"
+ elif [[ -n "${!resource_var+0}" ]]; then
+ export "${resource_var?}"
+ fi
+done
+
echo "END: config.resources"
diff --git a/parm/config/gefs/config.wave b/parm/config/gefs/config.wave
index bef3437adf..6a1529274a 100644
--- a/parm/config/gefs/config.wave
+++ b/parm/config/gefs/config.wave
@@ -10,10 +10,10 @@ echo "BEGIN: config.wave"
# Some others are also used across the workflow in wave component scripts
# General runtime labels
-export CDUMPwave="${RUN}wave"
+export RUNwave="${RUN}wave"
# In GFS/GDAS, restart files are generated/read from gdas runs
-export CDUMPRSTwave="gdas"
+export RUNRSTwave="gdas"
#grid dependent variable defaults
export waveGRDN='1' # grid number for ww3_multi
diff --git a/parm/config/gfs/config.anal b/parm/config/gfs/config.anal
index 2c55d85ff4..27ff8742e4 100644
--- a/parm/config/gfs/config.anal
+++ b/parm/config/gfs/config.anal
@@ -12,16 +12,13 @@ if [[ ${DONST} = "YES" ]]; then
. ${EXPDIR}/config.nsst
fi
-if [[ "${CDUMP}" = "gfs" ]] ; then
+if [[ "${RUN}" == "gfs" ]] ; then
export USE_RADSTAT="NO" # This can only be used when bias correction is non-zero.
export GENDIAG="NO"
export SETUP='diag_rad=.false.,diag_pcp=.false.,diag_conv=.false.,diag_ozone=.false.,write_diag(3)=.false.,niter(2)=100,'
export DIAG_TARBALL="YES"
fi
-npe_var="npe_anal_${RUN/enkf}"
-export npe_gsi="${!npe_var}"
-
# Set parameters specific to L127
if [[ ${LEVS} = "128" ]]; then
export GRIDOPTS="nlayers(63)=1,nlayers(64)=1,"
diff --git a/parm/config/gfs/config.base b/parm/config/gfs/config.base
index fca3fd99ed..d45d91961d 100644
--- a/parm/config/gfs/config.base
+++ b/parm/config/gfs/config.base
@@ -13,7 +13,6 @@ export RUN_ENVIR="emc"
# Account, queue, etc.
export ACCOUNT="@ACCOUNT@"
-export ACCOUNT_SERVICE="@ACCOUNT_SERVICE@"
export QUEUE="@QUEUE@"
export QUEUE_SERVICE="@QUEUE_SERVICE@"
export PARTITION_BATCH="@PARTITION_BATCH@"
@@ -135,10 +134,7 @@ export ATARDIR="@ATARDIR@"
# Commonly defined parameters in JJOBS
export envir=${envir:-"prod"}
export NET="gfs" # NET is defined in the job-card (ecf)
-export RUN=${RUN:-${CDUMP:-"gfs"}} # RUN is defined in the job-card (ecf); CDUMP is used at EMC as a RUN proxy
-# TODO: determine where is RUN actually used in the workflow other than here
-# TODO: is it possible to replace all instances of ${CDUMP} to ${RUN} to be
-# consistent w/ EE2?
+export RUN=${RUN:-"gfs"} # RUN is defined in the job-card (ecf)
# Get all the COM path templates
source "${EXPDIR}/config.com"
@@ -179,9 +175,9 @@ export DO_OCN="NO"
export DO_ICE="NO"
export DO_AERO="NO"
export DO_PREP_OBS_AERO="NO"
-export AERO_FCST_CDUMP="" # When to run aerosol forecast: gdas, gfs, or both
-export AERO_ANL_CDUMP="" # When to run aerosol analysis: gdas, gfs, or both
-export WAVE_CDUMP="" # When to include wave suite: gdas, gfs, or both
+export AERO_FCST_RUN="" # When to run aerosol forecast: gdas, gfs, or both
+export AERO_ANL_RUN="" # When to run aerosol analysis: gdas, gfs, or both
+export WAVE_RUN="" # When to include wave suite: gdas, gfs, or both
export DOBNDPNT_WAVE="NO"
export DOIBP_WAV="NO" # Option to create point outputs from input boundary points
export FRAC_GRID=".true."
@@ -232,13 +228,13 @@ case "${APP}" in
;;
ATMA)
export DO_AERO="YES"
- export AERO_ANL_CDUMP="both"
- export AERO_FCST_CDUMP="gdas"
+ export AERO_ANL_RUN="both"
+ export AERO_FCST_RUN="gdas"
;;
ATMW)
export DO_COUPLED="YES"
export DO_WAVE="YES"
- export WAVE_CDUMP="both"
+ export WAVE_RUN="both"
;;
NG-GODAS)
export DO_ATM="NO"
@@ -252,13 +248,13 @@ case "${APP}" in
if [[ "${APP}" =~ A$ ]]; then
export DO_AERO="YES"
- export AERO_ANL_CDUMP="both"
- export AERO_FCST_CDUMP="gdas"
+ export AERO_ANL_RUN="both"
+ export AERO_FCST_RUN="gdas"
fi
if [[ "${APP}" =~ ^S2SW ]]; then
export DO_WAVE="YES"
- export WAVE_CDUMP="both"
+ export WAVE_RUN="both"
fi
;;
*)
@@ -268,10 +264,10 @@ case "${APP}" in
esac
# Surface cycle update frequency
-if [[ "${CDUMP}" =~ "gdas" ]] ; then
+if [[ "${RUN}" =~ "gdas" ]] ; then
export FHCYC=1
export FTSFS=10
-elif [[ "${CDUMP}" =~ "gfs" ]] ; then
+elif [[ "${RUN}" =~ "gfs" ]] ; then
export FHCYC=24
fi
diff --git a/parm/config/gfs/config.eobs b/parm/config/gfs/config.eobs
index 21f982addc..7b7823e764 100644
--- a/parm/config/gfs/config.eobs
+++ b/parm/config/gfs/config.eobs
@@ -11,12 +11,11 @@ echo "BEGIN: config.eobs"
# Number of enkf members per innovation job
export NMEM_EOMGGRP=8
export RERUN_EOMGGRP="YES"
-export npe_gsi=$npe_eobs
# GSI namelist options related to observer for EnKF
export OBSINPUT_INVOBS="dmesh(1)=225.0,dmesh(2)=225.0,dmesh(3)=225.0,dmesh(4)=100.0"
export OBSQC_INVOBS="tcp_width=60.0,tcp_ermin=2.0,tcp_ermax=12.0"
-if [ $LEVS = "128" ]; then
+if (( LEVS == 128 )); then
export GRIDOPTS_INVOBS="nlayers(63)=1,nlayers(64)=1,"
export SETUP_INVOBS="gpstop=55,nsig_ext=56,"
fi
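
The test above also switches from a `[`-style string comparison with an unquoted expansion to an arithmetic evaluation. A short contrast (the LEVS value is illustrative):

    #!/usr/bin/env bash
    LEVS=128
    if [ "${LEVS}" = "128" ]; then echo "string match"; fi  # old form: character-by-character
    if (( LEVS == 128 )); then echo "numeric match"; fi     # new form: integer compare;
                                                            # an unset LEVS evaluates as 0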
diff --git a/parm/config/gfs/config.eupd b/parm/config/gfs/config.eupd
index 1ac90d2b75..2ff48240ae 100644
--- a/parm/config/gfs/config.eupd
+++ b/parm/config/gfs/config.eupd
@@ -8,7 +8,7 @@ echo "BEGIN: config.eupd"
# Get task specific resources
. $EXPDIR/config.resources eupd
-export npe_enkf=$npe_eupd
+export ntasks_enkf=${ntasks}
# Use NAM_ENKF below for serial EnKF
##export NAM_ENKF="analpertwtnh=0.9,analpertwtsh=0.9,analpertwttr=0.9"
diff --git a/parm/config/gfs/config.fcst b/parm/config/gfs/config.fcst
index 3323107ba7..4982b8f6e6 100644
--- a/parm/config/gfs/config.fcst
+++ b/parm/config/gfs/config.fcst
@@ -9,13 +9,13 @@ export USE_ESMF_THREADING="YES" # Toggle to use ESMF-managed threading or tradi
export COPY_FINAL_RESTARTS="NO" # Toggle to copy restarts from the end of GFS/GEFS Run (GDAS is handled separately)
# Turn off waves if not used for this RUN
-case ${WAVE_CDUMP} in
+case ${WAVE_RUN} in
both | "${RUN/enkf}" ) ;; # Don't change
*) DO_WAVE="NO" ;; # Turn waves off
esac
# Turn off aerosols if not used for this RUN
-case ${AERO_FCST_CDUMP} in
+case ${AERO_FCST_RUN} in
both | "${RUN/enkf}" ) ;; # Don't change
*) DO_AERO="NO" ;; # Turn aerosols off
esac
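
Both case statements above match `"${RUN/enkf}"`, a parameter substitution that deletes the literal `enkf` from RUN so an ensemble run inherits the setting of its deterministic counterpart. A minimal sketch, assuming RUN=enkfgdas:

    #!/usr/bin/env bash
    # How the wave toggle resolves for an ensemble RUN; values are illustrative.
    RUN="enkfgdas"
    WAVE_RUN="both"
    DO_WAVE="YES"
    case ${WAVE_RUN} in
      both | "${RUN/enkf}" ) ;;   # ${RUN/enkf} -> "gdas"; "both" matches here
      *) DO_WAVE="NO" ;;          # any other value turns waves off
    esac
    echo "${DO_WAVE}"  # YES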
@@ -274,7 +274,7 @@ export FSICL="0"
export FSICS="0"
#---------------------------------------------------------------------
-if [[ "${CDUMP}" =~ "gdas" ]] ; then # GDAS cycle specific parameters
+if [[ "${RUN}" =~ "gdas" ]] ; then # GDAS cycle specific parameters
# Variables used in DA cycling
export DIAG_TABLE="${PARMgfs}/ufs/fv3/diag_table_da"
@@ -285,7 +285,7 @@ if [[ "${CDUMP}" =~ "gdas" ]] ; then # GDAS cycle specific parameters
# Turn on dry mass adjustment in GDAS
export adjust_dry_mass=".true."
-elif [[ "${CDUMP}" =~ "gfs" ]] ; then # GFS cycle specific parameters
+elif [[ "${RUN}" =~ "gfs" ]] ; then # GFS cycle specific parameters
# Write more variables to output
export DIAG_TABLE="${PARMgfs}/ufs/fv3/diag_table"
diff --git a/parm/config/gfs/config.metp b/parm/config/gfs/config.metp
index 3623ce0c6e..5cee80823e 100644
--- a/parm/config/gfs/config.metp
+++ b/parm/config/gfs/config.metp
@@ -23,7 +23,7 @@ export HOMEverif_global=${HOMEgfs}/sorc/verif-global.fd
export VERIF_GLOBALSH=${HOMEverif_global}/ush/run_verif_global_in_global_workflow.sh
## INPUT DATA SETTINGS
export model=${PSLOT}
-export model_file_format="pgbf{lead?fmt=%2H}.${CDUMP}.{init?fmt=%Y%m%d%H}.grib2"
+export model_file_format="pgbf{lead?fmt=%2H}.${RUN}.{init?fmt=%Y%m%d%H}.grib2"
export model_hpss_dir=${ATARDIR}/..
export model_dir=${ARCDIR}/..
export get_data_from_hpss="NO"
@@ -41,19 +41,19 @@ export log_MET_output_to_METplus="yes"
# GRID-TO-GRID STEP 1: gfsmetpg2g1
export g2g1_type_list="anom pres sfc"
export g2g1_anom_truth_name="self_anl"
-export g2g1_anom_truth_file_format="pgbanl.${CDUMP}.{valid?fmt=%Y%m%d%H}.grib2"
+export g2g1_anom_truth_file_format="pgbanl.${RUN}.{valid?fmt=%Y%m%d%H}.grib2"
export g2g1_anom_fhr_min=${FHMIN_GFS}
export g2g1_anom_fhr_max=${FHMAX_GFS}
export g2g1_anom_grid="G002"
export g2g1_anom_gather_by="VSDB"
export g2g1_pres_truth_name="self_anl"
-export g2g1_pres_truth_file_format="pgbanl.${CDUMP}.{valid?fmt=%Y%m%d%H}.grib2"
+export g2g1_pres_truth_file_format="pgbanl.${RUN}.{valid?fmt=%Y%m%d%H}.grib2"
export g2g1_pres_fhr_min=${FHMIN_GFS}
export g2g1_pres_fhr_max=${FHMAX_GFS}
export g2g1_pres_grid="G002"
export g2g1_pres_gather_by="VSDB"
export g2g1_sfc_truth_name="self_f00"
-export g2g1_sfc_truth_file_format="pgbf00.${CDUMP}.{valid?fmt=%Y%m%d%H}.grib2"
+export g2g1_sfc_truth_file_format="pgbf00.${RUN}.{valid?fmt=%Y%m%d%H}.grib2"
export g2g1_sfc_fhr_min=${FHMIN_GFS}
export g2g1_sfc_fhr_max=${FHMAX_GFS}
export g2g1_sfc_grid="G002"
@@ -89,7 +89,7 @@ export g2o1_mv_database_desc="Grid-to-obs METplus data for global workflow exper
export precip1_type_list="ccpa_accum24hr"
export precip1_ccpa_accum24hr_model_bucket="06"
export precip1_ccpa_accum24hr_model_var="APCP"
-export precip1_ccpa_accum24hr_model_file_format="pgbf{lead?fmt=%2H}.${CDUMP}.{init?fmt=%Y%m%d%H}.grib2"
+export precip1_ccpa_accum24hr_model_file_format="pgbf{lead?fmt=%2H}.${RUN}.{init?fmt=%Y%m%d%H}.grib2"
export precip1_ccpa_accum24hr_fhr_min=${FHMIN_GFS}
export precip1_ccpa_accum24hr_fhr_max="180"
export precip1_ccpa_accum24hr_grid="G211"
diff --git a/parm/config/gfs/config.prep b/parm/config/gfs/config.prep
index 6009280db0..e719d03d1d 100644
--- a/parm/config/gfs/config.prep
+++ b/parm/config/gfs/config.prep
@@ -15,7 +15,7 @@ export cdate10=${PDY}${cyc}
export PROCESS_TROPCY=${PROCESS_TROPCY:-NO}
export TROPCYQCRELOSH="${SCRgfs}/exglobal_atmos_tropcy_qc_reloc.sh"
-export COMINtcvital=${COMINtcvital:-${DMPDIR}/${CDUMP}.${PDY}/${cyc}/atmos}
+export COMINtcvital=${COMINtcvital:-${DMPDIR}/${RUN}.${PDY}/${cyc}/atmos}
export COMINsyn=${COMINsyn:-$(compath.py ${envir}/com/gfs/${gfs_ver})/syndat}
# Adjust observation error for GFS v16 parallels
diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources
index 2dd037d426..ebdfd5d713 100644
--- a/parm/config/gfs/config.resources
+++ b/parm/config/gfs/config.resources
@@ -1,4 +1,5 @@
#! /usr/bin/env bash
+# shellcheck disable=SC2034
########## config.resources ##########
# Set resource information for job tasks
@@ -36,49 +37,49 @@ echo "BEGIN: config.resources"
case ${machine} in
"WCOSS2")
- npe_node_max=128
+ max_tasks_per_node=128
# shellcheck disable=SC2034
mem_node_max="500GB"
;;
"HERA")
- npe_node_max=40
+ max_tasks_per_node=40
# shellcheck disable=SC2034
mem_node_max="96GB"
;;
"GAEA")
- npe_node_max=128
+ max_tasks_per_node=128
# shellcheck disable=SC2034
mem_node_max="251GB"
;;
"ORION")
- npe_node_max=40
+ max_tasks_per_node=40
# shellcheck disable=SC2034
mem_node_max="192GB"
;;
"HERCULES")
- npe_node_max=80
+ max_tasks_per_node=80
# shellcheck disable=SC2034
mem_node_max="512GB"
;;
"JET")
case ${PARTITION_BATCH} in
"xjet")
- npe_node_max=24
+ max_tasks_per_node=24
# shellcheck disable=SC2034
mem_node_max="61GB"
;;
"vjet")
- npe_node_max=16
+ max_tasks_per_node=16
# shellcheck disable=SC2034
mem_node_max="61GB"
;;
"sjet")
- npe_node_max=16
+ max_tasks_per_node=16
# shellcheck disable=SC2034
mem_node_max="29GB"
;;
"kjet")
- npe_node_max=40
+ max_tasks_per_node=40
# shellcheck disable=SC2034
mem_node_max="88GB"
;;
@@ -89,12 +90,12 @@ case ${machine} in
;;
"S4")
case ${PARTITION_BATCH} in
- "s4") npe_node_max=32
+ "s4") max_tasks_per_node=32
# shellcheck disable=SC2034
mem_node_max="168GB"
;;
"ivy")
- npe_node_max=20
+ max_tasks_per_node=20
# shellcheck disable=SC2034
mem_node_max="128GB"
;;
@@ -105,13 +106,13 @@ case ${machine} in
;;
"AWSPW")
export PARTITION_BATCH="compute"
- npe_node_max=40
+ max_tasks_per_node=40
# TODO Supply a max mem/node value for AWS
# shellcheck disable=SC2034
mem_node_max=""
;;
"CONTAINER")
- npe_node_max=1
+ max_tasks_per_node=1
# TODO Supply a max mem/node value for a container
# shellcheck disable=SC2034
mem_node_max=""
@@ -122,153 +123,139 @@ case ${machine} in
;;
esac
-export npe_node_max
+export max_tasks_per_node
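
Almost every step below derives its node packing from the machine ceiling exported here: tasks_per_node = max_tasks_per_node / threads_per_task, i.e. as many MPI ranks as the thread count leaves room for. A worked example using values that appear in this file (HERA, and the C768 anal step):

    #!/usr/bin/env bash
    max_tasks_per_node=40   # HERA
    threads_per_task=5      # anal at C768
    tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
    echo "${tasks_per_node} ranks x ${threads_per_task} threads = $(( tasks_per_node * threads_per_task )) cores/node"
    # 8 ranks x 5 threads = 40 cores/node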
case ${step} in
"prep")
- export wtime_prep='00:30:00'
- export npe_prep=4
- export npe_node_prep=2
- export nth_prep=1
- export memory_prep="40GB"
+ walltime='00:30:00'
+ ntasks=4
+ tasks_per_node=2
+ threads_per_task=1
+ memory="40GB"
;;
"prepsnowobs")
- export wtime_prepsnowobs="00:05:00"
- export npe_prepsnowobs=1
- export nth_prepsnowobs=1
- export npe_node_prepsnowobs=1
+ walltime="00:05:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
;;
"prepatmiodaobs")
- export wtime_prepatmiodaobs="00:30:00"
- export npe_prepatmiodaobs=1
- export nth_prepatmiodaobs=1
- export npe_node_prepatmiodaobs=$(( npe_node_max / nth_prepatmiodaobs ))
+ walltime="00:30:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"aerosol_init")
- export wtime_aerosol_init="00:05:00"
- export npe_aerosol_init=1
- export nth_aerosol_init=1
- export npe_node_aerosol_init=$(( npe_node_max / nth_aerosol_init ))
- export NTASKS=${npe_aerosol_init}
- export memory_aerosol_init="6GB"
+ walltime="00:05:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ NTASKS=${ntasks}
+ memory="6GB"
;;
"waveinit")
- export wtime_waveinit="00:10:00"
- export npe_waveinit=12
- export nth_waveinit=1
- export npe_node_waveinit=$(( npe_node_max / nth_waveinit ))
- export NTASKS=${npe_waveinit}
- export memory_waveinit="2GB"
+ walltime="00:10:00"
+ ntasks=12
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ NTASKS=${ntasks}
+ memory="2GB"
;;
"waveprep")
- export wtime_waveprep="00:10:00"
- export npe_waveprep_gdas=5
- export npe_waveprep_gfs=65
- export nth_waveprep_gdas=1
- export nth_waveprep_gfs=1
- export npe_node_waveprep_gdas=$(( npe_node_max / nth_waveprep_gdas ))
- export npe_node_waveprep_gfs=$(( npe_node_max / nth_waveprep_gfs ))
- export NTASKS_gdas=${npe_waveprep_gdas}
- export NTASKS_gfs=${npe_waveprep_gfs}
- export memory_waveprep_gdas="100GB"
- export memory_waveprep_gfs="150GB"
-
- var_npe_node="npe_node_waveprep_${RUN}"
- var_nth="nth_waveprep_${RUN}"
- var_npe="npe_waveprep_${RUN}"
- var_NTASKS="ntasks_${RUN}"
- # RUN is set to a single value at setup time, so these won't be found
- # TODO rework setup_xml.py to initialize RUN to the applicable option
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_waveprep"="${!var_npe_node}" \
- "nth_waveprep"="${!var_nth}" \
- "npe_waveprep"="${!var_npe}" \
- "NTASKS"="${!var_NTASKS}"
- fi
+ walltime="00:10:00"
+ ntasks_gdas=5
+ ntasks_gfs=65
+ threads_per_task=1
+
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ NTASKS_gdas=${ntasks_gdas}
+ NTASKS_gfs=${ntasks_gfs}
+ memory_gdas="100GB"
+ memory_gfs="150GB"
;;
"wavepostsbs")
- export wtime_wavepostsbs_gdas="00:20:00"
- export wtime_wavepostsbs_gfs="03:00:00"
- export npe_wavepostsbs=8
- export nth_wavepostsbs=1
- export npe_node_wavepostsbs=$(( npe_node_max / nth_wavepostsbs ))
- export NTASKS=${npe_wavepostsbs}
- export memory_wavepostsbs_gdas="10GB"
- export memory_wavepostsbs_gfs="10GB"
+ walltime_gdas="00:20:00"
+ walltime_gfs="03:00:00"
+ ntasks=8
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ NTASKS=${ntasks}
+ memory_gdas="10GB"
+ memory_gfs="10GB"
;;
# The wavepost*pnt* jobs are I/O heavy and do not scale well to large nodes.
# Limit the number of tasks/node to 40.
"wavepostbndpnt")
- export wtime_wavepostbndpnt="03:00:00"
- export npe_wavepostbndpnt=240
- export nth_wavepostbndpnt=1
- export npe_node_wavepostbndpnt=$(( npe_node_max / nth_wavepostbndpnt ))
+ walltime="03:00:00"
+ ntasks=240
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
- if [[ ${npe_node_wavepostbndpnt} -gt 40 ]]; then
- export npe_node_wavepostbndpnt=40
+ if [[ ${tasks_per_node} -gt 40 ]]; then
+ tasks_per_node=40
export is_exclusive=False
fi
- export NTASKS=${npe_wavepostbndpnt}
+ NTASKS=${ntasks}
;;
"wavepostbndpntbll")
- export wtime_wavepostbndpntbll="01:00:00"
- export npe_wavepostbndpntbll=448
- export nth_wavepostbndpntbll=1
- export npe_node_wavepostbndpntbll=$(( npe_node_max / nth_wavepostbndpntbll ))
+ walltime="01:00:00"
+ ntasks=448
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
- if [[ ${npe_node_wavepostbndpntbll} -gt 40 ]]; then
- export npe_node_wavepostbndpntbll=40
+ if [[ ${tasks_per_node} -gt 40 ]]; then
+ tasks_per_node=40
export is_exclusive=False
fi
- export NTASKS=${npe_wavepostbndpntbll}
+ NTASKS=${ntasks}
;;
"wavepostpnt")
- export wtime_wavepostpnt="04:00:00"
- export npe_wavepostpnt=200
- export nth_wavepostpnt=1
- export npe_node_wavepostpnt=$(( npe_node_max / nth_wavepostpnt ))
+ walltime="04:00:00"
+ ntasks=200
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
- if [[ ${npe_node_wavepostpnt} -gt 40 ]]; then
- export npe_node_wavepostpnt=40
+ if [[ ${tasks_per_node} -gt 40 ]]; then
+ tasks_per_node=40
export is_exclusive=False
fi
- export NTASKS=${npe_wavepostpnt}
+ NTASKS=${ntasks}
;;
"wavegempak")
- export wtime_wavegempak="02:00:00"
- export npe_wavegempak=1
- export nth_wavegempak=1
- export npe_node_wavegempak=$(( npe_node_max / nth_wavegempak ))
- export NTASKS=${npe_wavegempak}
- export memory_wavegempak="1GB"
+ walltime="02:00:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ NTASKS=${ntasks}
+ memory="1GB"
;;
"waveawipsbulls")
- export wtime_waveawipsbulls="00:20:00"
- export npe_waveawipsbulls=1
- export nth_waveawipsbulls=1
- export npe_node_waveawipsbulls=$(( npe_node_max / nth_waveawipsbulls ))
- export NTASKS=${npe_waveawipsbulls}
+ walltime="00:20:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ NTASKS=${ntasks}
export is_exclusive=True
;;
"waveawipsgridded")
- export wtime_waveawipsgridded="02:00:00"
- export npe_waveawipsgridded=1
- export nth_waveawipsgridded=1
- export npe_node_waveawipsgridded=$(( npe_node_max / nth_waveawipsgridded ))
- export NTASKS=${npe_waveawipsgridded}
- export memory_waveawipsgridded_gfs="1GB"
+ walltime="02:00:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ NTASKS=${ntasks}
+ memory_gfs="1GB"
;;
"atmanlinit")
@@ -278,67 +265,48 @@ case ${step} in
export layout_gsib_x=$(( layout_x * 3 ))
export layout_gsib_y=$(( layout_y * 2 ))
- export wtime_atmanlinit="00:10:00"
- export npe_atmanlinit=1
- export nth_atmanlinit=1
- export npe_node_atmanlinit=$(( npe_node_max / nth_atmanlinit ))
- export npe_node_atmanlinit
- export memory_atmanlinit="3072M"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="3072M"
;;
"atmanlvar")
export layout_x=${layout_x_atmanl}
export layout_y=${layout_y_atmanl}
- export wtime_atmanlvar="00:30:00"
- export npe_atmanlvar_gdas=$(( layout_x * layout_y * 6 ))
- export npe_atmanlvar_gfs=$(( layout_x * layout_y * 6 ))
- export nth_atmanlvar_gdas=1
- export nth_atmanlvar_gfs=${nth_atmanlvar_gdas}
- export npe_node_atmanlvar_gdas=$(( npe_node_max / nth_atmanlvar_gdas ))
- export npe_node_atmanlvar_gfs=$(( npe_node_max / nth_atmanlvar_gfs ))
- export memory_atmanlvar="96GB"
+ walltime="00:30:00"
+ ntasks_gdas=$(( layout_x * layout_y * 6 ))
+ ntasks_gfs=$(( layout_x * layout_y * 6 ))
+ threads_per_task_gdas=1
+ threads_per_task_gfs=${threads_per_task_gdas}
+ tasks_per_node_gdas=$(( max_tasks_per_node / threads_per_task_gdas ))
+ tasks_per_node_gfs=$(( max_tasks_per_node / threads_per_task_gfs ))
+ memory="96GB"
export is_exclusive=True
-
- var_npe_node="npe_node_atmanlvar_${RUN}"
- var_nth="nth_atmanlvar_${RUN}"
- var_npe="npe_atmanlvar_${RUN}"
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_atmanlvar"="${!var_npe_node}" \
- "nth_atmanlvar"="${!var_nth}" \
- "npe_atmanlvar"="${!var_npe}"
- fi
;;
"atmanlfv3inc")
export layout_x=${layout_x_atmanl}
export layout_y=${layout_y_atmanl}
- export wtime_atmanlfv3inc="00:30:00"
- export npe_atmanlfv3inc_gdas=$(( layout_x * layout_y * 6 ))
- export npe_atmanlfv3inc_gfs=$(( layout_x * layout_y * 6 ))
- export nth_atmanlfv3inc_gdas=1
- export nth_atmanlfv3inc_gfs=${nth_atmanlfv3inc_gdas}
- export npe_node_atmanlfv3inc_gdas=$(( npe_node_max / nth_atmanlfv3inc_gdas ))
- export npe_node_atmanlfv3inc_gfs=$(( npe_node_max / nth_atmanlfv3inc_gfs ))
- export memory_atmanlfv3inc="96GB"
+ walltime="00:30:00"
+ ntasks_gdas=$(( layout_x * layout_y * 6 ))
+ ntasks_gfs=$(( layout_x * layout_y * 6 ))
+ threads_per_task_gdas=1
+ threads_per_task_gfs=${threads_per_task_gdas}
+ tasks_per_node_gdas=$(( max_tasks_per_node / threads_per_task_gdas ))
+ tasks_per_node_gfs=$(( max_tasks_per_node / threads_per_task_gfs ))
+ memory="96GB"
export is_exclusive=True
-
- var_npe_node="npe_node_atmanlfv3inc_${RUN}"
- var_nth="nth_atmanlfv3inc_${RUN}"
- var_npe="npe_atmanlfv3inc_${RUN}"
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_atmanlfv3inc"="${!var_npe_node}" \
- "nth_atmanlfv3inc"="${!var_nth}" \
- "npe_atmanlfv3inc"="${!var_npe}"
- fi
;;
"atmanlfinal")
- export wtime_atmanlfinal="00:30:00"
- export npe_atmanlfinal=${npe_node_max}
- export nth_atmanlfinal=1
- export npe_node_atmanlfinal=$(( npe_node_max / nth_atmanlfinal ))
+ walltime="00:30:00"
+ ntasks=${max_tasks_per_node}
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
;;
@@ -365,18 +333,18 @@ case ${step} in
export layout_x
export layout_y
- export wtime_snowanl="00:15:00"
- export npe_snowanl=$(( layout_x * layout_y * 6 ))
- export nth_snowanl=1
- export npe_node_snowanl=$(( npe_node_max / nth_snowanl ))
+ walltime="00:15:00"
+ ntasks=$(( layout_x * layout_y * 6 ))
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"prepobsaero")
- export wtime_prepobsaero="00:30:00"
- export npe_prepobsaero=1
- export nth_prepobsaero=1
- export npe_node_prepobsaero=1
- export memory_prepobsaero="96GB"
+ walltime="00:30:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
+ memory="96GB"
;;
"aeroanlinit")
@@ -406,11 +374,11 @@ case ${step} in
export layout_x
export layout_y
- export wtime_aeroanlinit="00:10:00"
- export npe_aeroanlinit=1
- export nth_aeroanlinit=1
- export npe_node_aeroanlinit=$(( npe_node_max / nth_aeroanlinit ))
- export memory_aeroanlinit="3072M"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="3072M"
;;
"aeroanlrun")
@@ -440,410 +408,374 @@ case ${step} in
export layout_x
export layout_y
- export wtime_aeroanlrun="00:30:00"
- export npe_aeroanlrun_gdas=$(( layout_x * layout_y * 6 ))
- export npe_aeroanlrun_gfs=$(( layout_x * layout_y * 6 ))
- export nth_aeroanlrun_gdas=1
- export nth_aeroanlrun_gfs=1
- export npe_node_aeroanlrun_gdas=$(( npe_node_max / nth_aeroanlrun_gdas ))
- export npe_node_aeroanlrun_gfs=$(( npe_node_max / nth_aeroanlrun_gfs ))
+ walltime="00:30:00"
+ ntasks=$(( layout_x * layout_y * 6 ))
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
-
- var_npe_node="npe_node_aeroanlrun_${RUN}"
- var_nth="nth_aeroanlrun_${RUN}"
- var_npe="npe_aeroanlrun_${RUN}"
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_aeroanlrun"="${!var_npe_node}" \
- "nth_aeroanlrun"="${!var_nth}" \
- "npe_aeroanlrun"="${!var_npe}"
- fi
;;
"aeroanlfinal")
- export wtime_aeroanlfinal="00:10:00"
- export npe_aeroanlfinal=1
- export nth_aeroanlfinal=1
- export npe_node_aeroanlfinal=$(( npe_node_max / nth_aeroanlfinal ))
- export memory_aeroanlfinal="3072M"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="3072M"
;;
"ocnanalprep")
- export wtime_ocnanalprep="00:10:00"
- export npe_ocnanalprep=1
- export nth_ocnanalprep=1
- export npe_node_ocnanalprep=$(( npe_node_max / nth_ocnanalprep ))
- export memory_ocnanalprep="24GB"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="24GB"
;;
"prepoceanobs")
- export wtime_prepoceanobs="00:10:00"
- export npe_prepoceanobs=1
- export nth_prepoceanobs=1
- export npe_node_prepoceanobs=$(( npe_node_max / nth_prepoceanobs ))
- export memory_prepoceanobs="48GB"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="48GB"
;;
"marinebmat")
- npes=16
+ ntasks=16
case ${OCNRES} in
- "025") npes=480;;
- "050") npes=16;;
- "500") npes=16;;
+ "025") ntasks=480;;
+ "050") ntasks=16;;
+ "500") ntasks=16;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
- export wtime_marinebmat="00:30:00"
- export npe_marinebmat=${npes}
- export nth_marinebmat=1
+ walltime="00:30:00"
+ threads_per_task=1
export is_exclusive=True
- export npe_node_marinebmat=$(( npe_node_max / nth_marinebmat ))
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"ocnanalrun")
- npes=16
+ ntasks=16
case ${OCNRES} in
"025")
- npes=480
- memory_ocnanalrun="96GB"
+ ntasks=480
+ memory="96GB"
;;
"050")
- npes=16
- memory_ocnanalrun="96GB"
+ ntasks=16
+ memory="96GB"
;;
"500")
- npes=16
- memory_ocnanalrun="24GB"
+ ntasks=16
+ memory="24GB"
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
- export wtime_ocnanalrun="00:15:00"
- export npe_ocnanalrun=${npes}
- export nth_ocnanalrun=1
+ walltime="00:15:00"
+ threads_per_task=1
export is_exclusive=True
- export npe_node_ocnanalrun=$(( npe_node_max / nth_ocnanalrun ))
- export memory_ocnanalrun
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"ocnanalecen")
- npes=16
+ ntasks=16
case ${OCNRES} in
"025")
- npes=40
- memory_ocnanalecen="96GB"
+ ntasks=40
+ memory="96GB"
;;
"050")
- npes=16
- memory_ocnanalecen="96GB"
+ ntasks=16
+ memory="96GB"
;;
"500")
- npes=16
- memory_ocnanalecen="24GB"
+ ntasks=16
+ memory="24GB"
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
- export wtime_ocnanalecen="00:10:00"
- export npe_ocnanalecen=${npes}
- export nth_ocnanalecen=1
+ walltime="00:10:00"
+ threads_per_task=1
export is_exclusive=True
- export npe_node_ocnanalecen=$(( npe_node_max / nth_ocnanalecen ))
- export memory_ocnanalecen
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"marineanalletkf")
- npes=16
+ ntasks=16
case ${OCNRES} in
"025")
- npes=480
- memory_marineanalletkf="96GB"
+ ntasks=480
+ memory="96GB"
;;
"050")
- npes=16
- memory_marineanalletkf="96GB"
+ ntasks=16
+ memory="96GB"
;;
"500")
- npes=16
- memory_marineanalletkf="24GB"
+ ntasks=16
+ memory="24GB"
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
- export wtime_marineanalletkf="00:10:00"
- export npe_marineanalletkf=${npes}
- export nth_marineanalletkf=1
+ walltime="00:10:00"
+ threads_per_task=1
export is_exclusive=True
- export npe_node_marineanalletkf=$(( npe_node_max / nth_marineanalletkf ))
- export memory_marineanalletkf
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"ocnanalchkpt")
- export wtime_ocnanalchkpt="00:10:00"
- export npe_ocnanalchkpt=1
- export nth_ocnanalchkpt=1
- export npe_node_ocnanalchkpt=$(( npe_node_max / nth_ocnanalchkpt ))
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
case ${OCNRES} in
"025")
- memory_ocnanalchkpt="128GB"
- npes=40;;
+ memory="128GB"
+ ntasks=40;;
"050")
- memory_ocnanalchkpt="32GB"
- npes=16;;
+ memory="32GB"
+ ntasks=16;;
"500")
- memory_ocnanalchkpt="32GB"
- npes=8;;
+ memory="32GB"
+ ntasks=8;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
- export npe_ocnanalchkpt=${npes}
- export memory_ocnanalchkpt
;;
"ocnanalpost")
- export wtime_ocnanalpost="00:30:00"
- export npe_ocnanalpost=${npe_node_max}
- export nth_ocnanalpost=1
- export npe_node_ocnanalpost=$(( npe_node_max / nth_ocnanalpost ))
+ walltime="00:30:00"
+ ntasks=${max_tasks_per_node}
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"ocnanalvrfy")
- export wtime_ocnanalvrfy="00:35:00"
- export npe_ocnanalvrfy=1
- export nth_ocnanalvrfy=1
- export npe_node_ocnanalvrfy=$(( npe_node_max / nth_ocnanalvrfy ))
- export memory_ocnanalvrfy="24GB"
+ walltime="00:35:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="24GB"
;;
"anal")
- export wtime_anal_gdas="01:20:00"
- export wtime_anal_gfs="01:00:00"
+ walltime_gdas="01:20:00"
+ walltime_gfs="01:00:00"
case ${CASE} in
"C768")
- export npe_anal_gdas=780
- export npe_anal_gfs=825
- export nth_anal=5
+ ntasks_gdas=780
+ ntasks_gfs=825
+ threads_per_task=5
;;
"C384")
- export npe_anal_gdas=160
- export npe_anal_gfs=160
- export nth_anal=10
+ ntasks_gdas=160
+ ntasks_gfs=160
+ threads_per_task=10
;;
"C192" | "C96" | "C48")
- export npe_anal_gdas=84
- export npe_anal_gfs=84
- export nth_anal=5
+ ntasks_gdas=84
+ ntasks_gfs=84
+ threads_per_task=5
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
- export npe_node_anal=$(( npe_node_max / nth_anal ))
- export nth_cycle=${nth_anal}
- export npe_node_cycle=$(( npe_node_max / nth_cycle ))
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ export threads_per_task_cycle=${threads_per_task}
+ export tasks_per_node_cycle=$(( max_tasks_per_node / threads_per_task_cycle ))
export is_exclusive=True
-
- var_npe="npe_anal_${RUN}"
- if [[ -n "${!var_npe+0}" ]]; then
- declare -x "npe_anal"="${!var_npe}"
- fi
;;
"analcalc")
- export wtime_analcalc="00:15:00"
- export npe_analcalc=127
- export ntasks="${npe_analcalc}"
- export nth_analcalc=1
- export npe_node_analcalc=$(( npe_node_max / nth_analcalc ))
- export nth_echgres_gdas=4
- export nth_echgres_gfs=12
+ walltime="00:15:00"
+ ntasks=127
+ export ntasks_calcanl="${ntasks}"
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ export threads_per_task_echgres_gdas=4
+ export threads_per_task_echgres_gfs=12
export is_exclusive=True
- export memory_analcalc="48GB"
+ memory="48GB"
if [[ "${CASE}" == "C384" || "${CASE}" == "C768" ]]; then
- export memory_analcalc="${mem_node_max}"
- fi
-
- var_nth="nth_echgres_${RUN}"
- if [[ -n "${!var_nth+0}" ]]; then
- declare -x "nth_echgres"="${!var_nth}"
+ memory="${mem_node_max}"
fi
;;
"analdiag")
- export wtime_analdiag="00:15:00"
- export npe_analdiag=96 # Should be at least twice npe_ediag
- export nth_analdiag=1
- export npe_node_analdiag=$(( npe_node_max / nth_analdiag ))
- export memory_analdiag="48GB"
+ walltime="00:15:00"
+ ntasks=96 # Should be at least twice ediag's tasks
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="48GB"
;;
"sfcanl")
- export wtime_sfcanl="00:20:00"
- export npe_sfcanl=6
- export nth_sfcanl=1
- export npe_node_sfcanl=$(( npe_node_max / nth_sfcanl ))
+ walltime="00:20:00"
+ ntasks=${ntiles:-6}
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
;;
"fcst" | "efcs")
export is_exclusive=True
- if [[ "${step}" == "fcst" ]]; then
- _CDUMP_LIST=${CDUMP:-"gdas gfs"}
- elif [[ "${step}" == "efcs" ]]; then
- _CDUMP_LIST=${CDUMP:-"enkfgdas enkfgfs"}
- fi
-
- # During workflow creation, we need resources for all CDUMPs and CDUMP is undefined
- for _CDUMP in ${_CDUMP_LIST}; do
- if [[ "${_CDUMP}" =~ "gfs" ]]; then
- export layout_x=${layout_x_gfs}
- export layout_y=${layout_y_gfs}
- export WRITE_GROUP=${WRITE_GROUP_GFS}
- export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GFS}
- ntasks_fv3=${ntasks_fv3_gfs}
- ntasks_quilt=${ntasks_quilt_gfs}
- nthreads_fv3=${nthreads_fv3_gfs}
- nthreads_ufs=${nthreads_ufs_gfs}
- # Will not be set if we are skipping the mediator
- nthreads_mediator=${nthreads_mediator_gfs:-}
- elif [[ "${_CDUMP}" =~ "gdas" ]]; then
- export layout_x=${layout_x_gdas}
- export layout_y=${layout_y_gdas}
- export WRITE_GROUP=${WRITE_GROUP_GDAS}
- export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GDAS}
- ntasks_fv3=${ntasks_fv3_gdas}
- ntasks_quilt=${ntasks_quilt_gdas}
- nthreads_fv3=${nthreads_fv3_gdas}
- nthreads_ufs=${nthreads_ufs_gdas}
- nthreads_mediator=${nthreads_mediator_gdas:-}
- fi
-
- # Determine if using ESMF-managed threading or traditional threading
- # If using traditional threading, set them to 1
- if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then
- export UFS_THREADS=1
- else # traditional threading
- export UFS_THREADS=${nthreads_ufs:-1}
- nthreads_fv3=1
- nthreads_mediator=1
- [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1
- [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1
- [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1
+ _RUN=${RUN:-"gfs"}
+ _RUN=${_RUN/enkf/}
+
+ # Declare variables from config.ufs based on _RUN
+ # Export layout and write task variables, but not ntasks/threads
+ # Capitalize _RUN for write tasks
+ for var in layout_x layout_y ntasks_fv3 ntasks_quilt nthreads_fv3 nthreads_ufs \
+ WRITE_GROUP WRTTASK_PER_GROUP_PER_THREAD; do
+ if [[ ${var} =~ "layout" ]]; then
+ ufs_var_name="${var}_${_RUN}"
+ declare -x "${var}"="${!ufs_var_name}"
+ elif [[ ${var} =~ "WR" ]]; then
+ ufs_var_name="${var}_${_RUN^^}"
+ declare -x "${var}"="${!ufs_var_name}"
+ else
+ ufs_var_name="${var}_${_RUN}"
+ declare "${var}"="${!ufs_var_name}"
fi
+ done
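
The loop above builds each config.ufs variable name from the de-ensembled `_RUN` suffix and promotes it to the generic name, upper-casing the suffix for the write-task variables. A minimal sketch, assuming RUN=enkfgdas and illustrative values:

    #!/usr/bin/env bash
    RUN="enkfgdas"
    _RUN=${RUN/enkf/}       # -> "gdas"
    layout_x_gdas=8         # illustrative config.ufs value
    WRITE_GROUP_GDAS=2      # write-task variables use an upper-case suffix
    for var in layout_x WRITE_GROUP; do
      if [[ ${var} =~ "WR" ]]; then
        ufs_var_name="${var}_${_RUN^^}"
      else
        ufs_var_name="${var}_${_RUN}"
      fi
      declare -x "${var}"="${!ufs_var_name}"
    done
    echo "${layout_x} ${WRITE_GROUP}"  # 8 2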
- if (( ntiles > 6 )); then
- export layout_x_nest=${layout_x_nest:-10}
- export layout_y_nest=${layout_y_nest:-10}
- export npx_nest=${npx_nest:-1441}
- export npy_nest=${npy_nest:-961}
- fi
+ # Will not set mediator threads if we are skipping the mediator
+ if [[ ${_RUN} == "gfs" ]]; then
+ nthreads_mediator=${nthreads_mediator_gfs:-}
+ elif [[ ${_RUN} == "gdas" ]]; then
+ nthreads_mediator=${nthreads_mediator_gdas:-}
+ fi
- # PETS for the atmosphere dycore
- (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
- echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"
+ # Determine if using ESMF-managed threading or traditional threading
+ # If using traditional threading, set them to 1
+ if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then
+ export UFS_THREADS=1
+ else # traditional threading
+ export UFS_THREADS=${nthreads_ufs:-1}
+ nthreads_fv3=1
+ nthreads_mediator=1
+ [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1
+ [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1
+ [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1
+ fi
- # PETS for quilting
- if [[ "${QUILTING:-}" == ".true." ]]; then
- (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
- (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
- export WRTTASK_PER_GROUP
- else
- QUILTPETS=0
- fi
- echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
-
- # Total PETS for the atmosphere component
- ATMTHREADS=${nthreads_fv3}
- (( ATMPETS = FV3PETS + QUILTPETS ))
- export ATMPETS ATMTHREADS
- echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
-
- # Total PETS for the coupled model (starting w/ the atmosphere)
- NTASKS_TOT=${ATMPETS}
-
- # The mediator PETS can overlap with other components, usually it lands on the atmosphere tasks.
- # However, it is suggested limiting mediator PETS to 300, as it may cause the slow performance.
- # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
- # TODO: Update reference when moved to ufs-weather-model RTD
- MEDTHREADS=${nthreads_mediator:-1}
- MEDPETS=${MEDPETS:-${FV3PETS}}
- (( "${MEDPETS}" > 300 )) && MEDPETS=300
- export MEDPETS MEDTHREADS
- echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
-
- CHMPETS=0; CHMTHREADS=0
- if [[ "${DO_AERO}" == "YES" ]]; then
- # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
- (( CHMTHREADS = ATMTHREADS ))
- (( CHMPETS = FV3PETS ))
- # Do not add to NTASKS_TOT
- echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
- fi
- export CHMPETS CHMTHREADS
-
- WAVPETS=0; WAVTHREADS=0
- if [[ "${DO_WAVE}" == "YES" ]]; then
- (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
- (( WAVTHREADS = nthreads_ww3 ))
- echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
- (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
- fi
- export WAVPETS WAVTHREADS
-
- OCNPETS=0; OCNTHREADS=0
- if [[ "${DO_OCN}" == "YES" ]]; then
- (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
- (( OCNTHREADS = nthreads_mom6 ))
- echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
- (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
- fi
- export OCNPETS OCNTHREADS
-
- ICEPETS=0; ICETHREADS=0
- if [[ "${DO_ICE}" == "YES" ]]; then
- (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
- (( ICETHREADS = nthreads_cice6 ))
- echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
- (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
- fi
- export ICEPETS ICETHREADS
+ if (( ntiles > 6 )); then
+ export layout_x_nest=${layout_x_nest:-10}
+ export layout_y_nest=${layout_y_nest:-10}
+ export npx_nest=${npx_nest:-1441}
+ export npy_nest=${npy_nest:-961}
+ fi
- echo "Total PETS for ${_CDUMP} = ${NTASKS_TOT}"
+ # PETS for the atmosphere dycore
+ (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
+ echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"
+
+ # PETS for quilting
+ if [[ "${QUILTING:-}" == ".true." ]]; then
+ (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
+ (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
+ export WRTTASK_PER_GROUP
+ else
+ QUILTPETS=0
+ fi
+ echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
+
+ # Total PETS for the atmosphere component
+ ATMTHREADS=${nthreads_fv3}
+ (( ATMPETS = FV3PETS + QUILTPETS ))
+ export ATMPETS ATMTHREADS
+ echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
+
+ # Total PETS for the coupled model (starting w/ the atmosphere)
+ NTASKS_TOT=${ATMPETS}
+
+ # The mediator PETS can overlap with other components; usually they land on the atmosphere tasks.
+ # However, it is suggested to limit mediator PETS to 300, as more may cause slow performance.
+ # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
+ # TODO: Update reference when moved to ufs-weather-model RTD
+ MEDTHREADS=${nthreads_mediator:-1}
+ MEDPETS=${MEDPETS:-${FV3PETS}}
+ (( "${MEDPETS}" > 300 )) && MEDPETS=300
+ export MEDPETS MEDTHREADS
+ echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
+
+ CHMPETS=0; CHMTHREADS=0
+ if [[ "${DO_AERO}" == "YES" ]]; then
+ # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
+ (( CHMTHREADS = ATMTHREADS ))
+ (( CHMPETS = FV3PETS ))
+ # Do not add to NTASKS_TOT
+ echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
+ fi
+ export CHMPETS CHMTHREADS
+
+ WAVPETS=0; WAVTHREADS=0
+ if [[ "${DO_WAVE}" == "YES" ]]; then
+ (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
+ (( WAVTHREADS = nthreads_ww3 ))
+ echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
+ (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
+ fi
+ export WAVPETS WAVTHREADS
+
+ OCNPETS=0; OCNTHREADS=0
+ if [[ "${DO_OCN}" == "YES" ]]; then
+ (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
+ (( OCNTHREADS = nthreads_mom6 ))
+ echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
+ (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
+ fi
+ export OCNPETS OCNTHREADS
+
+ ICEPETS=0; ICETHREADS=0
+ if [[ "${DO_ICE}" == "YES" ]]; then
+ (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
+ (( ICETHREADS = nthreads_cice6 ))
+ echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
+ (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
+ fi
+ export ICEPETS ICETHREADS
- declare -x "npe_${step}_${_CDUMP}"="${NTASKS_TOT}"
- declare -x "nth_${step}_${_CDUMP}"="${UFS_THREADS}"
- declare -x "npe_node_${step}_${_CDUMP}"="${npe_node_max}"
+ echo "Total PETS for ${RUN:-gfs} = ${NTASKS_TOT}"
- done
+ declare -x "ntasks"="${NTASKS_TOT}"
+ declare -x "threads_per_task"="${UFS_THREADS}"
+ declare -x "tasks_per_node"="${max_tasks_per_node}"
case "${CASE}" in
"C48" | "C96" | "C192")
- declare -x "wtime_${step}_gdas"="00:20:00"
- declare -x "wtime_${step}_enkfgdas"="00:20:00"
- declare -x "wtime_${step}_gfs"="03:00:00"
- declare -x "wtime_${step}_enkfgfs"="00:20:00"
+ declare -x "walltime_gdas"="00:20:00"
+ declare -x "walltime_enkfgdas"="00:20:00"
+ declare -x "walltime_gfs"="03:00:00"
+ declare -x "walltime_enkfgfs"="00:20:00"
;;
"C384")
- declare -x "wtime_${step}_gdas"="00:30:00"
- declare -x "wtime_${step}_enkfgdas"="00:30:00"
- declare -x "wtime_${step}_gfs"="06:00:00"
- declare -x "wtime_${step}_enkfgfs"="00:30:00"
+ declare -x "walltime_gdas"="00:30:00"
+ declare -x "walltime_enkfgdas"="00:30:00"
+ declare -x "walltime_gfs"="06:00:00"
+ declare -x "walltime_enkfgfs"="00:30:00"
;;
"C768" | "C1152")
# Not valid resolutions for ensembles
- declare -x "wtime_${step}_gdas"="00:40:00"
- declare -x "wtime_${step}_gfs"="06:00:00"
+ declare -x "walltime_gdas"="00:40:00"
+ declare -x "walltime_gfs"="06:00:00"
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
@@ -851,175 +783,167 @@ case ${step} in
;;
esac
- var_npe_node="npe_node_${step}_${RUN}"
- var_nth="nth_${step}_${RUN}"
- var_npe="npe_${step}_${RUN}"
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_${step}"="${!var_npe_node}" \
- "nth_${step}"="${!var_nth}" \
- "npe_${step}"="${!var_npe}"
- fi
-
- unset _CDUMP _CDUMP_LIST
+ unset _RUN
unset NTASKS_TOT
;;
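
The PETS bookkeeping above is plain accumulation: each component contributes ranks times threads, GOCART reuses the FV3 tasks, and the grand total becomes ntasks for the job. A toy worked example; the counts are illustrative, not a supported configuration:

    #!/usr/bin/env bash
    # Toy PETS accounting in the style of the fcst case above.
    ntasks_fv3=96;  nthreads_fv3=2
    ntasks_quilt=8
    ntasks_mom6=16; nthreads_mom6=1
    (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))      # 192
    (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))  # 16
    (( ATMPETS = FV3PETS + QUILTPETS ))            # 208
    NTASKS_TOT=${ATMPETS}
    (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))    # 16 (GOCART would add 0)
    (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
    echo "Total PETS = ${NTASKS_TOT}"              # 224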
"oceanice_products")
- export wtime_oceanice_products="00:15:00"
- export npe_oceanice_products=1
- export npe_node_oceanice_products=1
- export nth_oceanice_products=1
- export memory_oceanice_products="96GB"
+ walltime="00:15:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
+ memory="96GB"
;;
"upp")
case "${CASE}" in
"C48" | "C96")
- export npe_upp=${CASE:1}
+ ntasks=${CASE:1}
;;
"C192" | "C384" | "C768" )
- export npe_upp=120
- export memory_upp="${mem_node_max}"
+ ntasks=120
+ memory="${mem_node_max}"
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
- export npe_node_upp=${npe_upp}
+ tasks_per_node=${ntasks}
- export nth_upp=1
+ threads_per_task=1
- export wtime_upp="00:15:00"
- if (( npe_node_upp > npe_node_max )); then
- export npe_node_upp=${npe_node_max}
+ walltime="00:15:00"
+ if (( tasks_per_node > max_tasks_per_node )); then
+ tasks_per_node=${max_tasks_per_node}
fi
export is_exclusive=True
;;
"atmos_products")
- export wtime_atmos_products="00:15:00"
- export npe_atmos_products=24
- export nth_atmos_products=1
- export npe_node_atmos_products="${npe_atmos_products}"
+ walltime="00:15:00"
+ ntasks=24
+ threads_per_task=1
+ tasks_per_node="${ntasks}"
export is_exclusive=True
;;
"verfozn")
- export wtime_verfozn="00:05:00"
- export npe_verfozn=1
- export nth_verfozn=1
- export npe_node_verfozn=1
- export memory_verfozn="1G"
+ walltime="00:05:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
+ memory="1G"
;;
"verfrad")
- export wtime_verfrad="00:40:00"
- export npe_verfrad=1
- export nth_verfrad=1
- export npe_node_verfrad=1
- export memory_verfrad="5G"
+ walltime="00:40:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
+ memory="5G"
;;
"vminmon")
- export wtime_vminmon="00:05:00"
- export npe_vminmon=1
- export nth_vminmon=1
- export npe_node_vminmon=1
- export memory_vminmon="1G"
+ walltime="00:05:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
+ memory="1G"
;;
"tracker")
- export wtime_tracker="00:10:00"
- export npe_tracker=1
- export nth_tracker=1
- export npe_node_tracker=1
- export memory_tracker="4G"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
+ memory="4G"
;;
"genesis")
- export wtime_genesis="00:25:00"
- export npe_genesis=1
- export nth_genesis=1
- export npe_node_genesis=1
- export memory_genesis="10G"
+ walltime="00:25:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
+ memory="10G"
;;
"genesis_fsu")
- export wtime_genesis_fsu="00:10:00"
- export npe_genesis_fsu=1
- export nth_genesis_fsu=1
- export npe_node_genesis_fsu=1
- export memory_genesis_fsu="10G"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=1
+ memory="10G"
;;
"fit2obs")
- export wtime_fit2obs="00:20:00"
- export npe_fit2obs=3
- export nth_fit2obs=1
- export npe_node_fit2obs=1
- export memory_fit2obs="20G"
+ walltime="00:20:00"
+ ntasks=3
+ threads_per_task=1
+ tasks_per_node=1
+ memory="20G"
+ [[ ${CASE} == "C768" ]] && memory="80GB"
;;
"metp")
- export nth_metp=1
- export wtime_metp_gdas="03:00:00"
- export wtime_metp_gfs="06:00:00"
- export npe_metp=4
- export npe_node_metp=4
+ threads_per_task=1
+ walltime_gdas="03:00:00"
+ walltime_gfs="06:00:00"
+ ntasks=4
+ tasks_per_node=4
export is_exclusive=True
;;
"echgres")
- export wtime_echgres="00:10:00"
- export npe_echgres=3
- export nth_echgres=${npe_node_max}
- export npe_node_echgres=1
+ walltime="00:10:00"
+ ntasks=3
+ threads_per_task=${max_tasks_per_node}
+ tasks_per_node=1
;;
"init")
- export wtime_init="00:30:00"
- export npe_init=24
- export nth_init=1
- export npe_node_init=6
- export memory_init="70GB"
+ walltime="00:30:00"
+ ntasks=24
+ threads_per_task=1
+ tasks_per_node=6
+ memory="70GB"
;;
"init_chem")
- export wtime_init_chem="00:30:00"
- export npe_init_chem=1
- export npe_node_init_chem=1
+ walltime="00:30:00"
+ ntasks=1
+ tasks_per_node=1
export is_exclusive=True
;;
"mom6ic")
- export wtime_mom6ic="00:30:00"
- export npe_mom6ic=24
- export npe_node_mom6ic=24
+ walltime="00:30:00"
+ ntasks=24
+ tasks_per_node=24
export is_exclusive=True
;;
"arch" | "earc" | "getic")
- declare -x "wtime_${step}"="06:00:00"
- declare -x "npe_${step}"="1"
- declare -x "npe_node_${step}"="1"
- declare -x "nth_${step}"="1"
- declare -x "memory_${step}"="4096M"
+ walltime="06:00:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
+ memory="4096M"
;;
"cleanup")
- export wtime_cleanup="00:15:00"
- export npe_cleanup=1
- export npe_node_cleanup=1
- export nth_cleanup=1
- export memory_cleanup="4096M"
+ walltime="00:15:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
+ memory="4096M"
;;
"stage_ic")
- export wtime_stage_ic="00:15:00"
- export npe_stage_ic=1
- export npe_node_stage_ic=1
- export nth_stage_ic=1
+ walltime="00:15:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
export is_exclusive=True
;;
@@ -1027,360 +951,324 @@ case ${step} in
export layout_x=${layout_x_atmensanl}
export layout_y=${layout_y_atmensanl}
- export wtime_atmensanlinit="00:10:00"
- export npe_atmensanlinit=1
- export nth_atmensanlinit=1
- export npe_node_atmensanlinit=$(( npe_node_max / nth_atmensanlinit ))
- export memory_atmensanlinit="3072M"
+ walltime="00:10:00"
+ ntasks=1
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="3072M"
;;
"atmensanlletkf")
export layout_x=${layout_x_atmensanl}
export layout_y=${layout_y_atmensanl}
- export wtime_atmensanlletkf="00:30:00"
- export npe_atmensanlletkf_enkfgdas=$(( layout_x * layout_y * 6 ))
- export npe_atmensanlletkf_enkfgfs=$(( layout_x * layout_y * 6 ))
- export nth_atmensanlletkf_enkfgdas=1
- export nth_atmensanlletkf_enkfgfs=${nth_atmensanlletkf_enkfgdas}
- export npe_node_atmensanlletkf_enkfgdas=$(( npe_node_max / nth_atmensanlletkf_enkfgdas ))
- export npe_node_atmensanlletkf_enkfgfs=$(( npe_node_max / nth_atmensanlletkf_enkfgfs ))
- export memory_atmensanlletkf="96GB"
+ walltime="00:30:00"
+ ntasks=$(( layout_x * layout_y * 6 ))
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="96GB"
export is_exclusive=True
-
- var_npe_node="npe_node_atmensanlletkf_${RUN}"
- var_nth="nth_atmensanlletkf_${RUN}"
- var_npe="npe_atmensanlletkf_${RUN}"
- # RUN is set to a single value at setup time, so these won't be found
- # TODO rework setup_xml.py to initialize RUN to the applicable option
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_atmensanlletkf"="${!var_npe_node}" \
- "nth_atmensanlletkf"="${!var_nth}" \
- "npe_atmensanlletkf"="${!var_npe}"
- fi
;;
"atmensanlfv3inc")
export layout_x=${layout_x_atmensanl}
export layout_y=${layout_y_atmensanl}
- export wtime_atmensanlfv3inc="00:30:00"
- export npe_atmensanlfv3inc_enkfgdas=$(( layout_x * layout_y * 6 ))
- export npe_atmensanlfv3inc_enkfgfs=$(( layout_x * layout_y * 6 ))
- export nth_atmensanlfv3inc_enkfgdas=1
- export nth_atmensanlfv3inc_enkfgfs=${nth_atmensanlfv3inc_enkfgdas}
- export npe_node_atmensanlfv3inc_enkfgdas=$(( npe_node_max / nth_atmensanlfv3inc_enkfgdas ))
- export npe_node_atmensanlfv3inc_enkfgfs=$(( npe_node_max / nth_atmensanlfv3inc_enkfgfs ))
- export memory_atmensanlfv3inc="96GB"
+ walltime="00:30:00"
+ ntasks=$(( layout_x * layout_y * 6 ))
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="96GB"
export is_exclusive=True
-
- var_npe_node="npe_node_atmensanlfv3inc_${RUN}"
- var_nth="nth_atmensanlfv3inc_${RUN}"
- var_npe="npe_atmensanlfv3inc_${RUN}"
- # RUN is set to a single value at setup time, so these won't be found
- # TODO rework setup_xml.py to initialize RUN to the applicable option
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_atmensanlfv3inc"="${!var_npe_node}" \
- "nth_atmensanlfv3inc"="${!var_nth}" \
- "npe_atmensanlfv3inc"="${!var_npe}"
- fi
;;
"atmensanlfinal")
- export wtime_atmensanlfinal="00:30:00"
- export npe_atmensanlfinal=${npe_node_max}
- export nth_atmensanlfinal=1
- export npe_node_atmensanlfinal=$(( npe_node_max / nth_atmensanlfinal ))
+ walltime="00:30:00"
+ ntasks=${max_tasks_per_node}
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
;;
"eobs" | "eomg")
- export wtime_eobs="00:15:00"
- export wtime_eomg="00:30:00"
+ if [[ "${step}" == "eobs" ]]; then
+ walltime="00:15:00"
+ else
+ walltime="00:30:00"
+ fi
+
case ${CASE} in
- "C768") export npe_eobs=200;;
- "C384") export npe_eobs=100;;
- "C192" | "C96" | "C48") export npe_eobs=40;;
+ "C768") ntasks=200;;
+ "C384") ntasks=100;;
+ "C192" | "C96" | "C48") ntasks=40;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
- export npe_eomg=${npe_eobs}
- export nth_eobs=2
- export nth_eomg=${nth_eobs}
+ threads_per_task=2
# NOTE The number of tasks and cores used must be the same for eobs
# See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
- export npe_node_eobs=$(( npe_node_max / nth_eobs ))
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
- export npe_node_eomg=${npe_node_eobs}
- # Unset npe_node_eobs if it is not a multiple of npe_node_max
+ # Unset max_tasks_per_node if it is not a multiple of tasks_per_node
# to prevent dropping data on the floor. This should be set in
# config.resources.{machine} instead. This will result in an error at
# experiment setup time if not set in config.resources.{machine}.
- if [[ $(( npe_node_max % npe_node_eobs )) != 0 ]]; then
- unset npe_node_max
+ if [[ $(( max_tasks_per_node % tasks_per_node )) != 0 ]]; then
+ unset max_tasks_per_node
fi
;;
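
The guard above deliberately breaks setup when the node size is not an exact multiple of the derived packing, since eobs must use as many cores as tasks. A sketch with a hypothetical odd-sized node to show the failure path:

    #!/usr/bin/env bash
    # Hypothetical 41-core node; eobs pins two threads per task.
    max_tasks_per_node=41
    threads_per_task=2
    tasks_per_node=$(( max_tasks_per_node / threads_per_task ))    # 20 (integer division)
    if [[ $(( max_tasks_per_node % tasks_per_node )) != 0 ]]; then
      unset max_tasks_per_node   # 41 % 20 = 1: force config.resources.{machine} to set it
    fi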
"ediag")
- export wtime_ediag="00:15:00"
- export npe_ediag=48
- export nth_ediag=1
- export npe_node_ediag=$(( npe_node_max / nth_ediag ))
- export memory_ediag="30GB"
+ walltime="00:15:00"
+ ntasks=48
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ memory="30GB"
;;
"eupd")
- export wtime_eupd="00:30:00"
+ walltime="00:30:00"
case ${CASE} in
"C768")
- export npe_eupd=480
- export nth_eupd=6
+ ntasks=480
+ threads_per_task=6
;;
"C384")
- export npe_eupd=270
- export nth_eupd=8
+ ntasks=270
+ threads_per_task=8
;;
"C192" | "C96" | "C48")
- export npe_eupd=42
- export nth_eupd=2
+ ntasks=42
+ threads_per_task=2
;;
*)
echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
- export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
;;
"ecen")
- export wtime_ecen="00:10:00"
- export npe_ecen=80
- export nth_ecen=4
+ walltime="00:10:00"
+ ntasks=80
+ threads_per_task=4
if [[ ${CASE} == "C384" || ${CASE} == "C192" || ${CASE} == "C96" || ${CASE} == "C48" ]]; then
- export nth_ecen=2
+ threads_per_task=2
fi
- export npe_node_ecen=$(( npe_node_max / nth_ecen ))
- export nth_cycle=${nth_ecen}
- export npe_node_cycle=$(( npe_node_max / nth_cycle ))
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ export threads_per_task_cycle=${threads_per_task}
+ export tasks_per_node_cycle=${tasks_per_node}
export is_exclusive=True
;;
"esfc")
- export wtime_esfc="00:15:00"
- export npe_esfc=80
- export nth_esfc=1
- export npe_node_esfc=$(( npe_node_max / nth_esfc ))
- export nth_cycle=${nth_esfc}
- export npe_node_cycle=$(( npe_node_max / nth_cycle ))
+ walltime="00:15:00"
+ ntasks=80
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
+ threads_per_task_cycle=${threads_per_task}
+ tasks_per_node_cycle=$(( max_tasks_per_node / threads_per_task_cycle ))
;;
"epos")
- export wtime_epos="00:15:00"
- [[ ${CASE} == "C768" ]] && export wtime_epos="00:25:00"
- export npe_epos=80
- export nth_epos=1
- export npe_node_epos=$(( npe_node_max / nth_epos ))
+ walltime="00:15:00"
+ [[ ${CASE} == "C768" ]] && walltime="00:25:00"
+ ntasks=80
+ threads_per_task=1
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
export is_exclusive=True
;;
"postsnd")
- export wtime_postsnd="02:00:00"
- export npe_postsnd=40
- export nth_postsnd=8
- export npe_node_postsnd=10
- export npe_postsndcfp=9
- export npe_node_postsndcfp=1
- postsnd_req_cores=$(( npe_node_postsnd * nth_postsnd ))
- if (( postsnd_req_cores > npe_node_max )); then
- export npe_node_postsnd=$(( npe_node_max / nth_postsnd ))
+ walltime="02:00:00"
+ ntasks=40
+ threads_per_task=8
+ tasks_per_node=10
+ export ntasks_postsndcfp=9
+ export tasks_per_node_postsndcfp=1
+ postsnd_req_cores=$(( tasks_per_node * threads_per_task ))
+ if (( postsnd_req_cores > max_tasks_per_node )); then
+ tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
fi
export is_exclusive=True
;;
"awips")
- export wtime_awips="03:30:00"
- export npe_awips=1
- export npe_node_awips=1
- export nth_awips=1
- export memory_awips="3GB"
+ walltime="03:30:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
+ memory="3GB"
;;
"npoess")
- export wtime_npoess="03:30:00"
- export npe_npoess=1
- export npe_node_npoess=1
- export nth_npoess=1
- export memory_npoess="3GB"
+ walltime="03:30:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
+ memory="3GB"
;;
"gempak")
- export wtime_gempak="00:30:00"
- export npe_gempak_gdas=2
- export npe_gempak_gfs=28
- export npe_node_gempak_gdas=2
- export npe_node_gempak_gfs=28
- export nth_gempak=1
- export memory_gempak_gdas="4GB"
- export memory_gempak_gfs="2GB"
-
- var_npe_node="npe_node_gempak_${RUN}"
- var_npe="npe_gempak_${RUN}"
- # RUN is set to a single value at setup time, so these won't be found
- # TODO rework setup_xml.py to initialize RUN to the applicable option
- if [[ -n "${!var_npe_node+0}" ]]; then
- declare -x "npe_node_gempak"="${!var_npe_node}" \
- "npe_gempak"="${!var_npe}"
- fi
+ walltime="00:30:00"
+ ntasks_gdas=2
+ ntasks_gfs=28
+ tasks_per_node_gdas=2
+ tasks_per_node_gfs=28
+ threads_per_task=1
+ memory_gdas="4GB"
+ memory_gfs="2GB"
;;
"mos_stn_prep")
- export wtime_mos_stn_prep="00:10:00"
- export npe_mos_stn_prep=3
- export npe_node_mos_stn_prep=3
- export nth_mos_stn_prep=1
- export memory_mos_stn_prep="5GB"
- export NTASK="${npe_mos_stn_prep}"
- export PTILE="${npe_node_mos_stn_prep}"
+ walltime="00:10:00"
+ ntasks=3
+ tasks_per_node=3
+ threads_per_task=1
+ memory="5GB"
+ NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
;;
"mos_grd_prep")
- export wtime_mos_grd_prep="00:10:00"
- export npe_mos_grd_prep=4
- export npe_node_mos_grd_prep=4
- export nth_mos_grd_prep=1
- export memory_mos_grd_prep="16GB"
- export NTASK="${npe_mos_grd_prep}"
- export PTILE="${npe_node_mos_grd_prep}"
+ walltime="00:10:00"
+ ntasks=4
+ tasks_per_node=4
+ threads_per_task=1
+ memory="16GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
;;
"mos_ext_stn_prep")
- export wtime_mos_ext_stn_prep="00:15:00"
- export npe_mos_ext_stn_prep=2
- export npe_node_mos_ext_stn_prep=2
- export nth_mos_ext_stn_prep=1
- export memory_mos_ext_stn_prep="5GB"
- export NTASK="${npe_mos_ext_stn_prep}"
- export PTILE="${npe_node_mos_ext_stn_prep}"
+ walltime="00:15:00"
+ ntasks=2
+ tasks_per_node=2
+ threads_per_task=1
+ memory="5GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
;;
"mos_ext_grd_prep")
- export wtime_mos_ext_grd_prep="00:10:00"
- export npe_mos_ext_grd_prep=7
- export npe_node_mos_ext_grd_prep=7
- export nth_mos_ext_grd_prep=1
- export memory_mos_ext_grd_prep="3GB"
- export NTASK="${npe_mos_ext_grd_prep}"
- export PTILE="${npe_node_mos_ext_grd_prep}"
+ walltime="00:10:00"
+ ntasks=7
+ tasks_per_node=7
+ threads_per_task=1
+ memory="3GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
;;
"mos_stn_fcst")
- export wtime_mos_stn_fcst="00:10:00"
- export npe_mos_stn_fcst=5
- export npe_node_mos_stn_fcst=5
- export nth_mos_stn_fcst=1
- export memory_mos_stn_fcst="40GB"
- export NTASK="${npe_mos_stn_fcst}"
- export PTILE="${npe_node_mos_stn_fcst}"
+ walltime="00:10:00"
+ ntasks=5
+ tasks_per_node=5
+ threads_per_task=1
+ memory="40GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
;;
"mos_grd_fcst")
- export wtime_mos_grd_fcst="00:10:00"
- export npe_mos_grd_fcst=7
- export npe_node_mos_grd_fcst=7
- export nth_mos_grd_fcst=1
- export memory_mos_grd_fcst="50GB"
- export NTASK="${npe_mos_grd_fcst}"
- export PTILE="${npe_node_mos_grd_fcst}"
+ walltime="00:10:00"
+ ntasks=7
+ tasks_per_node=7
+ threads_per_task=1
+ memory="50GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
;;
"mos_ext_stn_fcst")
- export wtime_mos_ext_stn_fcst="00:20:00"
- export npe_mos_ext_stn_fcst=3
- export npe_node_mos_ext_stn_fcst=3
- export nth_mos_ext_stn_fcst=1
- export memory_mos_ext_stn_fcst="50GB"
- export NTASK="${npe_mos_ext_stn_fcst}"
- export PTILE="${npe_node_mos_ext_stn_fcst}"
+ walltime="00:20:00"
+ ntasks=3
+ tasks_per_node=3
+ threads_per_task=1
+ memory="50GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
export prepost=True
;;
"mos_ext_grd_fcst")
- export wtime_mos_ext_grd_fcst="00:10:00"
- export npe_mos_ext_grd_fcst=7
- export npe_node_mos_ext_grd_fcst=7
- export nth_mos_ext_grd_fcst=1
- export memory_mos_ext_grd_fcst="50GB"
- export NTASK="${npe_mos_ext_grd_fcst}"
- export PTILE="${npe_node_mos_ext_grd_fcst}"
+ walltime="00:10:00"
+ ntasks=7
+ tasks_per_node=7
+ threads_per_task=1
+ memory="50GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
;;
"mos_stn_prdgen")
- export wtime_mos_stn_prdgen="00:10:00"
- export npe_mos_stn_prdgen=1
- export npe_node_mos_stn_prdgen=1
- export nth_mos_stn_prdgen=1
- export memory_mos_stn_prdgen="15GB"
- export NTASK="${npe_mos_stn_prdgen}"
- export PTILE="${npe_node_mos_stn_prdgen}"
+ walltime="00:10:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
+ memory="15GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
export prepost=True
;;
"mos_grd_prdgen")
- export wtime_mos_grd_prdgen="00:40:00"
- export npe_mos_grd_prdgen=72
- export npe_node_mos_grd_prdgen=18
- export nth_mos_grd_prdgen=4
- export memory_mos_grd_prdgen="20GB"
- export NTASK="${npe_mos_grd_prdgen}"
- export PTILE="${npe_node_mos_grd_prdgen}"
- export OMP_NUM_THREADS="${nth_mos_grd_prdgen}"
+ walltime="00:40:00"
+ ntasks=72
+ tasks_per_node=18
+ threads_per_task=4
+ memory="20GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
+ export OMP_NUM_THREADS="${threads_per_task}"
;;
"mos_ext_stn_prdgen")
- export wtime_mos_ext_stn_prdgen="00:10:00"
- export npe_mos_ext_stn_prdgen=1
- export npe_node_mos_ext_stn_prdgen=1
- export nth_mos_ext_stn_prdgen=1
- export memory_mos_ext_stn_prdgen="15GB"
- export NTASK="${npe_mos_ext_stn_prdgen}"
- export PTILE="${npe_node_mos_ext_stn_prdgen}"
+ walltime="00:10:00"
+ ntasks=1
+ tasks_per_node=1
+ threads_per_task=1
+ memory="15GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
export prepost=True
;;
"mos_ext_grd_prdgen")
- export wtime_mos_ext_grd_prdgen="00:30:00"
- export npe_mos_ext_grd_prdgen=96
- export npe_node_mos_ext_grd_prdgen=6
- export nth_mos_ext_grd_prdgen=16
- export memory_mos_ext_grd_prdgen="30GB"
- export NTASK="${npe_mos_ext_grd_prdgen}"
- export PTILE="${npe_node_mos_ext_grd_prdgen}"
- export OMP_NUM_THREADS="${nth_mos_ext_grd_prdgen}"
+ walltime="00:30:00"
+ ntasks=96
+ tasks_per_node=6
+ threads_per_task=16
+ memory="30GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
+ export OMP_NUM_THREADS="${threads_per_task}"
;;
"mos_wx_prdgen")
- export wtime_mos_wx_prdgen="00:10:00"
- export npe_mos_wx_prdgen=4
- export npe_node_mos_wx_prdgen=2
- export nth_mos_wx_prdgen=2
- export memory_mos_wx_prdgen="10GB"
- export NTASK="${npe_mos_wx_prdgen}"
- export PTILE="${npe_node_mos_wx_prdgen}"
- export OMP_NUM_THREADS="${nth_mos_wx_prdgen}"
+ walltime="00:10:00"
+ ntasks=4
+ tasks_per_node=2
+ threads_per_task=2
+ memory="10GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
+ export OMP_NUM_THREADS="${threads_per_task}"
;;
"mos_wx_ext_prdgen")
- export wtime_mos_wx_ext_prdgen="00:10:00"
- export npe_mos_wx_ext_prdgen=4
- export npe_node_mos_wx_ext_prdgen=2
- export nth_mos_wx_ext_prdgen=2
- export memory_mos_wx_ext_prdgen="10GB"
- export NTASK="${npe_mos_wx_ext_prdgen}"
- export PTILE="${npe_node_mos_wx_ext_prdgen}"
- export OMP_NUM_THREADS="${nth_mos_wx_ext_prdgen}"
+ walltime="00:10:00"
+ ntasks=4
+ tasks_per_node=2
+ threads_per_task=2
+ memory="10GB"
+ export NTASK="${ntasks}"
+ export PTILE="${tasks_per_node}"
+ export OMP_NUM_THREADS="${threads_per_task}"
;;
*)
@@ -1390,15 +1278,19 @@ case ${step} in
esac
-# Unset dynamic variable names
-unset var_NTASKS \
- var_npe \
- var_npe_node \
- var_nth
-
# Get machine-specific resources, overriding/extending the above assignments
if [[ -f "${EXPDIR}/config.resources.${machine}" ]]; then
source "${EXPDIR}/config.resources.${machine}"
fi
+# Check for RUN-specific variables and export them
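+# For example, with RUN="gfs" the gempak job above has ntasks_gfs=28, so
+# this loop exports ntasks=28; un-suffixed variables are exported as-is.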
+for resource_var in threads_per_task ntasks tasks_per_node NTASKS memory walltime; do
+ run_resource_var="${resource_var}_${RUN}"
+ if [[ -n "${!run_resource_var+0}" ]]; then
+ declare -x "${resource_var}"="${!run_resource_var}"
+ elif [[ -n "${!resource_var+0}" ]]; then
+ export "${resource_var?}"
+ fi
+done
+
echo "END: config.resources"
diff --git a/parm/config/gfs/config.resources.GAEA b/parm/config/gfs/config.resources.GAEA
index 3f0934edc2..51007b5b4f 100644
--- a/parm/config/gfs/config.resources.GAEA
+++ b/parm/config/gfs/config.resources.GAEA
@@ -8,10 +8,10 @@ case ${step} in
# See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
case ${CASE} in
"C768" | "C384")
- export npe_node_eobs=50
+ export tasks_per_node=50
;;
*)
- export npe_node_eobs=40
+ export tasks_per_node=40
;;
esac
;;
diff --git a/parm/config/gfs/config.resources.HERA b/parm/config/gfs/config.resources.HERA
index cfd614961d..36f50508c3 100644
--- a/parm/config/gfs/config.resources.HERA
+++ b/parm/config/gfs/config.resources.HERA
@@ -5,30 +5,29 @@
case ${step} in
"anal")
if [[ "${CASE}" == "C384" ]]; then
- export npe_anal_gdas=270
- export npe_anal_gfs=270
- export nth_anal=8
- export npe_node_anal=$(( npe_node_max / nth_anal ))
+ export ntasks_gdas=270
+ export ntasks_gfs=270
+ export threads_per_task=8
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
fi
;;
"eupd")
case ${CASE} in
"C384")
- export npe_eupd=80
+ export ntasks=80
;;
"C192" | "C96" | "C48")
- export nth_eupd=4
+ export threads_per_task=4
;;
*)
;;
esac
- export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"ecen")
- if [[ "${CASE}" == "C768" ]]; then export nth_ecen=6; fi
- export npe_node_ecen=$(( npe_node_max / nth_ecen ))
+ if [[ "${CASE}" == "C768" ]]; then export threads_per_task=6; fi
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
*)
diff --git a/parm/config/gfs/config.resources.HERCULES b/parm/config/gfs/config.resources.HERCULES
index 7d2ca646e6..7a5a74f69c 100644
--- a/parm/config/gfs/config.resources.HERCULES
+++ b/parm/config/gfs/config.resources.HERCULES
@@ -8,9 +8,8 @@ case ${step} in
# See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
# For Hercules, this is only an issue at C384; use 20 tasks/node
if [[ ${CASE} = "C384" ]]; then
- export npe_node_eobs=20
+ export tasks_per_node=20
fi
- export npe_node_eomg=${npe_node_eobs}
;;
*)
;;
diff --git a/parm/config/gfs/config.resources.JET b/parm/config/gfs/config.resources.JET
index de2ec6547a..47b953c0f4 100644
--- a/parm/config/gfs/config.resources.JET
+++ b/parm/config/gfs/config.resources.JET
@@ -5,10 +5,9 @@
case ${step} in
"anal")
if [[ "${CASE}" == "C384" ]]; then
- export npe_anal_gdas=270
- export npe_anal_gfs=270
- export nth_anal=8
- export npe_node_anal=$(( npe_node_max / nth_anal ))
+ export ntasks_gdas=270
+ export ntasks_gfs=270
+ export threads_per_task=8
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
fi
;;
@@ -17,35 +16,35 @@ case ${step} in
# The number of tasks and cores used must be the same for eobs
# See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
# This would also be an issue for vjet and sjet if anyone runs on those nodes.
- export npe_node_eobs=10
+ export tasks_per_node=10
fi
;;
"eupd")
case ${CASE} in
"C384")
- export npe_eupd=80
+ export ntasks=80
;;
"C192" | "C96" | "C48")
- export nth_eupd=4
+ export threads_per_task=4
;;
*)
;;
esac
- export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"ecen")
- if [[ "${CASE}" == "C768" ]]; then export nth_ecen=6; fi
- export npe_node_ecen=$(( npe_node_max / nth_ecen ))
+ if [[ "${CASE}" == "C768" ]]; then export threads_per_task=6; fi
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"upp")
- export memory_upp="${mem_node_max}"
+ export memory="${mem_node_max}"
;;
"esfc")
- export memory_esfc="${mem_node_max}"
+ export memory="${mem_node_max}"
;;
*)
diff --git a/parm/config/gfs/config.resources.S4 b/parm/config/gfs/config.resources.S4
index 3f6654f8d6..1af64bf250 100644
--- a/parm/config/gfs/config.resources.S4
+++ b/parm/config/gfs/config.resources.S4
@@ -11,47 +11,47 @@ case ${step} in
#On the S4-s4 partition, this is accomplished by increasing the task
#count to a multiple of 32
if [[ ${PARTITION_BATCH} = "s4" ]]; then
- export npe_anal_gdas=416
- export npe_anal_gfs=416
+ export ntasks_gdas=416
+ export ntasks_gfs=416
fi
#S4 is small, so run this task with just 1 thread
- export nth_anal=1
- export wtime_anal_gdas="02:00:00"
- export wtime_anal_gfs="02:00:00"
+ export threads_per_task=1
+ export walltime_gdas="02:00:00"
+ export walltime_gfs="02:00:00"
;;
"C192" | "C96" | "C48")
- export nth_anal=4
+ export threads_per_task=4
if [[ ${PARTITION_BATCH} == "s4" ]]; then
- export npe_anal_gdas=88
- export npe_anal_gfs=88
+ export ntasks_gdas=88
+ export ntasks_gfs=88
elif [[ ${PARTITION_BATCH} == "ivy" ]]; then
- export npe_anal_gdas=90
- export npe_anal_gfs=90
+ export ntasks_gdas=90
+ export ntasks_gfs=90
fi
;;
*)
;;
esac
- export npe_node_anal=$(( npe_node_max / nth_anal ))
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"eobs")
# The number of tasks and cores used must be the same for eobs
# See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
# For S4, this is accomplished by running 10 tasks/node
- export npe_node_eobs=10
+ export tasks_per_node=10
;;
"eupd")
if [[ "${CASE}" == "C384" ]]; then
- export npe_eupd=160
- export nth_eupd=2
+ export ntasks=160
+ export threads_per_task=2
fi
- export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"ediag")
- export memory_ediag="${mem_node_max}"
+ export memory="${mem_node_max}"
;;
*)
diff --git a/parm/config/gfs/config.resources.WCOSS2 b/parm/config/gfs/config.resources.WCOSS2
index 7e4314a0e9..a0a69fa8d1 100644
--- a/parm/config/gfs/config.resources.WCOSS2
+++ b/parm/config/gfs/config.resources.WCOSS2
@@ -5,50 +5,50 @@
case ${step} in
"prep")
export is_exclusive=True
- export memory_prep="480GB"
+ export memory="480GB"
;;
"anal")
if [[ "${CASE}" == "C768" ]]; then
- export nth_anal=8
- # Make npe a multiple of 16
- export npe_anal_gdas=784
- export npe_anal_gfs=832
- export npe_node_anal=$(( npe_node_max / nth_anal ))
+ export threads_per_task=8
+ # Make ntasks a multiple of 16
+ export ntasks_gdas=784
+ export ntasks_gfs=832
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
fi
;;
"fit2obs")
- export npe_node_fit2obs=3
+ export tasks_per_node=3
;;
"echgres")
- export memory_echgres="200GB"
+ export memory="200GB"
;;
"arch" | "earc" | "getic")
- declare -x "memory_${step}"="50GB"
+ declare -x "memory"="50GB"
;;
"eupd")
case ${CASE} in
"C768" | "C384")
- export npe_eupd=315
- export nth_eupd=14
+ export ntasks=315
+ export threads_per_task=14
;;
*)
;;
esac
- export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
;;
"eobs")
case ${CASE} in
"C768" | "C384")
- export npe_node_eobs=50
+ export tasks_per_node=50
;;
*)
- export npe_node_eobs=40
+ export tasks_per_node=40
;;
esac
;;
diff --git a/parm/config/gfs/config.wave b/parm/config/gfs/config.wave
index 568aeb1e1c..db4eb9f708 100644
--- a/parm/config/gfs/config.wave
+++ b/parm/config/gfs/config.wave
@@ -10,10 +10,10 @@ echo "BEGIN: config.wave"
# Some others are also used across the workflow in wave component scripts
# General runtime labels
-export CDUMPwave="${RUN}wave"
+export RUNwave="${RUN}wave"
# In GFS/GDAS, restart files are generated/read from gdas runs
-export CDUMPRSTwave="gdas"
+export RUNRSTwave="gdas"
#grid dependent variable defaults
export waveGRDN='1' # grid number for ww3_multi
diff --git a/parm/config/gfs/config.waveprep b/parm/config/gfs/config.waveprep
index 1c9a40c1d8..1f746eab77 100644
--- a/parm/config/gfs/config.waveprep
+++ b/parm/config/gfs/config.waveprep
@@ -21,7 +21,7 @@ export WAV_CUR_CDO_SMOOTH="NO"
export CDO_ROOT=${CDO_ROOT:-/usrx/local/dev/packages/cdo/1.9.8}
if [ "${WW3ICEINP}" = "YES" ]; then
- export WAVICEFILE=${CDUMP}.t${cyc}z.seaice.5min.grib2
+ export WAVICEFILE=${RUN}.t${cyc}z.seaice.5min.grib2
fi
echo "END: config.waveprep"
diff --git a/scripts/exgdas_atmos_chgres_forenkf.sh b/scripts/exgdas_atmos_chgres_forenkf.sh
index 183179fcdb..1833ed7b1f 100755
--- a/scripts/exgdas_atmos_chgres_forenkf.sh
+++ b/scripts/exgdas_atmos_chgres_forenkf.sh
@@ -24,7 +24,6 @@ pwd=$(pwd)
# Base variables
CDATE=${CDATE:-"2001010100"}
-CDUMP=${CDUMP:-"enkfgdas"}
GDUMP=${GDUMP:-"gdas"}
# Derived base variables
@@ -164,7 +163,7 @@ EOF
chmod 755 $DATA/mp_chgres.sh
ncmd=$(cat $DATA/mp_chgres.sh | wc -l)
if [ $ncmd -gt 0 ]; then
- ncmd_max=$((ncmd < npe_node_max ? ncmd : npe_node_max))
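+ # Cap the CFP command count at one node's task limit (arithmetic min)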
+ ncmd_max=$((ncmd < max_tasks_per_node ? ncmd : max_tasks_per_node))
APRUNCFP_CHGRES=$(eval echo $APRUNCFP)
export pgm=$CHGRESNCEXEC
diff --git a/scripts/exgdas_enkf_update.sh b/scripts/exgdas_enkf_update.sh
index c878dc721b..e924274d39 100755
--- a/scripts/exgdas_enkf_update.sh
+++ b/scripts/exgdas_enkf_update.sh
@@ -259,7 +259,7 @@ if [[ $USE_CFP = "YES" ]]; then
chmod 755 $DATA/mp_untar.sh
ncmd=$(cat $DATA/mp_untar.sh | wc -l)
if [[ $ncmd -gt 0 ]]; then
- ncmd_max=$((ncmd < npe_node_max ? ncmd : npe_node_max))
+ ncmd_max=$((ncmd < max_tasks_per_node ? ncmd : max_tasks_per_node))
APRUNCFP=$(eval echo $APRUNCFP)
$APRUNCFP $DATA/mp_untar.sh
export err=$?; err_chk
diff --git a/scripts/exgfs_aero_init_aerosol.py b/scripts/exgfs_aero_init_aerosol.py
index 1c81880ca9..d098368202 100755
--- a/scripts/exgfs_aero_init_aerosol.py
+++ b/scripts/exgfs_aero_init_aerosol.py
@@ -14,7 +14,7 @@
CDATE: Initial time in YYYYMMDDHH format
STEP_GFS: Forecast cadence (frequency) in hours
FHMAX_GFS: Forecast length in hours
-CDUMP: Forecast phase (gfs or gdas). Currently always expected to be gfs.
+RUN: Forecast phase (gfs or gdas). Currently always expected to be gfs.
ROTDIR: Rotating (COM) directory
USHgfs: Path to global-workflow `ush` directory
PARMgfs: Path to global-workflow `parm` directory
@@ -41,14 +41,14 @@
from functools import partial
# Constants
-atm_base_pattern = "{rot_dir}/{cdump}.%Y%m%d/%H/model_data/atmos/input" # Location of atmosphere ICs
+atm_base_pattern = "{rot_dir}/{run}.%Y%m%d/%H/model_data/atmos/input" # Location of atmosphere ICs
atm_file_pattern = "{path}/gfs_data.{tile}.nc" # Atm IC file names
atm_ctrl_pattern = "{path}/gfs_ctrl.nc" # Atm IC control file name
-restart_base_pattern = "{rot_dir}/{cdump}.%Y%m%d/%H/model_data/atmos/restart" # Location of restart files (time of previous run)
+restart_base_pattern = "{rot_dir}/{run}.%Y%m%d/%H/model_data/atmos/restart" # Location of restart files (time of previous run)
restart_file_pattern = "{file_base}/{timestamp}fv_core.res.{tile}.nc" # Name of restart data files (time when restart is valid)
tracer_file_pattern = "{file_base}/{timestamp}fv_tracer.res.{tile}.nc" # Name of restart tracer files (time when restart is valid)
dycore_file_pattern = "{file_base}/{timestamp}fv_core.res.nc" # Name of restart dycore file (time when restart is valid)
-tracer_list_file_pattern = "{parm_gfs}/ufs/gocart/gocart_tracer.list" # Text list of tracer names to copy
+tracer_list_file_pattern = "{parm_gfs}/ufs/gocart/gocart_tracer.list" # Text list of tracer names to copy
merge_script_pattern = "{ush_gfs}/merge_fv3_aerosol_tile.py"
n_tiles = 6
max_lookback = 4 # Maximum number of past cycles to look for for tracer data
@@ -68,7 +68,7 @@ def main() -> None:
cdate = get_env_var("CDATE")
incr = int(get_env_var('STEP_GFS'))
fcst_length = int(get_env_var('FHMAX_GFS'))
- cdump = get_env_var("CDUMP")
+ run = get_env_var("RUN")
rot_dir = get_env_var("ROTDIR")
ush_gfs = get_env_var("USHgfs")
parm_gfs = get_env_var("PARMgfs")
@@ -86,7 +86,7 @@ def main() -> None:
print(f'{var} = {f"{var}"}')
atm_files, ctrl_files = get_atm_files(atm_source_path)
- tracer_files, rest_files, core_files = get_restart_files(time, incr, max_lookback, fcst_length, rot_dir, cdump)
+ tracer_files, rest_files, core_files = get_restart_files(time, incr, max_lookback, fcst_length, rot_dir, run)
if (tracer_files is not None):
merge_tracers(merge_script, atm_files, tracer_files, rest_files, core_files[0], ctrl_files[0], tracer_list_file)
@@ -167,7 +167,7 @@ def get_atm_files(path: str) -> typing.List[typing.List[str]]:
return file_list
-def get_restart_files(time: datetime, incr: int, max_lookback: int, fcst_length: int, rot_dir: str, cdump: str) -> typing.List[typing.List[str]]:
+def get_restart_files(time: datetime, incr: int, max_lookback: int, fcst_length: int, rot_dir: str, run: str) -> typing.List[typing.List[str]]:
'''
Determines the last cycle where all the necessary restart files are available. Ideally the immediate previous cycle
@@ -183,8 +183,8 @@ def get_restart_files(time: datetime, incr: int, max_lookback: int, fcst_length:
Length of forecast in hours
rot_dir : str
Path to the ROTDIR (COM) directory
- cdump : str
- CDUMP of current forecast portion (currently should always be 'gfs')
+ run : str
+ RUN of current forecast portion (currently should always be 'gfs')
Returns
----------
diff --git a/scripts/exgfs_wave_post_pnt.sh b/scripts/exgfs_wave_post_pnt.sh
index b251661ee6..0b8874f3fb 100755
--- a/scripts/exgfs_wave_post_pnt.sh
+++ b/scripts/exgfs_wave_post_pnt.sh
@@ -44,7 +44,7 @@ source "${USHgfs}/preamble.sh"
# Set wave model ID tag to include member number
# if ensemble; waveMEMB var empty in deterministic
- export WAV_MOD_TAG=${CDUMP}wave${waveMEMB}
+ export WAV_MOD_TAG=${RUN}wave${waveMEMB}
echo "HAS BEGUN on $(hostname)"
echo "Starting WAVE PNT POSTPROCESSOR SCRIPT for $WAV_MOD_TAG"
diff --git a/scripts/exglobal_archive.py b/scripts/exglobal_archive.py
index af396d382e..ec8154317f 100755
--- a/scripts/exglobal_archive.py
+++ b/scripts/exglobal_archive.py
@@ -29,7 +29,7 @@ def main():
'DOIAU', 'OCNRES', 'ICERES', 'NUM_SND_COLLECTIVES', 'FHOUT_WAV',
'FHOUT_HF_WAV', 'FHMAX_WAV', 'FHMAX_HF_WAV', 'FHMAX_WAV_GFS',
'restart_interval_gdas', 'restart_interval_gfs',
- 'AERO_ANL_CDUMP', 'AERO_FCST_CDUMP', 'DOIBP_WAV', 'DO_JEDIOCNVAR',
+ 'AERO_ANL_RUN', 'AERO_FCST_RUN', 'DOIBP_WAV', 'DO_JEDIOCNVAR',
'NMEM_ENS', 'DO_JEDIATMVAR', 'DO_VRFY_OCEANDA', 'FHMAX_FITS',
'IAUFHRS', 'DO_FIT2OBS']
diff --git a/scripts/exglobal_atmos_analysis.sh b/scripts/exglobal_atmos_analysis.sh
index 0e78bed2e0..932adf8e10 100755
--- a/scripts/exglobal_atmos_analysis.sh
+++ b/scripts/exglobal_atmos_analysis.sh
@@ -26,7 +26,7 @@ pwd=$(pwd)
# Base variables
CDATE=${CDATE:-"2001010100"}
-CDUMP=${CDUMP:-"gdas"}
+rCDUMP=${rCDUMP:-"gdas"}
GDUMP=${GDUMP:-"gdas"}
# Derived base variables
@@ -581,8 +581,8 @@ if [ ${GENDIAG} = "YES" ] ; then
if [ -d ${DIAG_DIR} ]; then
rm -rf ${DIAG_DIR}
fi
- npe_m1="$((${npe_gsi}-1))"
- for pe in $(seq 0 ${npe_m1}); do
+ ntasks_m1="$((ntasks-1))"
+ for pe in $(seq 0 ${ntasks_m1}); do
pedir="dir."$(printf %04i ${pe})
mkdir -p ${DIAG_DIR}/${pedir}
${NLN} ${DIAG_DIR}/${pedir} ${pedir}
@@ -675,7 +675,7 @@ EOFunzip
chmod 755 ${DATA}/mp_unzip.sh
ncmd=$(cat ${DATA}/mp_unzip.sh | wc -l)
if [ ${ncmd} -gt 0 ]; then
- ncmd_max=$((ncmd < npe_node_max ? ncmd : npe_node_max))
+ ncmd_max=$((ncmd < max_tasks_per_node ? ncmd : max_tasks_per_node))
APRUNCFP_UNZIP=$(eval echo ${APRUNCFP})
${APRUNCFP_UNZIP} ${DATA}/mp_unzip.sh
export err=$?; err_chk
@@ -990,7 +990,7 @@ cd ${pwd}
if [ ${SENDECF} = "YES" -a "${RUN}" != "enkf" ]; then
ecflow_client --event release_fcst
fi
-echo "${CDUMP} ${CDATE} atminc done at $(date)" > ${COM_ATMOS_ANALYSIS}/${APREFIX}loginc.txt
+echo "${rCDUMP} ${CDATE} atminc done at $(date)" > ${COM_ATMOS_ANALYSIS}/${APREFIX}loginc.txt
################################################################################
diff --git a/scripts/exglobal_atmos_analysis_calc.sh b/scripts/exglobal_atmos_analysis_calc.sh
index f94ebb02fa..423e800879 100755
--- a/scripts/exglobal_atmos_analysis_calc.sh
+++ b/scripts/exglobal_atmos_analysis_calc.sh
@@ -25,7 +25,7 @@ source "${USHgfs}/preamble.sh"
pwd=$(pwd)
# Base variables
-CDUMP=${CDUMP:-"gdas"}
+rCDUMP=${rCDUMP:-"gdas"}
GDUMP=${GDUMP:-"gdas"}
# Utilities
@@ -181,7 +181,7 @@ if [ $DOGAUSFCANL = "YES" ]; then
export err=$?; err_chk
fi
-echo "${CDUMP} ${PDY}${cyc} atmanl and sfcanl done at $(date)" > "${COM_ATMOS_ANALYSIS}/${APREFIX}loganl.txt"
+echo "${rCDUMP} ${PDY}${cyc} atmanl and sfcanl done at $(date)" > "${COM_ATMOS_ANALYSIS}/${APREFIX}loganl.txt"
################################################################################
# Postprocessing
diff --git a/scripts/exglobal_atmos_products.sh b/scripts/exglobal_atmos_products.sh
index 9067819380..51e1a108bb 100755
--- a/scripts/exglobal_atmos_products.sh
+++ b/scripts/exglobal_atmos_products.sh
@@ -11,7 +11,7 @@ INTERP_ATMOS_SFLUXSH=${INTERP_ATMOS_SFLUXSH:-"${USHgfs}/interp_atmos_sflux.sh"}
# Variables used in this job
downset=${downset:-1} # No. of groups of pressure grib2 products to create
-npe_atmos_products=${npe_atmos_products:-8} # no. of processors available to process each group
+ntasks=${ntasks:-8} # no. of processors available to process each group
# WGNE related options
WGNE=${WGNE:-NO} # Create WGNE products
@@ -72,7 +72,7 @@ for (( nset=1 ; nset <= downset ; nset++ )); do
echo "Begin processing nset = ${nset}"
# Number of processors available to process $nset
- nproc=${npe_atmos_products}
+ nproc=${ntasks}
# Each set represents a group of files
if (( nset == 1 )); then
diff --git a/scripts/exglobal_diag.sh b/scripts/exglobal_diag.sh
index e1faa7b2ee..ed9bef05df 100755
--- a/scripts/exglobal_diag.sh
+++ b/scripts/exglobal_diag.sh
@@ -26,7 +26,6 @@ pwd=$(pwd)
# Base variables
CDATE="${PDY}${cyc}"
-CDUMP=${CDUMP:-"gdas"}
GDUMP=${GDUMP:-"gdas"}
# Utilities
@@ -222,7 +221,7 @@ EOFdiag
chmod 755 $DATA/mp_diag.sh
ncmd=$(cat $DATA/mp_diag.sh | wc -l)
if [ $ncmd -gt 0 ]; then
- ncmd_max=$((ncmd < npe_node_max ? ncmd : npe_node_max))
+ ncmd_max=$((ncmd < max_tasks_per_node ? ncmd : max_tasks_per_node))
APRUNCFP_DIAG=$(eval echo $APRUNCFP)
$APRUNCFP_DIAG $DATA/mp_diag.sh
export err=$?; err_chk
diff --git a/scripts/exglobal_forecast.sh b/scripts/exglobal_forecast.sh
index e2bccd43db..4ce9d7894d 100755
--- a/scripts/exglobal_forecast.sh
+++ b/scripts/exglobal_forecast.sh
@@ -31,7 +31,7 @@
## 1. restart file except sfc_data, $gmemdir/RESTART/$PDY.$cyc.*.nc
## 2. sfcanl_data, $memdir/RESTART/$PDY.$cyc.*.nc
## 3. coupler_res, $gmemdir/RESTART/$PDY.$cyc.coupler.res
-## 4. increment file, $memdir/${CDUMP}.t${cyc}z.atminc.nc
+## 4. increment file, $memdir/${RUN}.t${cyc}z.atminc.nc
## OR $DATA/INPUT/fv3_increment.nc
## Cold start files:
## 1. initial condition, $memdir/INPUT/*.nc
@@ -54,9 +54,9 @@
##
## Data output (location, name)
## If quilting=true and output grid is gaussian grid:
-## 1. atmf data, $memdir/${CDUMP}.t${cyc}z.atmf${FH3}.$OUTPUT_FILE
-## 2. sfcf data, $memdir/${CDUMP}.t${cyc}z.sfcf${FH3}.$OUTPUT_FILE
-## 3. logf data, $memdir/${CDUMP}.t${cyc}z.logf${FH3}.$OUTPUT_FILE
+## 1. atmf data, $memdir/${RUN}.t${cyc}z.atmf${FH3}.$OUTPUT_FILE
+## 2. sfcf data, $memdir/${RUN}.t${cyc}z.sfcf${FH3}.$OUTPUT_FILE
+## 3. logf data, $memdir/${RUN}.t${cyc}z.logf${FH3}.$OUTPUT_FILE
## If quilting=false and output grid is not gaussian grid:
## 1. NGGPS2D, $memdir/nggps2d.tile${n}.nc
## 2. NGGPS3D, $memdir/nggps3d.tile${n}.nc
diff --git a/sorc/wxflow b/sorc/wxflow
index 1356acdb2b..d314e06510 160000
--- a/sorc/wxflow
+++ b/sorc/wxflow
@@ -1 +1 @@
-Subproject commit 1356acdb2bbca28e442597699da1a295faa18fe3
+Subproject commit d314e065101041a4d45e5a11ec19cd2dc5f38c67
diff --git a/ush/calcanl_gfs.py b/ush/calcanl_gfs.py
index ceb75104a2..5d97d25dfd 100755
--- a/ush/calcanl_gfs.py
+++ b/ush/calcanl_gfs.py
@@ -19,7 +19,7 @@
def calcanl_gfs(DoIAU, l4DEnsVar, Write4Danl, ComOut, APrefix,
ComIn_Ges, GPrefix,
FixDir, atmges_ens_mean, RunDir, NThreads, NEMSGet, IAUHrs,
- ExecCMD, ExecCMDMPI, ExecAnl, ExecChgresInc, Cdump, JEDI):
+ ExecCMD, ExecCMDMPI, ExecAnl, ExecChgresInc, Run, JEDI):
print('calcanl_gfs beginning at: ', datetime.datetime.utcnow())
IAUHH = IAUHrs
@@ -38,7 +38,7 @@ def calcanl_gfs(DoIAU, l4DEnsVar, Write4Danl, ComOut, APrefix,
gsi_utils.link_file(RunDir + '/siganl', CalcAnlDir + '/anl.06')
gsi_utils.copy_file(ExecChgresInc, CalcAnlDir + '/chgres_inc.x')
# for ensemble res analysis
- if Cdump in ["gdas", "gfs"]:
+ if Run in ["gdas", "gfs"]:
CalcAnlDir = RunDir + '/calcanl_ensres_' + format(fh, '02')
if not os.path.exists(CalcAnlDir):
gsi_utils.make_dir(CalcAnlDir)
@@ -166,7 +166,7 @@ def calcanl_gfs(DoIAU, l4DEnsVar, Write4Danl, ComOut, APrefix,
[hosts.append(x) for x in hosts_tmp if x not in hosts]
nhosts = len(hosts)
ExecCMDMPI_host = 'mpiexec -l -n ' + str(nFH)
- tasks = int(os.getenv('ntasks', 1))
+ tasks = int(os.getenv('ntasks_calcanl', 1))
print('nhosts,tasks=', nhosts, tasks)
if levs > tasks:
ExecCMDMPILevs_host = 'mpiexec -l -n ' + str(tasks)
@@ -298,7 +298,7 @@ def calcanl_gfs(DoIAU, l4DEnsVar, Write4Danl, ComOut, APrefix,
sys.exit(exit_fullres)
# compute determinstic analysis on ensemble resolution
- if Cdump in ["gdas", "gfs"]:
+ if Run in ["gdas", "gfs"]:
chgres_jobs = []
for fh in IAUHH:
# first check to see if guess file exists
@@ -359,7 +359,7 @@ def calcanl_gfs(DoIAU, l4DEnsVar, Write4Danl, ComOut, APrefix,
ExecChgresInc = os.getenv('CHGRESINCEXEC', './interp_inc.x')
NEMSGet = os.getenv('NEMSIOGET', 'nemsio_get')
IAUHrs = list(map(int, os.getenv('IAUFHRS', '6').split(',')))
- Cdump = os.getenv('CDUMP', 'gdas')
+ Run = os.getenv('RUN', 'gdas')
JEDI = gsi_utils.isTrue(os.getenv('DO_JEDIATMVAR', 'YES'))
print(locals())
@@ -367,4 +367,4 @@ def calcanl_gfs(DoIAU, l4DEnsVar, Write4Danl, ComOut, APrefix,
ComIn_Ges, GPrefix,
FixDir, atmges_ens_mean, RunDir, NThreads, NEMSGet, IAUHrs,
ExecCMD, ExecCMDMPI, ExecAnl, ExecChgresInc,
- Cdump, JEDI)
+ Run, JEDI)
diff --git a/ush/forecast_predet.sh b/ush/forecast_predet.sh
index 9183e86002..ebf7cfd282 100755
--- a/ush/forecast_predet.sh
+++ b/ush/forecast_predet.sh
@@ -72,8 +72,8 @@ FV3_restarts(){
common_predet(){
echo "SUB ${FUNCNAME[0]}: Defining variables for shared through model components"
- CDUMP=${CDUMP:-gdas}
- rCDUMP=${rCDUMP:-${CDUMP}}
+ RUN=${RUN:-gdas}
+ rCDUMP=${rCDUMP:-${RUN}}
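+ # rCDUMP defaults to RUN; retained for scripts that still reference it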
CDATE=${CDATE:-"${PDY}${cyc}"}
ENSMEM=${ENSMEM:-000}
diff --git a/ush/getdump.sh b/ush/getdump.sh
index 58906cdedc..12deb725e1 100755
--- a/ush/getdump.sh
+++ b/ush/getdump.sh
@@ -5,9 +5,9 @@ source "${USHgfs}/preamble.sh"
COMPONENT=${COMPONENT:-atmos}
CDATE=${1:-""}
-CDUMP=${2:-""}
-SOURCE_DIR=${3:-$DMPDIR/${CDUMP}${DUMP_SUFFIX}.${PDY}/${cyc}/${COMPONENT}}
-TARGET_DIR=${4:-$ROTDIR/${CDUMP}.${PDY}/${cyc}/${COMPONENT}}
+RUN=${2:-""}
+SOURCE_DIR=${3:-$DMPDIR/${RUN}${DUMP_SUFFIX}.${PDY}/${cyc}/${COMPONENT}}
+TARGET_DIR=${4:-$ROTDIR/${RUN}.${PDY}/${cyc}/${COMPONENT}}
DUMP_SUFFIX=${DUMP_SUFFIX:-""}
@@ -24,7 +24,7 @@ if [ ! -s $TARGET_DIR ]; then mkdir -p $TARGET_DIR ;fi
# Set file prefix
cyc=$(echo $CDATE |cut -c 9-10)
-prefix="$CDUMP.t${cyc}z."
+prefix="$RUN.t${cyc}z."
# Link dump files from SOURCE_DIR to TARGET_DIR
diff --git a/workflow/applications/applications.py b/workflow/applications/applications.py
index 95d7cd9bb3..97a77c2c21 100644
--- a/workflow/applications/applications.py
+++ b/workflow/applications/applications.py
@@ -3,6 +3,7 @@
from typing import Dict, List, Any
from datetime import timedelta
from hosts import Host
+from pathlib import Path
from wxflow import Configuration, to_timedelta
from abc import ABC, ABCMeta, abstractmethod
@@ -31,7 +32,11 @@ def __init__(self, conf: Configuration) -> None:
self.scheduler = Host().scheduler
- _base = conf.parse_config('config.base')
+ # Save the configuration so we can source the config files when
+ # determining task resources
+ self.conf = conf
+
+ _base = self.conf.parse_config('config.base')
# Define here so the child __init__ functions can use it; will
# be overwritten later during _init_finalize().
self._base = _base
@@ -71,39 +76,39 @@ def __init__(self, conf: Configuration) -> None:
self.nens = _base.get('NMEM_ENS', 0)
- self.wave_cdumps = None
+ self.wave_runs = None
if self.do_wave:
- wave_cdump = _base.get('WAVE_CDUMP', 'BOTH').lower()
- if wave_cdump in ['both']:
- self.wave_cdumps = ['gfs', 'gdas']
- elif wave_cdump in ['gfs', 'gdas']:
- self.wave_cdumps = [wave_cdump]
-
- self.aero_anl_cdumps = None
- self.aero_fcst_cdumps = None
+ wave_run = _base.get('WAVE_RUN', 'BOTH').lower()
+ if wave_run in ['both']:
+ self.wave_runs = ['gfs', 'gdas']
+ elif wave_run in ['gfs', 'gdas']:
+ self.wave_runs = [wave_run]
+
+ self.aero_anl_runs = None
+ self.aero_fcst_runs = None
if self.do_aero:
- aero_anl_cdump = _base.get('AERO_ANL_CDUMP', 'BOTH').lower()
- if aero_anl_cdump in ['both']:
- self.aero_anl_cdumps = ['gfs', 'gdas']
- elif aero_anl_cdump in ['gfs', 'gdas']:
- self.aero_anl_cdumps = [aero_anl_cdump]
- aero_fcst_cdump = _base.get('AERO_FCST_CDUMP', None).lower()
- if aero_fcst_cdump in ['both']:
- self.aero_fcst_cdumps = ['gfs', 'gdas']
- elif aero_fcst_cdump in ['gfs', 'gdas']:
- self.aero_fcst_cdumps = [aero_fcst_cdump]
-
- def _init_finalize(self, conf: Configuration):
+ aero_anl_run = _base.get('AERO_ANL_RUN', 'BOTH').lower()
+ if aero_anl_run in ['both']:
+ self.aero_anl_runs = ['gfs', 'gdas']
+ elif aero_anl_run in ['gfs', 'gdas']:
+ self.aero_anl_runs = [aero_anl_run]
+ aero_fcst_run = _base.get('AERO_FCST_RUN', None).lower()
+ if aero_fcst_run in ['both']:
+ self.aero_fcst_runs = ['gfs', 'gdas']
+ elif aero_fcst_run in ['gfs', 'gdas']:
+ self.aero_fcst_runs = [aero_fcst_run]
+
+ def _init_finalize(self, *args):
print("Finalizing initialize")
# Get a list of all possible config_files that would be part of the application
self.configs_names = self._get_app_configs()
# Source the config_files for the jobs in the application
- self.configs = self._source_configs(conf)
+ self.configs = self.source_configs()
# Update the base config dictionary base on application
- self.configs['base'] = self._update_base(self.configs['base'])
+ self.configs['base'] = self.update_base(self.configs['base'])
# Save base in the internal state since it is often needed
self._base = self.configs['base']
@@ -120,7 +125,7 @@ def _get_app_configs(self):
@staticmethod
@abstractmethod
- def _update_base(base_in: Dict[str, Any]) -> Dict[str, Any]:
+ def update_base(base_in: Dict[str, Any]) -> Dict[str, Any]:
'''
Make final updates to base and return an updated copy
@@ -137,9 +142,9 @@ def _update_base(base_in: Dict[str, Any]) -> Dict[str, Any]:
'''
pass
- def _source_configs(self, conf: Configuration) -> Dict[str, Any]:
+ def source_configs(self, run: str = "gfs", log: bool = True) -> Dict[str, Any]:
"""
- Given the configuration object and jobs,
+ Given the configuration object used to initialize this application,
source the configurations for each config and return a dictionary
Every config depends on "config.base"
"""
@@ -147,7 +152,7 @@ def _source_configs(self, conf: Configuration) -> Dict[str, Any]:
configs = dict()
# Return config.base as well
- configs['base'] = conf.parse_config('config.base')
+ configs['base'] = self.conf.parse_config('config.base')
# Source the list of all config_files involved in the application
for config in self.configs_names:
@@ -170,15 +175,15 @@ def _source_configs(self, conf: Configuration) -> Dict[str, Any]:
else:
files += [f'config.{config}']
- print(f'sourcing config.{config}')
- configs[config] = conf.parse_config(files)
+ if log:
+     print(f'sourcing config.{config}')
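+ # Source the configs with RUN defined so RUN-dependent settings
+ # resolve for the requested run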
+ configs[config] = self.conf.parse_config(files, RUN=run)
return configs
@abstractmethod
def get_task_names(self) -> Dict[str, List[str]]:
'''
- Create a list of task names for each CDUMP valid for the configuation.
+ Create a list of task names for each RUN valid for the configuration.
Parameters
----------
@@ -186,7 +191,7 @@ def get_task_names(self) -> Dict[str, List[str]]:
Returns
-------
- Dict[str, List[str]]: Lists of tasks for each CDUMP.
+ Dict[str, List[str]]: Lists of tasks for each RUN.
'''
pass
diff --git a/workflow/applications/gefs.py b/workflow/applications/gefs.py
index bdff2186d0..364ee2c48b 100644
--- a/workflow/applications/gefs.py
+++ b/workflow/applications/gefs.py
@@ -36,11 +36,11 @@ def _get_app_configs(self):
return configs
@staticmethod
- def _update_base(base_in):
+ def update_base(base_in):
base_out = base_in.copy()
base_out['INTERVAL_GFS'] = AppConfig.get_gfs_interval(base_in['gfs_cyc'])
- base_out['CDUMP'] = 'gefs'
+ base_out['RUN'] = 'gefs'
return base_out
@@ -79,4 +79,4 @@ def get_task_names(self):
if self.do_extractvars:
tasks += ['extractvars']
- return {f"{self._base['CDUMP']}": tasks}
+ return {f"{self._base['RUN']}": tasks}
diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py
index 8771729e3a..e049a7d422 100644
--- a/workflow/applications/gfs_cycled.py
+++ b/workflow/applications/gfs_cycled.py
@@ -21,14 +21,14 @@ def __init__(self, conf: Configuration):
self.do_vrfy_oceanda = self._base.get('DO_VRFY_OCEANDA', False)
self.lobsdiag_forenkf = False
- self.eupd_cdumps = None
+ self.eupd_runs = None
if self.do_hybvar:
self.lobsdiag_forenkf = self._base.get('lobsdiag_forenkf', False)
- eupd_cdump = self._base.get('EUPD_CYC', 'gdas').lower()
- if eupd_cdump in ['both']:
- self.eupd_cdumps = ['gfs', 'gdas']
- elif eupd_cdump in ['gfs', 'gdas']:
- self.eupd_cdumps = [eupd_cdump]
+ eupd_run = self._base.get('EUPD_CYC', 'gdas').lower()
+ if eupd_run in ['both']:
+ self.eupd_runs = ['gfs', 'gdas']
+ elif eupd_run in ['gfs', 'gdas']:
+ self.eupd_runs = [eupd_run]
def _get_app_configs(self):
"""
@@ -123,7 +123,7 @@ def _get_app_configs(self):
return configs
@staticmethod
- def _update_base(base_in):
+ def update_base(base_in):
return GFSCycledAppConfig.get_gfs_cyc_dates(base_in)
@@ -175,10 +175,10 @@ def get_task_names(self):
if not self.do_jediatmvar:
gdas_tasks += ['analdiag']
- if self.do_wave and 'gdas' in self.wave_cdumps:
+ if self.do_wave and 'gdas' in self.wave_runs:
gdas_tasks += wave_prep_tasks
- if self.do_aero and 'gdas' in self.aero_anl_cdumps:
+ if self.do_aero and 'gdas' in self.aero_anl_runs:
gdas_tasks += ['aeroanlinit', 'aeroanlrun', 'aeroanlfinal']
if self.do_prep_obs_aero:
gdas_tasks += ['prepobsaero']
@@ -189,7 +189,7 @@ def get_task_names(self):
gdas_tasks += ['atmupp']
gdas_tasks += ['atmos_prod']
- if self.do_wave and 'gdas' in self.wave_cdumps:
+ if self.do_wave and 'gdas' in self.wave_runs:
if self.do_wave_bnd:
gdas_tasks += wave_bndpnt_tasks
gdas_tasks += wave_post_tasks
@@ -214,10 +214,10 @@ def get_task_names(self):
# Collect "gfs" cycle tasks
gfs_tasks = gdas_gfs_common_tasks_before_fcst.copy()
- if self.do_wave and 'gfs' in self.wave_cdumps:
+ if self.do_wave and 'gfs' in self.wave_runs:
gfs_tasks += wave_prep_tasks
- if self.do_aero and 'gfs' in self.aero_anl_cdumps:
+ if self.do_aero and 'gfs' in self.aero_anl_runs:
gfs_tasks += ['aeroanlinit', 'aeroanlrun', 'aeroanlfinal']
if self.do_prep_obs_aero:
gfs_tasks += ['prepobsaero']
@@ -252,7 +252,7 @@ def get_task_names(self):
if self.do_metp:
gfs_tasks += ['metp']
- if self.do_wave and 'gfs' in self.wave_cdumps:
+ if self.do_wave and 'gfs' in self.wave_runs:
if self.do_wave_bnd:
gfs_tasks += wave_bndpnt_tasks
gfs_tasks += wave_post_tasks
@@ -286,15 +286,15 @@ def get_task_names(self):
tasks = dict()
tasks['gdas'] = gdas_tasks
- if self.do_hybvar and 'gdas' in self.eupd_cdumps:
+ if self.do_hybvar and 'gdas' in self.eupd_runs:
enkfgdas_tasks = hybrid_tasks + hybrid_after_eupd_tasks
tasks['enkfgdas'] = enkfgdas_tasks
- # Add CDUMP=gfs tasks if running early cycle
+ # Add RUN=gfs tasks if running early cycle
if self.gfs_cyc > 0:
tasks['gfs'] = gfs_tasks
- if self.do_hybvar and 'gfs' in self.eupd_cdumps:
+ if self.do_hybvar and 'gfs' in self.eupd_runs:
enkfgfs_tasks = hybrid_tasks + hybrid_after_eupd_tasks
enkfgfs_tasks.remove("echgres")
tasks['enkfgfs'] = enkfgfs_tasks
diff --git a/workflow/applications/gfs_forecast_only.py b/workflow/applications/gfs_forecast_only.py
index 938a514896..680588e4ca 100644
--- a/workflow/applications/gfs_forecast_only.py
+++ b/workflow/applications/gfs_forecast_only.py
@@ -70,11 +70,11 @@ def _get_app_configs(self):
return configs
@staticmethod
- def _update_base(base_in):
+ def update_base(base_in):
base_out = base_in.copy()
base_out['INTERVAL_GFS'] = AppConfig.get_gfs_interval(base_in['gfs_cyc'])
- base_out['CDUMP'] = 'gfs'
+ base_out['RUN'] = 'gfs'
return base_out
@@ -88,8 +88,8 @@ def get_task_names(self):
tasks = ['stage_ic']
if self.do_aero:
- aero_fcst_cdump = self._base.get('AERO_FCST_CDUMP', 'BOTH').lower()
- if self._base['CDUMP'] in aero_fcst_cdump or aero_fcst_cdump == "both":
+ aero_fcst_run = self._base.get('AERO_FCST_RUN', 'BOTH').lower()
+ if self._base['RUN'] in aero_fcst_run or aero_fcst_run == "both":
if not self._base['EXP_WARM_START']:
tasks += ['aerosol_init']
@@ -153,4 +153,4 @@ def get_task_names(self):
tasks += ['arch', 'cleanup'] # arch and cleanup **must** be the last tasks
- return {f"{self._base['CDUMP']}": tasks}
+ return {f"{self._base['RUN']}": tasks}
diff --git a/workflow/create_experiment.py b/workflow/create_experiment.py
index c4e30ff79c..1317f7be28 100755
--- a/workflow/create_experiment.py
+++ b/workflow/create_experiment.py
@@ -11,6 +11,14 @@
The yaml files are simply the arguments for these two scripts.
After this script runs, the experiment is ready for launch.
+Environmental variables
+-----------------------
+ pslot
+ Name of the experiment
+
+ RUNTESTS
+ Root directory where the test EXPDIR and COMROOT will be placed
+
Output
------
Functionally an experiment is setup as a result running the two scripts described above
@@ -18,7 +26,6 @@
"""
import os
-import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py
index 24864d3b96..2041853352 100644
--- a/workflow/rocoto/gefs_tasks.py
+++ b/workflow/rocoto/gefs_tasks.py
@@ -6,8 +6,8 @@
class GEFSTasks(Tasks):
- def __init__(self, app_config: AppConfig, cdump: str) -> None:
- super().__init__(app_config, cdump)
+ def __init__(self, app_config: AppConfig, run: str) -> None:
+ super().__init__(app_config, run)
def stage_ic(self):
cpl_ic = self._configs['stage_ic']
@@ -215,13 +215,13 @@ def _atmosoceaniceprod(self, component: str):
fhout_ice_gfs = self._configs['base']['FHOUT_ICE_GFS']
products_dict = {'atmos': {'config': 'atmos_products',
'history_path_tmpl': 'COM_ATMOS_MASTER_TMPL',
- 'history_file_tmpl': f'{self.cdump}.t@Hz.master.grb2f#fhr#'},
+ 'history_file_tmpl': f'{self.run}.t@Hz.master.grb2f#fhr#'},
'ocean': {'config': 'oceanice_products',
'history_path_tmpl': 'COM_OCEAN_HISTORY_TMPL',
- 'history_file_tmpl': f'{self.cdump}.ocean.t@Hz.{fhout_ocn_gfs}hr_avg.f#fhr#.nc'},
+ 'history_file_tmpl': f'{self.run}.ocean.t@Hz.{fhout_ocn_gfs}hr_avg.f#fhr#.nc'},
'ice': {'config': 'oceanice_products',
'history_path_tmpl': 'COM_ICE_HISTORY_TMPL',
- 'history_file_tmpl': f'{self.cdump}.ice.t@Hz.{fhout_ice_gfs}hr_avg.f#fhr#.nc'}}
+ 'history_file_tmpl': f'{self.run}.ice.t@Hz.{fhout_ice_gfs}hr_avg.f#fhr#.nc'}}
component_dict = products_dict[component]
config = component_dict['config']
@@ -423,7 +423,7 @@ def wavepostbndpntbll(self):
# The wavepostbndpntbll job runs on forecast hours up to FHMAX_WAV_IBP
last_fhr = self._configs['wave']['FHMAX_WAV_IBP']
- data = f'{atmos_hist_path}/{self.cdump}.t@Hz.atm.logf{last_fhr:03d}.txt'
+ data = f'{atmos_hist_path}/{self.run}.t@Hz.atm.logf{last_fhr:03d}.txt'
dep_dict = {'type': 'data', 'data': data}
deps.append(rocoto.add_dependency(dep_dict))
diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py
index bdc98a36fb..2808f2bc4d 100644
--- a/workflow/rocoto/gfs_tasks.py
+++ b/workflow/rocoto/gfs_tasks.py
@@ -7,13 +7,13 @@
class GFSTasks(Tasks):
- def __init__(self, app_config: AppConfig, cdump: str) -> None:
- super().__init__(app_config, cdump)
+ def __init__(self, app_config: AppConfig, run: str) -> None:
+ super().__init__(app_config, run)
@staticmethod
- def _is_this_a_gdas_task(cdump, task_name):
- if cdump != 'enkfgdas':
- raise TypeError(f'{task_name} must be part of the "enkfgdas" cycle and not {cdump}')
+ def _is_this_a_gdas_task(run, task_name):
+ if run != 'enkfgdas':
+ raise TypeError(f'{task_name} must be part of the "enkfgdas" cycle and not {run}')
# Specific Tasks begin here
def stage_ic(self):
@@ -71,12 +71,12 @@ def stage_ic(self):
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('stage_ic')
- task_name = f'{self.cdump}stage_ic'
+ task_name = f'{self.run}stage_ic'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump,
+ 'cycledef': self.run,
'command': f'{self.HOMEgfs}/jobs/rocoto/stage_ic.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -96,7 +96,7 @@ def prep(self):
dump_path = self._template_to_rocoto_cycstring(self._base["COM_OBSDMP_TMPL"],
{'DMPDIR': dmpdir, 'DUMP_SUFFIX': dump_suffix})
- gfs_enkf = True if self.app_config.do_hybvar and 'gfs' in self.app_config.eupd_cdumps else False
+ gfs_enkf = True if self.app_config.do_hybvar and 'gfs' in self.app_config.eupd_runs else False
deps = []
dep_dict = {'type': 'metatask', 'name': 'gdasatmos_prod', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
@@ -104,17 +104,17 @@ def prep(self):
data = f'{atm_hist_path}/gdas.t@Hz.atmf009.nc'
dep_dict = {'type': 'data', 'data': data, 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
- data = f'{dump_path}/{self.cdump}.t@Hz.updated.status.tm00.bufr_d'
+ data = f'{dump_path}/{self.run}.t@Hz.updated.status.tm00.bufr_d'
dep_dict = {'type': 'data', 'data': data}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
- cycledef = self.cdump
- if self.cdump in ['gfs'] and gfs_enkf and gfs_cyc != 4:
+ cycledef = self.run
+ if self.run in ['gfs'] and gfs_enkf and gfs_cyc != 4:
cycledef = 'gdas'
resources = self.get_resource('prep')
- task_name = f'{self.cdump}prep'
+ task_name = f'{self.run}prep'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -134,17 +134,17 @@ def waveinit(self):
resources = self.get_resource('waveinit')
dependencies = None
- cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump
+ cycledef = 'gdas_half,gdas' if self.run in ['gdas'] else self.run
if self.app_config.mode in ['cycled']:
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prep'}
deps.append(rocoto.add_dependency(dep_dict))
- if self.cdump in ['gdas']:
+ if self.run in ['gdas']:
dep_dict = {'type': 'cycleexist', 'condition': 'not', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='or', dep=deps)
- task_name = f'{self.cdump}waveinit'
+ task_name = f'{self.run}waveinit'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -163,12 +163,12 @@ def waveinit(self):
def waveprep(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}waveinit'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}waveinit'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump
+ cycledef = 'gdas_half,gdas' if self.run in ['gdas'] else self.run
resources = self.get_resource('waveprep')
- task_name = f'{self.cdump}waveprep'
+ task_name = f'{self.run}waveprep'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -197,11 +197,11 @@ def aerosol_init(self):
dep_dict = {'type': 'data', 'data': data}
deps.append(rocoto.add_dependency(dep_dict))
- # Calculate offset based on CDUMP = gfs | gdas
+ # Calculate offset based on RUN = gfs | gdas
interval = None
- if self.cdump in ['gfs']:
+ if self.run in ['gfs']:
interval = self._base['INTERVAL_GFS']
- elif self.cdump in ['gdas']:
+ elif self.run in ['gdas']:
interval = self._base['INTERVAL']
offset = timedelta_to_HMS(-interval)
@@ -219,7 +219,7 @@ def aerosol_init(self):
cycledef = 'gfs_seq'
resources = self.get_resource('aerosol_init')
- task_name = f'{self.cdump}aerosol_init'
+ task_name = f'{self.run}aerosol_init'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -237,7 +237,7 @@ def aerosol_init(self):
def anal(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prep'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_hybvar:
dep_dict = {'type': 'metatask', 'name': 'enkfgdasepmn', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
@@ -247,12 +247,12 @@ def anal(self):
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('anal')
- task_name = f'{self.cdump}anal'
+ task_name = f'{self.run}anal'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/anal.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -267,24 +267,24 @@ def sfcanl(self):
deps = []
if self.app_config.do_jediatmvar:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlfinal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlfinal'}
else:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}anal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}anal'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_jedisnowda:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}snowanl'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}snowanl'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
else:
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('sfcanl')
- task_name = f'{self.cdump}sfcanl'
+ task_name = f'{self.run}sfcanl'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/sfcanl.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -299,24 +299,24 @@ def analcalc(self):
deps = []
if self.app_config.do_jediatmvar:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlfinal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlfinal'}
else:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}anal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}anal'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}sfcanl'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}sfcanl'}
deps.append(rocoto.add_dependency(dep_dict))
- if self.app_config.do_hybvar and self.cdump in ['gdas']:
+ if self.app_config.do_hybvar and self.run in ['gdas']:
dep_dict = {'type': 'task', 'name': 'enkfgdasechgres', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('analcalc')
- task_name = f'{self.cdump}analcalc'
+ task_name = f'{self.run}analcalc'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/analcalc.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -330,17 +330,17 @@ def analcalc(self):
def analdiag(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}anal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}anal'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('analdiag')
- task_name = f'{self.cdump}analdiag'
+ task_name = f'{self.run}analdiag'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/analdiag.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -354,17 +354,17 @@ def analdiag(self):
def prepatmiodaobs(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('prepatmiodaobs')
- task_name = f'{self.cdump}prepatmiodaobs'
+ task_name = f'{self.run}prepatmiodaobs'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/prepatmiodaobs.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -378,7 +378,7 @@ def prepatmiodaobs(self):
def atmanlinit(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prepatmiodaobs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prepatmiodaobs'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_hybvar:
dep_dict = {'type': 'metatask', 'name': 'enkfgdasepmn', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
@@ -388,14 +388,14 @@ def atmanlinit(self):
dependencies = rocoto.create_dependency(dep=deps)
gfs_cyc = self._base["gfs_cyc"]
- gfs_enkf = True if self.app_config.do_hybvar and 'gfs' in self.app_config.eupd_cdumps else False
+ gfs_enkf = True if self.app_config.do_hybvar and 'gfs' in self.app_config.eupd_runs else False
- cycledef = self.cdump
- if self.cdump in ['gfs'] and gfs_enkf and gfs_cyc != 4:
+ cycledef = self.run
+ if self.run in ['gfs'] and gfs_enkf and gfs_cyc != 4:
cycledef = 'gdas'
resources = self.get_resource('atmanlinit')
- task_name = f'{self.cdump}atmanlinit'
+ task_name = f'{self.run}atmanlinit'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -414,17 +414,17 @@ def atmanlinit(self):
def atmanlvar(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlinit'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlinit'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('atmanlvar')
- task_name = f'{self.cdump}atmanlvar'
+ task_name = f'{self.run}atmanlvar'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/atmanlvar.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -438,17 +438,17 @@ def atmanlvar(self):
def atmanlfv3inc(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlvar'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlvar'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('atmanlfv3inc')
- task_name = f'{self.cdump}atmanlfv3inc'
+ task_name = f'{self.run}atmanlfv3inc'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/atmanlfv3inc.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -462,17 +462,17 @@ def atmanlfv3inc(self):
def atmanlfinal(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlfv3inc'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlfv3inc'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('atmanlfinal')
- task_name = f'{self.cdump}atmanlfinal'
+ task_name = f'{self.run}atmanlfinal'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/atmanlfinal.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -485,17 +485,17 @@ def atmanlfinal(self):
def prepobsaero(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('prepobsaero')
- task_name = f'{self.cdump}prepobsaero'
+ task_name = f'{self.run}prepobsaero'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/prepobsaero.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -509,19 +509,19 @@ def prepobsaero(self):
def aeroanlinit(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prep'}
if self.app_config.do_prep_obs_aero:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prepobsaero'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prepobsaero'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('aeroanlinit')
- task_name = f'{self.cdump}aeroanlinit'
+ task_name = f'{self.run}aeroanlinit'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/aeroanlinit.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -535,17 +535,17 @@ def aeroanlinit(self):
def aeroanlrun(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}aeroanlinit'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}aeroanlinit'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('aeroanlrun')
- task_name = f'{self.cdump}aeroanlrun'
+ task_name = f'{self.run}aeroanlrun'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/aeroanlrun.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -559,17 +559,17 @@ def aeroanlrun(self):
def aeroanlfinal(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}aeroanlrun'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}aeroanlrun'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('aeroanlfinal')
- task_name = f'{self.cdump}aeroanlfinal'
+ task_name = f'{self.run}aeroanlfinal'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/aeroanlfinal.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -583,17 +583,17 @@ def aeroanlfinal(self):
def prepsnowobs(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('prepsnowobs')
- task_name = f'{self.cdump}prepsnowobs'
+ task_name = f'{self.run}prepsnowobs'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/prepsnowobs.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -607,17 +607,17 @@ def prepsnowobs(self):
def snowanl(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prepsnowobs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prepsnowobs'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('snowanl')
- task_name = f'{self.cdump}snowanl'
+ task_name = f'{self.run}snowanl'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/snowanl.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -638,12 +638,12 @@ def prepoceanobs(self):
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('prepoceanobs')
- task_name = f'{self.cdump}prepoceanobs'
+ task_name = f'{self.run}prepoceanobs'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/prepoceanobs.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -665,12 +665,12 @@ def marinebmat(self):
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('marinebmat')
- task_name = f'{self.cdump}marinebmat'
+ task_name = f'{self.run}marinebmat'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/marinebmat.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -684,21 +684,21 @@ def marinebmat(self):
def ocnanalprep(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}prepoceanobs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}prepoceanobs'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}marinebmat'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}marinebmat'}
deps.append(rocoto.add_dependency(dep_dict))
dep_dict = {'type': 'task', 'name': 'gdasfcst', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('ocnanalprep')
- task_name = f'{self.cdump}ocnanalprep'
+ task_name = f'{self.run}ocnanalprep'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalprep.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -712,17 +712,17 @@ def ocnanalprep(self):
def ocnanalrun(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ocnanalprep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalprep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('ocnanalrun')
- task_name = f'{self.cdump}ocnanalrun'
+ task_name = f'{self.run}ocnanalrun'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalrun.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -736,17 +736,17 @@ def ocnanalrun(self):
def ocnanalecen(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ocnanalrun'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalrun'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('ocnanalecen')
- task_name = f'{self.cdump}ocnanalecen'
+ task_name = f'{self.run}ocnanalecen'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalecen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -761,23 +761,23 @@ def ocnanalchkpt(self):
deps = []
if self.app_config.do_hybvar:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ocnanalecen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalecen'}
else:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ocnanalrun'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalrun'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_mergensst:
- data = f'&ROTDIR;/{self.cdump}.@Y@m@d/@H/atmos/{self.cdump}.t@Hz.sfcanl.nc'
+ data = f'&ROTDIR;/{self.run}.@Y@m@d/@H/atmos/{self.run}.t@Hz.sfcanl.nc'
dep_dict = {'type': 'data', 'data': data}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('ocnanalchkpt')
- task_name = f'{self.cdump}ocnanalchkpt'
+ task_name = f'{self.run}ocnanalchkpt'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalchkpt.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
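ocnanalchkpt has the richest dependency assembly in this section: the upstream task is chosen by do_hybvar, and do_mergensst adds a data dependency, all joined under an 'and' condition. A standalone sketch, assuming rocoto.add_dependency returns a dependency fragment and create_dependency combines a list of fragments under the given boolean condition (which is how the calls above use them):

    deps = []
    deps.append(rocoto.add_dependency({'type': 'task', 'name': 'gdasocnanalecen'}))
    data = '&ROTDIR;/gdas.@Y@m@d/@H/atmos/gdas.t@Hz.sfcanl.nc'   # run assumed 'gdas'
    deps.append(rocoto.add_dependency({'type': 'data', 'data': data}))
    dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)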
@@ -791,17 +791,17 @@ def ocnanalchkpt(self):
def ocnanalpost(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ocnanalchkpt'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalchkpt'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('ocnanalpost')
- task_name = f'{self.cdump}ocnanalpost'
+ task_name = f'{self.run}ocnanalpost'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalpost.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -815,17 +815,17 @@ def ocnanalpost(self):
def ocnanalvrfy(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ocnanalpost'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalpost'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('ocnanalvrfy')
- task_name = f'{self.cdump}ocnanalvrfy'
+ task_name = f'{self.run}ocnanalvrfy'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalvrfy.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -853,26 +853,26 @@ def fcst(self):
def _fcst_forecast_only(self):
dependencies = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}stage_ic'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}stage_ic'}
dependencies.append(rocoto.add_dependency(dep_dict))
- if self.app_config.do_wave and self.cdump in self.app_config.wave_cdumps:
+ if self.app_config.do_wave and self.run in self.app_config.wave_runs:
wave_job = 'waveprep' if self.app_config.model_app in ['ATMW'] else 'waveinit'
- dep_dict = {'type': 'task', 'name': f'{self.cdump}{wave_job}'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}{wave_job}'}
dependencies.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_aero and \
- self.cdump in self.app_config.aero_fcst_cdumps and \
+ self.run in self.app_config.aero_fcst_runs and \
not self._base['EXP_WARM_START']:
- # Calculate offset based on CDUMP = gfs | gdas
+ # Calculate offset based on RUN = gfs | gdas
interval = None
- if self.cdump in ['gfs']:
+ if self.run in ['gfs']:
interval = self._base['INTERVAL_GFS']
- elif self.cdump in ['gdas']:
+ elif self.run in ['gdas']:
interval = self._base['INTERVAL']
offset = timedelta_to_HMS(-interval)
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}aerosol_init'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}aerosol_init'}
deps.append(rocoto.add_dependency(dep_dict))
dep_dict = {'type': 'cycleexist', 'condition': 'not', 'offset': offset}
deps.append(rocoto.add_dependency(dep_dict))
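The warm-start aerosol block above derives a negative cycle offset from the run's interval. timedelta_to_HMS is the helper this module already uses (see the ocnanalprep dependency earlier); a rough stand-in showing the string it is expected to produce, with the exact formatting being an assumption:

    from datetime import timedelta

    def timedelta_to_HMS(td: timedelta) -> str:
        # Hypothetical stand-in: render a signed timedelta as [-]HH:MM:SS,
        # the offset syntax used by rocoto cycle dependencies.
        sign = '-' if td < timedelta(0) else ''
        total = abs(int(td.total_seconds()))
        hours, rem = divmod(total, 3600)
        minutes, seconds = divmod(rem, 60)
        return f'{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'

    interval = timedelta(hours=6)         # e.g. INTERVAL for the gdas run
    offset = timedelta_to_HMS(-interval)  # -> '-06:00:00'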
@@ -881,12 +881,12 @@ def _fcst_forecast_only(self):
dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies)
resources = self.get_resource('fcst')
- task_name = f'{self.cdump}fcst'
+ task_name = f'{self.run}fcst'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/fcst.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -899,38 +899,38 @@ def _fcst_forecast_only(self):
def _fcst_cycled(self):
- dep_dict = {'type': 'task', 'name': f'{self.cdump}sfcanl'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}sfcanl'}
dep = rocoto.add_dependency(dep_dict)
dependencies = rocoto.create_dependency(dep=dep)
if self.app_config.do_jediocnvar:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ocnanalpost'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalpost'}
dependencies.append(rocoto.add_dependency(dep_dict))
- if self.app_config.do_aero and self.cdump in self.app_config.aero_anl_cdumps:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}aeroanlfinal'}
+ if self.app_config.do_aero and self.run in self.app_config.aero_anl_runs:
+ dep_dict = {'type': 'task', 'name': f'{self.run}aeroanlfinal'}
dependencies.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_jedisnowda:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}snowanl'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}snowanl'}
dependencies.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies)
- if self.cdump in ['gdas']:
+ if self.run in ['gdas']:
dep_dict = {'type': 'cycleexist', 'condition': 'not', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
dependencies.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='or', dep=dependencies)
- if self.app_config.do_wave and self.cdump in self.app_config.wave_cdumps:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}waveprep'}
+ if self.app_config.do_wave and self.run in self.app_config.wave_runs:
+ dep_dict = {'type': 'task', 'name': f'{self.run}waveprep'}
dependencies.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies)
- cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump
+ cycledef = 'gdas_half,gdas' if self.run in ['gdas'] else self.run
resources = self.get_resource('fcst')
- task_name = f'{self.cdump}fcst'
+ task_name = f'{self.run}fcst'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
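One detail separates the cycled forecast from the other builders: the gdas run is attached to two cycledefs, the half-cycle spin-up plus the regular one, instead of the usual run.replace('enkf', '') value. A sketch with assumed run values:

    run = 'gdas'
    cycledef = 'gdas_half,gdas' if run in ['gdas'] else run  # -> 'gdas_half,gdas'
    # for run == 'gfs' the expression leaves cycledef as 'gfs'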
@@ -955,23 +955,23 @@ def atmanlupp(self):
atm_anl_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_ANALYSIS_TMPL"])
deps = []
- data = f'{atm_anl_path}/{self.cdump}.t@Hz.atmanl.nc'
+ data = f'{atm_anl_path}/{self.run}.t@Hz.atmanl.nc'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
- data = f'{atm_anl_path}/{self.cdump}.t@Hz.sfcanl.nc'
+ data = f'{atm_anl_path}/{self.run}.t@Hz.sfcanl.nc'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
- data = f'{atm_anl_path}/{self.cdump}.t@Hz.loganl.txt'
+ data = f'{atm_anl_path}/{self.run}.t@Hz.loganl.txt'
dep_dict = {'type': 'data', 'data': data, 'age': 60}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps, dep_condition='and')
resources = self.get_resource('upp')
- task_name = f'{self.cdump}atmanlupp'
+ task_name = f'{self.run}atmanlupp'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': postenvars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/upp.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -990,17 +990,17 @@ def atmanlprod(self):
atm_master_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_MASTER_TMPL"])
deps = []
- data = f'{atm_master_path}/{self.cdump}.t@Hz.master.grb2anl'
+ data = f'{atm_master_path}/{self.run}.t@Hz.master.grb2anl'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('atmos_products')
- task_name = f'{self.cdump}atmanlprod'
+ task_name = f'{self.run}atmanlprod'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': postenvars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/atmos_products.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1031,20 +1031,20 @@ def _upptask(self, upp_run="forecast", task_id="atmupp"):
atm_hist_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_HISTORY_TMPL"])
deps = []
- data = f'{atm_hist_path}/{self.cdump}.t@Hz.atmf#fhr#.nc'
+ data = f'{atm_hist_path}/{self.run}.t@Hz.atmf#fhr#.nc'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
- data = f'{atm_hist_path}/{self.cdump}.t@Hz.sfcf#fhr#.nc'
+ data = f'{atm_hist_path}/{self.run}.t@Hz.sfcf#fhr#.nc'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
- data = f'{atm_hist_path}/{self.cdump}.t@Hz.atm.logf#fhr#.txt'
+ data = f'{atm_hist_path}/{self.run}.t@Hz.atm.logf#fhr#.txt'
dep_dict = {'type': 'data', 'data': data, 'age': 60}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps, dep_condition='and')
- cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump
+ cycledef = 'gdas_half,gdas' if self.run in ['gdas'] else self.run
resources = self.get_resource('upp')
- task_name = f'{self.cdump}{task_id}_f#fhr#'
+ task_name = f'{self.run}{task_id}_f#fhr#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -1056,10 +1056,10 @@ def _upptask(self, upp_run="forecast", task_id="atmupp"):
'maxtries': '&MAXTRIES;'
}
- fhrs = self._get_forecast_hours(self.cdump, self._configs['upp'])
+ fhrs = self._get_forecast_hours(self.run, self._configs['upp'])
fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])}
- metatask_dict = {'task_name': f'{self.cdump}{task_id}',
+ metatask_dict = {'task_name': f'{self.run}{task_id}',
'task_dict': task_dict,
'var_dict': fhr_var_dict
}
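_upptask shows the metatask idiom used throughout the post-processing builders: the task name carries a #fhr# placeholder and the metatask's var_dict supplies the values rocoto substitutes. A sketch of the expansion with assumed forecast hours:

    fhrs = [0, 3, 6]
    fhr_var_dict = {'fhr': ' '.join([f'{fhr:03d}' for fhr in fhrs])}
    # fhr_var_dict == {'fhr': '000 003 006'}, so a task_name template such as
    # 'gfsatmupp_f#fhr#' expands to gfsatmupp_f000, gfsatmupp_f003, gfsatmupp_f006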
@@ -1081,13 +1081,13 @@ def _atmosoceaniceprod(self, component: str):
products_dict = {'atmos': {'config': 'atmos_products',
'history_path_tmpl': 'COM_ATMOS_MASTER_TMPL',
- 'history_file_tmpl': f'{self.cdump}.t@Hz.master.grb2f#fhr#'},
+ 'history_file_tmpl': f'{self.run}.t@Hz.master.grb2f#fhr#'},
'ocean': {'config': 'oceanice_products',
'history_path_tmpl': 'COM_OCEAN_HISTORY_TMPL',
- 'history_file_tmpl': f'{self.cdump}.ocean.t@Hz.6hr_avg.f#fhr#.nc'},
+ 'history_file_tmpl': f'{self.run}.ocean.t@Hz.6hr_avg.f#fhr#.nc'},
'ice': {'config': 'oceanice_products',
'history_path_tmpl': 'COM_ICE_HISTORY_TMPL',
- 'history_file_tmpl': f'{self.cdump}.ice.t@Hz.6hr_avg.f#fhr#.nc'}}
+ 'history_file_tmpl': f'{self.run}.ice.t@Hz.6hr_avg.f#fhr#.nc'}}
component_dict = products_dict[component]
config = component_dict['config']
@@ -1112,10 +1112,10 @@ def _atmosoceaniceprod(self, component: str):
else:
dependencies = rocoto.create_dependency(dep=deps)
- cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump
+ cycledef = 'gdas_half,gdas' if self.run in ['gdas'] else self.run
resources = self.get_resource(component_dict['config'])
- task_name = f'{self.cdump}{component}_prod_f#fhr#'
+ task_name = f'{self.run}{component}_prod_f#fhr#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -1127,14 +1127,14 @@ def _atmosoceaniceprod(self, component: str):
'maxtries': '&MAXTRIES;'
}
- fhrs = self._get_forecast_hours(self.cdump, self._configs[config], component)
+ fhrs = self._get_forecast_hours(self.run, self._configs[config], component)
# ocean/ice components do not have fhr 0 as they are averaged output
if component in ['ocean', 'ice'] and 0 in fhrs:
fhrs.remove(0)
fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])}
- metatask_dict = {'task_name': f'{self.cdump}{component}_prod',
+ metatask_dict = {'task_name': f'{self.run}{component}_prod',
'task_dict': task_dict,
'var_dict': fhr_var_dict
}
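The fhr-0 filter above follows from the 6hr_avg file templates in products_dict: ocean and ice histories are period averages, so there is no f000 product to schedule. With assumed hours:

    fhrs = [0, 6, 12]    # assumed result of _get_forecast_hours
    if component in ['ocean', 'ice'] and 0 in fhrs:
        fhrs.remove(0)   # -> [6, 12]; the atmos component keeps f000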
@@ -1147,18 +1147,18 @@ def wavepostsbs(self):
deps = []
for wave_grid in self._configs['wavepostsbs']['waveGRD'].split():
wave_hist_path = self._template_to_rocoto_cycstring(self._base["COM_WAVE_HISTORY_TMPL"])
- data = f'{wave_hist_path}/{self.cdump}wave.out_grd.{wave_grid}.@Y@m@d.@H0000'
+ data = f'{wave_hist_path}/{self.run}wave.out_grd.{wave_grid}.@Y@m@d.@H0000'
dep_dict = {'type': 'data', 'data': data}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('wavepostsbs')
- task_name = f'{self.cdump}wavepostsbs'
+ task_name = f'{self.run}wavepostsbs'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostsbs.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1171,17 +1171,17 @@ def wavepostsbs(self):
def wavepostbndpnt(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('wavepostbndpnt')
- task_name = f'{self.cdump}wavepostbndpnt'
+ task_name = f'{self.run}wavepostbndpnt'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostbndpnt.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1199,18 +1199,18 @@ def wavepostbndpntbll(self):
deps = []
atmos_hist_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_HISTORY_TMPL"])
- data = f'{atmos_hist_path}/{self.cdump}.t@Hz.atm.logf{last_fhr:03d}.txt'
+ data = f'{atmos_hist_path}/{self.run}.t@Hz.atm.logf{last_fhr:03d}.txt'
dep_dict = {'type': 'data', 'data': data}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('wavepostbndpntbll')
- task_name = f'{self.cdump}wavepostbndpntbll'
+ task_name = f'{self.run}wavepostbndpntbll'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostbndpntbll.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1223,20 +1223,20 @@ def wavepostbndpntbll(self):
def wavepostpnt(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}fcst'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_wave_bnd:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostbndpntbll'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostbndpntbll'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('wavepostpnt')
- task_name = f'{self.cdump}wavepostpnt'
+ task_name = f'{self.run}wavepostpnt'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostpnt.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1249,17 +1249,17 @@ def wavepostpnt(self):
def wavegempak(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostsbs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostsbs'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('wavegempak')
- task_name = f'{self.cdump}wavegempak'
+ task_name = f'{self.run}wavegempak'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/wavegempak.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1272,19 +1272,19 @@ def wavegempak(self):
def waveawipsbulls(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostsbs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostsbs'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostpnt'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostpnt'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('waveawipsbulls')
- task_name = f'{self.cdump}waveawipsbulls'
+ task_name = f'{self.run}waveawipsbulls'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/waveawipsbulls.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1297,17 +1297,17 @@ def waveawipsbulls(self):
def waveawipsgridded(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostsbs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostsbs'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('waveawipsgridded')
- task_name = f'{self.cdump}waveawipsgridded'
+ task_name = f'{self.run}waveawipsgridded'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/waveawipsgridded.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1320,17 +1320,17 @@ def waveawipsgridded(self):
def postsnd(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('postsnd')
- task_name = f'{self.cdump}postsnd'
+ task_name = f'{self.run}postsnd'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/postsnd.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1343,15 +1343,15 @@ def postsnd(self):
def fbwind(self):
- atmos_prod_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_GRIB_GRID_TMPL"], {'RUN': self.cdump, 'GRID': '0p25'})
+ atmos_prod_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_GRIB_GRID_TMPL"], {'RUN': self.run, 'GRID': '0p25'})
deps = []
- data = f'{atmos_prod_path}/{self.cdump}.t@Hz.pgrb2.0p25.f006'
+ data = f'{atmos_prod_path}/{self.run}.t@Hz.pgrb2.0p25.f006'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
- data = f'{atmos_prod_path}/{self.cdump}.t@Hz.pgrb2.0p25.f012'
+ data = f'{atmos_prod_path}/{self.run}.t@Hz.pgrb2.0p25.f012'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
- data = f'{atmos_prod_path}/{self.cdump}.t@Hz.pgrb2.0p25.f024'
+ data = f'{atmos_prod_path}/{self.run}.t@Hz.pgrb2.0p25.f024'
dep_dict = {'type': 'data', 'data': data, 'age': 120}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps, dep_condition='and')
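fbwind keys off three pgrb2 files with an age requirement rather than off the upstream tasks; the comment in the next hunk explains the intent, which is to avoid starting prematurely on partial files. A sketch of one such data dependency, with the run assumed to be gfs; the age semantics (file unmodified for roughly that many seconds before the dependency is satisfied) are an assumption about how rocoto interprets the attribute:

    data = f'{atmos_prod_path}/gfs.t@Hz.pgrb2.0p25.f006'
    dep_dict = {'type': 'data', 'data': data, 'age': 120}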
@@ -1362,12 +1362,12 @@ def fbwind(self):
# prematurely starting with partial files. Unfortunately, the
# ability to "group" post would make this more convoluted than
# it should be and not worth the complexity.
- task_name = f'{self.cdump}fbwind'
+ task_name = f'{self.run}fbwind'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/fbwind.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1379,7 +1379,7 @@ def fbwind(self):
return task
@staticmethod
- def _get_awipsgroups(cdump, config):
+ def _get_awipsgroups(run, config):
fhmin = config['FHMIN']
fhmax = config['FHMAX']
@@ -1387,9 +1387,9 @@ def _get_awipsgroups(cdump, config):
# Get a list of all forecast hours
fhrs = []
- if cdump in ['gdas']:
+ if run in ['gdas']:
fhrs = range(fhmin, fhmax + fhout, fhout)
- elif cdump in ['gfs']:
+ elif run in ['gfs']:
fhmax = config['FHMAX_GFS']
fhout = config['FHOUT_GFS']
fhmax_hf = config['FHMAX_HF_GFS']
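_get_awipsgroups is the one spot where the run string is a plain function argument rather than an attribute, hence the signature rename above. Of the two branches, gdas builds a single fixed-interval list, while the gfs branch switches to the *_GFS config keys, presumably splicing a high-frequency segment onto a coarser tail. A sketch under assumed values (the gfs construction past this hunk is not shown, so treat it as illustrative):

    fhmin, fhout, fhmax = 0, 3, 384
    fhout_hf, fhmax_hf = 1, 120
    gdas_fhrs = list(range(fhmin, fhmax + fhout, fhout))       # 0, 3, ..., 384
    gfs_fhrs = (list(range(fhmin, fhmax_hf, fhout_hf))
                + list(range(fhmax_hf, fhmax + fhout, fhout))) # hourly to 119, then 3-hourly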
@@ -1417,7 +1417,7 @@ def _get_awipsgroups(cdump, config):
def awips_20km_1p0deg(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -1429,24 +1429,24 @@ def awips_20km_1p0deg(self):
awipsenvars.append(rocoto.create_envar(name=key, value=str(value)))
varname1, varname2, varname3 = 'grp', 'dep', 'lst'
- varval1, varval2, varval3 = self._get_awipsgroups(self.cdump, self._configs['awips'])
+ varval1, varval2, varval3 = self._get_awipsgroups(self.run, self._configs['awips'])
var_dict = {varname1: varval1, varname2: varval2, varname3: varval3}
resources = self.get_resource('awips')
- task_name = f'{self.cdump}awips_20km_1p0deg#{varname1}#'
+ task_name = f'{self.run}awips_20km_1p0deg#{varname1}#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': awipsenvars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/awips_20km_1p0deg.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': f'{self.cdump}awips_20km_1p0deg',
+ metatask_dict = {'task_name': f'{self.run}awips_20km_1p0deg',
'task_dict': task_dict,
'var_dict': var_dict
}
@@ -1458,7 +1458,7 @@ def awips_20km_1p0deg(self):
def gempak(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmos_prod_f#fhr#'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmos_prod_f#fhr#'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -1468,22 +1468,22 @@ def gempak(self):
gempak_vars.append(rocoto.create_envar(name=key, value=str(value)))
resources = self.get_resource('gempak')
- task_name = f'{self.cdump}gempak_f#fhr#'
+ task_name = f'{self.run}gempak_f#fhr#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': gempak_vars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/gempak.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
'maxtries': '&MAXTRIES;'
}
- fhrs = self._get_forecast_hours(self.cdump, self._configs['gempak'])
+ fhrs = self._get_forecast_hours(self.run, self._configs['gempak'])
fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])}
- fhr_metatask_dict = {'task_name': f'{self.cdump}gempak',
+ fhr_metatask_dict = {'task_name': f'{self.run}gempak',
'task_dict': task_dict,
'var_dict': fhr_var_dict}
@@ -1493,17 +1493,17 @@ def gempak(self):
def gempakmeta(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempak'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}gempak'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('gempak')
- task_name = f'{self.cdump}gempakmeta'
+ task_name = f'{self.run}gempakmeta'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/gempakmeta.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1516,17 +1516,17 @@ def gempakmeta(self):
def gempakmetancdc(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempak'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}gempak'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('gempak')
- task_name = f'{self.cdump}gempakmetancdc'
+ task_name = f'{self.run}gempakmetancdc'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/gempakmetancdc.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1539,17 +1539,17 @@ def gempakmetancdc(self):
def gempakncdcupapgif(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempak'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}gempak'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('gempak')
- task_name = f'{self.cdump}gempakncdcupapgif'
+ task_name = f'{self.run}gempakncdcupapgif'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/gempakncdcupapgif.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1562,7 +1562,7 @@ def gempakncdcupapgif(self):
def gempakpgrb2spec(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}npoess_pgrb2_0p5deg'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}npoess_pgrb2_0p5deg'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -1572,12 +1572,12 @@ def gempakpgrb2spec(self):
gempak_vars.append(rocoto.create_envar(name=key, value=str(value)))
resources = self.get_resource('gempak')
- task_name = f'{self.cdump}gempakgrb2spec_f#fhr#'
+ task_name = f'{self.run}gempakgrb2spec_f#fhr#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': gempak_vars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/gempakgrb2spec.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1593,10 +1593,10 @@ def gempakpgrb2spec(self):
}
local_config.update(goes_times)
- fhrs = self._get_forecast_hours(self.cdump, local_config)
+ fhrs = self._get_forecast_hours(self.run, local_config)
fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])}
- fhr_metatask_dict = {'task_name': f'{self.cdump}gempakgrb2spec',
+ fhr_metatask_dict = {'task_name': f'{self.run}gempakgrb2spec',
'task_dict': task_dict,
'var_dict': fhr_var_dict}
@@ -1607,19 +1607,19 @@ def gempakpgrb2spec(self):
def npoess_pgrb2_0p5deg(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlprod'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlprod'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}goesupp'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}goesupp'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps, dep_condition='and')
resources = self.get_resource('npoess')
- task_name = f'{self.cdump}npoess_pgrb2_0p5deg'
+ task_name = f'{self.run}npoess_pgrb2_0p5deg'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/npoess.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1632,17 +1632,17 @@ def npoess_pgrb2_0p5deg(self):
def verfozn(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}analdiag'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}analdiag'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('verfozn')
- task_name = f'{self.cdump}verfozn'
+ task_name = f'{self.run}verfozn'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/verfozn.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1655,17 +1655,17 @@ def verfozn(self):
def verfrad(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}analdiag'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}analdiag'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('verfrad')
- task_name = f'{self.cdump}verfrad'
+ task_name = f'{self.run}verfrad'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/verfrad.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1678,17 +1678,17 @@ def verfrad(self):
def vminmon(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}anal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}anal'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('vminmon')
- task_name = f'{self.cdump}vminmon'
+ task_name = f'{self.run}vminmon'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/vminmon.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1701,17 +1701,17 @@ def vminmon(self):
def tracker(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('tracker')
- task_name = f'{self.cdump}tracker'
+ task_name = f'{self.run}tracker'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/tracker.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1724,17 +1724,17 @@ def tracker(self):
def genesis(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('genesis')
- task_name = f'{self.cdump}genesis'
+ task_name = f'{self.run}genesis'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/genesis.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1747,17 +1747,17 @@ def genesis(self):
def genesis_fsu(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('genesis_fsu')
- task_name = f'{self.cdump}genesis_fsu'
+ task_name = f'{self.run}genesis_fsu'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/genesis_fsu.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1770,17 +1770,17 @@ def genesis_fsu(self):
def fit2obs(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('fit2obs')
- task_name = f'{self.cdump}fit2obs'
+ task_name = f'{self.run}fit2obs'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/fit2obs.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1793,7 +1793,7 @@ def fit2obs(self):
def metp(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}arch'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}arch'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
@@ -1812,19 +1812,19 @@ def metp(self):
resources = self.get_resource('metp')
- task_name = f'{self.cdump}metp#{varname1}#'
+ task_name = f'{self.run}metp#{varname1}#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': metpenvars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/metp.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': f'{self.cdump}metp',
+ metatask_dict = {'task_name': f'{self.run}metp',
'task_dict': task_dict,
'var_dict': var_dict
}
@@ -1835,17 +1835,17 @@ def metp(self):
def mos_stn_prep(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('mos_stn_prep')
- task_name = f'{self.cdump}mos_stn_prep'
+ task_name = f'{self.run}mos_stn_prep'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_stn_prep.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1858,17 +1858,17 @@ def mos_stn_prep(self):
def mos_grd_prep(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('mos_grd_prep')
- task_name = f'{self.cdump}mos_grd_prep'
+ task_name = f'{self.run}mos_grd_prep'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_grd_prep.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1881,17 +1881,17 @@ def mos_grd_prep(self):
def mos_ext_stn_prep(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('mos_ext_stn_prep')
- task_name = f'{self.cdump}mos_ext_stn_prep'
+ task_name = f'{self.run}mos_ext_stn_prep'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_stn_prep.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1904,17 +1904,17 @@ def mos_ext_stn_prep(self):
def mos_ext_grd_prep(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('mos_ext_grd_prep')
- task_name = f'{self.cdump}mos_ext_grd_prep'
+ task_name = f'{self.run}mos_ext_grd_prep'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_grd_prep.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1927,17 +1927,17 @@ def mos_ext_grd_prep(self):
def mos_stn_fcst(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_stn_prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_stn_prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('mos_stn_fcst')
- task_name = f'{self.cdump}mos_stn_fcst'
+ task_name = f'{self.run}mos_stn_fcst'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_stn_fcst.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1950,20 +1950,20 @@ def mos_stn_fcst(self):
def mos_grd_fcst(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_stn_prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_stn_prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_grd_prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_grd_prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('mos_grd_fcst')
- task_name = f'{self.cdump}mos_grd_fcst'
+ task_name = f'{self.run}mos_grd_fcst'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_grd_fcst.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -1976,20 +1976,20 @@ def mos_grd_fcst(self):
def mos_ext_stn_fcst(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_ext_stn_prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_ext_stn_prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_stn_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_stn_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('mos_ext_stn_fcst')
- task_name = f'{self.cdump}mos_ext_stn_fcst'
+ task_name = f'{self.run}mos_ext_stn_fcst'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_stn_fcst.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2002,23 +2002,23 @@ def mos_ext_stn_fcst(self):
def mos_ext_grd_fcst(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_ext_stn_prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_ext_stn_prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_ext_grd_prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_ext_grd_prep'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_grd_fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_grd_fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('mos_ext_grd_fcst')
- task_name = f'{self.cdump}mos_ext_grd_fcst'
+ task_name = f'{self.run}mos_ext_grd_fcst'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_grd_fcst.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2031,17 +2031,17 @@ def mos_ext_grd_fcst(self):
def mos_stn_prdgen(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_stn_fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_stn_fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('mos_stn_prdgen')
- task_name = f'{self.cdump}mos_stn_prdgen'
+ task_name = f'{self.run}mos_stn_prdgen'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_stn_prdgen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2054,20 +2054,20 @@ def mos_stn_prdgen(self):
def mos_grd_prdgen(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_grd_fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_grd_fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_stn_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_stn_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('mos_grd_prdgen')
- task_name = f'{self.cdump}mos_grd_prdgen'
+ task_name = f'{self.run}mos_grd_prdgen'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_grd_prdgen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2080,20 +2080,20 @@ def mos_grd_prdgen(self):
def mos_ext_stn_prdgen(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_ext_stn_fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_ext_stn_fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_stn_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_stn_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('mos_ext_stn_prdgen')
- task_name = f'{self.cdump}mos_ext_stn_prdgen'
+ task_name = f'{self.run}mos_ext_stn_prdgen'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_stn_prdgen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2106,23 +2106,23 @@ def mos_ext_stn_prdgen(self):
def mos_ext_grd_prdgen(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_ext_grd_fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_ext_grd_fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_grd_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_grd_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_ext_stn_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_ext_stn_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('mos_ext_grd_prdgen')
- task_name = f'{self.cdump}mos_ext_grd_prdgen'
+ task_name = f'{self.run}mos_ext_grd_prdgen'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_grd_prdgen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2135,17 +2135,17 @@ def mos_ext_grd_prdgen(self):
def mos_wx_prdgen(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_grd_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_grd_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('mos_wx_prdgen')
- task_name = f'{self.cdump}mos_wx_prdgen'
+ task_name = f'{self.run}mos_wx_prdgen'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_wx_prdgen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2158,20 +2158,20 @@ def mos_wx_prdgen(self):
def mos_wx_ext_prdgen(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_ext_grd_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_ext_grd_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_wx_prdgen'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_wx_prdgen'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('mos_wx_ext_prdgen')
- task_name = f'{self.cdump}mos_wx_ext_prdgen'
+ task_name = f'{self.run}mos_wx_ext_prdgen'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/mos_wx_ext_prdgen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2185,74 +2185,74 @@ def mos_wx_ext_prdgen(self):
def arch(self):
deps = []
if self.app_config.mode in ['cycled']:
- if self.cdump in ['gfs']:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlprod'}
+ if self.run in ['gfs']:
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlprod'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_vminmon:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}vminmon'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}vminmon'}
deps.append(rocoto.add_dependency(dep_dict))
- elif self.cdump in ['gdas']:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlprod'}
+ elif self.run in ['gdas']:
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmanlprod'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_fit2obs:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}fit2obs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}fit2obs'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_verfozn:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}verfozn'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}verfozn'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_verfrad:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}verfrad'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}verfrad'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_vminmon:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}vminmon'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}vminmon'}
deps.append(rocoto.add_dependency(dep_dict))
- if self.cdump in ['gfs'] and self.app_config.do_tracker:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}tracker'}
+ if self.run in ['gfs'] and self.app_config.do_tracker:
+ dep_dict = {'type': 'task', 'name': f'{self.run}tracker'}
deps.append(rocoto.add_dependency(dep_dict))
- if self.cdump in ['gfs'] and self.app_config.do_genesis:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}genesis'}
+ if self.run in ['gfs'] and self.app_config.do_genesis:
+ dep_dict = {'type': 'task', 'name': f'{self.run}genesis'}
deps.append(rocoto.add_dependency(dep_dict))
- if self.cdump in ['gfs'] and self.app_config.do_genesis_fsu:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}genesis_fsu'}
+ if self.run in ['gfs'] and self.app_config.do_genesis_fsu:
+ dep_dict = {'type': 'task', 'name': f'{self.run}genesis_fsu'}
deps.append(rocoto.add_dependency(dep_dict))
# Post job dependencies
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}atmos_prod'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_wave:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostsbs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostsbs'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostpnt'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostpnt'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_wave_bnd:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostbndpnt'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}wavepostbndpnt'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_ocean:
- if self.cdump in ['gfs']:
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}ocean_prod'}
+ if self.run in ['gfs']:
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}ocean_prod'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_ice:
- if self.cdump in ['gfs']:
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}ice_prod'}
+ if self.run in ['gfs']:
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}ice_prod'}
deps.append(rocoto.add_dependency(dep_dict))
# MOS job dependencies
- if self.cdump in ['gfs'] and self.app_config.do_mos:
+ if self.run in ['gfs'] and self.app_config.do_mos:
mos_jobs = ["stn_prep", "grd_prep", "ext_stn_prep", "ext_grd_prep",
"stn_fcst", "grd_fcst", "ext_stn_fcst", "ext_grd_fcst",
"stn_prdgen", "grd_prdgen", "ext_stn_prdgen", "ext_grd_prdgen",
"wx_prdgen", "wx_ext_prdgen"]
for job in mos_jobs:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}mos_{job}'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}mos_{job}'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('arch')
- task_name = f'{self.cdump}arch'
+ task_name = f'{self.run}arch'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/arch.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
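The rename above is mechanical, but it preserves a naming contract worth stating: a job's Rocoto task name is its RUN prefixed onto the job name, and its cycledef is that RUN with any 'enkf' prefix stripped. A standalone sketch of just that string logic (whether a given RUN actually receives an arch task is decided elsewhere in the workflow):

    # How task names and cycledefs fall out of RUN in the builders above
    for run in ('gdas', 'gfs', 'enkfgdas', 'enkfgfs'):
        task_name = f'{run}arch'
        cycledef = run.replace('enkf', '')
        print(task_name, '->', cycledef)
    # gdasarch -> gdas
    # gfsarch -> gfs
    # enkfgdasarch -> gdas
    # enkfgfsarch -> gfs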
@@ -2266,37 +2266,37 @@ def arch(self):
# Cleanup
def cleanup(self):
deps = []
- if 'enkf' in self.cdump:
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}eamn'}
+ if 'enkf' in self.run:
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}eamn'}
deps.append(rocoto.add_dependency(dep_dict))
else:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}arch'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}arch'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_gempak:
- if self.cdump in ['gdas']:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}gempakmetancdc'}
+ if self.run in ['gdas']:
+ dep_dict = {'type': 'task', 'name': f'{self.run}gempakmetancdc'}
deps.append(rocoto.add_dependency(dep_dict))
- elif self.cdump in ['gfs']:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}gempakmeta'}
+ elif self.run in ['gfs']:
+ dep_dict = {'type': 'task', 'name': f'{self.run}gempakmeta'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}gempakncdcupapgif'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}gempakncdcupapgif'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_goes:
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempakgrb2spec'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}gempakgrb2spec'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}npoess_pgrb2_0p5deg'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}npoess_pgrb2_0p5deg'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('cleanup')
- task_name = f'{self.cdump}cleanup'
+ task_name = f'{self.run}cleanup'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/cleanup.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2310,19 +2310,19 @@ def cleanup(self):
# Start of ensemble tasks
def eobs(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump.replace("enkf","")}prep'}
+ dep_dict = {'type': 'task', 'name': f'{self.run.replace("enkf","")}prep'}
deps.append(rocoto.add_dependency(dep_dict))
dep_dict = {'type': 'metatask', 'name': 'enkfgdasepmn', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('eobs')
- task_name = f'{self.cdump}eobs'
+ task_name = f'{self.run}eobs'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/eobs.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2335,7 +2335,7 @@ def eobs(self):
def eomg(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}eobs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}eobs'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -2347,12 +2347,12 @@ def eomg(self):
eomgenvars.append(rocoto.create_envar(name=key, value=str(value)))
resources = self.get_resource('eomg')
- task_name = f'{self.cdump}eomg_mem#member#'
+ task_name = f'{self.run}eomg_mem#member#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': eomgenvars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/eomg.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2360,7 +2360,7 @@ def eomg(self):
}
member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(1, self.nmem + 1)])}
- metatask_dict = {'task_name': f'{self.cdump}eomg',
+ metatask_dict = {'task_name': f'{self.run}eomg',
'var_dict': member_var_dict,
'task_dict': task_dict,
}
@@ -2371,17 +2371,17 @@ def eomg(self):
def ediag(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}eobs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}eobs'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('ediag')
- task_name = f'{self.cdump}ediag'
+ task_name = f'{self.run}ediag'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ediag.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2395,19 +2395,19 @@ def ediag(self):
def eupd(self):
deps = []
if self.app_config.lobsdiag_forenkf:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}ediag'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}ediag'}
else:
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}eomg'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}eomg'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('eupd')
- task_name = f'{self.cdump}eupd'
+ task_name = f'{self.run}eupd'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/eupd.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2420,7 +2420,7 @@ def eupd(self):
def atmensanlinit(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump.replace("enkf","")}prepatmiodaobs'}
+ dep_dict = {'type': 'task', 'name': f'{self.run.replace("enkf","")}prepatmiodaobs'}
deps.append(rocoto.add_dependency(dep_dict))
dep_dict = {'type': 'metatask', 'name': 'enkfgdasepmn', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
@@ -2428,7 +2428,7 @@ def atmensanlinit(self):
cycledef = "gdas"
resources = self.get_resource('atmensanlinit')
- task_name = f'{self.cdump}atmensanlinit'
+ task_name = f'{self.run}atmensanlinit'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -2447,19 +2447,19 @@ def atmensanlinit(self):
def atmensanlletkf(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmensanlinit'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmensanlinit'}
deps.append(rocoto.add_dependency(dep_dict))
dep_dict = {'type': 'metatask', 'name': 'enkfgdasepmn', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('atmensanlletkf')
- task_name = f'{self.cdump}atmensanlletkf'
+ task_name = f'{self.run}atmensanlletkf'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/atmensanlletkf.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2473,19 +2473,19 @@ def atmensanlletkf(self):
def atmensanlfv3inc(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmensanlletkf'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmensanlletkf'}
deps.append(rocoto.add_dependency(dep_dict))
dep_dict = {'type': 'metatask', 'name': 'enkfgdasepmn', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('atmensanlfv3inc')
- task_name = f'{self.cdump}atmensanlfv3inc'
+ task_name = f'{self.run}atmensanlfv3inc'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/atmensanlfv3inc.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2499,17 +2499,17 @@ def atmensanlfv3inc(self):
def atmensanlfinal(self):
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmensanlfv3inc'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmensanlfv3inc'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
resources = self.get_resource('atmensanlfinal')
- task_name = f'{self.cdump}atmensanlfinal'
+ task_name = f'{self.run}atmensanlfinal'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/atmensanlfinal.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2546,12 +2546,12 @@ def _get_ecengroups():
return grp, dep, lst
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump.replace("enkf","")}analcalc'}
+ dep_dict = {'type': 'task', 'name': f'{self.run.replace("enkf","")}analcalc'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_jediatmens:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmensanlfinal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmensanlfinal'}
else:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}eupd'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}eupd'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
@@ -2567,19 +2567,19 @@ def _get_ecengroups():
resources = self.get_resource('ecen')
- task_name = f'{self.cdump}ecen#{varname1}#'
+ task_name = f'{self.run}ecen#{varname1}#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': ecenenvars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/ecen.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': f'{self.cdump}ecmn',
+ metatask_dict = {'task_name': f'{self.run}ecmn',
'var_dict': var_dict,
'task_dict': task_dict
}
@@ -2589,25 +2589,25 @@ def _get_ecengroups():
def esfc(self):
- # eupd_cdump = 'gdas' if 'gdas' in self.app_config.eupd_cdumps else 'gfs'
+ # eupd_run = 'gdas' if 'gdas' in self.app_config.eupd_runs else 'gfs'
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump.replace("enkf","")}analcalc'}
+ dep_dict = {'type': 'task', 'name': f'{self.run.replace("enkf","")}analcalc'}
deps.append(rocoto.add_dependency(dep_dict))
if self.app_config.do_jediatmens:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}atmensanlfinal'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}atmensanlfinal'}
else:
- dep_dict = {'type': 'task', 'name': f'{self.cdump}eupd'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}eupd'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
resources = self.get_resource('esfc')
- task_name = f'{self.cdump}esfc'
+ task_name = f'{self.run}esfc'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': self.envars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/esfc.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
@@ -2621,9 +2621,9 @@ def esfc(self):
def efcs(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}ecmn'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}ecmn'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}esfc'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}esfc'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
dep_dict = {'type': 'cycleexist', 'condition': 'not', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
@@ -2637,10 +2637,10 @@ def efcs(self):
for key, value in efcsenvars_dict.items():
efcsenvars.append(rocoto.create_envar(name=key, value=str(value)))
- cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump.replace('enkf', '')
+ cycledef = 'gdas_half,gdas' if self.run in ['enkfgdas'] else self.run.replace('enkf', '')
resources = self.get_resource('efcs')
- task_name = f'{self.cdump}fcst_mem#member#'
+ task_name = f'{self.run}fcst_mem#member#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -2653,7 +2653,7 @@ def efcs(self):
}
member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(1, self.nmem + 1)])}
- metatask_dict = {'task_name': f'{self.cdump}fcst',
+ metatask_dict = {'task_name': f'{self.run}fcst',
'var_dict': member_var_dict,
'task_dict': task_dict
}
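The efcs and epos builders special-case enkfgdas, which presumably must also run in the first half cycle to seed ensemble backgrounds, hence the extra 'gdas_half' cycledef. A minimal sketch of the selection expression used above:

    def efcs_cycledef(run: str) -> str:
        # same expression as the efcs/epos cycledef lines above
        return 'gdas_half,gdas' if run in ['enkfgdas'] else run.replace('enkf', '')

    assert efcs_cycledef('enkfgdas') == 'gdas_half,gdas'
    assert efcs_cycledef('enkfgfs') == 'gfs'
    assert efcs_cycledef('gfs') == 'gfs'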
@@ -2664,19 +2664,19 @@ def efcs(self):
def echgres(self):
- self._is_this_a_gdas_task(self.cdump, 'echgres')
+ self._is_this_a_gdas_task(self.run, 'echgres')
deps = []
- dep_dict = {'type': 'task', 'name': f'{self.cdump.replace("enkf","")}fcst'}
+ dep_dict = {'type': 'task', 'name': f'{self.run.replace("enkf","")}fcst'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst_mem001'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}fcst_mem001'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
- cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump
+ cycledef = 'gdas_half,gdas' if self.run in ['enkfgdas'] else self.run
resources = self.get_resource('echgres')
- task_name = f'{self.cdump}echgres'
+ task_name = f'{self.run}echgres'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -2698,7 +2698,7 @@ def _get_eposgroups(epos):
fhmin = epos['FHMIN_ENKF']
fhmax = epos['FHMAX_ENKF']
fhout = epos['FHOUT_ENKF']
- if self.cdump == "enkfgfs":
+ if self.run == "enkfgfs":
fhmax = epos['FHMAX_ENKF_GFS']
fhout = epos['FHOUT_ENKF_GFS']
fhrs = range(fhmin, fhmax + fhout, fhout)
@@ -2717,7 +2717,7 @@ def _get_eposgroups(epos):
return grp, dep, lst
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}fcst'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -2731,11 +2731,11 @@ def _get_eposgroups(epos):
varval1, varval2, varval3 = _get_eposgroups(self._configs['epos'])
var_dict = {varname1: varval1, varname2: varval2, varname3: varval3}
- cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump.replace('enkf', '')
+ cycledef = 'gdas_half,gdas' if self.run in ['enkfgdas'] else self.run.replace('enkf', '')
resources = self.get_resource('epos')
- task_name = f'{self.cdump}epos#{varname1}#'
+ task_name = f'{self.run}epos#{varname1}#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -2747,7 +2747,7 @@ def _get_eposgroups(epos):
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': f'{self.cdump}epmn',
+ metatask_dict = {'task_name': f'{self.run}epmn',
'var_dict': var_dict,
'task_dict': task_dict
}
@@ -2759,7 +2759,7 @@ def _get_eposgroups(epos):
def earc(self):
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}epmn'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}epmn'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -2774,19 +2774,19 @@ def earc(self):
var_dict = {'grp': groups}
- task_name = f'{self.cdump}earc#grp#'
+ task_name = f'{self.run}earc#grp#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': earcenvars,
- 'cycledef': self.cdump.replace('enkf', ''),
+ 'cycledef': self.run.replace('enkf', ''),
'command': f'{self.HOMEgfs}/jobs/rocoto/earc.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': f'{self.cdump}eamn',
+ metatask_dict = {'task_name': f'{self.run}eamn',
'var_dict': var_dict,
'task_dict': task_dict
}
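The earc builder emits a metatask: '#grp#' in the task name is a Rocoto metatask variable that Rocoto expands once per value in var_dict. A standalone schematic of that expansion with an assumed grouping string (the real expansion happens inside Rocoto from the generated XML, not in this Python):

    run = 'enkfgdas'
    var_dict = {'grp': '00 01 02'}   # assumed example grouping values
    template = f'{run}earc#grp#'
    expanded = [template.replace('#grp#', g) for g in var_dict['grp'].split()]
    assert expanded == ['enkfgdasearc00', 'enkfgdasearc01', 'enkfgdasearc02']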
diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py
index ca8d7f9857..72dfba3edf 100644
--- a/workflow/rocoto/tasks.py
+++ b/workflow/rocoto/tasks.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import copy
import numpy as np
from applications.applications import AppConfig
import rocoto.rocoto as rocoto
@@ -36,18 +37,23 @@ class Tasks:
'mos_stn_fcst', 'mos_grd_fcst', 'mos_ext_stn_fcst', 'mos_ext_grd_fcst',
'mos_stn_prdgen', 'mos_grd_prdgen', 'mos_ext_stn_prdgen', 'mos_ext_grd_prdgen', 'mos_wx_prdgen', 'mos_wx_ext_prdgen']
- def __init__(self, app_config: AppConfig, cdump: str) -> None:
+ def __init__(self, app_config: AppConfig, run: str) -> None:
- self.app_config = app_config
- self.cdump = cdump
+ self.app_config = copy.deepcopy(app_config)
+ self.run = run
+ # Re-source the configs with RUN specified
+ print(f"Source configs with RUN={run}")
+ self._configs = self.app_config.source_configs(run=run, log=False)
+ # Update the base config for the application
+ self._configs['base'] = self.app_config.update_base(self._configs['base'])
# Save dict_configs and base in the internal state (never know where it may be needed)
- self._configs = self.app_config.configs
self._base = self._configs['base']
+
self.HOMEgfs = self._base['HOMEgfs']
self.rotdir = self._base['ROTDIR']
self.pslot = self._base['PSLOT']
- if self.cdump == "enkfgfs":
+ if self.run == "enkfgfs":
self.nmem = int(self._base['NMEM_ENS_GFS'])
else:
self.nmem = int(self._base['NMEM_ENS'])
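Storing a copy.deepcopy of app_config is what makes the per-RUN re-sourcing safe: each Tasks instance re-sources configs for its own RUN into its own copy, so building, say, the enkfgdas task list cannot clobber configs already sourced for gdas. A toy, standalone illustration of the aliasing hazard the deepcopy avoids (ToyAppConfig is a stand-in, not the real AppConfig):

    import copy

    class ToyAppConfig:              # stand-in for AppConfig; illustrative only
        def __init__(self):
            self.configs = {'base': {'RUN': None}}

    shared = ToyAppConfig()
    a = shared                       # stored by reference: one shared object
    b = copy.deepcopy(shared)        # independent copy, as in Tasks.__init__
    a.configs['base']['RUN'] = 'gdas'
    b.configs['base']['RUN'] = 'enkfgdas'
    assert shared.configs['base']['RUN'] == 'gdas'   # b's write did not leak back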
@@ -59,8 +65,7 @@ def __init__(self, app_config: AppConfig, cdump: str) -> None:
'HOMEgfs': self.HOMEgfs,
'EXPDIR': self._base.get('EXPDIR'),
'NET': self._base.get('NET'),
- 'CDUMP': self.cdump,
- 'RUN': self.cdump,
+ 'RUN': self.run,
'CDATE': '@Y@m@d@H',
'PDY': '@Y@m@d',
'cyc': '@H',
@@ -87,8 +92,8 @@ def _template_to_rocoto_cycstring(self, template: str, subs_dict: dict = {}) ->
Variables substituted by default:
${ROTDIR} -> '&ROTDIR;'
- ${RUN} -> self.cdump
- ${DUMP} -> self.cdump
+ ${RUN} -> self.run
+ ${DUMP} -> self.run
${MEMDIR} -> ''
${YMD} -> '@Y@m@d'
${HH} -> '@H'
@@ -110,8 +115,8 @@ def _template_to_rocoto_cycstring(self, template: str, subs_dict: dict = {}) ->
# Defaults
rocoto_conversion_dict = {
'ROTDIR': '&ROTDIR;',
- 'RUN': self.cdump,
- 'DUMP': self.cdump,
+ 'RUN': self.run,
+ 'DUMP': self.run,
'MEMDIR': '',
'YMD': '@Y@m@d',
'HH': '@H'
@@ -124,7 +129,7 @@ def _template_to_rocoto_cycstring(self, template: str, subs_dict: dict = {}) ->
rocoto_conversion_dict.get)
@staticmethod
- def _get_forecast_hours(cdump, config, component='atmos') -> List[str]:
+ def _get_forecast_hours(run, config, component='atmos') -> List[str]:
# Make a local copy of the config to avoid modifying the original
local_config = config.copy()
@@ -146,11 +151,11 @@ def _get_forecast_hours(cdump, config, component='atmos') -> List[str]:
# Get a list of all forecast hours
fhrs = []
- if cdump in ['gdas']:
+ if run in ['gdas']:
fhmax = local_config['FHMAX']
fhout = local_config['FHOUT']
fhrs = list(range(fhmin, fhmax + fhout, fhout))
- elif cdump in ['gfs', 'gefs']:
+ elif run in ['gfs', 'gefs']:
fhmax = local_config['FHMAX_GFS']
fhout = local_config['FHOUT_GFS']
fhmax_hf = local_config['FHMAX_HF_GFS']
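The renamed helper keeps the same arithmetic: for gdas the forecast hours are one fixed-interval range, inclusive of FHMAX because the range end is padded by FHOUT; the gfs/gefs branch (truncated above) additionally splices in a high-frequency segment up to FHMAX_HF_GFS. A standalone sketch of the gdas branch with assumed sample values:

    fhmin, fhmax, fhout = 0, 9, 3    # assumed sample FHMIN/FHMAX/FHOUT values
    fhrs = list(range(fhmin, fhmax + fhout, fhout))
    assert fhrs == [0, 3, 6, 9]      # range end is padded so FHMAX is included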
@@ -165,7 +170,7 @@ def get_resource(self, task_name):
Given a task name (task_name) and its configuration (task_config),
return a dictionary of resources (task_resource) used by the task.
Task resource dictionary includes:
- account, walltime, cores, nodes, ppn, threads, memory, queue, partition, native
+ account, walltime, ntasks, nodes, ppn, threads, memory, queue, partition, native
"""
scheduler = self.app_config.scheduler
@@ -174,33 +179,16 @@ def get_resource(self, task_name):
account = task_config['ACCOUNT']
- if f'wtime_{task_name}_{self.cdump}' in task_config:
- walltime = task_config[f'wtime_{task_name}_{self.cdump}']
- else:
- walltime = task_config[f'wtime_{task_name}']
-
- if f'npe_{task_name}_{self.cdump}' in task_config:
- cores = task_config[f'npe_{task_name}_{self.cdump}']
- else:
- cores = task_config[f'npe_{task_name}']
-
- if f'npe_node_{task_name}_{self.cdump}' in task_config:
- ppn = task_config[f'npe_node_{task_name}_{self.cdump}']
- else:
- ppn = task_config[f'npe_node_{task_name}']
+ walltime = task_config['walltime']
+ ntasks = task_config['ntasks']
+ ppn = task_config['tasks_per_node']
- nodes = int(np.ceil(float(cores) / float(ppn)))
+ nodes = int(np.ceil(float(ntasks) / float(ppn)))
- if f'nth_{task_name}_{self.cdump}' in task_config:
- threads = task_config[f'nth_{task_name}_{self.cdump}']
- else:
- threads = task_config[f'nth_{task_name}']
+ threads = task_config['threads_per_task']
- if f'memory_{task_name}_{self.cdump}' in task_config:
- memory = task_config[f'memory_{task_name}_{self.cdump}']
- else:
- # Memory is not required
- memory = task_config.get(f'memory_{task_name}', None)
+ # Memory is not required
+ memory = task_config.get('memory', None)
if scheduler in ['pbspro']:
if task_config.get('prepost', False):
@@ -235,7 +223,7 @@ def get_resource(self, task_name):
task_resource = {'account': account,
'walltime': walltime,
'nodes': nodes,
- 'cores': cores,
+ 'ntasks': ntasks,
'ppn': ppn,
'threads': threads,
'memory': memory,
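The payoff of the resource overhaul is visible here: get_resource no longer probes per-CDUMP key variants such as wtime_<task>_<cdump> or npe_<task>, it reads one common key set per job. A standalone sketch of the new lookup with an assumed task_config (math.ceil gives the same node rounding as the np.ceil call above):

    import math

    task_config = {'ACCOUNT': 'ops',            # assumed illustrative values
                   'walltime': '00:30:00',
                   'ntasks': 480,
                   'tasks_per_node': 120,
                   'threads_per_task': 2}

    ntasks = task_config['ntasks']
    ppn = task_config['tasks_per_node']
    nodes = math.ceil(ntasks / ppn)             # 480 tasks / 120 per node = 4
    threads = task_config['threads_per_task']
    memory = task_config.get('memory', None)    # memory stays optional
    assert (nodes, threads, memory) == (4, 2, None)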
diff --git a/workflow/rocoto/workflow_tasks.py b/workflow/rocoto/workflow_tasks.py
index 84af898d36..78c31dba1b 100644
--- a/workflow/rocoto/workflow_tasks.py
+++ b/workflow/rocoto/workflow_tasks.py
@@ -14,10 +14,10 @@ def get_wf_tasks(app_config: AppConfig) -> List:
"""
tasks = []
- # Loop over all keys of cycles (CDUMP)
- for cdump, cdump_tasks in app_config.task_names.items():
- task_obj = tasks_factory.create(app_config.net, app_config, cdump) # create Task object based on cdump
- for task_name in cdump_tasks:
+ # Loop over all keys of cycles (RUN)
+ for run, run_tasks in app_config.task_names.items():
+ task_obj = tasks_factory.create(app_config.net, app_config, run) # create Task object based on run
+ for task_name in run_tasks:
tasks.append(task_obj.get_task(task_name))
return tasks
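app_config.task_names is assumed to map each RUN to its ordered list of job names; the loop then builds one Tasks object per RUN and one Rocoto task per name. An illustrative, non-authoritative shape of that mapping and how the loop consumes it:

    # Shape of app_config.task_names assumed for illustration only
    task_names = {
        'gdas': ['prep', 'anal', 'fcst', 'arch', 'cleanup'],
        'enkfgdas': ['eobs', 'eupd', 'ecen', 'esfc', 'efcs', 'epos', 'earc'],
    }
    for run, run_tasks in task_names.items():
        for task_name in run_tasks:
            print(f'{run}{task_name}')    # matches the f'{run}...' names above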
diff --git a/workflow/setup_expt.py b/workflow/setup_expt.py
index b44842b982..3e70df0f02 100755
--- a/workflow/setup_expt.py
+++ b/workflow/setup_expt.py
@@ -73,10 +73,10 @@ def fill_ROTDIR_cycled(host, inputs):
# Test if we are using the new COM structure or the old flat one for ICs
if inputs.start in ['warm']:
- pathstr = os.path.join(inputs.icsdir, f'{inputs.cdump}.{rdatestr[:8]}',
+ pathstr = os.path.join(inputs.icsdir, f'{inputs.run}.{rdatestr[:8]}',
rdatestr[8:], 'model_data', 'atmos')
else:
- pathstr = os.path.join(inputs.icsdir, f'{inputs.cdump}.{idatestr[:8]}',
+ pathstr = os.path.join(inputs.icsdir, f'{inputs.run}.{idatestr[:8]}',
idatestr[8:], 'model_data', 'atmos')
if os.path.isdir(pathstr):
@@ -132,8 +132,8 @@ def link_files_from_src_to_dst(src_dir, dst_dir):
# Link ensemble member initial conditions
if inputs.nens > 0:
- previous_cycle_dir = f'enkf{inputs.cdump}.{rdatestr[:8]}/{rdatestr[8:]}'
- current_cycle_dir = f'enkf{inputs.cdump}.{idatestr[:8]}/{idatestr[8:]}'
+ previous_cycle_dir = f'enkf{inputs.run}.{rdatestr[:8]}/{rdatestr[8:]}'
+ current_cycle_dir = f'enkf{inputs.run}.{idatestr[:8]}/{idatestr[8:]}'
for ii in range(1, inputs.nens + 1):
memdir = f'mem{ii:03d}'
@@ -155,7 +155,7 @@ def link_files_from_src_to_dst(src_dir, dst_dir):
link_files_from_src_to_dst(src_dir, dst_dir)
# First 1/2 cycle needs a MOM6 increment
- incfile = f'enkf{inputs.cdump}.t{idatestr[8:]}z.ocninc.nc'
+ incfile = f'enkf{inputs.run}.t{idatestr[8:]}z.ocninc.nc'
src_file = os.path.join(inputs.icsdir, current_cycle_dir, memdir, src_ocn_anl_dir, incfile)
dst_file = os.path.join(rotdir, current_cycle_dir, memdir, dst_ocn_anl_dir, incfile)
makedirs_if_missing(os.path.join(rotdir, current_cycle_dir, memdir, dst_ocn_anl_dir))
@@ -176,8 +176,8 @@ def link_files_from_src_to_dst(src_dir, dst_dir):
link_files_from_src_to_dst(src_dir, dst_dir)
# Link deterministic initial conditions
- previous_cycle_dir = f'{inputs.cdump}.{rdatestr[:8]}/{rdatestr[8:]}'
- current_cycle_dir = f'{inputs.cdump}.{idatestr[:8]}/{idatestr[8:]}'
+ previous_cycle_dir = f'{inputs.run}.{rdatestr[:8]}/{rdatestr[8:]}'
+ current_cycle_dir = f'{inputs.run}.{idatestr[:8]}/{idatestr[8:]}'
# Link atmospheric files
if inputs.start in ['warm']:
@@ -198,7 +198,7 @@ def link_files_from_src_to_dst(src_dir, dst_dir):
link_files_from_src_to_dst(src_dir, dst_dir)
# First 1/2 cycle needs a MOM6 increment
- incfile = f'{inputs.cdump}.t{idatestr[8:]}z.ocninc.nc'
+ incfile = f'{inputs.run}.t{idatestr[8:]}z.ocninc.nc'
src_file = os.path.join(inputs.icsdir, current_cycle_dir, src_ocn_anl_dir, incfile)
dst_file = os.path.join(rotdir, current_cycle_dir, dst_ocn_anl_dir, incfile)
makedirs_if_missing(os.path.join(rotdir, current_cycle_dir, dst_ocn_anl_dir))
@@ -224,26 +224,26 @@ def link_files_from_src_to_dst(src_dir, dst_dir):
dst_dir = os.path.join(rotdir, current_cycle_dir, dst_atm_anl_dir)
makedirs_if_missing(dst_dir)
for ftype in ['abias', 'abias_pc', 'abias_air', 'radstat']:
- fname = f'{inputs.cdump}.t{idatestr[8:]}z.{ftype}'
+ fname = f'{inputs.run}.t{idatestr[8:]}z.{ftype}'
src_file = os.path.join(src_dir, fname)
if os.path.exists(src_file):
os.symlink(src_file, os.path.join(dst_dir, fname))
# First 1/2 cycle also needs an atmos increment if doing warm start
if inputs.start in ['warm']:
for ftype in ['atmi003.nc', 'atminc.nc', 'atmi009.nc']:
- fname = f'{inputs.cdump}.t{idatestr[8:]}z.{ftype}'
+ fname = f'{inputs.run}.t{idatestr[8:]}z.{ftype}'
src_file = os.path.join(src_dir, fname)
if os.path.exists(src_file):
os.symlink(src_file, os.path.join(dst_dir, fname))
if inputs.nens > 0:
- current_cycle_dir = f'enkf{inputs.cdump}.{idatestr[:8]}/{idatestr[8:]}'
+ current_cycle_dir = f'enkf{inputs.run}.{idatestr[:8]}/{idatestr[8:]}'
for ii in range(1, inputs.nens + 1):
memdir = f'mem{ii:03d}'
src_dir = os.path.join(inputs.icsdir, current_cycle_dir, memdir, src_atm_anl_dir)
dst_dir = os.path.join(rotdir, current_cycle_dir, memdir, dst_atm_anl_dir)
makedirs_if_missing(dst_dir)
for ftype in ['ratmi003.nc', 'ratminc.nc', 'ratmi009.nc']:
- fname = f'enkf{inputs.cdump}.t{idatestr[8:]}z.{ftype}'
+ fname = f'enkf{inputs.run}.t{idatestr[8:]}z.{ftype}'
src_file = os.path.join(src_dir, fname)
if os.path.exists(src_file):
os.symlink(src_file, os.path.join(dst_dir, fname))
@@ -426,7 +426,7 @@ def _common_args(parser):
def _gfs_args(parser):
parser.add_argument('--start', help='restart mode: warm or cold', type=str,
choices=['warm', 'cold'], required=False, default='cold')
- parser.add_argument('--cdump', help='CDUMP to start the experiment',
+ parser.add_argument('--run', help='RUN to start the experiment',
type=str, required=False, default='gdas')
# --configdir is hidden from help
parser.add_argument('--configdir', help=SUPPRESS, type=str, required=False, default=os.path.join(_top, 'parm/config/gfs'))
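Scripts that drive setup_expt.py should note the flag rename: --cdump becomes --run, with the same type and default. A standalone argparse check of just this option (the real parser wires it into subcommands not shown here):

    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('--run', help='RUN to start the experiment',
                        type=str, required=False, default='gdas')
    assert parser.parse_args([]).run == 'gdas'
    assert parser.parse_args(['--run', 'gfs']).run == 'gfs'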