diff --git a/ci/Jenkinsfile b/ci/Jenkinsfile
index c6aa0887c7..956bd692dd 100644
--- a/ci/Jenkinsfile
+++ b/ci/Jenkinsfile
@@ -1,9 +1,10 @@
def Machine = 'none'
def machine = 'none'
-def HOME = 'none'
+def CUSTOM_WORKSPACE = 'none'
def caseList = ''
// Location of the custom workspaces for each machine in the CI system. They are persistent across iterations of the PR.
-def custom_workspace = [hera: '/scratch1/NCEPDEV/global/CI', orion: '/work2/noaa/stmp/CI/ORION', hercules: '/work2/noaa/stmp/CI/HERCULES']
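+// Map of machine names (lowercase) to the labels of their Jenkins agents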
+def NodeName = [hera: 'Hera-EMC', orion: 'Orion-EMC', hercules: 'Hercules-EMC', gaea: 'Gaea']
+def custom_workspace = [hera: '/scratch1/NCEPDEV/global/CI', orion: '/work2/noaa/stmp/CI/ORION', hercules: '/work2/noaa/stmp/CI/HERCULES', gaea: '/gpfs/f5/epic/proj-shared/global/CI']
def repo_url = 'git@github.com:NOAA-EMC/global-workflow.git'
def STATUS = 'Passed'
@@ -40,9 +41,9 @@ pipeline {
echo "This is parent job so getting list of nodes matching labels:"
for (label in pullRequest.labels) {
if (label.matches("CI-(.*?)-Ready")) {
- def Machine_name = label.split('-')[1].toString()
+ def machine_name = label.split('-')[1].toString().toLowerCase()
jenkins.model.Jenkins.get().computers.each { c ->
- if (c.node.selfLabel.name == "${Machine_name}-EMC") {
+ if (c.node.selfLabel.name == NodeName[machine_name]) {
run_nodes.add(c.node.selfLabel.name)
}
}
@@ -70,25 +71,25 @@ pipeline {
}
stage('2. Get Common Workspace') {
- agent { label "${machine}-emc" }
+ agent { label NodeName[machine].toLowerCase() }
steps {
script {
Machine = machine[0].toUpperCase() + machine.substring(1)
echo "Getting Common Workspace for ${Machine}"
ws("${custom_workspace[machine]}/${env.CHANGE_ID}") {
properties([parameters([[$class: 'NodeParameterDefinition', allowedSlaves: ['built-in', 'Hercules-EMC', 'Hera-EMC', 'Orion-EMC'], defaultSlaves: ['built-in'], name: '', nodeEligibility: [$class: 'AllNodeEligibility'], triggerIfResult: 'allCases']])])
- HOME = "${WORKSPACE}"
- sh(script: "mkdir -p ${HOME}/RUNTESTS;rm -Rf ${HOME}/RUNTESTS/*")
+ CUSTOM_WORKSPACE = "${WORKSPACE}"
+ sh(script: "mkdir -p ${CUSTOM_WORKSPACE}/RUNTESTS;rm -Rf ${CUSTOM_WORKSPACE}/RUNTESTS/*")
sh(script: """${GH} pr edit ${env.CHANGE_ID} --repo ${repo_url} --add-label "CI-${Machine}-Building" --remove-label "CI-${Machine}-Ready" """)
}
- echo "Building and running on ${Machine} in directory ${HOME}"
+ echo "Building and running on ${Machine} in directory ${CUSTOM_WORKSPACE}"
}
}
}
stage('3. Build System') {
matrix {
- agent { label "${machine}-emc" }
+ agent { label NodeName[machine].toLowerCase() }
//options {
// throttle(['global_matrix_build'])
//}
@@ -102,7 +103,7 @@ pipeline {
stage('build system') {
steps {
script {
- def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to build the system on per system basis under the common workspace HOME
+ def HOMEgfs = "${CUSTOM_WORKSPACE}/${system}" // local HOMEgfs is used to build the system on per system basis under the custome workspace for each buile system
sh(script: "mkdir -p ${HOMEgfs}")
ws(HOMEgfs) {
if (fileExists("${HOMEgfs}/sorc/BUILT_semaphor")) { // if the system is already built, skip the build in the case of re-runs
@@ -112,7 +113,16 @@ pipeline {
sh(script: './link_workflow.sh')
}
} else {
- checkout scm
+ try {
+ echo "Checking out the code for ${system} on ${Machine} using scm in ${HOMEgfs}"
+ checkout scm
+ } catch (Exception e) {
+ if (env.CHANGE_ID) {
+ sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Checkout **Failed** on ${Machine}: ${e.getMessage()}" """)
+ }
+ echo "Failed to checkout: ${e.getMessage()}"
+ STATUS = 'Failed'
+ }
def gist_url = ""
def error_logs = ""
def error_logs_message = ""
@@ -173,7 +183,7 @@ pipeline {
stage('4. Run Tests') {
failFast false
matrix {
- agent { label "${machine}-emc" }
+ agent { label NodeName[machine].toLowerCase() }
axes {
axis {
name 'Case'
@@ -189,11 +199,11 @@ pipeline {
}
steps {
script {
- sh(script: "sed -n '/{.*}/!p' ${HOME}/gfs/ci/cases/pr/${Case}.yaml > ${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp")
- def yaml_case = readYaml file: "${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp"
+ sh(script: "sed -n '/{.*}/!p' ${CUSTOM_WORKSPACE}/gfs/ci/cases/pr/${Case}.yaml > ${CUSTOM_WORKSPACE}/gfs/ci/cases/pr/${Case}.yaml.tmp")
+ def yaml_case = readYaml file: "${CUSTOM_WORKSPACE}/gfs/ci/cases/pr/${Case}.yaml.tmp"
system = yaml_case.experiment.system
- def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to populate the XML on per system basis
- env.RUNTESTS = "${HOME}/RUNTESTS"
+ def HOMEgfs = "${CUSTOM_WORKSPACE}/${system}" // local HOMEgfs is used to populate the XML on per system basis
+ env.RUNTESTS = "${CUSTOM_WORKSPACE}/RUNTESTS"
sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml")
}
}
@@ -206,15 +216,15 @@ pipeline {
failFast false
steps {
script {
- HOMEgfs = "${HOME}/gfs" // common HOMEgfs is used to launch the scripts that run the experiments
- def pslot = sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh get_pslot ${HOME}/RUNTESTS ${Case}", returnStdout: true).trim()
- def error_file = "${HOME}/RUNTESTS/${pslot}_error.logs"
+ HOMEgfs = "${CUSTOM_WORKSPACE}/gfs" // common HOMEgfs is used to launch the scripts that run the experiments
+ def pslot = sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh get_pslot ${CUSTOM_WORKSPACE}/RUNTESTS ${Case}", returnStdout: true).trim()
+ def error_file = "${CUSTOM_WORKSPACE}/RUNTESTS/${pslot}_error.logs"
sh(script: " rm -f ${error_file}")
try {
- sh(script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${HOME} ${pslot} ${system}")
+ sh(script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${CUSTOM_WORKSPACE} ${pslot} ${system}")
} catch (Exception error_experiment) {
sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh cancel_batch_jobs ${pslot}")
- ws(HOME) {
+ ws(CUSTOM_WORKSPACE) {
def error_logs = ""
def error_logs_message = ""
if (fileExists(error_file)) {
@@ -222,11 +232,11 @@ pipeline {
def lines = fileContent.readLines()
for (line in lines) {
echo "archiving: ${line}"
- if (fileExists("${HOME}/${line}") && readFile("${HOME}/${line}").length() > 0) {
+ if (fileExists("${CUSTOM_WORKSPACE}/${line}") && readFile("${CUSTOM_WORKSPACE}/${line}").length() > 0) {
try {
archiveArtifacts artifacts: "${line}", fingerprint: true
- error_logs = error_logs + "${HOME}/${line} "
- error_logs_message = error_logs_message + "${HOME}/${line}\n"
+ error_logs = error_logs + "${CUSTOM_WORKSPACE}/${line} "
+ error_logs_message = error_logs_message + "${CUSTOM_WORKSPACE}/${line}\n"
} catch (Exception error_arch) {
echo "Failed to archive error log ${line}: ${error_arch.getMessage()}"
}
@@ -240,12 +250,12 @@ pipeline {
echo "Failed to comment on PR: ${error_comment.getMessage()}"
}
} else {
- echo "No error logs found for failed cases in $HOME/RUNTESTS/${pslot}_error.logs"
+ echo "No error logs found for failed cases in $CUSTOM_WORKSPACE/RUNTESTS/${pslot}_error.logs"
}
STATUS = 'Failed'
try {
sh(script: """${GH} pr edit ${env.CHANGE_ID} --repo ${repo_url} --remove-label "CI-${Machine}-Running" --add-label "CI-${Machine}-${STATUS}" """, returnStatus: true)
- sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Experiment ${Case} **FAILED** on ${Machine} in\n\\`${HOME}/RUNTESTS/${pslot}\\`" """)
+ sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Experiment ${Case} **FAILED** on ${Machine} in\n\\`${CUSTOM_WORKSPACE}/RUNTESTS/${pslot}\\`" """)
} catch (Exception e) {
echo "Failed to update label from Running to ${STATUS}: ${e.getMessage()}"
}
@@ -259,19 +269,30 @@ pipeline {
}
}
stage( '5. FINALIZE' ) {
- when {
- expression {
- STATUS == 'Passed'
- }
- }
- agent { label "${machine}-emc" }
+ agent { label NodeName[machine].toLowerCase() }
steps {
script {
- try {
- sh(script: """${GH} pr edit ${env.CHANGE_ID} --repo ${repo_url} --remove-label "CI-${Machine}-Running" --remove-label "CI-${Machine}-Building" --add-label "CI-${Machine}-${STATUS}" """, returnStatus: true)
- sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "**CI ${STATUS}** ${Machine} at
-Built and ran in directory \\`${HOME}\\`" """, returnStatus: true)
- } catch (Exception e) {
- echo "Failed to update label from Running to ${STATUS}: ${e.getMessage()}"
+ sh(script: """
+ labels=\$(gh pr view ${env.CHANGE_ID} --repo ${repo_url} --json labels --jq '.labels[].name')
+ for label in \$labels; do
+ if [[ "\$label" == *"${Machine}"* ]]; then
+ gh pr edit ${env.CHANGE_ID} --repo ${repo_url} --remove-label "\$label"
+ fi
+ done
+ """, returnStatus: true)
+ sh(script: """${GH} pr edit ${env.CHANGE_ID} --repo ${repo_url} --add-label "CI-${Machine}-${STATUS}" """, returnStatus: true)
+ if (fileExists("${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log")) {
+ sh(script: """echo "**CI ${STATUS}** ${Machine} at
+Built and ran in directory \\`${CUSTOM_WORKSPACE}\\`\n\\`\\`\\`\n" | cat - ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log > temp && mv temp ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log""", returnStatus: true)
+ sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body-file ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log """, returnStatus: true)
+ }
+ if (STATUS == 'Passed') {
+ try {
+ sh(script: "rm -Rf ${CUSTOM_WORKSPACE}/*")
+ } catch (Exception e) {
+ echo "Failed to remove custom work directory ${CUSTOM_WORKSPACE} on ${Machine}: ${e.getMessage()}"
+ }
+ } else {
+ echo "Failed to build and run Global-workflow in ${CUSTOM_WORKSPACE} and on ${Machine}"
}
}
}
diff --git a/ci/cases/pr/C96_atmaerosnowDA.yaml b/ci/cases/pr/C96_atmaerosnowDA.yaml
index edde37cbf7..7e22955a37 100644
--- a/ci/cases/pr/C96_atmaerosnowDA.yaml
+++ b/ci/cases/pr/C96_atmaerosnowDA.yaml
@@ -19,4 +19,3 @@ arguments:
skip_ci_on_hosts:
- orion
- hercules
- - wcoss2
diff --git a/ci/scripts/check_ci.sh b/ci/scripts/check_ci.sh
index cd907d34aa..04dd92f4a6 100755
--- a/ci/scripts/check_ci.sh
+++ b/ci/scripts/check_ci.sh
@@ -175,7 +175,7 @@ for pr in ${pr_list}; do
DATE=$(date +'%D %r')
echo "Experiment ${pslot} **SUCCESS** on ${MACHINE_ID^} at ${DATE}" >> "${output_ci_single}"
echo "Experiment ${pslot} *** SUCCESS *** at ${DATE}" >> "${output_ci}"
- "${GH}" pr comment "${pr}" --repo "${REPO_URL}" --body-file "${output_ci_single}"
+ # "${GH}" pr comment "${pr}" --repo "${REPO_URL}" --body-file "${output_ci_single}"
fi
done
done
diff --git a/ci/scripts/run-check_ci.sh b/ci/scripts/run-check_ci.sh
index 5c891fc4bd..5c49a21c4b 100755
--- a/ci/scripts/run-check_ci.sh
+++ b/ci/scripts/run-check_ci.sh
@@ -101,9 +101,7 @@ while true; do
if [[ "${ROCOTO_STATE}" == "DONE" ]]; then
{
- echo "Experiment ${pslot} Completed ${CYCLES_DONE} Cycles at $(date)" || true
- echo "with ${SUCCEEDED} successfully completed jobs" || true
- echo "Experiment ${pslot} Completed: *SUCCESS*"
+ echo "Experiment ${pslot} Completed ${CYCLES_DONE} Cycles: *SUCCESS* at $(date)" || true
} | tee -a "${run_check_logfile}"
rc=0
break
diff --git a/env/AWSPW.env b/env/AWSPW.env
index 7d81000f5c..a4f598d3d7 100755
--- a/env/AWSPW.env
+++ b/env/AWSPW.env
@@ -3,11 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "atmanlvar atmanlfv3inc atmensanlletkf atmensanlfv3inc aeroanlrun snowanl"
- echo "anal sfcanl fcst post metp"
- echo "eobs eupd ecen efcs epos"
- echo "postsnd awips gempak"
exit 1
fi
@@ -26,13 +21,11 @@ ulimit -a
if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
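+ # ppn/nprocs hold variable *names*; ${!var+0} indirectly tests whether the
+ # RUN-specific variable is set, falling back to the generic per-step name.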
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
@@ -109,21 +102,6 @@ elif [[ "${step}" = "awips" ]]; then
[[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
-elif [[ "${step}" = "gempak" ]]; then
-
- export CFP_MP="YES"
-
- if [[ ${CDUMP} == "gfs" ]]; then
- npe_gempak=${npe_gempak_gfs}
- npe_node_gempak=${npe_node_gempak_gfs}
- fi
-
- nth_max=$((npe_node_max / npe_node_gempak))
-
- export NTHREADS_GEMPAK=${nth_gempak:-1}
- [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
- export APRUN="${launcher} -n ${npe_gempak} ${mpmd_opt}"
-
elif [[ "${step}" = "fit2obs" ]]; then
diff --git a/env/CONTAINER.env b/env/CONTAINER.env
index 77768b485b..c40543794b 100755
--- a/env/CONTAINER.env
+++ b/env/CONTAINER.env
@@ -3,11 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "atmanlvar atmanlfv3inc atmensanlletkf atmensanlfv3inc aeroanlrun snowanl"
- echo "anal sfcanl fcst post metp"
- echo "eobs eupd ecen efcs epos"
- echo "postsnd awips gempak"
exit 1
fi
diff --git a/env/GAEA.env b/env/GAEA.env
index c19fecc934..5509a29a3f 100755
--- a/env/GAEA.env
+++ b/env/GAEA.env
@@ -3,8 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "fcst atmos_products"
exit 1
fi
@@ -19,13 +17,11 @@ ulimit -a
if [[ "${step}" = "fcst" ]]; then
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
diff --git a/env/HERA.env b/env/HERA.env
index 94bab36703..db63f0bfa5 100755
--- a/env/HERA.env
+++ b/env/HERA.env
@@ -3,11 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "atmanlvar atmanlfv3inc atmensanlletkf atmensanlfv3inc aeroanlrun snowanl"
- echo "anal sfcanl fcst post metp"
- echo "eobs eupd ecen efcs epos"
- echo "postsnd awips gempak"
exit 1
fi
@@ -219,13 +214,11 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
@@ -314,10 +307,6 @@ elif [[ "${step}" = "gempak" ]]; then
export CFP_MP="YES"
- if [[ ${CDUMP} == "gfs" ]]; then
- npe_node_gempak=${npe_node_gempak_gfs}
- fi
-
nth_max=$((npe_node_max / npe_node_gempak))
export NTHREADS_GEMPAK=${nth_gempak:-1}
diff --git a/env/HERCULES.env b/env/HERCULES.env
index d43dedad8d..77e57e066d 100755
--- a/env/HERCULES.env
+++ b/env/HERCULES.env
@@ -3,9 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "fcst post"
- echo "Note: Hercules is only set up to run in forecast-only mode"
exit 1
fi
@@ -211,13 +208,12 @@ case ${step} in
"fcst" | "efcs")
export OMP_STACKSIZE=512M
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
+
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
@@ -312,18 +308,7 @@ case ${step} in
;;
"gempak")
- export CFP_MP="YES"
-
- if [[ ${CDUMP} == "gfs" ]]; then
- npe_gempak=${npe_gempak_gfs}
- npe_node_gempak=${npe_node_gempak_gfs}
- fi
-
- nth_max=$((npe_node_max / npe_node_gempak))
-
- export NTHREADS_GEMPAK=${nth_gempak:-1}
- [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
- export APRUN="${launcher} -n ${npe_gempak} ${mpmd_opt}"
+ echo "WARNING: ${step} is not enabled on ${machine}!"
;;
"fit2obs")
diff --git a/env/JET.env b/env/JET.env
index 668ec1c2e4..bb9826f331 100755
--- a/env/JET.env
+++ b/env/JET.env
@@ -3,11 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "atmanlvar atmanlfv3inc atmensanlletkf atmensanlfv3inc aeroanlrun snowanl"
- echo "anal sfcanl fcst post metp"
- echo "eobs eupd ecen efcs epos"
- echo "postsnd awips gempak"
exit 1
fi
@@ -192,13 +187,11 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
diff --git a/env/ORION.env b/env/ORION.env
index afd1cda052..502e99e192 100755
--- a/env/ORION.env
+++ b/env/ORION.env
@@ -3,11 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "atmanlvar atmanlfv3inc atmensanlletkf atmensanlfv3inc aeroanlrun snowanl"
- echo "anal sfcanl fcst post metp"
- echo "eobs eupd ecen efcs epos"
- echo "postsnd awips gempak"
exit 1
fi
@@ -226,14 +221,11 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- export OMP_STACKSIZE=512M
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
@@ -319,18 +311,7 @@ elif [[ "${step}" = "awips" ]]; then
elif [[ "${step}" = "gempak" ]]; then
- export CFP_MP="YES"
-
- if [[ ${CDUMP} == "gfs" ]]; then
- npe_gempak=${npe_gempak_gfs}
- npe_node_gempak=${npe_node_gempak_gfs}
- fi
-
- nth_max=$((npe_node_max / npe_node_gempak))
-
- export NTHREADS_GEMPAK=${nth_gempak:-1}
- [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
- export APRUN="${launcher} -n ${npe_gempak} ${mpmd_opt}"
+ echo "WARNING: ${step} is not enabled on ${machine}!"
elif [[ "${step}" = "fit2obs" ]]; then
diff --git a/env/S4.env b/env/S4.env
index 8a368bf1d6..190c7295f4 100755
--- a/env/S4.env
+++ b/env/S4.env
@@ -3,11 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "atmanlvar atmanlfv3inc atmensanlletkf atmensanlfv3inc aeroanlrun snowanl"
- echo "anal sfcanl fcst post metp"
- echo "eobs eupd ecen efcs epos"
- echo "postsnd awips gempak"
exit 1
fi
@@ -179,13 +174,11 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
@@ -198,6 +191,7 @@ elif [[ "${step}" = "upp" ]]; then
export NTHREADS_UPP=${nth_upp:-1}
[[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
+ export OMP_NUM_THREADS="${NTHREADS_UPP}"
export APRUN_UPP="${launcher} -n ${npe_upp}"
elif [[ "${step}" = "atmos_products" ]]; then
diff --git a/env/WCOSS2.env b/env/WCOSS2.env
index 9fe9179e6b..befca81d26 100755
--- a/env/WCOSS2.env
+++ b/env/WCOSS2.env
@@ -3,11 +3,6 @@
if [[ $# -ne 1 ]]; then
echo "Must specify an input argument to set runtime environment variables!"
- echo "argument can be any one of the following:"
- echo "atmanlvar atmanlfv3inc atmensanlletkf atmensanlfv3inc aeroanlrun snowanl"
- echo "anal sfcanl fcst post metp"
- echo "eobs eupd ecen esfc efcs epos"
- echo "postsnd awips gempak"
exit 1
fi
@@ -38,7 +33,6 @@ elif [[ "${step}" = "prep_emissions" ]]; then
elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then
export USE_CFP="YES"
- if [[ "${step}" = "waveprep" ]] && [[ "${CDUMP}" = "gfs" ]]; then export NTASKS=${NTASKS_gfs} ; fi
export wavempexec="${launcher} -np"
export wave_mpmd=${mpmd_opt}
@@ -182,13 +176,11 @@ elif [[ "${step}" = "eupd" ]]; then
elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
- if [[ "${CDUMP}" =~ "gfs" ]]; then
- nprocs="npe_${step}_gfs"
- ppn="npe_node_${step}_gfs" || ppn="npe_node_${step}"
- else
- nprocs="npe_${step}"
- ppn="npe_node_${step}"
- fi
+ ppn="npe_node_${step}_${RUN}"
+ [[ -z "${!ppn+0}" ]] && ppn="npe_node_${step}"
+ nprocs="npe_${step}_${RUN}"
+ [[ -z ${!nprocs+0} ]] && nprocs="npe_${step}"
+
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
@@ -291,11 +283,6 @@ elif [[ "${step}" = "awips" ]]; then
elif [[ "${step}" = "gempak" ]]; then
- if [[ ${CDUMP} == "gfs" ]]; then
- npe_gempak=${npe_gempak_gfs}
- npe_node_gempak=${npe_node_gempak_gfs}
- fi
-
nth_max=$((npe_node_max / npe_node_gempak))
export NTHREADS_GEMPAK=${nth_gempak:-1}
diff --git a/jobs/JGLOBAL_WAVE_INIT b/jobs/JGLOBAL_WAVE_INIT
index 7cadfe9f87..3a0a8b43a8 100755
--- a/jobs/JGLOBAL_WAVE_INIT
+++ b/jobs/JGLOBAL_WAVE_INIT
@@ -10,9 +10,10 @@ export errchk=${errchk:-err_chk}
export MP_PULSE=0
# Set COM Paths
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_PREP
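+# declare_from_tmpl accepts VARIABLE:TEMPLATE pairs, declaring each COMIN_*/COMOUT_*
+# variable from the named COM_*_TMPL template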
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMOUT_WAVE_PREP:COM_WAVE_PREP_TMPL
-mkdir -m 775 -p ${COM_WAVE_PREP}
+if [[ ! -d "${COMOUT_WAVE_PREP}" ]]; then mkdir -p "${COMOUT_WAVE_PREP}"; fi
# Set mpi serial command
export wavempexec=${wavempexec:-"mpirun -n"}
diff --git a/jobs/JGLOBAL_WAVE_POST_BNDPNT b/jobs/JGLOBAL_WAVE_POST_BNDPNT
index d1c9227895..808ba7d9f3 100755
--- a/jobs/JGLOBAL_WAVE_POST_BNDPNT
+++ b/jobs/JGLOBAL_WAVE_POST_BNDPNT
@@ -9,9 +9,12 @@ export errchk=${errchk:-err_chk}
export MP_PULSE=0
# Set COM Paths and GETGES environment
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_PREP COM_WAVE_HISTORY COM_WAVE_STATION
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMIN_WAVE_PREP:COM_WAVE_PREP_TMPL \
+ COMIN_WAVE_HISTORY:COM_WAVE_HISTORY_TMPL \
+ COMOUT_WAVE_STATION:COM_WAVE_STATION_TMPL
-if [[ ! -d ${COM_WAVE_STATION} ]]; then mkdir -p "${COM_WAVE_STATION}"; fi
+if [[ ! -d "${COMOUT_WAVE_STATION}" ]]; then mkdir -p "${COMOUT_WAVE_STATION}"; fi
# Set wave model ID tag to include member number
# if ensemble; waveMEMB var empty in deterministic
diff --git a/jobs/JGLOBAL_WAVE_POST_BNDPNTBLL b/jobs/JGLOBAL_WAVE_POST_BNDPNTBLL
index ea4bb30cfb..c85b1cb5f3 100755
--- a/jobs/JGLOBAL_WAVE_POST_BNDPNTBLL
+++ b/jobs/JGLOBAL_WAVE_POST_BNDPNTBLL
@@ -13,9 +13,12 @@ export CDATE=${PDY}${cyc}
export MP_PULSE=0
# Set COM Paths and GETGES environment
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_PREP COM_WAVE_HISTORY COM_WAVE_STATION
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMIN_WAVE_PREP:COM_WAVE_PREP_TMPL \
+ COMIN_WAVE_HISTORY:COM_WAVE_HISTORY_TMPL \
+ COMOUT_WAVE_STATION:COM_WAVE_STATION_TMPL
-if [[ ! -d ${COM_WAVE_STATION} ]]; then mkdir -p "${COM_WAVE_STATION}"; fi
+if [[ ! -d "${COMOUT_WAVE_STATION}" ]]; then mkdir -p "${COMOUT_WAVE_STATION}"; fi
# Set wave model ID tag to include member number
# if ensemble; waveMEMB var empty in deterministic
diff --git a/jobs/JGLOBAL_WAVE_POST_PNT b/jobs/JGLOBAL_WAVE_POST_PNT
index a946ae537d..769159be61 100755
--- a/jobs/JGLOBAL_WAVE_POST_PNT
+++ b/jobs/JGLOBAL_WAVE_POST_PNT
@@ -9,9 +9,12 @@ export errchk=${errchk:-err_chk}
export MP_PULSE=0
# Set COM Paths and GETGES environment
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_PREP COM_WAVE_HISTORY COM_WAVE_STATION
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMIN_WAVE_PREP:COM_WAVE_PREP_TMPL \
+ COMIN_WAVE_HISTORY:COM_WAVE_HISTORY_TMPL \
+ COMOUT_WAVE_STATION:COM_WAVE_STATION_TMPL
-if [[ ! -d ${COM_WAVE_STATION} ]]; then mkdir -p "${COM_WAVE_STATION}"; fi
+if [[ ! -d "${COMOUT_WAVE_STATION}" ]]; then mkdir -p "${COMOUT_WAVE_STATION}"; fi
# Set wave model ID tag to include member number
# if ensemble; waveMEMB var empty in deterministic
diff --git a/jobs/JGLOBAL_WAVE_POST_SBS b/jobs/JGLOBAL_WAVE_POST_SBS
index 89d8013ea1..53ac4b2083 100755
--- a/jobs/JGLOBAL_WAVE_POST_SBS
+++ b/jobs/JGLOBAL_WAVE_POST_SBS
@@ -9,9 +9,15 @@ export errchk=${errchk:-err_chk}
export MP_PULSE=0
# Set COM Paths and GETGES environment
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_PREP COM_WAVE_HISTORY COM_WAVE_GRID
-
-mkdir -p "${COM_WAVE_GRID}"
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMIN_WAVE_PREP:COM_WAVE_PREP_TMPL \
+ COMIN_WAVE_HISTORY:COM_WAVE_HISTORY_TMPL \
+ COMOUT_WAVE_PREP:COM_WAVE_PREP_TMPL \
+ COMOUT_WAVE_GRID:COM_WAVE_GRID_TMPL
+
+for out_dir in "${COMOUT_WAVE_PREP}" "${COMOUT_WAVE_GRID}"; do
+ if [[ ! -d "${out_dir}" ]]; then mkdir -p "${out_dir}"; fi
+done
# Set wave model ID tag to include member number
diff --git a/jobs/JGLOBAL_WAVE_PRDGEN_BULLS b/jobs/JGLOBAL_WAVE_PRDGEN_BULLS
index 86ad17e8b0..ebecf716af 100755
--- a/jobs/JGLOBAL_WAVE_PRDGEN_BULLS
+++ b/jobs/JGLOBAL_WAVE_PRDGEN_BULLS
@@ -13,9 +13,11 @@ export SENDDBN_NTC=${SENDDBN_NTC:-YES}
export SENDDBN=${SENDDBN:-NO}
export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn}
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_STATION COM_WAVE_WMO
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMIN_WAVE_STATION:COM_WAVE_STATION_TMPL \
+ COMOUT_WAVE_WMO:COM_WAVE_WMO_TMPL
-if [[ ! -d ${COM_WAVE_WMO} ]]; then mkdir -p "${COM_WAVE_WMO}"; fi
+if [[ ! -d "${COMOUT_WAVE_WMO}" ]]; then mkdir -p "${COMOUT_WAVE_WMO}"; fi
###################################
# Execute the Script
diff --git a/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED b/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED
index 5a4250fb57..208b36c535 100755
--- a/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED
+++ b/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED
@@ -13,11 +13,12 @@ export SENDDBN_NTC=${SENDDBN_NTC:-YES}
export SENDDBN=${SENDDBN:-NO}
export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn}
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_GRID COM_WAVE_WMO
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMIN_WAVE_GRID:COM_WAVE_GRID_TMPL \
+ COMOUT_WAVE_WMO:COM_WAVE_WMO_TMPL
-if [[ ! -d ${COM_WAVE_WMO} ]]; then mkdir -p "${COM_WAVE_WMO}"; fi
+if [[ ! -d "${COMOUT_WAVE_WMO}" ]]; then mkdir -p "${COMOUT_WAVE_WMO}"; fi
-mkdir -p "${COM_WAVE_WMO}"
###################################
# Execute the Script
diff --git a/jobs/JGLOBAL_WAVE_PREP b/jobs/JGLOBAL_WAVE_PREP
index 866f3cb318..65928b870d 100755
--- a/jobs/JGLOBAL_WAVE_PREP
+++ b/jobs/JGLOBAL_WAVE_PREP
@@ -17,9 +17,12 @@ export MP_PULSE=0
export CDO=${CDO_ROOT}/bin/cdo
# Set COM Paths and GETGES environment
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS COM_WAVE_PREP
-declare_from_tmpl -rx COM_RTOFS
-[[ ! -d ${COM_WAVE_PREP} ]] && mkdir -m 775 -p "${COM_WAVE_PREP}"
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+ COMIN_OBS:COM_OBS_TMPL \
+ COMIN_WAVE_PREP:COM_WAVE_PREP_TMPL \
+ COMOUT_WAVE_PREP:COM_WAVE_PREP_TMPL \
+ COMIN_RTOFS:COM_RTOFS_TMPL
+if [[ ! -d "${COMOUT_WAVE_PREP}" ]]; then mkdir -p "${COMOUT_WAVE_PREP}"; fi
# Execute the Script
${SCRgfs}/exgfs_wave_prep.sh
diff --git a/modulefiles/module_gwsetup.hercules.lua b/modulefiles/module_gwsetup.hercules.lua
index 795b295b30..9d845fb71d 100644
--- a/modulefiles/module_gwsetup.hercules.lua
+++ b/modulefiles/module_gwsetup.hercules.lua
@@ -3,7 +3,7 @@ Load environment to run GFS workflow ci scripts on Hercules
]])
load(pathJoin("contrib","0.1"))
-load(pathJoin("rocoto","1.3.5"))
+load(pathJoin("rocoto","1.3.7"))
prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-1.6.0/envs/gsi-addon-env/install/modulefiles/Core")
diff --git a/parm/config/gefs/config.base b/parm/config/gefs/config.base
index 16e0fefaba..9808b96579 100644
--- a/parm/config/gefs/config.base
+++ b/parm/config/gefs/config.base
@@ -99,7 +99,6 @@ export EXPDIR="@EXPDIR@/${PSLOT}"
export ROTDIR="@COMROOT@/${PSLOT}"
export DATAROOT="${STMP}/RUNDIRS/${PSLOT}" # TODO: set via prod_envir in Ops
-export RUNDIR="${DATAROOT}" # TODO: Should be removed; use DATAROOT instead
export ARCDIR="${NOSCRUB}/archive/${PSLOT}"
export ATARDIR="@ATARDIR@"
diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources
index b2ca5b3e51..7c3d77de1d 100644
--- a/parm/config/gefs/config.resources
+++ b/parm/config/gefs/config.resources
@@ -202,7 +202,7 @@ case ${step} in
declare -x "wtime_${step}_gfs"="06:00:00"
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs
index 8beb0652f7..2da6e7a2f0 100644
--- a/parm/config/gefs/config.ufs
+++ b/parm/config/gefs/config.ufs
@@ -294,6 +294,7 @@ if [[ "${skip_mom6}" == "false" ]]; then
export cplflx=".true."
model_list="${model_list}.ocean"
nthreads_mom6=1
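+ # MOM6_DIAG_MISVAL is now the same for all runs and resolutions, so set it
+ # once here; the per-resolution blocks below only pick the diag coordinate file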
+ MOM6_DIAG_MISVAL="-1e34"
case "${mom6_res}" in
"500")
ntasks_mom6=8
@@ -308,7 +309,6 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RIVER_RUNOFF='False'
eps_imesh="4.0e-1"
MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_25L.nc"
- MOM6_DIAG_MISVAL="0.0"
MOM6_ALLOW_LANDMASK_CHANGES='False'
TOPOEDITS=""
;;
@@ -324,13 +324,11 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RESTART_SETTING='r'
MOM6_RIVER_RUNOFF='False'
eps_imesh="2.5e-1"
- TOPOEDITS="topo_edits_011818.nc"
- if [[ "${DO_JEDIOCNVAR:-NO}" = "YES" ]]; then
- MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
- MOM6_DIAG_MISVAL="0.0"
- else
+ TOPOEDITS="ufs.topo_edits_011818.nc"
+ if [[ ${RUN} == "gfs" || "${RUN}" == "gefs" ]]; then
MOM6_DIAG_COORD_DEF_Z_FILE="interpolate_zgrid_40L.nc"
- MOM6_DIAG_MISVAL="-1e34"
+ else
+ MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
fi
MOM6_ALLOW_LANDMASK_CHANGES='True'
;;
@@ -346,12 +344,10 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RESTART_SETTING='n'
MOM6_RIVER_RUNOFF='True'
eps_imesh="1.0e-1"
- if [[ "${DO_JEDIOCNVAR:-NO}" = "YES" ]]; then
- MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
- MOM6_DIAG_MISVAL="0.0"
- else
+ if [[ ${RUN} == "gfs" || "${RUN}" == "gefs" ]]; then
MOM6_DIAG_COORD_DEF_Z_FILE="interpolate_zgrid_40L.nc"
- MOM6_DIAG_MISVAL="-1e34"
+ else
+ MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
fi
MOM6_ALLOW_LANDMASK_CHANGES='False'
TOPOEDITS=""
@@ -368,12 +364,10 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RIVER_RUNOFF='True'
MOM6_RESTART_SETTING="r"
eps_imesh="1.0e-1"
- if [[ "${DO_JEDIOCNVAR:-NO}" = "YES" ]]; then
- MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
- MOM6_DIAG_MISVAL="0.0"
- else
+ if [[ ${RUN} == "gfs" || "${RUN}" == "gefs" ]]; then
MOM6_DIAG_COORD_DEF_Z_FILE="interpolate_zgrid_40L.nc"
- MOM6_DIAG_MISVAL="-1e34"
+ else
+ MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
fi
MOM6_ALLOW_LANDMASK_CHANGES='False'
TOPOEDITS=""
diff --git a/parm/config/gfs/config.anal b/parm/config/gfs/config.anal
index 5c20a198de..2c55d85ff4 100644
--- a/parm/config/gfs/config.anal
+++ b/parm/config/gfs/config.anal
@@ -19,12 +19,8 @@ if [[ "${CDUMP}" = "gfs" ]] ; then
export DIAG_TARBALL="YES"
fi
-export npe_gsi=${npe_anal}
-
-if [[ "${CDUMP}" == "gfs" ]] ; then
- export npe_gsi=${npe_anal_gfs}
- export nth_anal=${nth_anal_gfs}
-fi
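+# ${RUN/enkf} strips any "enkf" prefix so enkfgdas/enkfgfs resolve to the
+# gdas/gfs analysis task counts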
+npe_var="npe_anal_${RUN/enkf}"
+export npe_gsi="${!npe_var}"
# Set parameters specific to L127
if [[ ${LEVS} = "128" ]]; then
diff --git a/parm/config/gfs/config.analcalc b/parm/config/gfs/config.analcalc
index 9405114ecc..d9501503f0 100644
--- a/parm/config/gfs/config.analcalc
+++ b/parm/config/gfs/config.analcalc
@@ -6,10 +6,6 @@
echo "BEGIN: config.analcalc"
# Get task specific resources
-. $EXPDIR/config.resources analcalc
-
-if [[ "$CDUMP" == "gfs" ]]; then
- export nth_echgres=$nth_echgres_gfs
-fi
+. ${EXPDIR}/config.resources analcalc
echo "END: config.analcalc"
diff --git a/parm/config/gfs/config.base b/parm/config/gfs/config.base
index f893eaaf4e..9fd494a9eb 100644
--- a/parm/config/gfs/config.base
+++ b/parm/config/gfs/config.base
@@ -128,7 +128,6 @@ if [[ "${PDY}${cyc}" -ge "2019092100" && "${PDY}${cyc}" -le "2019110700" ]]; the
export DUMP_SUFFIX="p" # Use dumps from NCO GFS v15.3 parallel
fi
export DATAROOT="${STMP}/RUNDIRS/${PSLOT}" # TODO: set via prod_envir in Ops
-export RUNDIR="${DATAROOT}" # TODO: Should be removed; use DATAROOT instead
export ARCDIR="${NOSCRUB}/archive/${PSLOT}"
export ATARDIR="@ATARDIR@"
@@ -295,7 +294,7 @@ export FHOUT_GFS=3 # 3 for ops
export FHMAX_HF_GFS=@FHMAX_HF_GFS@
export FHOUT_HF_GFS=1
export FHOUT_OCN_GFS=6
-export FHOUT_ICE_GFS=6
+export FHOUT_ICE_GFS=6
export FHMIN_WAV=0
export FHOUT_WAV=3
export FHMAX_HF_WAV=120
diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources
index e5f741cf7e..2e910d4eb4 100644
--- a/parm/config/gfs/config.resources
+++ b/parm/config/gfs/config.resources
@@ -3,6 +3,8 @@
########## config.resources ##########
# Set resource information for job tasks
# e.g. walltime, node, cores per node, memory etc.
+# Note: machine-specific resources should be placed into the appropriate config file:
+# config.resources.${machine}
if (( $# != 1 )); then
@@ -33,16 +35,53 @@ step=$1
echo "BEGIN: config.resources"
case ${machine} in
- "WCOSS2") npe_node_max=128;;
- "HERA") npe_node_max=40;;
- "GAEA") npe_node_max=128;;
- "ORION") npe_node_max=40;;
- "HERCULES") npe_node_max=80;;
+ "WCOSS2")
+ npe_node_max=128
+ # shellcheck disable=SC2034
+ mem_node_max="500GB"
+ ;;
+ "HERA")
+ npe_node_max=40
+ # shellcheck disable=SC2034
+ mem_node_max="96GB"
+ ;;
+ "GAEA")
+ npe_node_max=128
+ # shellcheck disable=SC2034
+ mem_node_max="251GB"
+ ;;
+ "ORION")
+ npe_node_max=40
+ # shellcheck disable=SC2034
+ mem_node_max="192GB"
+ ;;
+ "HERCULES")
+ npe_node_max=80
+ # shellcheck disable=SC2034
+ mem_node_max="512GB"
+ ;;
"JET")
case ${PARTITION_BATCH} in
- "xjet") npe_node_max=24;;
- "vjet" | "sjet") npe_node_max=16;;
- "kjet") npe_node_max=40;;
+ "xjet")
+ npe_node_max=24
+ # shellcheck disable=SC2034
+ mem_node_max="61GB"
+ ;;
+ "vjet")
+ npe_node_max=16
+ # shellcheck disable=SC2034
+ mem_node_max="61GB"
+ ;;
+ "sjet")
+ npe_node_max=16
+ # shellcheck disable=SC2034
+ mem_node_max="29GB"
+ ;;
+ "kjet")
+ npe_node_max=40
+ # shellcheck disable=SC2034
+ mem_node_max="88GB"
+ ;;
*)
echo "FATAL ERROR: Unknown partition ${PARTITION_BATCH} specified for ${machine}"
exit 3
@@ -50,8 +89,15 @@ case ${machine} in
;;
"S4")
case ${PARTITION_BATCH} in
- "s4") npe_node_max=32;;
- "ivy") npe_node_max=20;;
+ "s4") npe_node_max=32
+ # shellcheck disable=SC2034
+ mem_node_max="168GB"
+ ;;
+ "ivy")
+ npe_node_max=20
+ # shellcheck disable=SC2034
+ mem_node_max="128GB"
+ ;;
*)
echo "FATAL ERROR: Unknown partition ${PARTITION_BATCH} specified for ${machine}"
exit 3
@@ -60,15 +106,22 @@ case ${machine} in
"AWSPW")
export PARTITION_BATCH="compute"
npe_node_max=40
+ # TODO Supply a max mem/node value for AWS
+ # shellcheck disable=SC2034
+ mem_node_max=""
;;
"CONTAINER")
npe_node_max=1
+ # TODO Supply a max mem/node value for a container
+ # shellcheck disable=SC2034
+ mem_node_max=""
;;
*)
echo "FATAL ERROR: Unknown machine encountered by ${BASH_SOURCE[0]}"
exit 2
;;
esac
+
export npe_node_max
case ${step} in
@@ -77,11 +130,7 @@ case ${step} in
export npe_prep=4
export npe_node_prep=2
export nth_prep=1
- if [[ "${machine}" == "WCOSS2" ]]; then
- export is_exclusive=True
- else
- export memory_prep="40GB"
- fi
+ export memory_prep="40GB"
;;
"prepsnowobs")
@@ -118,26 +167,39 @@ case ${step} in
"waveprep")
export wtime_waveprep="00:10:00"
- export npe_waveprep=5
+ export npe_waveprep_gdas=5
export npe_waveprep_gfs=65
- export nth_waveprep=1
+ export nth_waveprep_gdas=1
export nth_waveprep_gfs=1
- export npe_node_waveprep=$(( npe_node_max / nth_waveprep ))
+ export npe_node_waveprep_gdas=$(( npe_node_max / nth_waveprep_gdas ))
export npe_node_waveprep_gfs=$(( npe_node_max / nth_waveprep_gfs ))
- export NTASKS=${npe_waveprep}
+ export NTASKS_gdas=${npe_waveprep_gdas}
export NTASKS_gfs=${npe_waveprep_gfs}
- export memory_waveprep="100GB"
+ export memory_waveprep_gdas="100GB"
export memory_waveprep_gfs="150GB"
+
+ var_npe_node="npe_node_waveprep_${RUN}"
+ var_nth="nth_waveprep_${RUN}"
+ var_npe="npe_waveprep_${RUN}"
+ var_NTASKS="ntasks_${RUN}"
+ # RUN is set to a single value at setup time, so these won't be found
+ # TODO rework setup_xml.py to initialize RUN to the applicable option
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_waveprep"="${!var_npe_node}" \
+ "nth_waveprep"="${!var_nth}" \
+ "npe_waveprep"="${!var_npe}" \
+ "NTASKS"="${!var_NTASKS}"
+ fi
;;
"wavepostsbs")
- export wtime_wavepostsbs="00:20:00"
+ export wtime_wavepostsbs_gdas="00:20:00"
export wtime_wavepostsbs_gfs="03:00:00"
export npe_wavepostsbs=8
export nth_wavepostsbs=1
export npe_node_wavepostsbs=$(( npe_node_max / nth_wavepostsbs ))
export NTASKS=${npe_wavepostsbs}
- export memory_wavepostsbs="10GB"
+ export memory_wavepostsbs_gdas="10GB"
export memory_wavepostsbs_gfs="10GB"
;;
@@ -229,13 +291,23 @@ case ${step} in
export layout_y=${layout_y_atmanl}
export wtime_atmanlvar="00:30:00"
- export npe_atmanlvar=$(( layout_x * layout_y * 6 ))
+ export npe_atmanlvar_gdas=$(( layout_x * layout_y * 6 ))
export npe_atmanlvar_gfs=$(( layout_x * layout_y * 6 ))
- export nth_atmanlvar=1
- export nth_atmanlvar_gfs=${nth_atmanlvar}
- export npe_node_atmanlvar=$(( npe_node_max / nth_atmanlvar ))
+ export nth_atmanlvar_gdas=1
+ export nth_atmanlvar_gfs=${nth_atmanlvar_gdas}
+ export npe_node_atmanlvar_gdas=$(( npe_node_max / nth_atmanlvar_gdas ))
+ export npe_node_atmanlvar_gfs=$(( npe_node_max / nth_atmanlvar_gfs ))
export memory_atmanlvar="96GB"
export is_exclusive=True
+
+ var_npe_node="npe_node_atmanlvar_${RUN}"
+ var_nth="nth_atmanlvar_${RUN}"
+ var_npe="npe_atmanlvar_${RUN}"
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_atmanlvar"="${!var_npe_node}" \
+ "nth_atmanlvar"="${!var_nth}" \
+ "npe_atmanlvar"="${!var_npe}"
+ fi
;;
"atmanlfv3inc")
@@ -243,13 +315,23 @@ case ${step} in
export layout_y=${layout_y_atmanl}
export wtime_atmanlfv3inc="00:30:00"
- export npe_atmanlfv3inc=$(( layout_x * layout_y * 6 ))
+ export npe_atmanlfv3inc_gdas=$(( layout_x * layout_y * 6 ))
export npe_atmanlfv3inc_gfs=$(( layout_x * layout_y * 6 ))
- export nth_atmanlfv3inc=1
- export nth_atmanlfv3inc_gfs=${nth_atmanlfv3inc}
- export npe_node_atmanlfv3inc=$(( npe_node_max / nth_atmanlfv3inc ))
+ export nth_atmanlfv3inc_gdas=1
+ export nth_atmanlfv3inc_gfs=${nth_atmanlfv3inc_gdas}
+ export npe_node_atmanlfv3inc_gdas=$(( npe_node_max / nth_atmanlfv3inc_gdas ))
+ export npe_node_atmanlfv3inc_gfs=$(( npe_node_max / nth_atmanlfv3inc_gfs ))
export memory_atmanlfv3inc="96GB"
export is_exclusive=True
+
+ var_npe_node="npe_node_atmanlfv3inc_${RUN}"
+ var_nth="nth_atmanlfv3inc_${RUN}"
+ var_npe="npe_atmanlfv3inc_${RUN}"
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_atmanlfv3inc"="${!var_npe_node}" \
+ "nth_atmanlfv3inc"="${!var_nth}" \
+ "npe_atmanlfv3inc"="${!var_npe}"
+ fi
;;
"atmanlfinal")
@@ -276,7 +358,7 @@ case ${step} in
layout_y=1
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
esac
@@ -318,7 +400,7 @@ case ${step} in
layout_y=1
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
esac
@@ -351,7 +433,7 @@ case ${step} in
layout_y=1
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
esac
@@ -359,12 +441,22 @@ case ${step} in
export layout_y
export wtime_aeroanlrun="00:30:00"
- export npe_aeroanlrun=$(( layout_x * layout_y * 6 ))
+ export npe_aeroanlrun_gdas=$(( layout_x * layout_y * 6 ))
export npe_aeroanlrun_gfs=$(( layout_x * layout_y * 6 ))
- export nth_aeroanlrun=1
+ export nth_aeroanlrun_gdas=1
export nth_aeroanlrun_gfs=1
- export npe_node_aeroanlrun=$(( npe_node_max / nth_aeroanlrun ))
+ export npe_node_aeroanlrun_gdas=$(( npe_node_max / nth_aeroanlrun_gdas ))
+ export npe_node_aeroanlrun_gfs=$(( npe_node_max / nth_aeroanlrun_gfs ))
export is_exclusive=True
+
+ var_npe_node="npe_node_aeroanlrun_${RUN}"
+ var_nth="nth_aeroanlrun_${RUN}"
+ var_npe="npe_aeroanlrun_${RUN}"
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_aeroanlrun"="${!var_npe_node}" \
+ "nth_aeroanlrun"="${!var_nth}" \
+ "npe_aeroanlrun"="${!var_npe}"
+ fi
;;
"aeroanlfinal")
@@ -398,7 +490,7 @@ case ${step} in
"050") npes=16;;
"500") npes=16;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${OCNRES}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
@@ -425,7 +517,7 @@ case ${step} in
memory_ocnanalrun="24GB"
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${OCNRES}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
@@ -453,7 +545,7 @@ case ${step} in
memory_ocnanalecen="24GB"
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${OCNRES}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
@@ -481,7 +573,7 @@ case ${step} in
memory_ocnanalletkf="24GB"
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${OCNRES}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
@@ -510,7 +602,7 @@ case ${step} in
memory_ocnanalchkpt="32GB"
npes=8;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${OCNRES}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}"
exit 4
esac
export npe_ocnanalchkpt=${npes}
@@ -533,59 +625,26 @@ case ${step} in
;;
"anal")
- export wtime_anal="01:20:00"
+ export wtime_anal_gdas="01:20:00"
export wtime_anal_gfs="01:00:00"
- export npe_anal=780
- export nth_anal=5
- export npe_anal_gfs=825
- export nth_anal_gfs=5
- if [[ "${machine}" == "WCOSS2" ]]; then
- export nth_anal=8
- export nth_anal_gfs=8
- fi
case ${CASE} in
+ "C768")
+ export npe_anal_gdas=780
+ export npe_anal_gfs=825
+ export nth_anal=5
+ ;;
"C384")
- export npe_anal=160
+ export npe_anal_gdas=160
export npe_anal_gfs=160
export nth_anal=10
- export nth_anal_gfs=10
- if [[ ${machine} = "HERA" ]]; then
- export npe_anal=270
- export npe_anal_gfs=270
- export nth_anal=8
- export nth_anal_gfs=8
- fi
- if [[ ${machine} = "S4" ]]; then
- #On the S4-s4 partition, this is accomplished by increasing the task
- #count to a multiple of 32
- if [[ ${PARTITION_BATCH} = "s4" ]]; then
- export npe_anal=416
- export npe_anal_gfs=416
- fi
- #S4 is small, so run this task with just 1 thread
- export nth_anal=1
- export nth_anal_gfs=1
- export wtime_anal="02:00:00"
- fi
;;
"C192" | "C96" | "C48")
- export npe_anal=84
+ export npe_anal_gdas=84
export npe_anal_gfs=84
- if [[ ${machine} == "S4" ]]; then
- export nth_anal=4
- export nth_anal_gfs=4
- #Adjust job count for S4
- if [[ ${PARTITION_BATCH} == "s4" ]]; then
- export npe_anal=88
- export npe_anal_gfs=88
- elif [[ ${PARTITION_BATCH} == "ivy" ]]; then
- export npe_anal=90
- export npe_anal_gfs=90
- fi
- fi
+ export nth_anal=5
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
@@ -593,18 +652,31 @@ case ${step} in
export nth_cycle=${nth_anal}
export npe_node_cycle=$(( npe_node_max / nth_cycle ))
export is_exclusive=True
+
+ var_npe="npe_anal_${RUN}"
+ if [[ -n "${!var_npe+0}" ]]; then
+ declare -x "npe_anal"="${!var_npe}"
+ fi
;;
"analcalc")
- export wtime_analcalc="00:10:00"
+ export wtime_analcalc="00:15:00"
export npe_analcalc=127
export ntasks="${npe_analcalc}"
export nth_analcalc=1
- export nth_echgres=4
- export nth_echgres_gfs=12
export npe_node_analcalc=$(( npe_node_max / nth_analcalc ))
+ export nth_echgres_gdas=4
+ export nth_echgres_gfs=12
export is_exclusive=True
export memory_analcalc="48GB"
+ if [[ "${CASE}" == "C384" || "${CASE}" == "C768" ]]; then
+ export memory_analcalc="${mem_node_max}"
+ fi
+
+ var_nth="nth_echgres_${RUN}"
+ if [[ -n "${!var_nth+0}" ]]; then
+ declare -x "nth_echgres"="${!var_nth}"
+ fi
;;
"analdiag")
@@ -643,6 +715,18 @@ case ${step} in
ntasks_quilt=${ntasks_quilt_gfs}
nthreads_fv3=${nthreads_fv3_gfs}
nthreads_ufs=${nthreads_ufs_gfs}
+ # Will not be set if we are skipping the mediator
+ nthreads_mediator=${nthreads_mediator_gfs:-}
+ elif [[ "${_CDUMP}" =~ "gdas" ]]; then
+ export layout_x=${layout_x_gdas}
+ export layout_y=${layout_y_gdas}
+ export WRITE_GROUP=${WRITE_GROUP_GDAS}
+ export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GDAS}
+ ntasks_fv3=${ntasks_fv3_gdas}
+ ntasks_quilt=${ntasks_quilt_gdas}
+ nthreads_fv3=${nthreads_fv3_gdas}
+ nthreads_ufs=${nthreads_ufs_gdas}
+ nthreads_mediator=${nthreads_mediator_gdas:-}
fi
# Determine if using ESMF-managed threading or traditional threading
@@ -737,37 +821,45 @@ case ${step} in
echo "Total PETS for ${_CDUMP} = ${NTASKS_TOT}"
- if [[ "${_CDUMP}" =~ "gfs" ]]; then
- declare -x "npe_${step}_gfs"="${NTASKS_TOT}"
- declare -x "nth_${step}_gfs"="${UFS_THREADS}"
- declare -x "npe_node_${step}_gfs"="${npe_node_max}"
- else
- declare -x "npe_${step}"="${NTASKS_TOT}"
- declare -x "nth_${step}"="${UFS_THREADS}"
- declare -x "npe_node_${step}"="${npe_node_max}"
- fi
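+ # Save the totals under RUN-specific names (gdas, gfs, enkfgdas, enkfgfs);
+ # the generic npe_/nth_/npe_node_ names are resolved from these below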
+ declare -x "npe_${step}_${_CDUMP}"="${NTASKS_TOT}"
+ declare -x "nth_${step}_${_CDUMP}"="${UFS_THREADS}"
+ declare -x "npe_node_${step}_${_CDUMP}"="${npe_node_max}"
done
case "${CASE}" in
"C48" | "C96" | "C192")
- declare -x "wtime_${step}"="00:20:00"
+ declare -x "wtime_${step}_gdas"="00:20:00"
+ declare -x "wtime_${step}_enkfgdas"="00:20:00"
declare -x "wtime_${step}_gfs"="03:00:00"
+ declare -x "wtime_${step}_enkfgfs"="00:20:00"
;;
"C384")
- declare -x "wtime_${step}"="00:30:00"
+ declare -x "wtime_${step}_gdas"="00:30:00"
+ declare -x "wtime_${step}_enkfgdas"="00:30:00"
declare -x "wtime_${step}_gfs"="06:00:00"
+ declare -x "wtime_${step}_enkfgfs"="00:30:00"
;;
"C768" | "C1152")
- declare -x "wtime_${step}"="00:30:00"
+ # Not valid resolutions for ensembles
+ declare -x "wtime_${step}_gdas"="00:40:00"
declare -x "wtime_${step}_gfs"="06:00:00"
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
+ var_npe_node="npe_node_${step}_${RUN}"
+ var_nth="nth_${step}_${RUN}"
+ var_npe="npe_${step}_${RUN}"
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_${step}"="${!var_npe_node}" \
+ "nth_${step}"="${!var_nth}" \
+ "npe_${step}"="${!var_npe}"
+ fi
+
unset _CDUMP _CDUMP_LIST
unset NTASKS_TOT
;;
@@ -785,21 +877,15 @@ case ${step} in
"C48" | "C96")
export npe_upp=${CASE:1}
;;
- "C192" | "C384")
+ "C192" | "C384" | "C768" )
export npe_upp=120
- export memory_upp="96GB"
- ;;
- "C768")
- export npe_upp=120
- export memory_upp="96GB"
- if [[ ${machine} == "WCOSS2" ]]; then export memory_upp="480GB" ; fi
+ export memory_upp="${mem_node_max}"
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
- if [[ ${machine} == "JET" ]]; then unset memory_upp ; fi
export npe_node_upp=${npe_upp}
export nth_upp=1
@@ -816,10 +902,6 @@ case ${step} in
export npe_atmos_products=24
export nth_atmos_products=1
export npe_node_atmos_products="${npe_atmos_products}"
- export wtime_atmos_products_gfs="${wtime_atmos_products}"
- export npe_atmos_products_gfs="${npe_atmos_products}"
- export nth_atmos_products_gfs="${nth_atmos_products}"
- export npe_node_atmos_products_gfs="${npe_node_atmos_products}"
export is_exclusive=True
;;
@@ -844,10 +926,6 @@ case ${step} in
export npe_vminmon=1
export nth_vminmon=1
export npe_node_vminmon=1
- export wtime_vminmon_gfs="00:05:00"
- export npe_vminmon_gfs=1
- export nth_vminmon_gfs=1
- export npe_node_vminmon_gfs=1
export memory_vminmon="1G"
;;
@@ -881,17 +959,14 @@ case ${step} in
export nth_fit2obs=1
export npe_node_fit2obs=1
export memory_fit2obs="20G"
- if [[ ${machine} == "WCOSS2" ]]; then export npe_node_fit2obs=3 ; fi
;;
"metp")
export nth_metp=1
- export wtime_metp="03:00:00"
+ export wtime_metp_gdas="03:00:00"
+ export wtime_metp_gfs="06:00:00"
export npe_metp=4
export npe_node_metp=4
- export wtime_metp_gfs="06:00:00"
- export npe_metp_gfs=4
- export npe_node_metp_gfs=4
export is_exclusive=True
;;
@@ -900,9 +975,6 @@ case ${step} in
export npe_echgres=3
export nth_echgres=${npe_node_max}
export npe_node_echgres=1
- if [[ "${machine}" == "WCOSS2" ]]; then
- export memory_echgres="200GB"
- fi
;;
"init")
@@ -933,9 +1005,6 @@ case ${step} in
declare -x "npe_node_${step}"="1"
declare -x "nth_${step}"="1"
declare -x "memory_${step}"="4096M"
- if [[ "${machine}" == "WCOSS2" ]]; then
- declare -x "memory_${step}"="50GB"
- fi
;;
"cleanup")
@@ -970,13 +1039,25 @@ case ${step} in
export layout_y=${layout_y_atmensanl}
export wtime_atmensanlletkf="00:30:00"
- export npe_atmensanlletkf=$(( layout_x * layout_y * 6 ))
- export npe_atmensanlletkf_gfs=$(( layout_x * layout_y * 6 ))
- export nth_atmensanlletkf=1
- export nth_atmensanlletkf_gfs=${nth_atmensanlletkf}
- export npe_node_atmensanlletkf=$(( npe_node_max / nth_atmensanlletkf ))
+ export npe_atmensanlletkf_enkfgdas=$(( layout_x * layout_y * 6 ))
+ export npe_atmensanlletkf_enkfgfs=$(( layout_x * layout_y * 6 ))
+ export nth_atmensanlletkf_enkfgdas=1
+ export nth_atmensanlletkf_enkfgfs=${nth_atmensanlletkf_enkfgdas}
+ export npe_node_atmensanlletkf_enkfgdas=$(( npe_node_max / nth_atmensanlletkf_enkfgdas ))
+ export npe_node_atmensanlletkf_enkfgfs=$(( npe_node_max / nth_atmensanlletkf_enkfgfs ))
export memory_atmensanlletkf="96GB"
export is_exclusive=True
+
+ var_npe_node="npe_node_atmensanlletkf_${RUN}"
+ var_nth="nth_atmensanlletkf_${RUN}"
+ var_npe="npe_atmensanlletkf_${RUN}"
+ # RUN is set to a single value at setup time, so these won't be found
+ # TODO rework setup_xml.py to initialize RUN to the applicable option
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_atmensanlletkf"="${!var_npe_node}" \
+ "nth_atmensanlletkf"="${!var_nth}" \
+ "npe_atmensanlletkf"="${!var_npe}"
+ fi
;;
"atmensanlfv3inc")
@@ -984,13 +1065,25 @@ case ${step} in
export layout_y=${layout_y_atmensanl}
export wtime_atmensanlfv3inc="00:30:00"
- export npe_atmensanlfv3inc=$(( layout_x * layout_y * 6 ))
- export npe_atmensanlfv3inc_gfs=$(( layout_x * layout_y * 6 ))
- export nth_atmensanlfv3inc=1
- export nth_atmensanlfv3inc_gfs=${nth_atmensanlfv3inc}
- export npe_node_atmensanlfv3inc=$(( npe_node_max / nth_atmensanlfv3inc ))
+ export npe_atmensanlfv3inc_enkfgdas=$(( layout_x * layout_y * 6 ))
+ export npe_atmensanlfv3inc_enkfgfs=$(( layout_x * layout_y * 6 ))
+ export nth_atmensanlfv3inc_enkfgdas=1
+ export nth_atmensanlfv3inc_enkfgfs=${nth_atmensanlfv3inc_enkfgdas}
+ export npe_node_atmensanlfv3inc_enkfgdas=$(( npe_node_max / nth_atmensanlfv3inc_enkfgdas ))
+ export npe_node_atmensanlfv3inc_enkfgfs=$(( npe_node_max / nth_atmensanlfv3inc_enkfgfs ))
export memory_atmensanlfv3inc="96GB"
export is_exclusive=True
+
+ var_npe_node="npe_node_atmensanlfv3inc_${RUN}"
+ var_nth="nth_atmensanlfv3inc_${RUN}"
+ var_npe="npe_atmensanlfv3inc_${RUN}"
+ # RUN is set to a single value at setup time, so these won't be found
+ # TODO rework setup_xml.py to initialize RUN to the applicable option
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_atmensanlfv3inc"="${!var_npe_node}" \
+ "nth_atmensanlfv3inc"="${!var_nth}" \
+ "npe_atmensanlfv3inc"="${!var_npe}"
+ fi
;;
"atmensanlfinal")
@@ -1009,27 +1102,25 @@ case ${step} in
"C384") export npe_eobs=100;;
"C192" | "C96" | "C48") export npe_eobs=40;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
export npe_eomg=${npe_eobs}
export nth_eobs=2
export nth_eomg=${nth_eobs}
+ # NOTE The number of tasks and cores used must be the same for eobs
+ # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
export npe_node_eobs=$(( npe_node_max / nth_eobs ))
export is_exclusive=True
- # The number of tasks and cores used must be the same for eobs
- # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
- # For S4, this is accomplished by running 10 tasks/node
- if [[ ${machine} = "S4" ]]; then
- export npe_node_eobs=10
- elif [[ ${machine} = "HERCULES" ]]; then
- # For Hercules, this is only an issue at C384; use 20 tasks/node
- if [[ ${CASE} = "C384" ]]; then
- export npe_node_eobs=20
- fi
- fi
export npe_node_eomg=${npe_node_eobs}
+ # Unset npe_node_eobs if it does not evenly divide npe_node_max,
+ # to prevent dropping data on the floor. This should be set in
+ # config.resources.{machine} instead. This will result in an error at
+ # experiment setup time if not set in config.resources.{machine}.
+ if [[ $(( npe_node_max % npe_node_eobs )) != 0 ]]; then
+ unset npe_node_eobs
+ fi
;;
"ediag")
@@ -1046,33 +1137,17 @@ case ${step} in
"C768")
export npe_eupd=480
export nth_eupd=6
- if [[ "${machine}" == "WCOSS2" ]]; then
- export npe_eupd=315
- export nth_eupd=14
- fi
;;
"C384")
export npe_eupd=270
export nth_eupd=8
- if [[ "${machine}" == "WCOSS2" ]]; then
- export npe_eupd=315
- export nth_eupd=14
- elif [[ "${machine}" == "S4" ]]; then
- export npe_eupd=160
- export nth_eupd=2
- elif [[ "${machine}" == "HERA" ]]; then
- export npe_eupd=80
- fi
;;
"C192" | "C96" | "C48")
export npe_eupd=42
export nth_eupd=2
- if [[ "${machine}" == "HERA" || "${machine}" == "JET" ]]; then
- export nth_eupd=4
- fi
;;
*)
- echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
+ echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
exit 4
;;
esac
@@ -1084,7 +1159,6 @@ case ${step} in
export wtime_ecen="00:10:00"
export npe_ecen=80
export nth_ecen=4
- if [[ "${machine}" == "HERA" ]]; then export nth_ecen=6; fi
if [[ ${CASE} == "C384" || ${CASE} == "C192" || ${CASE} == "C96" || ${CASE} == "C48" ]]; then
export nth_ecen=2
fi
@@ -1101,11 +1175,11 @@ case ${step} in
export npe_node_esfc=$(( npe_node_max / nth_esfc ))
export nth_cycle=${nth_esfc}
export npe_node_cycle=$(( npe_node_max / nth_cycle ))
- if [[ ${machine} != "JET" ]]; then export memory_esfc="80G" ; fi
;;
"epos")
export wtime_epos="00:15:00"
+ [[ ${CASE} == "C768" ]] && export wtime_epos="00:25:00"
export npe_epos=80
export nth_epos=1
export npe_node_epos=$(( npe_node_max / nth_epos ))
@@ -1144,13 +1218,22 @@ case ${step} in
"gempak")
export wtime_gempak="03:00:00"
- export npe_gempak=2
+ export npe_gempak_gdas=2
export npe_gempak_gfs=28
- export npe_node_gempak=2
+ export npe_node_gempak_gdas=2
export npe_node_gempak_gfs=28
export nth_gempak=1
- export memory_gempak="4GB"
+ export memory_gempak_gdas="4GB"
export memory_gempak_gfs="2GB"
+
+ var_npe_node="npe_node_gempak_${RUN}"
+ var_npe="npe_gempak_${RUN}"
+ # RUN is set to a single value at setup time, so these won't be found
+ # TODO rework setup_xml.py to initialize RUN to the applicable option
+ if [[ -n "${!var_npe_node+0}" ]]; then
+ declare -x "npe_node_gempak"="${!var_npe_node}" \
+ "npe_gempak"="${!var_npe}"
+ fi
;;
"mos_stn_prep")
@@ -1307,4 +1390,15 @@ case ${step} in
esac
+# Unset dynamic variable names
+unset var_NTASKS \
+ var_npe \
+ var_npe_node \
+ var_nth
+
+# Get machine-specific resources, overriding/extending the above assignments
+if [[ -f "${EXPDIR}/config.resources.${machine}" ]]; then
+ source "${EXPDIR}/config.resources.${machine}"
+fi
+
echo "END: config.resources"
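The gempak block and the machine-override hook above lean on two bash idioms that recur throughout this change: indirect expansion ("${!var}") to resolve a variable whose name is built at runtime, and a machine file sourced after the generic defaults so that its assignments win. A minimal, self-contained sketch of both; the file name and the values here are made up for illustration:

    #! /usr/bin/env bash
    RUN="gdas"
    npe_gempak_gdas=2

    var_npe="npe_gempak_${RUN}"               # variable name built at runtime
    if [[ -n "${!var_npe+0}" ]]; then         # true only if that variable is set
      declare -x "npe_gempak"="${!var_npe}"   # exports npe_gempak=2
    fi

    machine="HERA"
    # The override wins simply because it is sourced last:
    if [[ -f "config.resources.${machine}" ]]; then
      source "config.resources.${machine}"
    fi
    echo "npe_gempak=${npe_gempak}"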
diff --git a/parm/config/gfs/config.resources.GAEA b/parm/config/gfs/config.resources.GAEA
new file mode 100644
index 0000000000..64990b299f
--- /dev/null
+++ b/parm/config/gfs/config.resources.GAEA
@@ -0,0 +1,22 @@
+#! /usr/bin/env bash
+
+# Gaea-specific job resources
+
+case ${step} in
+ "eobs")
+ # The number of tasks and cores used must be the same for eobs
+ # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
+ case ${CASE} in
+ "C768" | "C384")
+ export npe_node_eobs=50
+ ;;
+ *)
+ export npe_node_eobs=40
+ ;;
+ esac
+ ;;
+
+ *)
+ ;;
+
+esac
diff --git a/parm/config/gfs/config.resources.HERA b/parm/config/gfs/config.resources.HERA
new file mode 100644
index 0000000000..cfd614961d
--- /dev/null
+++ b/parm/config/gfs/config.resources.HERA
@@ -0,0 +1,36 @@
+#! /usr/bin/env bash
+
+# Hera-specific job resources
+
+case ${step} in
+ "anal")
+ if [[ "${CASE}" == "C384" ]]; then
+ export npe_anal_gdas=270
+ export npe_anal_gfs=270
+ export nth_anal=8
+ export npe_node_anal=$(( npe_node_max / nth_anal ))
+ fi
+ ;;
+
+ "eupd")
+ case ${CASE} in
+ "C384")
+ export npe_eupd=80
+ ;;
+ "C192" | "C96" | "C48")
+ export nth_eupd=4
+ ;;
+ *)
+ ;;
+ esac
+ export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ ;;
+
+ "ecen")
+ if [[ "${CASE}" == "C768" ]]; then export nth_ecen=6; fi
+ export npe_node_ecen=$(( npe_node_max / nth_ecen ))
+ ;;
+
+ *)
+ ;;
+esac
diff --git a/parm/config/gfs/config.resources.HERCULES b/parm/config/gfs/config.resources.HERCULES
new file mode 100644
index 0000000000..7d2ca646e6
--- /dev/null
+++ b/parm/config/gfs/config.resources.HERCULES
@@ -0,0 +1,17 @@
+#! /usr/bin/env bash
+
+# Hercules-specific job resources
+
+case ${step} in
+ "eobs" | "eomg")
+ # The number of tasks and cores used must be the same for eobs
+ # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
+ # For Hercules, this is only an issue at C384; use 20 tasks/node
+    if [[ ${CASE} == "C384" ]]; then
+ export npe_node_eobs=20
+ fi
+ export npe_node_eomg=${npe_node_eobs}
+ ;;
+ *)
+ ;;
+esac
diff --git a/parm/config/gfs/config.resources.JET b/parm/config/gfs/config.resources.JET
new file mode 100644
index 0000000000..de2ec6547a
--- /dev/null
+++ b/parm/config/gfs/config.resources.JET
@@ -0,0 +1,53 @@
+#! /usr/bin/env bash
+
+# Jet-specific job resources
+
+case ${step} in
+ "anal")
+ if [[ "${CASE}" == "C384" ]]; then
+ export npe_anal_gdas=270
+ export npe_anal_gfs=270
+ export nth_anal=8
+ export npe_node_anal=$(( npe_node_max / nth_anal ))
+ fi
+ ;;
+
+ "eobs")
+ if [[ "${PARTITION_BATCH}" == "xjet" ]]; then
+ # The number of tasks and cores used must be the same for eobs
+ # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
+      # This would also be an issue for vjet and sjet if anyone runs on those nodes.
+ export npe_node_eobs=10
+ fi
+ ;;
+
+ "eupd")
+ case ${CASE} in
+ "C384")
+ export npe_eupd=80
+ ;;
+ "C192" | "C96" | "C48")
+ export nth_eupd=4
+ ;;
+ *)
+ ;;
+ esac
+ export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ ;;
+
+ "ecen")
+ if [[ "${CASE}" == "C768" ]]; then export nth_ecen=6; fi
+ export npe_node_ecen=$(( npe_node_max / nth_ecen ))
+ ;;
+
+ "upp")
+ export memory_upp="${mem_node_max}"
+ ;;
+
+ "esfc")
+ export memory_esfc="${mem_node_max}"
+ ;;
+
+ *)
+ ;;
+esac
diff --git a/parm/config/gfs/config.resources.S4 b/parm/config/gfs/config.resources.S4
new file mode 100644
index 0000000000..3f6654f8d6
--- /dev/null
+++ b/parm/config/gfs/config.resources.S4
@@ -0,0 +1,59 @@
+#! /usr/bin/env bash
+
+# S4-specific job resources
+
+case ${step} in
+ "anal")
+ case ${CASE} in
+ "C384")
+        # Some of the intermediate data can be lost if the number of tasks
+        # per node does not match the number of reserved cores/node.
+        # On the S4 "s4" partition, avoid this by increasing the task
+        # count to a multiple of 32
+        if [[ ${PARTITION_BATCH} == "s4" ]]; then
+          export npe_anal_gdas=416
+          export npe_anal_gfs=416
+        fi
+        # S4 is small, so run this task with just 1 thread
+ export nth_anal=1
+ export wtime_anal_gdas="02:00:00"
+ export wtime_anal_gfs="02:00:00"
+ ;;
+ "C192" | "C96" | "C48")
+ export nth_anal=4
+ if [[ ${PARTITION_BATCH} == "s4" ]]; then
+ export npe_anal_gdas=88
+ export npe_anal_gfs=88
+ elif [[ ${PARTITION_BATCH} == "ivy" ]]; then
+ export npe_anal_gdas=90
+ export npe_anal_gfs=90
+ fi
+ ;;
+ *)
+ ;;
+ esac
+ export npe_node_anal=$(( npe_node_max / nth_anal ))
+ ;;
+
+ "eobs")
+ # The number of tasks and cores used must be the same for eobs
+ # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
+ # For S4, this is accomplished by running 10 tasks/node
+ export npe_node_eobs=10
+ ;;
+
+ "eupd")
+ if [[ "${CASE}" == "C384" ]]; then
+ export npe_eupd=160
+ export nth_eupd=2
+ fi
+ export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ ;;
+
+ "ediag")
+ export memory_ediag="${mem_node_max}"
+ ;;
+
+ *)
+ ;;
+esac
diff --git a/parm/config/gfs/config.resources.WCOSS2 b/parm/config/gfs/config.resources.WCOSS2
new file mode 100644
index 0000000000..7e4314a0e9
--- /dev/null
+++ b/parm/config/gfs/config.resources.WCOSS2
@@ -0,0 +1,59 @@
+#! /usr/bin/env bash
+
+# WCOSS2-specific job resources
+
+case ${step} in
+ "prep")
+ export is_exclusive=True
+ export memory_prep="480GB"
+ ;;
+
+ "anal")
+ if [[ "${CASE}" == "C768" ]]; then
+ export nth_anal=8
+ # Make npe a multiple of 16
+ export npe_anal_gdas=784
+ export npe_anal_gfs=832
+ export npe_node_anal=$(( npe_node_max / nth_anal ))
+ fi
+ ;;
+
+ "fit2obs")
+ export npe_node_fit2obs=3
+ ;;
+
+ "echgres")
+ export memory_echgres="200GB"
+ ;;
+
+ "arch" | "earc" | "getic")
+ declare -x "memory_${step}"="50GB"
+ ;;
+
+ "eupd")
+ case ${CASE} in
+ "C768" | "C384")
+ export npe_eupd=315
+ export nth_eupd=14
+ ;;
+ *)
+ ;;
+ esac
+ export npe_node_eupd=$(( npe_node_max / nth_eupd ))
+ ;;
+
+ "eobs")
+ case ${CASE} in
+ "C768" | "C384")
+ export npe_node_eobs=50
+ ;;
+ *)
+ export npe_node_eobs=40
+ ;;
+ esac
+ ;;
+
+ *)
+ ;;
+
+esac
diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs
index 9f6c47ce72..5a57a27007 100644
--- a/parm/config/gfs/config.ufs
+++ b/parm/config/gfs/config.ufs
@@ -88,16 +88,16 @@ fi
case "${fv3_res}" in
"C48")
export DELTIM=1200
- export layout_x=1
- export layout_y=1
+ export layout_x_gdas=1
+ export layout_y_gdas=1
export layout_x_gfs=1
export layout_y_gfs=1
- export nthreads_fv3=1
+ export nthreads_fv3_gdas=1
export nthreads_fv3_gfs=1
- export nthreads_ufs=1
+ export nthreads_ufs_gdas=1
export nthreads_ufs_gfs=1
- export xr_cnvcld=".false." # Do not pass conv. clouds to Xu-Randall cloud fraction
- export cdmbgwd="0.071,2.1,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
+ export xr_cnvcld=".false." # Do not pass conv. clouds to Xu-Randall cloud fraction
+ export cdmbgwd="0.071,2.1,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
export cdmbgwd_gsl="40.0,1.77,1.0,1.0" # settings for GSL drag suite
export knob_ugwp_tauamp=6.0e-3 # setting for UGWPv1 non-stationary GWD
export k_split=1
@@ -105,8 +105,8 @@ case "${fv3_res}" in
export tau=10.0
export rf_cutoff=100.0
export fv_sg_adj=3600
- export WRITE_GROUP=1
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=1
+ export WRITE_GROUP_GDAS=1
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=1
export WRITE_GROUP_GFS=1
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=1
;;
@@ -124,22 +124,22 @@ case "${fv3_res}" in
export npy_nest=241
export NEST_DLON=0.25
export NEST_DLAT=0.25
- export WRITE_GROUP=2
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=2
+ export WRITE_GROUP_GDAS=2
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=2
export WRITE_GROUP_GFS=2
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=2
else
export DELTIM=600
- export layout_x=2
- export layout_y=2
+ export layout_x_gdas=2
+ export layout_y_gdas=2
export layout_x_gfs=2
export layout_y_gfs=2
- export nthreads_fv3=1
+ export nthreads_fv3_gdas=1
export nthreads_fv3_gfs=1
- export nthreads_ufs=1
+ export nthreads_ufs_gdas=1
export nthreads_ufs_gfs=1
- export xr_cnvcld=.false. # Do not pass conv. clouds to Xu-Randall cloud fraction
- export cdmbgwd="0.14,1.8,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
+ export xr_cnvcld=.false. # Do not pass conv. clouds to Xu-Randall cloud fraction
+ export cdmbgwd="0.14,1.8,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
export cdmbgwd_gsl="20.0,2.5,1.0,1.0" # settings for GSL drag suite
export knob_ugwp_tauamp=3.0e-3 # setting for UGWPv1 non-stationary GWD
export k_split=1
@@ -147,8 +147,8 @@ case "${fv3_res}" in
export tau=8.0
export rf_cutoff=100.0
export fv_sg_adj=1800
- export WRITE_GROUP=1
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=1
+ export WRITE_GROUP_GDAS=1
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=1
export WRITE_GROUP_GFS=1
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=1
fi
@@ -167,19 +167,19 @@ case "${fv3_res}" in
export npy_nest=481
export NEST_DLON=0.125
export NEST_DLAT=0.125
- export WRITE_GROUP=2
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=15
+ export WRITE_GROUP_GDAS=2
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=15
export WRITE_GROUP_GFS=2
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=15
else
export DELTIM=600
- export layout_x=4
- export layout_y=6
+ export layout_x_gdas=4
+ export layout_y_gdas=6
export layout_x_gfs=4
export layout_y_gfs=6
- export nthreads_fv3=1
+ export nthreads_fv3_gdas=1
export nthreads_fv3_gfs=2
- export nthreads_ufs=1
+ export nthreads_ufs_gdas=1
export nthreads_ufs_gfs=2
export cdmbgwd="0.23,1.5,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
export cdmbgwd_gsl="10.0,3.5,1.0,1.0" # settings for GSL drag suite
@@ -189,8 +189,8 @@ case "${fv3_res}" in
export tau=6.0
export rf_cutoff=100.0
export fv_sg_adj=1800
- export WRITE_GROUP=1
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=10
+ export WRITE_GROUP_GDAS=1
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=10
export WRITE_GROUP_GFS=2
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=5
fi
@@ -198,8 +198,8 @@ case "${fv3_res}" in
"C384")
if [[ "${DO_NEST:-NO}" == "YES" ]] ; then
export DELTIM=150
- export layout_x=8
- export layout_y=8
+ export layout_x_gdas=8
+ export layout_y_gdas=8
export layout_x_gfs=8
export layout_y_gfs=8
export layout_x_nest=34
@@ -211,19 +211,19 @@ case "${fv3_res}" in
export npy_nest=961
export NEST_DLON=0.0625
export NEST_DLAT=0.0625
- export WRITE_GROUP=2
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=20
+ export WRITE_GROUP_GDAS=2
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=20
export WRITE_GROUP_GFS=2
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=20
else
export DELTIM=300
- export layout_x=8
- export layout_y=8
+ export layout_x_gdas=8
+ export layout_y_gdas=8
export layout_x_gfs=8
export layout_y_gfs=8
- export nthreads_fv3=2
+ export nthreads_fv3_gdas=2
export nthreads_fv3_gfs=2
- export nthreads_ufs=2
+ export nthreads_ufs_gdas=2
export nthreads_ufs_gfs=2
export cdmbgwd="1.1,0.72,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
export cdmbgwd_gsl="5.0,5.0,1.0,1.0" # settings for GSL drag suite
@@ -233,8 +233,8 @@ case "${fv3_res}" in
export tau=4.0
export rf_cutoff=100.0
export fv_sg_adj=900
- export WRITE_GROUP=4
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=10
+ export WRITE_GROUP_GDAS=4
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=10
export WRITE_GROUP_GFS=4
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=10
fi
@@ -242,13 +242,14 @@ case "${fv3_res}" in
"C768")
if [[ "${DO_NEST:-NO}" == "YES" ]] ; then
export DELTIM=75
- export layout_x=16
- export layout_y=10
+ export layout_x_gdas=16
+ export layout_y_gdas=10
export layout_x_gfs=16
export layout_y_gfs=10
export layout_x_nest=48
export layout_y_nest=45
- export nthreads_fv3=2
+ export nthreads_fv3_nest=2
+ export nthreads_fv3_gdas=2
export nthreads_fv3_gfs=2
export nest_refine=4
export nest_ioffset=24
@@ -257,19 +258,19 @@ case "${fv3_res}" in
export npy_nest=1921
export NEST_DLON=0.0325
export NEST_DLAT=0.0325
- export WRITE_GROUP=2
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=90
+ export WRITE_GROUP_GDAS=2
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=90
export WRITE_GROUP_GFS=2
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=90
else
export DELTIM=150
- export layout_x=8
- export layout_y=12
+ export layout_x_gdas=8
+ export layout_y_gdas=12
export layout_x_gfs=12
export layout_y_gfs=16
- export nthreads_fv3=4
+ export nthreads_fv3_gdas=4
export nthreads_fv3_gfs=4
- export nthreads_ufs=4
+ export nthreads_ufs_gdas=4
export nthreads_ufs_gfs=4
export cdmbgwd="4.0,0.15,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
export cdmbgwd_gsl="2.5,7.5,1.0,1.0" # settings for GSL drag suite
@@ -279,21 +280,21 @@ case "${fv3_res}" in
export tau=3.0
export rf_cutoff=100.0
export fv_sg_adj=450
- export WRITE_GROUP=2
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=10
+ export WRITE_GROUP_GDAS=2
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=10
export WRITE_GROUP_GFS=4
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=20 #Note this should be 10 for WCOSS2
fi
;;
"C1152")
export DELTIM=150
- export layout_x=8
- export layout_y=16
+ export layout_x_gdas=8
+ export layout_y_gdas=16
export layout_x_gfs=8
export layout_y_gfs=16
- export nthreads_fv3=4
+ export nthreads_fv3_gdas=4
export nthreads_fv3_gfs=4
- export nthreads_ufs=4
+ export nthreads_ufs_gdas=4
export nthreads_ufs_gfs=4
export cdmbgwd="4.0,0.10,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
export cdmbgwd_gsl="1.67,8.8,1.0,1.0" # settings for GSL drag suite
@@ -303,20 +304,20 @@ case "${fv3_res}" in
export tau=2.5
export rf_cutoff=100.0
export fv_sg_adj=450
- export WRITE_GROUP=4
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=10 # TODO: refine these numbers when a case is available
+ export WRITE_GROUP_GDAS=4
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=10 # TODO: refine these numbers when a case is available
export WRITE_GROUP_GFS=4
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=20 # TODO: refine these numbers when a case is available
;;
"C3072")
export DELTIM=90
-        export layout_x=16
-        export layout_y=32
+        export layout_x_gdas=16
+        export layout_y_gdas=32
         export layout_x_gfs=16
export layout_y_gfs=32
- export nthreads_fv3=4
+ export nthreads_fv3_gdas=4
export nthreads_fv3_gfs=4
- export nthreads_ufs=4
+ export nthreads_ufs_gdas=4
export nthreads_ufs_gfs=4
export cdmbgwd="4.0,0.05,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
export cdmbgwd_gsl="0.625,14.1,1.0,1.0" # settings for GSL drag suite
@@ -326,8 +327,8 @@ case "${fv3_res}" in
export tau=0.5
export rf_cutoff=100.0
export fv_sg_adj=300
- export WRITE_GROUP=4
- export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE=10 # TODO: refine these numbers when a case is available
+ export WRITE_GROUP_GDAS=4
+ export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=10 # TODO: refine these numbers when a case is available
export WRITE_GROUP_GFS=4
export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=10 # TODO: refine these numbers when a case is available
;;
@@ -337,22 +338,22 @@ case "${fv3_res}" in
;;
esac
-(( WRTTASK_PER_GROUP_PER_THREAD = WRTTASK_PER_GROUP_PER_THREAD_PER_TILE * 6 ))
+(( WRTTASK_PER_GROUP_PER_THREAD_GDAS = WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS * 6 ))
(( WRTTASK_PER_GROUP_PER_THREAD_GFS = WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS * 6 ))
-export WRTTASK_PER_GROUP_PER_THREAD
+export WRTTASK_PER_GROUP_PER_THREAD_GDAS
export WRTTASK_PER_GROUP_PER_THREAD_GFS
-(( ntasks_fv3 = layout_x * layout_y * 6 ))
+(( ntasks_fv3_gdas = layout_x_gdas * layout_y_gdas * 6 ))
(( ntasks_fv3_gfs = layout_x_gfs * layout_y_gfs * 6 ))
if [[ "${DO_NEST:-NO}" == "YES" ]] ; then
(( ntasks_fv3_gfs += layout_x_nest * layout_y_nest ))
fi
-export ntasks_fv3
+export ntasks_fv3_gdas
export ntasks_fv3_gfs
-(( ntasks_quilt = WRITE_GROUP * WRTTASK_PER_GROUP_PER_THREAD ))
+(( ntasks_quilt_gdas = WRITE_GROUP_GDAS * WRTTASK_PER_GROUP_PER_THREAD_GDAS ))
(( ntasks_quilt_gfs = WRITE_GROUP_GFS * WRTTASK_PER_GROUP_PER_THREAD_GFS ))
-export ntasks_quilt
+export ntasks_quilt_gdas
export ntasks_quilt_gfs
# Determine whether to use parallel NetCDF based on resolution
@@ -389,7 +390,8 @@ model_list="atm"
# Mediator specific settings
if [[ "${skip_mediator}" == "false" ]]; then
export cpl=".true."
- export nthreads_mediator=${nthreads_fv3} # Use same threads as FV3
+ export nthreads_mediator_gfs=${nthreads_fv3_gfs} # Use same threads as FV3
+ export nthreads_mediator_gdas=${nthreads_fv3_gdas}
export CCPP_SUITE="FV3_GFS_v17_coupled_p8_ugwpv1" # TODO: Does this include FV3_GFS_v17_p8? Can this be used instead of FV3_GFS_v17_p8?
fi
@@ -399,6 +401,7 @@ if [[ "${skip_mom6}" == "false" ]]; then
export cplflx=".true."
model_list="${model_list}.ocean"
nthreads_mom6=1
+ MOM6_DIAG_MISVAL="-1e34"
case "${mom6_res}" in
"500")
ntasks_mom6=8
@@ -413,7 +416,6 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RIVER_RUNOFF='False'
eps_imesh="4.0e-1"
MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_25L.nc"
- MOM6_DIAG_MISVAL="0.0"
MOM6_ALLOW_LANDMASK_CHANGES='False'
TOPOEDITS=""
;;
@@ -430,12 +432,10 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RIVER_RUNOFF='False'
eps_imesh="2.5e-1"
TOPOEDITS="ufs.topo_edits_011818.nc"
- if [[ "${DO_JEDIOCNVAR:-NO}" = "YES" ]]; then
- MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
- MOM6_DIAG_MISVAL="0.0"
- else
+      if [[ "${RUN}" == "gfs" || "${RUN}" == "gefs" ]]; then
MOM6_DIAG_COORD_DEF_Z_FILE="interpolate_zgrid_40L.nc"
- MOM6_DIAG_MISVAL="-1e34"
+ else
+ MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
fi
MOM6_ALLOW_LANDMASK_CHANGES='True'
;;
@@ -451,12 +451,10 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RESTART_SETTING='n'
MOM6_RIVER_RUNOFF='True'
eps_imesh="1.0e-1"
- if [[ "${DO_JEDIOCNVAR:-NO}" = "YES" ]]; then
- MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
- MOM6_DIAG_MISVAL="0.0"
- else
+      if [[ "${RUN}" == "gfs" || "${RUN}" == "gefs" ]]; then
MOM6_DIAG_COORD_DEF_Z_FILE="interpolate_zgrid_40L.nc"
- MOM6_DIAG_MISVAL="-1e34"
+ else
+ MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
fi
MOM6_ALLOW_LANDMASK_CHANGES='False'
TOPOEDITS=""
@@ -473,12 +471,10 @@ if [[ "${skip_mom6}" == "false" ]]; then
MOM6_RIVER_RUNOFF='True'
MOM6_RESTART_SETTING="r"
eps_imesh="1.0e-1"
- if [[ "${DO_JEDIOCNVAR:-NO}" = "YES" ]]; then
- MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
- MOM6_DIAG_MISVAL="0.0"
- else
+      if [[ "${RUN}" == "gfs" || "${RUN}" == "gefs" ]]; then
MOM6_DIAG_COORD_DEF_Z_FILE="interpolate_zgrid_40L.nc"
- MOM6_DIAG_MISVAL="-1e34"
+ else
+ MOM6_DIAG_COORD_DEF_Z_FILE="oceanda_zgrid_75L.nc"
fi
MOM6_ALLOW_LANDMASK_CHANGES='False'
TOPOEDITS=""
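With this change config.ufs publishes every layout, thread, and task count twice, suffixed _gdas and _gfs, instead of one unsuffixed value plus a _gfs override. A hypothetical consumer-side sketch of how a job could resolve the pair for its own RUN (values made up; the workflow's actual consumers may differ):

    #! /usr/bin/env bash
    RUN="gfs"
    ntasks_fv3_gdas=576
    ntasks_fv3_gfs=1152
    nthreads_fv3_gdas=4
    nthreads_fv3_gfs=4

    for base in ntasks_fv3 nthreads_fv3; do
      var="${base}_${RUN}"
      declare "${base}"="${!var}"   # e.g. ntasks_fv3=1152 when RUN=gfs
    done
    echo "ntasks_fv3=${ntasks_fv3} nthreads_fv3=${nthreads_fv3}"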
diff --git a/scripts/exgfs_wave_init.sh b/scripts/exgfs_wave_init.sh
index 9e3ca0b497..17e6cec042 100755
--- a/scripts/exgfs_wave_init.sh
+++ b/scripts/exgfs_wave_init.sh
@@ -83,15 +83,15 @@ source "${USHgfs}/preamble.sh"
grdALL=$(printf "%s\n" "${array[@]}" | sort -u | tr '\n' ' ')
for grdID in ${grdALL}; do
- if [[ -f "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
+ if [[ -f "${COMOUT_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
set +x
- echo " Mod def file for ${grdID} found in ${COM_WAVE_PREP}. copying ...."
+ echo " Mod def file for ${grdID} found in ${COMOUT_WAVE_PREP}. copying ...."
set_trace
- cp "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "mod_def.${grdID}"
+ cp "${COMOUT_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "mod_def.${grdID}"
else
set +x
- echo " Mod def file for ${grdID} not found in ${COM_WAVE_PREP}. Setting up to generate ..."
+ echo " Mod def file for ${grdID} not found in ${COMOUT_WAVE_PREP}. Setting up to generate ..."
echo ' '
set_trace
if [ -f ${FIXgfs}/wave/ww3_grid.inp.$grdID ]
@@ -125,7 +125,6 @@ source "${USHgfs}/preamble.sh"
fi
#TO DO: how do we say "it's unstructured, and therefore need to have error check here"
- [[ ! -d "${COM_WAVE_PREP}" ]] && mkdir -m 775 -p "${COM_WAVE_PREP}"
if [ ${CFP_MP:-"NO"} = "YES" ]; then
echo "$nmoddef ${USHgfs}/wave_grid_moddef.sh $grdID > $grdID.out 2>&1" >> cmdfile
else
@@ -190,7 +189,7 @@ source "${USHgfs}/preamble.sh"
# 1.a.3 File check
for grdID in ${grdALL}; do
- if [[ -f "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
+ if [[ -f "${COMOUT_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
set +x
echo ' '
echo " mod_def.$grdID succesfully created/copied "
@@ -213,10 +212,10 @@ source "${USHgfs}/preamble.sh"
# Copy to other members if needed
if (( NMEM_ENS > 0 )); then
for mem in $(seq -f "%03g" 1 "${NMEM_ENS}"); do
- MEMDIR="mem${mem}" YMD=${PDY} HH=${cyc} declare_from_tmpl COM_WAVE_PREP_MEM:COM_WAVE_PREP_TMPL
- mkdir -p "${COM_WAVE_PREP_MEM}"
+ MEMDIR="mem${mem}" YMD=${PDY} HH=${cyc} declare_from_tmpl COMOUT_WAVE_PREP_MEM:COM_WAVE_PREP_TMPL
+ mkdir -p "${COMOUT_WAVE_PREP_MEM}"
for grdID in ${grdALL}; do
- ${NLN} "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "${COM_WAVE_PREP_MEM}/${RUN}wave.mod_def.${grdID}"
+ ${NLN} "${COMOUT_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "${COMOUT_WAVE_PREP_MEM}/${RUN}wave.mod_def.${grdID}"
done
done
fi
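The declare_from_tmpl call above follows a NAME:TEMPLATE convention: judging only by its uses in this diff, the helper expands the named *_TMPL variable with the MEMDIR/YMD/HH values placed in its environment and exports the result under the requested name. A rough stand-in under that assumption (a guess at the behavior, not the real ush/ implementation; the template string is invented):

    declare_from_tmpl_sketch() {
      local pair name tmpl
      for pair in "$@"; do
        name="${pair%%:*}"   # e.g. COMOUT_WAVE_PREP_MEM
        tmpl="${pair#*:}"    # e.g. COM_WAVE_PREP_TMPL
        # Expand the ${...} references held unexpanded in the template string
        declare -gx "${name}"="$(eval "echo \"${!tmpl}\"")"
      done
    }

    COM_WAVE_PREP_TMPL='${ROTDIR}/${RUN}.${YMD}/${HH}/${MEMDIR}/wave/prep'
    ROTDIR="/path/to/rotdir" RUN="gefs" YMD="20240101" HH="00" MEMDIR="mem001" \
      declare_from_tmpl_sketch COMOUT_WAVE_PREP_MEM:COM_WAVE_PREP_TMPL
    echo "${COMOUT_WAVE_PREP_MEM}"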
diff --git a/scripts/exgfs_wave_post_gridded_sbs.sh b/scripts/exgfs_wave_post_gridded_sbs.sh
index 02aa8c456d..b0cca34bd1 100755
--- a/scripts/exgfs_wave_post_gridded_sbs.sh
+++ b/scripts/exgfs_wave_post_gridded_sbs.sh
@@ -20,6 +20,10 @@
# 2020-06-10 J-Henrique Alves: Porting to R&D machine Hera
# 2020-07-31 Jessica Meixner: Removing points, now gridded data only
#
+# COM inputs:
+# - ${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID}
+# - ${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_grd.${wavGRD}.${YMD}.${HMS}
+#
# $Id$
#
# Attributes:
@@ -103,12 +105,12 @@ source "${USHgfs}/preamble.sh"
# 1.a.1 Copy model definition files
for grdID in ${waveGRD} ${wavepostGRD} ${waveinterpGRD}; do
- if [[ -f "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
+ if [[ -f "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
set +x
- echo " Mod def file for ${grdID} found in ${COM_WAVE_PREP}. copying ...."
+ echo " Mod def file for ${grdID} found in ${COMIN_WAVE_PREP}. copying ...."
set_trace
- cp -f "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "mod_def.${grdID}"
+ cp -f "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "mod_def.${grdID}"
fi
done
@@ -257,9 +259,8 @@ source "${USHgfs}/preamble.sh"
if [ $fhr = $fhrg ]
then
-
for wavGRD in ${waveGRD}; do
- gfile="${COM_WAVE_HISTORY}/${WAV_MOD_TAG}.out_grd.${wavGRD}.${YMD}.${HMS}"
+ gfile="${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_grd.${wavGRD}.${YMD}.${HMS}"
if ! wait_for_file "${gfile}" "${sleep_interval}" "${iwaitmax}"; then
echo " FATAL ERROR : NO RAW FIELD OUTPUT FILE out_grd.${grdID}"
echo "${WAV_MOD_TAG} post ${grdID} ${PDY} ${cycle} : field output missing."
@@ -405,7 +406,7 @@ source "${USHgfs}/preamble.sh"
ENSTAG=""
if [ ${waveMEMB} ]; then ENSTAG=".${membTAG}${waveMEMB}" ; fi
gribchk="${RUN}wave.${cycle}${ENSTAG}.${GRDNAME}.${GRDRES}.f${FH3}.grib2"
- if [ ! -s ${COM_WAVE_GRID}/${gribchk} ]; then
+ if [ ! -s ${COMOUT_WAVE_GRID}/${gribchk} ]; then
set +x
echo ' '
echo '********************************************'
diff --git a/scripts/exgfs_wave_post_pnt.sh b/scripts/exgfs_wave_post_pnt.sh
index 93bdbeaf32..b251661ee6 100755
--- a/scripts/exgfs_wave_post_pnt.sh
+++ b/scripts/exgfs_wave_post_pnt.sh
@@ -22,6 +22,10 @@
# 2020-07-30 Jessica Meixner: Points only - no gridded data
# 2020-09-29 Jessica Meixner: optimized by changing loop structures
#
+# COM inputs:
+# - ${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID}
+# - ${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_pnt.${waveuoutpGRD}.${PDY}.${HMS}
+#
# $Id$
#
# Attributes:
@@ -117,12 +121,12 @@ source "${USHgfs}/preamble.sh"
# Copy model definition files
iloop=0
for grdID in ${waveuoutpGRD}; do
- if [[ -f "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
+ if [[ -f "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]]; then
set +x
- echo " Mod def file for ${grdID} found in ${COM_WAVE_PREP}. copying ...."
+ echo " Mod def file for ${grdID} found in ${COMIN_WAVE_PREP}. copying ...."
set_trace
- cp -f "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "mod_def.${grdID}"
+ cp -f "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" "mod_def.${grdID}"
iloop=$((iloop + 1))
fi
done
@@ -247,11 +251,10 @@ source "${USHgfs}/preamble.sh"
-e "s/FORMAT/F/g" \
ww3_outp_spec.inp.tmpl > ww3_outp.inp
         ${NLN} mod_def.${waveuoutpGRD} mod_def.ww3
-        HH=$(date --utc -d "${PDY:0:8} ${cyc} + ${FHMIN_WAV} hours" +%H)
-        HMS="${HH}0000"
-        if [[ -f "${COM_WAVE_HISTORY}/${WAV_MOD_TAG}.out_pnt.${waveuoutpGRD}.${PDY}.${HMS}" ]]; then
-          ${NLN} "${COM_WAVE_HISTORY}/${WAV_MOD_TAG}.out_pnt.${waveuoutpGRD}.${PDY}.${HMS}" \
+ HMS="${cyc}0000"
+ if [[ -f "${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_pnt.${waveuoutpGRD}.${PDY}.${HMS}" ]]; then
+ ${NLN} "${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_pnt.${waveuoutpGRD}.${PDY}.${HMS}" \
"./out_pnt.${waveuoutpGRD}"
else
echo '*************************************************** '
@@ -372,7 +375,7 @@ source "${USHgfs}/preamble.sh"
export BULLDATA=${DATA}/output_$YMDHMS
cp $DATA/mod_def.${waveuoutpGRD} mod_def.${waveuoutpGRD}
- pfile="${COM_WAVE_HISTORY}/${WAV_MOD_TAG}.out_pnt.${waveuoutpGRD}.${YMD}.${HMS}"
+ pfile="${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_pnt.${waveuoutpGRD}.${YMD}.${HMS}"
if [ -f ${pfile} ]
then
${NLN} ${pfile} ./out_pnt.${waveuoutpGRD}
@@ -696,6 +699,6 @@ source "${USHgfs}/preamble.sh"
# 4. Ending output
-exit $exit_code
+exit "${exit_code}"
# End of MWW3 point prostprocessor script ---------------------------------------- #
diff --git a/scripts/exgfs_wave_prdgen_bulls.sh b/scripts/exgfs_wave_prdgen_bulls.sh
index 2bf90cdf2b..5f5b2c531e 100755
--- a/scripts/exgfs_wave_prdgen_bulls.sh
+++ b/scripts/exgfs_wave_prdgen_bulls.sh
@@ -8,6 +8,10 @@
# Remarks : #
# - Supplemental error output is witten to the gfswave_prdgbulls.log file. #
# #
+# COM inputs: #
+# - ${COMIN_WAVE_STATION}/${RUNwave}.${cycle}.cbull_tar #
+# COM outputs: #
+# - ${COMOUT_WAVE_WMO}/awipsbull.${cycle}.${RUNwave} #
# #
# Origination : 05/02/2007 #
# Last update : 08/20/2020 #
@@ -52,11 +56,11 @@ source "${USHgfs}/preamble.sh"
# 1. Get necessary files
set +x
- echo " Copying bulletins from ${COM_WAVE_STATION}"
+ echo " Copying bulletins from ${COMIN_WAVE_STATION}"
set_trace
# 1.a Link the input file and untar it
- BullIn="${COM_WAVE_STATION}/${RUNwave}.${cycle}.cbull_tar"
+ BullIn="${COMIN_WAVE_STATION}/${RUNwave}.${cycle}.cbull_tar"
if [ -f $BullIn ]; then
cp $BullIn cbull.tar
else
@@ -170,7 +174,7 @@ source "${USHgfs}/preamble.sh"
set_trace
formbul.pl -d "${headr}" -f "${fname}" -j "${job}" -m "${RUNwave}" \
- -p "${COM_WAVE_WMO}" -s "NO" -o "${oname}" > formbul.out 2>&1
+ -p "${COMOUT_WAVE_WMO}" -s "NO" -o "${oname}" > formbul.out 2>&1
OK=$?
if [ "$OK" != '0' ] || [ ! -f $oname ]; then
@@ -196,15 +200,15 @@ source "${USHgfs}/preamble.sh"
# 3. Send output files to the proper destination
set_trace
-cp "awipsbull.${cycle}.${RUNwave}" "${COM_WAVE_WMO}/awipsbull.${cycle}.${RUNwave}"
+cp "awipsbull.${cycle}.${RUNwave}" "${COMOUT_WAVE_WMO}/awipsbull.${cycle}.${RUNwave}"
if [ "$SENDDBN_NTC" = YES ]; then
make_ntc_bull.pl "WMOBH" "NONE" "KWBC" "NONE" "${DATA}/awipsbull.${cycle}.${RUNwave}" \
- "${COM_WAVE_WMO}/awipsbull.${cycle}.${RUNwave}"
+ "${COMOUT_WAVE_WMO}/awipsbull.${cycle}.${RUNwave}"
else
if [ "${envir}" = "para" ] || [ "${envir}" = "test" ] || [ "${envir}" = "dev" ]; then
echo "Making NTC bulletin for parallel environment, but do not alert."
(export SENDDBN=NO; make_ntc_bull.pl "WMOBH" "NONE" "KWBC" "NONE" \
- "${DATA}/awipsbull.${cycle}.${RUNwave}" "${COM_WAVE_WMO}/awipsbull.${cycle}.${RUNwave}")
+ "${DATA}/awipsbull.${cycle}.${RUNwave}" "${COMOUT_WAVE_WMO}/awipsbull.${cycle}.${RUNwave}")
fi
fi
diff --git a/scripts/exgfs_wave_prdgen_gridded.sh b/scripts/exgfs_wave_prdgen_gridded.sh
index c896423ac1..9111c81273 100755
--- a/scripts/exgfs_wave_prdgen_gridded.sh
+++ b/scripts/exgfs_wave_prdgen_gridded.sh
@@ -8,6 +8,11 @@
# Remarks : #
# - Supplemental error output is witten to the wave.log file. #
# #
+# COM inputs: #
+# - ${COMIN_WAVE_GRID}/${RUNwave}.${cycle}.${grdID}.f${fhr}.grib2 #
+# #
+# COM outputs: #
+# - ${COMOUT_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut} #
# #
# Origination : 05/02/2007 #
# Last update : 10/08/2020 #
@@ -104,7 +109,6 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m}
echo "$RUNwave $grdID ${fhr} prdgen $date $cycle : GRIB file missing." >> $wavelog
err=1;export err;${errchk} || exit ${err}
fi
-
GRIBOUT=$RUNwave.$cycle.$grdID.f${fhr}.clipped.grib2
iparam=1
@@ -216,16 +220,16 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m}
#set_trace
#set +x
echo " Saving $AWIPSGRB.$grdOut.f${fhr} as grib2.$cycle.awipsww3_${grdID}.f${fhr}"
- echo " in ${COM_WAVE_WMO}"
+ echo " in ${COMOUT_WAVE_WMO}"
#set_trace
- cp "${AWIPSGRB}.${grdID}.f${fhr}" "${COM_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut}"
+ cp "${AWIPSGRB}.${grdID}.f${fhr}" "${COMOUT_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut}"
#set +x
if [ "$SENDDBN" = 'YES' ]
then
echo " Sending $AWIPSGRB.$grdID.f${fhr} to DBRUN."
- "${DBNROOT}/bin/dbn_alert" GRIB_LOW "${RUN}" "${job}" "${COM_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut}"
+ "${DBNROOT}/bin/dbn_alert" GRIB_LOW "${RUN}" "${job}" "${COMOUT_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut}"
fi
rm -f $AWIPSGRB.$grdID.f${fhr} tocgrib2.out
done # For grids
diff --git a/scripts/exgfs_wave_prep.sh b/scripts/exgfs_wave_prep.sh
index 1fbe7dd767..f83ead2c22 100755
--- a/scripts/exgfs_wave_prep.sh
+++ b/scripts/exgfs_wave_prep.sh
@@ -17,6 +17,13 @@
# Remarks : #
# - For non-fatal errors output is witten to the wave.log file. #
# #
+# COM inputs: #
+# - ${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID} #
+# - ${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f#HHH_prog.nc #
+# #
+# COM outputs: #
+# - ${COMOUT_WAVE_PREP}/${RUN}wave.${WAVECUR_FID}.$cycle.cur #
+# #
# Update record : #
# #
# - Origination: 01-Mar-2007 #
@@ -162,12 +169,12 @@ source "${USHgfs}/preamble.sh"
for grdID in $grdINP $waveGRD
do
- if [ -f "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]
+ if [ -f "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" ]
then
set +x
- echo " Mod def file for $grdID found in ${COM_WAVE_PREP}. copying ...."
+ echo " Mod def file for $grdID found in ${COMIN_WAVE_PREP}. copying ...."
set_trace
- cp ${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID} mod_def.$grdID
+ cp ${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${grdID} mod_def.$grdID
else
set +x
@@ -322,19 +329,19 @@ source "${USHgfs}/preamble.sh"
ymdh_rtofs=$ymdh_beg
if [ "$FHMAX_WAV_CUR" -le 72 ]; then
- rtofsfile1="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f024_prog.nc"
- rtofsfile2="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f048_prog.nc"
- rtofsfile3="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f072_prog.nc"
+ rtofsfile1="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f024_prog.nc"
+ rtofsfile2="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f048_prog.nc"
+ rtofsfile3="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f072_prog.nc"
if [ ! -f $rtofsfile1 ] || [ ! -f $rtofsfile2 ] || [ ! -f $rtofsfile3 ]; then
#Needed current files are not available, so use RTOFS from previous day
export RPDY=$($NDATE -24 ${RPDY}00 | cut -c1-8)
fi
else
- rtofsfile1="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f096_prog.nc"
- rtofsfile2="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f120_prog.nc"
- rtofsfile3="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f144_prog.nc"
- rtofsfile4="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f168_prog.nc"
- rtofsfile5="${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f192_prog.nc"
+ rtofsfile1="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f096_prog.nc"
+ rtofsfile2="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f120_prog.nc"
+ rtofsfile3="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f144_prog.nc"
+ rtofsfile4="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f168_prog.nc"
+ rtofsfile5="${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_f192_prog.nc"
if [ ! -f $rtofsfile1 ] || [ ! -f $rtofsfile2 ] || [ ! -f $rtofsfile3 ] ||
[ ! -f $rtofsfile4 ] || [ ! -f $rtofsfile5 ]; then
#Needed current files are not available, so use RTOFS from previous day
@@ -360,8 +367,8 @@ source "${USHgfs}/preamble.sh"
fhr_rtofs=$(${NHOUR} ${ymdh_rtofs} ${RPDY}00)
fh3_rtofs=$(printf "%03d" "${fhr_rtofs#0}")
- curfile1h=${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_${fext}${fh3_rtofs}_prog.nc
- curfile3h=${COM_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_${fext}${fh3_rtofs}_prog.nc
+ curfile1h=${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_${fext}${fh3_rtofs}_prog.nc
+ curfile3h=${COMIN_RTOFS}/${WAVECUR_DID}.${RPDY}/rtofs_glo_2ds_${fext}${fh3_rtofs}_prog.nc
if [ -s ${curfile1h} ] && [ "${FLGHF}" = "T" ] ; then
curfile=${curfile1h}
@@ -465,7 +472,7 @@ source "${USHgfs}/preamble.sh"
cat $file >> cur.${WAVECUR_FID}
done
- cp -f cur.${WAVECUR_FID} ${COM_WAVE_PREP}/${RUN}wave.${WAVECUR_FID}.$cycle.cur
+ cp -f cur.${WAVECUR_FID} ${COMOUT_WAVE_PREP}/${RUN}wave.${WAVECUR_FID}.$cycle.cur
else
echo ' '
diff --git a/scripts/exglobal_cleanup.sh b/scripts/exglobal_cleanup.sh
index 7c3dfafbad..1150ca6d1d 100755
--- a/scripts/exglobal_cleanup.sh
+++ b/scripts/exglobal_cleanup.sh
@@ -2,6 +2,9 @@
source "${USHgfs}/preamble.sh"
+###############################################################
+echo "Begin Cleanup ${DATAROOT}!"
+
# Remove DATAoutput from the forecast model run
# TODO: Handle this better
DATAfcst="${DATAROOT}/${RUN}fcst.${PDY:-}${cyc}"
@@ -9,6 +12,19 @@ if [[ -d "${DATAfcst}" ]]; then rm -rf "${DATAfcst}"; fi
#DATAefcs="${DATAROOT}/${RUN}efcs???${PDY:-}${cyc}"
rm -rf "${DATAROOT}/${RUN}efcs"*"${PDY:-}${cyc}"
+# Search for and delete files/directories in ${DATAROOT} older than ${purge_every_days} days
+# purge_every_days should be a positive integer
+purge_every_days=3
+
+# Find and delete files older than ${purge_every_days} days
+find "${DATAROOT}/"* -type f -mtime "+${purge_every_days}" -exec rm -f {} \;
+
+# Find and delete directories older than ${purge_every_days} days
+find "${DATAROOT}/"* -type d -mtime "+${purge_every_days}" -exec rm -rf {} \;
+
+echo "Cleanup ${DATAROOT} completed!"
+###############################################################
+
###############################################################
# Clean up previous cycles; various depths
# PRIOR CYCLE: Leave the prior cycle alone
@@ -67,7 +83,7 @@ for (( current_date=first_date; current_date <= last_date; \
# shellcheck disable=SC2312
if [[ $(tail -n 1 "${rocotolog}") =~ "This cycle is complete: Success" ]]; then
YMD="${current_PDY}" HH="${current_cyc}" declare_from_tmpl \
- COMOUT_TOP:COM_TOP_TMPL
+ COMOUT_TOP:COM_TOP_TMPL
if [[ -d "${COMOUT_TOP}" ]]; then
IFS=", " read -r -a exclude_list <<< "${exclude_string:-}"
remove_files "${COMOUT_TOP}" "${exclude_list[@]:-}"
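One GNU find subtlety the purge above relies on: -mtime compares file age truncated to whole 24-hour periods, so "-mtime +3" matches files strictly older than three full days (in practice four days and up), not 72 hours. A dry run that prints what a given setting would remove, with a guard against an unset DATAROOT:

    purge_every_days=3
    # ${DATAROOT:?} aborts (instead of expanding to "/") if DATAROOT is unset or empty
    find "${DATAROOT:?}/"* -type f -mtime "+${purge_every_days}" -print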
diff --git a/ush/forecast_postdet.sh b/ush/forecast_postdet.sh
index 2cc34eaacd..993331d70b 100755
--- a/ush/forecast_postdet.sh
+++ b/ush/forecast_postdet.sh
@@ -268,20 +268,23 @@ FV3_out() {
fi
fi
- # Get list of FV3 restart files
- local file_list fv3_file
- file_list=$(FV3_restarts)
+ ### Check that there are restart files to copy
+ if [[ ${#restart_dates[@]} -gt 0 ]]; then
+ # Get list of FV3 restart files
+ local file_list fv3_file
+ file_list=$(FV3_restarts)
- # Copy restarts for the dates collected above to COM
- for restart_date in "${restart_dates[@]}"; do
- echo "Copying FV3 restarts for 'RUN=${RUN}' at ${restart_date}"
- for fv3_file in ${file_list}; do
- ${NCP} "${DATArestart}/FV3_RESTART/${restart_date}.${fv3_file}" \
- "${COMOUT_ATMOS_RESTART}/${restart_date}.${fv3_file}"
+ # Copy restarts for the dates collected above to COM
+ for restart_date in "${restart_dates[@]}"; do
+ echo "Copying FV3 restarts for 'RUN=${RUN}' at ${restart_date}"
+ for fv3_file in ${file_list}; do
+ ${NCP} "${DATArestart}/FV3_RESTART/${restart_date}.${fv3_file}" \
+ "${COMOUT_ATMOS_RESTART}/${restart_date}.${fv3_file}"
+ done
done
- done
- echo "SUB ${FUNCNAME[0]}: Output data for FV3 copied"
+ echo "SUB ${FUNCNAME[0]}: Output data for FV3 copied"
+ fi
}
# Disable variable not used warnings
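The new ${#restart_dates[@]} guard is more than tidiness: under 'set -u', expanding "${restart_dates[@]}" on an empty array raises an unbound-variable error in bash releases before 4.4, so the copy loop must not be reached when there is nothing to copy. A minimal reproduction of the guarded pattern (on bash >= 4.4 the unguarded loop would merely be a no-op):

    #! /usr/bin/env bash
    set -u
    restart_dates=()
    if (( ${#restart_dates[@]} > 0 )); then
      for restart_date in "${restart_dates[@]}"; do
        echo "copying restarts for ${restart_date}"
      done
    fi
    echo "no restart dates collected; nothing copied"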
diff --git a/ush/wave_grib2_sbs.sh b/ush/wave_grib2_sbs.sh
index 431387cccd..99f89f3f37 100755
--- a/ush/wave_grib2_sbs.sh
+++ b/ush/wave_grib2_sbs.sh
@@ -72,7 +72,7 @@ if [[ -n ${waveMEMB} ]]; then ENSTAG=".${membTAG}${waveMEMB}" ; fi
outfile="${WAV_MOD_TAG}.${cycle}${ENSTAG}.${grdnam}.${grdres}.f${FH3}.grib2"
# Only create file if not present in COM
-if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
+if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then
set +x
echo ' '
@@ -83,7 +83,7 @@ if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
set_trace
if [[ -z "${PDY}" ]] || [[ -z ${cyc} ]] || [[ -z "${cycle}" ]] || [[ -z "${EXECgfs}" ]] || \
- [[ -z "${COM_WAVE_GRID}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${gribflags}" ]] || \
+ [[ -z "${COMOUT_WAVE_GRID}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${gribflags}" ]] || \
[[ -z "${GRIDNR}" ]] || [[ -z "${MODNR}" ]] || \
[[ -z "${SENDDBN}" ]]; then
set +x
@@ -157,11 +157,11 @@ if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
fi
if (( fhr > 0 )); then
- ${WGRIB2} gribfile -set_date "${PDY}${cyc}" -set_ftime "${fhr} hour fcst" -grib "${COM_WAVE_GRID}/${outfile}"
+ ${WGRIB2} gribfile -set_date "${PDY}${cyc}" -set_ftime "${fhr} hour fcst" -grib "${COMOUT_WAVE_GRID}/${outfile}"
err=$?
else
${WGRIB2} gribfile -set_date "${PDY}${cyc}" -set_ftime "${fhr} hour fcst" \
- -set table_1.4 1 -set table_1.2 1 -grib "${COM_WAVE_GRID}/${outfile}"
+ -set table_1.4 1 -set table_1.2 1 -grib "${COMOUT_WAVE_GRID}/${outfile}"
err=$?
fi
@@ -177,7 +177,7 @@ if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
fi
# Create index
- ${WGRIB2} -s "${COM_WAVE_GRID}/${outfile}" > "${COM_WAVE_GRID}/${outfile}.idx"
+ ${WGRIB2} -s "${COMOUT_WAVE_GRID}/${outfile}" > "${COMOUT_WAVE_GRID}/${outfile}.idx"
# Create grib2 subgrid is this is the source grid
if [[ "${grdID}" = "${WAV_SUBGRBSRC}" ]]; then
@@ -186,14 +186,14 @@ if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
subgrbnam=$(echo ${!subgrb} | cut -d " " -f 21)
subgrbres=$(echo ${!subgrb} | cut -d " " -f 22)
subfnam="${WAV_MOD_TAG}.${cycle}${ENSTAG}.${subgrbnam}.${subgrbres}.f${FH3}.grib2"
- ${COPYGB2} -g "${subgrbref}" -i0 -x "${COM_WAVE_GRID}/${outfile}" "${COM_WAVE_GRID}/${subfnam}"
- ${WGRIB2} -s "${COM_WAVE_GRID}/${subfnam}" > "${COM_WAVE_GRID}/${subfnam}.idx"
+ ${COPYGB2} -g "${subgrbref}" -i0 -x "${COMOUT_WAVE_GRID}/${outfile}" "${COMOUT_WAVE_GRID}/${subfnam}"
+ ${WGRIB2} -s "${COMOUT_WAVE_GRID}/${subfnam}" > "${COMOUT_WAVE_GRID}/${subfnam}.idx"
done
fi
# 1.e Save in /com
- if [[ ! -s "${COM_WAVE_GRID}/${outfile}" ]]; then
+ if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}" ]]; then
set +x
echo ' '
echo '********************************************* '
@@ -205,7 +205,7 @@ if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
set_trace
exit 4
fi
- if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
+ if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then
set +x
echo ' '
echo '*************************************************** '
@@ -220,11 +220,11 @@ if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
if [[ "${SENDDBN}" = 'YES' ]] && [[ ${outfile} != *global.0p50* ]]; then
set +x
- echo " Alerting GRIB file as ${COM_WAVE_GRID}/${outfile}"
- echo " Alerting GRIB index file as ${COM_WAVE_GRID}/${outfile}.idx"
+ echo " Alerting GRIB file as ${COMOUT_WAVE_GRID}/${outfile}"
+ echo " Alerting GRIB index file as ${COMOUT_WAVE_GRID}/${outfile}.idx"
set_trace
- "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2" "${job}" "${COM_WAVE_GRID}/${outfile}"
- "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2_WIDX" "${job}" "${COM_WAVE_GRID}/${outfile}.idx"
+ "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2" "${job}" "${COMOUT_WAVE_GRID}/${outfile}"
+ "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2_WIDX" "${job}" "${COMOUT_WAVE_GRID}/${outfile}.idx"
else
echo "${outfile} is global.0p50 or SENDDBN is NO, no alert sent"
fi
@@ -245,7 +245,7 @@ if [[ ! -s "${COM_WAVE_GRID}/${outfile}.idx" ]]; then
else
set +x
echo ' '
- echo " File ${COM_WAVE_GRID}/${outfile} found, skipping generation process"
+ echo " File ${COMOUT_WAVE_GRID}/${outfile} found, skipping generation process"
echo ' '
set_trace
fi
diff --git a/ush/wave_grid_interp_sbs.sh b/ush/wave_grid_interp_sbs.sh
index e6f0a1a1aa..31b7808c16 100755
--- a/ush/wave_grid_interp_sbs.sh
+++ b/ush/wave_grid_interp_sbs.sh
@@ -66,7 +66,7 @@ source "${USHgfs}/preamble.sh"
set_trace
if [[ -z "${PDY}" ]] || [[ -z "${cyc}" ]] || [[ -z "${cycle}" ]] || [[ -z "${EXECgfs}" ]] || \
- [[ -z "${COM_WAVE_PREP}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${SENDDBN}" ]] || \
+ [[ -z "${COMOUT_WAVE_PREP}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${SENDDBN}" ]] || \
[ -z "${waveGRD}" ]
then
set +x
@@ -75,7 +75,7 @@ source "${USHgfs}/preamble.sh"
echo '*** EXPORTED VARIABLES IN postprocessor NOT SET ***'
echo '***************************************************'
echo ' '
- echo "${PDY}${cyc} ${cycle} ${EXECgfs} ${COM_WAVE_PREP} ${WAV_MOD_TAG} ${SENDDBN} ${waveGRD}"
+ echo "${PDY}${cyc} ${cycle} ${EXECgfs} ${COMOUT_WAVE_PREP} ${WAV_MOD_TAG} ${SENDDBN} ${waveGRD}"
set_trace
exit 1
fi
@@ -171,9 +171,9 @@ source "${USHgfs}/preamble.sh"
# 1.c Save in /com
set +x
- echo " Saving GRID file as ${COM_WAVE_PREP}/${WAV_MOD_TAG}.out_grd.${grdID}.${PDY}${cyc}"
+ echo " Saving GRID file as ${COMOUT_WAVE_PREP}/${WAV_MOD_TAG}.out_grd.${grdID}.${PDY}${cyc}"
set_trace
- cp "${DATA}/output_${ymdh}0000/out_grd.${grdID}" "${COM_WAVE_PREP}/${WAV_MOD_TAG}.out_grd.${grdID}.${PDY}${cyc}"
+ cp "${DATA}/output_${ymdh}0000/out_grd.${grdID}" "${COMOUT_WAVE_PREP}/${WAV_MOD_TAG}.out_grd.${grdID}.${PDY}${cyc}"
# if [ "$SENDDBN" = 'YES' ]
# then
diff --git a/ush/wave_grid_moddef.sh b/ush/wave_grid_moddef.sh
index 2deb98ce9c..1e8c44054a 100755
--- a/ush/wave_grid_moddef.sh
+++ b/ush/wave_grid_moddef.sh
@@ -109,7 +109,7 @@ source "${USHgfs}/preamble.sh"
if [[ -f mod_def.ww3 ]]
then
- cp mod_def.ww3 "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}"
+ cp mod_def.ww3 "${COMOUT_WAVE_PREP}/${RUN}wave.mod_def.${grdID}"
mv mod_def.ww3 "../mod_def.${grdID}"
else
set +x
diff --git a/ush/wave_prnc_ice.sh b/ush/wave_prnc_ice.sh
index 5e6ba82731..be089c30bd 100755
--- a/ush/wave_prnc_ice.sh
+++ b/ush/wave_prnc_ice.sh
@@ -55,8 +55,8 @@ source "${USHgfs}/preamble.sh"
echo "Making ice fields."
if [[ -z "${YMDH}" ]] || [[ -z "${cycle}" ]] || \
- [[ -z "${COM_WAVE_PREP}" ]] || [[ -z "${FIXgfs}" ]] || [[ -z "${EXECgfs}" ]] || \
- [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${WAVEICE_FID}" ]] || [[ -z "${COM_OBS}" ]]; then
+ [[ -z "${COMOUT_WAVE_PREP}" ]] || [[ -z "${FIXgfs}" ]] || [[ -z "${EXECgfs}" ]] || \
+ [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${WAVEICE_FID}" ]] || [[ -z "${COMIN_OBS}" ]]; then
set +x
echo ' '
@@ -77,7 +77,7 @@ source "${USHgfs}/preamble.sh"
# 1. Get the necessary files
# 1.a Copy the ice data file
- file=${COM_OBS}/${WAVICEFILE}
+ file=${COMIN_OBS}/${WAVICEFILE}
if [ -f $file ]
then
@@ -178,9 +178,9 @@ source "${USHgfs}/preamble.sh"
fi
set +x
- echo " Saving ice.ww3 as ${COM_WAVE_PREP}/${icefile}"
+ echo " Saving ice.ww3 as ${COMOUT_WAVE_PREP}/${icefile}"
set_trace
- cp ice.ww3 "${COM_WAVE_PREP}/${icefile}"
+ cp ice.ww3 "${COMOUT_WAVE_PREP}/${icefile}"
rm -f ice.ww3
# --------------------------------------------------------------------------- #
diff --git a/ush/wave_tar.sh b/ush/wave_tar.sh
index e01ef61f15..f82849854f 100755
--- a/ush/wave_tar.sh
+++ b/ush/wave_tar.sh
@@ -76,7 +76,7 @@ source "${USHgfs}/preamble.sh"
# 0.c Define directories and the search path.
# The tested variables should be exported by the postprocessor script.
- if [[ -z "${cycle}" ]] || [[ -z "${COM_WAVE_STATION}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || \
+ if [[ -z "${cycle}" ]] || [[ -z "${COMOUT_WAVE_STATION}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || \
[[ -z "${SENDDBN}" ]] || [[ -z "${STA_DIR}" ]]; then
set +x
echo ' '
@@ -179,10 +179,10 @@ source "${USHgfs}/preamble.sh"
set +x
echo ' '
- echo " Moving tar file ${file_name} to ${COM_WAVE_STATION} ..."
+ echo " Moving tar file ${file_name} to ${COMOUT_WAVE_STATION} ..."
set_trace
- cp "${file_name}" "${COM_WAVE_STATION}/."
+ cp "${file_name}" "${COMOUT_WAVE_STATION}/."
exit=$?
@@ -202,11 +202,11 @@ source "${USHgfs}/preamble.sh"
then
set +x
echo ' '
- echo " Alerting TAR file as ${COM_WAVE_STATION}/${file_name}"
+ echo " Alerting TAR file as ${COMOUT_WAVE_STATION}/${file_name}"
echo ' '
set_trace
"${DBNROOT}/bin/dbn_alert MODEL" "${alertName}_WAVE_TAR" "${job}" \
- "${COM_WAVE_STATION}/${file_name}"
+ "${COMOUT_WAVE_STATION}/${file_name}"
fi
# --------------------------------------------------------------------------- #
diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py
index ad135be713..a126992cee 100644
--- a/workflow/rocoto/tasks.py
+++ b/workflow/rocoto/tasks.py
@@ -178,25 +178,34 @@ def get_resource(self, task_name):
account = task_config['ACCOUNT_SERVICE'] if task_name in Tasks.SERVICE_TASKS else task_config['ACCOUNT']
- walltime = task_config[f'wtime_{task_name}']
- if self.cdump in ['gfs'] and f'wtime_{task_name}_gfs' in task_config.keys():
- walltime = task_config[f'wtime_{task_name}_gfs']
+ if f'wtime_{task_name}_{self.cdump}' in task_config:
+ walltime = task_config[f'wtime_{task_name}_{self.cdump}']
+ else:
+ walltime = task_config[f'wtime_{task_name}']
- cores = task_config[f'npe_{task_name}']
- if self.cdump in ['gfs'] and f'npe_{task_name}_gfs' in task_config.keys():
- cores = task_config[f'npe_{task_name}_gfs']
+ if f'npe_{task_name}_{self.cdump}' in task_config:
+ cores = task_config[f'npe_{task_name}_{self.cdump}']
+ else:
+ cores = task_config[f'npe_{task_name}']
- ppn = task_config[f'npe_node_{task_name}']
- if self.cdump in ['gfs'] and f'npe_node_{task_name}_gfs' in task_config.keys():
- ppn = task_config[f'npe_node_{task_name}_gfs']
+ if f'npe_node_{task_name}_{self.cdump}' in task_config:
+ ppn = task_config[f'npe_node_{task_name}_{self.cdump}']
+ else:
+ ppn = task_config[f'npe_node_{task_name}']
nodes = int(np.ceil(float(cores) / float(ppn)))
- threads = task_config[f'nth_{task_name}']
- if self.cdump in ['gfs'] and f'nth_{task_name}_gfs' in task_config.keys():
- threads = task_config[f'nth_{task_name}_gfs']
+ if f'nth_{task_name}_{self.cdump}' in task_config:
+ threads = task_config[f'nth_{task_name}_{self.cdump}']
+ else:
+ threads = task_config[f'nth_{task_name}']
+
+ if f'memory_{task_name}_{self.cdump}' in task_config:
+ memory = task_config[f'memory_{task_name}_{self.cdump}']
+ else:
+ # Memory is not required
+ memory = task_config.get(f'memory_{task_name}', None)
- memory = task_config.get(f'memory_{task_name}', None)
if scheduler in ['pbspro']:
if task_config.get('prepost', False):
memory += ':prepost=true'
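Every resource in get_resource now follows the same two-step lookup: prefer the key suffixed with self.cdump, fall back to the bare key, with memory alone being optional. A helper could express that once; this is a sketch of the pattern only, not code from the PR:

    # Sketch: consolidate the suffixed-key lookup used repeatedly above.
    _UNSET = object()

    def run_scoped(task_config, base_key, cdump, default=_UNSET):
        """Prefer '<base_key>_<cdump>', else '<base_key>'; 'default' makes it optional."""
        scoped = f'{base_key}_{cdump}'
        if scoped in task_config:
            return task_config[scoped]
        if default is _UNSET:
            return task_config[base_key]   # required: raises KeyError if absent
        return task_config.get(base_key, default)

    # walltime = run_scoped(task_config, f'wtime_{task_name}', self.cdump)
    # memory   = run_scoped(task_config, f'memory_{task_name}', self.cdump, default=None)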