From 1fc8b5bf55581de18e786611c65bc3f84bb0c082 Mon Sep 17 00:00:00 2001 From: Rahul Mahajan Date: Tue, 5 Dec 2023 15:47:52 -0500 Subject: [PATCH] Separate offline UPP from interpolated grib2 product generation (#2110) This PR: - Replaces the use of `JGLOBAL_ATMOS_POST` with `JGLOBAL_ATMOS_UPP` and `JGLOBAL_ATMOS_PRODUCTS`. - Introduces `JGLOBAL_ATMOS_PRODUCTS` that is responsible for creating grib2 products from the master file produced by UPP (inline from the model, or offline by running `JGLOBAL_ATMOS_UPP`). Rocoto job `atmos_products.sh`, ex-script `exglobal_atmos_products.sh` are also added along with a `config.atmos_products`. Updates are made to the relevant `env` files as well as `config.resources` to reflect the addition of this job. - reduces the number of cores needed appropriately for forecast products when there is no offline UPP necessary. - renames older `fv3gfs_dwn_nems.sh` to `interp_atmos_master.sh` and `inter_flux.sh` to `interp_atmos_sflux.sh` - removes `fv3gfs_downstream_nems.sh` as it is absorbed into `exglobal_atmos_products.sh` - removes no longer used scripts for `gfs_post.sh`, `gfs_transfer.sh` (DBN alerts are absorbed into `exglobal_atmos_products.sh`), `post.sh`, `JGLOBAL_ATMOS_POST`, `ex${RUN}_atmos_post.sh` scripts. - Rocoto workflow related scripts are updated to add `atmos_products` as a job. When `WRITE_DOPOST` is `.false.`, `upp.sh` job to run the UPP offline is also added. Dependencies have been appropriately applied. - removes `PGB1F` option that creates 1-degree grib1 products that are no longer needed in GFSv17. The hacks from (now deleted) `post.sh` had to be reinstated in `upp.sh` to load the modules from UPP instead of `load_fv3gfs_modules.sh`. 
--- env/HERA.env | 12 +- env/JET.env | 13 +- env/ORION.env | 13 +- env/S4.env | 13 +- env/WCOSS2.env | 13 +- jobs/JGLOBAL_ATMOS_POST | 105 ----- jobs/JGLOBAL_ATMOS_PRODUCTS | 47 ++ jobs/rocoto/atmos_products.sh | 31 ++ jobs/rocoto/post.sh | 44 -- jobs/rocoto/postanl.sh | 1 - jobs/rocoto/upp.sh | 45 +- parm/config/gefs/config.resources | 19 +- parm/config/gfs/config.atmos_products | 34 ++ parm/config/gfs/config.base.emc.dyn | 1 + parm/config/gfs/config.ocnpost | 6 +- parm/config/gfs/config.post | 35 -- parm/config/gfs/config.resources | 26 +- parm/config/gfs/config.upp | 2 +- parm/post/upp.yaml | 5 +- scripts/exgdas_atmos_post.sh | 329 -------------- scripts/exgfs_atmos_post.sh | 410 ------------------ scripts/exglobal_atmos_products.sh | 253 +++++++++++ sorc/link_workflow.sh | 28 +- ush/fv3gfs_downstream_nems.sh | 179 -------- ush/gfs_post.sh | 409 ----------------- ush/gfs_transfer.sh | 33 -- ush/inter_flux.sh | 56 --- ...gfs_dwn_nems.sh => interp_atmos_master.sh} | 1 - ush/interp_atmos_sflux.sh | 49 +++ ush/python/pygfs/task/upp.py | 8 + versions/build.hera.ver | 1 + versions/build.jet.ver | 1 + versions/build.orion.ver | 1 + versions/build.s4.ver | 1 + versions/build.spack.ver | 2 - versions/run.hera.ver | 2 + versions/run.jet.ver | 2 + versions/run.orion.ver | 2 + versions/run.s4.ver | 2 + workflow/applications/applications.py | 2 +- workflow/applications/gfs_cycled.py | 31 +- workflow/applications/gfs_forecast_only.py | 91 ++-- workflow/rocoto/gfs_tasks.py | 268 ++++++------ workflow/rocoto/rocoto.py | 2 - workflow/rocoto/tasks.py | 11 +- 45 files changed, 713 insertions(+), 1926 deletions(-) delete mode 100755 jobs/JGLOBAL_ATMOS_POST create mode 100755 jobs/JGLOBAL_ATMOS_PRODUCTS create mode 100755 jobs/rocoto/atmos_products.sh delete mode 100755 jobs/rocoto/post.sh delete mode 120000 jobs/rocoto/postanl.sh create mode 100644 parm/config/gfs/config.atmos_products delete mode 100644 parm/config/gfs/config.post delete mode 100755 scripts/exgdas_atmos_post.sh 
delete mode 100755 scripts/exgfs_atmos_post.sh create mode 100755 scripts/exglobal_atmos_products.sh delete mode 100755 ush/fv3gfs_downstream_nems.sh delete mode 100755 ush/gfs_post.sh delete mode 100755 ush/gfs_transfer.sh delete mode 100755 ush/inter_flux.sh rename ush/{fv3gfs_dwn_nems.sh => interp_atmos_master.sh} (99%) create mode 100755 ush/interp_atmos_sflux.sh diff --git a/env/HERA.env b/env/HERA.env index ddb2ae13ca..284d54820e 100755 --- a/env/HERA.env +++ b/env/HERA.env @@ -206,18 +206,9 @@ elif [[ "${step}" = "upp" ]]; then [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max} export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}" -elif [[ "${step}" = "post" ]]; then - - nth_max=$((npe_node_max / npe_node_post)) - - export NTHREADS_NP=${nth_np:-1} - [[ ${NTHREADS_NP} -gt ${nth_max} ]] && export NTHREADS_NP=${nth_max} - export APRUN_NP="${launcher} -n ${npe_post} --cpus-per-task=${NTHREADS_NP}" +elif [[ "${step}" = "atmos_products" ]]; then export USE_CFP="YES" # Use MPMD for downstream product generation on Hera - export NTHREADS_DWN=${nth_dwn:-1} - [[ ${NTHREADS_DWN} -gt ${nth_max} ]] && export NTHREADS_DWN=${nth_max} - export APRUN_DWN="${launcher} -n ${npe_dwn} --cpus-per-task=${NTHREADS_DWN}" elif [[ "${step}" = "ecen" ]]; then @@ -302,4 +293,3 @@ elif [[ "${step}" = "fit2obs" ]]; then export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}" fi - diff --git a/env/JET.env b/env/JET.env index 5f0f95202a..7bb152c5f3 100755 --- a/env/JET.env +++ b/env/JET.env @@ -193,18 +193,9 @@ elif [[ "${step}" = "upp" ]]; then [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max} export APRUN_UPP="${launcher} -n ${npe_upp}" -elif [[ "${step}" = "post" ]]; then +elif [[ "${step}" = "atmos_products" ]]; then - nth_max=$((npe_node_max / npe_node_post)) - - export NTHREADS_NP=${nth_np:-1} - [[ ${NTHREADS_NP} -gt ${nth_max} ]] && export NTHREADS_NP=${nth_max} - export APRUN_NP="${launcher} -n 
${npe_post}" - - export USE_CFP="YES" # Use MPMD for downstream product generation on Jet - export NTHREADS_DWN=${nth_dwn:-1} - [[ ${NTHREADS_DWN} -gt ${nth_max} ]] && export NTHREADS_DWN=${nth_max} - export APRUN_DWN="${launcher} -n ${npe_dwn}" + export USE_CFP="YES" # Use MPMD for downstream product generation elif [[ "${step}" = "ecen" ]]; then diff --git a/env/ORION.env b/env/ORION.env index 688b5cb790..d91fd4db03 100755 --- a/env/ORION.env +++ b/env/ORION.env @@ -206,18 +206,9 @@ elif [[ "${step}" = "upp" ]]; then [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max} export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}" -elif [[ "${step}" = "post" ]]; then +elif [[ "${step}" = "atmos_products" ]]; then - nth_max=$((npe_node_max / npe_node_post)) - - export NTHREADS_NP=${nth_np:-1} - [[ ${NTHREADS_NP} -gt ${nth_max} ]] && export NTHREADS_NP=${nth_max} - export APRUN_NP="${launcher} -n ${npe_post} --cpus-per-task=${NTHREADS_NP}" - - export USE_CFP="YES" # Use MPMD for downstream product generation on Orion - export NTHREADS_DWN=${nth_dwn:-1} - [[ ${NTHREADS_DWN} -gt ${nth_max} ]] && export NTHREADS_DWN=${nth_max} - export APRUN_DWN="${launcher} -n ${npe_dwn} --cpus-per-task=${NTHREADS_DWN}" + export USE_CFP="YES" # Use MPMD for downstream product generation elif [[ "${step}" = "ecen" ]]; then diff --git a/env/S4.env b/env/S4.env index 8f01e61a7e..3dab3fc3e7 100755 --- a/env/S4.env +++ b/env/S4.env @@ -179,18 +179,9 @@ elif [[ "${step}" = "upp" ]]; then [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max} export APRUN_UPP="${launcher} -n ${npe_upp}" -elif [[ "${step}" = "post" ]]; then +elif [[ "${step}" = "atmos_products" ]]; then - nth_max=$((npe_node_max / npe_node_post)) - - export NTHREADS_NP=${nth_np:-1} - [[ ${NTHREADS_NP} -gt ${nth_max} ]] && export NTHREADS_NP=${nth_max} - export APRUN_NP="${launcher} -n ${npe_post}" - - export USE_CFP="YES" # Use MPMD for downstream product generation on S4 - 
export NTHREADS_DWN=${nth_dwn:-1} - [[ ${NTHREADS_DWN} -gt ${nth_max} ]] && export NTHREADS_DWN=${nth_max} - export APRUN_DWN="${launcher} -n ${npe_dwn}" + export USE_CFP="YES" # Use MPMD for downstream product generation elif [[ "${step}" = "ecen" ]]; then diff --git a/env/WCOSS2.env b/env/WCOSS2.env index 9d8f459aa1..068b69fd7b 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -191,18 +191,9 @@ elif [[ "${step}" = "upp" ]]; then [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max} export APRUN_UPP="${launcher} -n ${npe_upp} -ppn ${npe_node_upp} --cpu-bind depth --depth ${NTHREADS_UPP}" -elif [[ "${step}" = "post" ]]; then +elif [[ "${step}" = "atmos_products" ]]; then - nth_max=$((npe_node_max / npe_node_post)) - - export NTHREADS_NP=${nth_np:-1} - [[ ${NTHREADS_NP} -gt ${nth_max} ]] && export NTHREADS_NP=${nth_max} - export APRUN_NP="${launcher} -n ${npe_np:-${npe_post}} -ppn ${npe_node_post} --cpu-bind depth --depth ${NTHREADS_NP}" - - export USE_CFP="YES" # Use MPMD for downstream product generation on WCOSS2 - export NTHREADS_DWN=${nth_dwn:-1} - [[ ${NTHREADS_DWN} -gt ${nth_max} ]] && export NTHREADS_DWN=${nth_max} - export APRUN_DWN="${launcher} -np ${npe_dwn} ${mpmd_opt}" + export USE_CFP="YES" # Use MPMD for downstream product generation elif [[ "${step}" = "ecen" ]]; then diff --git a/jobs/JGLOBAL_ATMOS_POST b/jobs/JGLOBAL_ATMOS_POST deleted file mode 100755 index 07890b9df5..0000000000 --- a/jobs/JGLOBAL_ATMOS_POST +++ /dev/null @@ -1,105 +0,0 @@ -#! 
/usr/bin/env bash - -source "${HOMEgfs}/ush/preamble.sh" -source "${HOMEgfs}/ush/jjob_header.sh" -e "post" -c "base post" - - -#################################### -# Specify version numbers -#################################### -export crtm_ver=${post_crtm_ver:-v2.2.6} -export gfs_ver=${gfs_ver:-v15.0.0} -export hwrf_ver=${hwrf_ver:-v11.0.5} -export g2tmpl_ver=${g2tmpl_ver:-v1.5.0} - -############################################## -# Set variables used in the exglobal script -############################################## -export CDUMP=${RUN/enkf} - -############################################## -# Begin JOB SPECIFIC work -############################################## -export APRUNP=${APRUN:-${APRUN_NP}} -export RERUN=${RERUN:-NO} -export HOMECRTM=${HOMECRTM:-${PACKAGEROOT}/lib/crtm/${crtm_ver}} -export FIXCRTM=${CRTM_FIX:-${HOMECRTM}/fix} -export PARMpost=${PARMpost:-${HOMEgfs}/parm/post} -export INLINE_POST=${WRITE_DOPOST:-".false."} - -# Construct COM variables from templates -YMD=${PDY} HH=${cyc} generate_com -rx COM_ATMOS_RESTART COM_ATMOS_ANALYSIS COM_ATMOS_HISTORY COM_ATMOS_MASTER -if [[ ! -d ${COM_ATMOS_MASTER} ]]; then mkdir -m 775 -p "${COM_ATMOS_MASTER}"; fi - -if [[ ${GOESF} == "YES" ]]; then - YMD=${PDY} HH=${cyc} generate_com -rx COM_ATMOS_GOES - if [[ ! -d ${COM_ATMOS_GOES} ]]; then mkdir -m 775 -p "${COM_ATMOS_GOES}"; fi -fi - -for grid in '0p25' '0p50' '1p00'; do - prod_dir="COM_ATMOS_GRIB_${grid}" - GRID=${grid} YMD=${PDY} HH=${cyc} generate_com -rx "${prod_dir}:COM_ATMOS_GRIB_GRID_TMPL" - if [[ ! -d "${!prod_dir}" ]]; then mkdir -m 775 -p "${!prod_dir}"; fi -done - -if [ "${RUN}" = gfs ];then - export FHOUT_PGB=${FHOUT_GFS:-3} #Output frequency of gfs pgb file at 1.0 and 0.5 deg. -fi -if [ "${RUN}" = gdas ]; then - export IGEN_GFS="gfs_avn" - export IGEN_ANL="anal_gfs" - export IGEN_FCST="gfs_avn" - export IGEN_GDAS_ANL="anal_gdas" - export FHOUT_PGB=${FHOUT:-1} #Output frequency of gfs pgb file at 1.0 and 0.5 deg. 
-fi - -if [ "${GRIBVERSION}" = grib2 ]; then - export IGEN_ANL="anal_gfs" - export IGEN_FCST="gfs_avn" - export IGEN_GFS="gfs_avn" -fi - -####################################### -# Specify Restart File Name to Key Off -####################################### -# TODO Improve the name of this variable -export restart_file=${COM_ATMOS_HISTORY}/${RUN}.t${cyc}z.atm.logf - -#################################### -# Specify Timeout Behavior of Post -# -# SLEEP_TIME - Amount of time to wait for -# a restart file before exiting -# SLEEP_INT - Amount of time to wait between -# checking for restart files -#################################### -export SLEEP_TIME=900 -export SLEEP_INT=5 - - -############################################################### -# Run relevant exglobal script - -"${HOMEgfs}/scripts/ex${RUN}_atmos_post.sh" -status=$? -(( status != 0 )) && exit "${status}" - -############################################## -# End JOB SPECIFIC work -############################################## - -############################################## -# Final processing -############################################## -if [ -e "${pgmout}" ]; then - cat "${pgmout}" -fi - -########################################## -# Remove the Temporary working directory -########################################## -cd "${DATAROOT}" || exit 1 -[[ "${KEEPDATA:-NO}" = "NO" ]] && rm -rf "${DATA}" - - -exit 0 diff --git a/jobs/JGLOBAL_ATMOS_PRODUCTS b/jobs/JGLOBAL_ATMOS_PRODUCTS new file mode 100755 index 0000000000..24e7edacdd --- /dev/null +++ b/jobs/JGLOBAL_ATMOS_PRODUCTS @@ -0,0 +1,47 @@ +#! 
/usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" +source "${HOMEgfs}/ush/jjob_header.sh" -e "atmos_products" -c "base atmos_products" + + +############################################## +# Begin JOB SPECIFIC work +############################################## + +# Construct COM variables from templates +YMD=${PDY} HH=${cyc} generate_com -rx COM_ATMOS_ANALYSIS COM_ATMOS_HISTORY COM_ATMOS_MASTER + +for grid in '0p25' '0p50' '1p00'; do + prod_dir="COM_ATMOS_GRIB_${grid}" + GRID=${grid} YMD=${PDY} HH=${cyc} generate_com -rx "${prod_dir}:COM_ATMOS_GRIB_GRID_TMPL" + if [[ ! -d "${!prod_dir}" ]]; then mkdir -m 775 -p "${!prod_dir}"; fi +done + +# Variables used in this job +export PREFIX="${RUN}.t${cyc}z." + +############################################################### +# Run exglobal script +"${HOMEgfs}/scripts/exglobal_atmos_products.sh" +status=$? +(( status != 0 )) && exit "${status}" + +############################################## +# End JOB SPECIFIC work +############################################## + +############################################## +# Final processing +############################################## +if [[ -e "${pgmout}" ]]; then + cat "${pgmout}" +fi + +########################################## +# Remove the Temporary working directory +########################################## +cd "${DATAROOT}" || exit 1 +[[ "${KEEPDATA:-NO}" = "NO" ]] && rm -rf "${DATA}" + + +exit 0 diff --git a/jobs/rocoto/atmos_products.sh b/jobs/rocoto/atmos_products.sh new file mode 100755 index 0000000000..fbc10cadbd --- /dev/null +++ b/jobs/rocoto/atmos_products.sh @@ -0,0 +1,31 @@ +#! /usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" + +############################################################### +## atmosphere products driver script +## FHRLST : forecast hour list to post-process (e.g. -f001, f000, f000_f001_f002, ...) +############################################################### + +# Source FV3GFS workflow modules +. 
"${HOMEgfs}/ush/load_fv3gfs_modules.sh" +status=$? +if (( status != 0 )); then exit "${status}"; fi + +export job="atmos_products" +export jobid="${job}.$$" + +############################################################### +# shellcheck disable=SC2153,SC2001 +IFS='_' read -ra fhrs <<< "${FHRLST//f}" # strip off the 'f's and convert to array + +#--------------------------------------------------------------- +# Execute the JJOB +for fhr in "${fhrs[@]}"; do + export FORECAST_HOUR=$(( 10#${fhr} )) + "${HOMEgfs}/jobs/JGLOBAL_ATMOS_PRODUCTS" + status=$? + if (( status != 0 )); then exit "${status}"; fi +done + +exit 0 diff --git a/jobs/rocoto/post.sh b/jobs/rocoto/post.sh deleted file mode 100755 index 92abef8767..0000000000 --- a/jobs/rocoto/post.sh +++ /dev/null @@ -1,44 +0,0 @@ -#! /usr/bin/env bash - -source "${HOMEgfs}/ush/preamble.sh" - -############################################################### -## NCEP post driver script -## FHRLST : forecast hourlist to be post-process (e.g. anl, f000, f000_f001_f002, ...) -############################################################### - -# TODO clean this up once ncdiag/1.1.2 is installed on WCOSS2 -source "${HOMEgfs}/ush/detect_machine.sh" -if [[ "${MACHINE_ID}" = "wcoss2" ]]; then - # Temporarily load modules from UPP - source "${HOMEgfs}/ush/module-setup.sh" - module use "${HOMEgfs}/sorc/ufs_model.fd/FV3/upp/modulefiles" - module load "${MACHINE_ID}" - module load prod_util - module load cray-pals - module load cfp - module load libjpeg - module load grib_util - module load wgrib2 - export WGRIB2=wgrib2 - # End hack -else - . ${HOMEgfs}/ush/load_fv3gfs_modules.sh - status=$? - [[ ${status} -ne 0 ]] && exit ${status} -fi - -export job="post" -export jobid="${job}.$$" - -fhrlst=$(echo ${FHRLST} | sed -e 's/_/ /g; s/f/ /g; s/,/ /g') - -#--------------------------------------------------------------- -for fhr in ${fhrlst}; do - export post_times=${fhr} - "${HOMEgfs}/jobs/JGLOBAL_ATMOS_POST" - status=$? 
- [[ ${status} -ne 0 ]] && exit "${status}" -done - -exit 0 diff --git a/jobs/rocoto/postanl.sh b/jobs/rocoto/postanl.sh deleted file mode 120000 index 29e1fc721f..0000000000 --- a/jobs/rocoto/postanl.sh +++ /dev/null @@ -1 +0,0 @@ -post.sh \ No newline at end of file diff --git a/jobs/rocoto/upp.sh b/jobs/rocoto/upp.sh index 23432a5b14..6ab243d58f 100755 --- a/jobs/rocoto/upp.sh +++ b/jobs/rocoto/upp.sh @@ -5,13 +5,35 @@ source "${HOMEgfs}/ush/preamble.sh" ############################################################### ## Offline UPP driver script ## UPP_RUN: analysis, forecast, goes, wafs. See upp.yaml for valid options -## FHRLST : forecast hourlist to be post-process (e.g. anl, f000, f000_f001_f002, ...) +## FHRLST : forecast hourlist to be post-process (e.g. f000, f000_f001_f002, ...) ############################################################### # Source FV3GFS workflow modules -. "${HOMEgfs}/ush/load_fv3gfs_modules.sh" -status=$? -[[ ${status} -ne 0 ]] && exit "${status}" +#. "${HOMEgfs}/ush/load_fv3gfs_modules.sh" +#status=$? +#if (( status != 0 )); then exit "${status}"; fi +# Temporarily load modules from UPP on WCOSS2 +source "${HOMEgfs}/ush/detect_machine.sh" +if [[ "${MACHINE_ID}" = "wcoss2" ]]; then + set +x + source "${HOMEgfs}/ush/module-setup.sh" + module use "${HOMEgfs}/sorc/ufs_model.fd/FV3/upp/modulefiles" + module load "${MACHINE_ID}" + module load prod_util + module load cray-pals + module load cfp + module load libjpeg + module load grib_util/1.2.3 + module load wgrib2/2.0.8 + export WGRIB2=wgrib2 + module load python/3.8.6 + module load crtm/2.4.0 # TODO: This is only needed when UPP_RUN=goes. Is there a better way to handle this? + set_trace +else + . "${HOMEgfs}/ush/load_fv3gfs_modules.sh" + status=$? 
+ if (( status != 0 )); then exit "${status}"; fi +fi ############################################################### # setup python path for workflow utilities and tasks @@ -23,18 +45,15 @@ export job="upp" export jobid="${job}.$$" ############################################################### -# Execute the JJOB -if [[ "${UPP_RUN}" = "analysis" ]]; then - unset FHRLST - FHRLST="f000" -fi -fhrlst=$(echo "${FHRLST}" | sed -e 's/_/ /g; s/f/ /g; s/,/ /g') +# shellcheck disable=SC2153,SC2001 +IFS='_' read -ra fhrs <<< "${FHRLST//f}" # strip off the 'f's and convert to array -for fhr in ${fhrlst}; do - export FORECAST_HOUR=${fhr} +# Execute the JJOB +for fhr in "${fhrs[@]}"; do + export FORECAST_HOUR=$(( 10#${fhr} )) "${HOMEgfs}/jobs/JGLOBAL_ATMOS_UPP" status=$? - [[ ${status} -ne 0 ]] && exit "${status}" + if (( status != 0 )); then exit "${status}"; fi done exit 0 diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources index d033ec8d42..40860f7b3a 100644 --- a/parm/config/gefs/config.resources +++ b/parm/config/gefs/config.resources @@ -9,7 +9,7 @@ if [[ $# -ne 1 ]]; then echo "Must specify an input task argument to set resource variables!" 
echo "argument can be any one of the following:" echo "stage_ic aerosol_init" - echo "sfcanl analcalc analdiag fcst post fit2obs metp arch echgres" + echo "sfcanl analcalc analdiag fcst fit2obs metp arch echgres" echo "ecen esfc efcs epos earc" echo "init_chem mom6ic ocnpost" echo "waveinit waveprep wavepostsbs wavepostbndpnt wavepostbndpntbll wavepostpnt" @@ -330,23 +330,6 @@ elif [[ ${step} = "ocnpost" ]]; then npe_ocnpost=2 fi -elif [[ ${step} = "post" ]]; then - - export wtime_post="00:12:00" - export wtime_post_gfs="01:00:00" - export npe_post=126 - res=$(echo "${CASE}" | cut -c2-) - if (( npe_post > res )); then - export npe_post=${res} - fi - export nth_post=1 - export npe_node_post=${npe_post} - export npe_node_post_gfs=${npe_post} - export npe_node_dwn=${npe_node_max} - if [[ "${npe_node_post}" -gt "${npe_node_max}" ]]; then export npe_node_post=${npe_node_max} ; fi - if [[ "${npe_node_post_gfs}" -gt "${npe_node_max}" ]]; then export npe_node_post_gfs=${npe_node_max} ; fi - export is_exclusive=True - elif [[ "${step}" = "fit2obs" ]]; then export wtime_fit2obs="00:20:00" diff --git a/parm/config/gfs/config.atmos_products b/parm/config/gfs/config.atmos_products new file mode 100644 index 0000000000..d8b1d6e32b --- /dev/null +++ b/parm/config/gfs/config.atmos_products @@ -0,0 +1,34 @@ +#! /usr/bin/env bash + +########## config.atmos_products ########## +# atmosphere grib2 products specific + +echo "BEGIN: config.atmos_products" + +# Get task specific resources +. "${EXPDIR}/config.resources" atmos_products + +# No. 
of forecast hours to process in a single job +export NFHRS_PER_GROUP=3 + +# Scripts used by this job +export INTERP_ATMOS_MASTERSH="${HOMEgfs}/ush/interp_atmos_master.sh" +export INTERP_ATMOS_SFLUXSH="${HOMEgfs}/ush/interp_atmos_sflux.sh" + +if [[ "${RUN:-}" == "gdas" ]]; then + export downset=1 + export FHOUT_PGBS=${FHOUT:-1} # Output frequency of supplemental gfs pgb file at 1.0 and 0.5 deg + export FLXGF="NO" # Create interpolated sflux.1p00 file +elif [[ "${RUN:-}" == "gfs" ]]; then + export downset=2 + export FHOUT_PGBS=${FHOUT_GFS:-3} # Output frequency of supplemental gfs pgb file at 1.0 and 0.5 deg + export FLXGF="YES" # Create interpolated sflux.1p00 file +fi + +# paramlist files for the different forecast hours and downsets +export paramlista="${HOMEgfs}/parm/post/global_1x1_paramlist_g2" +export paramlista_anl="${HOMEgfs}/parm/post/global_1x1_paramlist_g2.anl" +export paramlista_f000="${HOMEgfs}/parm/post/global_1x1_paramlist_g2.f000" +export paramlistb="${HOMEgfs}/parm/post/global_master-catchup_parmlist_g2" + +echo "END: config.atmos_products" diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn index 22b72fe873..4451c049b0 100644 --- a/parm/config/gfs/config.base.emc.dyn +++ b/parm/config/gfs/config.base.emc.dyn @@ -53,6 +53,7 @@ export NOSCRUB="@NOSCRUB@" export BASE_GIT="@BASE_GIT@" # Toggle to turn on/off GFS downstream processing. 
+export DO_GOES="NO" # GOES products export DO_BUFRSND="NO" # BUFR sounding products export DO_GEMPAK="NO" # GEMPAK products export DO_AWIPS="NO" # AWIPS products diff --git a/parm/config/gfs/config.ocnpost b/parm/config/gfs/config.ocnpost index a6330207fb..2505431401 100644 --- a/parm/config/gfs/config.ocnpost +++ b/parm/config/gfs/config.ocnpost @@ -5,7 +5,7 @@ echo "BEGIN: config.ocnpost" # Get task specific resources -source $EXPDIR/config.resources ocnpost +source "${EXPDIR}/config.resources" ocnpost # Convert netcdf files to grib files using post job #------------------------------------------- @@ -23,7 +23,7 @@ if [[ "${machine}" = "WCOSS2" ]]; then export MAKE_OCN_GRIB="NO" fi -# No. of concurrent post jobs [0 implies sequential] -export NPOSTGRP=5 +# No. of forecast hours to process in a single job +export NFHRS_PER_GROUP=3 echo "END: config.ocnpost" diff --git a/parm/config/gfs/config.post b/parm/config/gfs/config.post deleted file mode 100644 index 652869e2c9..0000000000 --- a/parm/config/gfs/config.post +++ /dev/null @@ -1,35 +0,0 @@ -#! /usr/bin/env bash - -########## config.post ########## -# Post specific - -echo "BEGIN: config.post" - -# Get task specific resources -. $EXPDIR/config.resources post - -# No. 
of concurrent post jobs [0 implies sequential] -export NPOSTGRP=42 -export OUTTYP=4 -export MODEL_OUT_FORM=netcdfpara - -# Post driver job that calls gfs_post.sh and downstream jobs -export POSTJJOBSH="$HOMEpost/jobs/JGLOBAL_POST" -export GFSDOWNSH="$HOMEpost/ush/fv3gfs_downstream_nems.sh" -export GFSDWNSH="$HOMEpost/ush/fv3gfs_dwn_nems.sh" - -export POSTGPSH="$HOMEpost/ush/gfs_post.sh" -export POSTGPEXEC="$HOMEpost/exec/upp.x" -export GOESF=NO # goes image -export FLXF=YES # grib2 flux file written by post - -export npe_postgp=$npe_post -export nth_postgp=1 - -export GFS_DOWNSTREAM="YES" -export downset=2 -export npe_dwn=24 - -export GRIBVERSION='grib2' - -echo "END: config.post" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index 4d975c6003..46be4fbda4 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -14,7 +14,8 @@ if [[ $# -ne 1 ]]; then echo "atmensanlinit atmensanlrun atmensanlfinal" echo "landanl" echo "aeroanlinit aeroanlrun aeroanlfinal" - echo "anal sfcanl analcalc analdiag fcst post echgres" + echo "anal sfcanl analcalc analdiag fcst echgres" + echo "upp atmos_products" echo "tracker genesis genesis_fsu" echo "verfozn verfrad vminmon fit2obs metp arch cleanup" echo "eobs ediag eomg eupd ecen esfc efcs epos earc" @@ -705,21 +706,16 @@ elif [[ "${step}" = "upp" ]]; then fi export is_exclusive=True -elif [[ ${step} = "post" ]]; then +elif [[ ${step} = "atmos_products" ]]; then - export wtime_post="00:12:00" - export wtime_post_gfs="01:00:00" - export npe_post=126 - res=$(echo "${CASE}" | cut -c2-) - if (( npe_post > res )); then - export npe_post=${res} - fi - export nth_post=1 - export npe_node_post=${npe_post} - export npe_node_post_gfs=${npe_post} - export npe_node_dwn=${npe_node_max} - if [[ "${npe_node_post}" -gt "${npe_node_max}" ]]; then export npe_node_post=${npe_node_max} ; fi - if [[ "${npe_node_post_gfs}" -gt "${npe_node_max}" ]]; then export 
npe_node_post_gfs=${npe_node_max} ; fi + export wtime_atmos_products="00:15:00" + export npe_atmos_products=24 + export nth_atmos_products=1 + export npe_node_atmos_products="${npe_atmos_products}" + export wtime_atmos_products_gfs="${wtime_atmos_products}" + export npe_atmos_products_gfs="${npe_atmos_products}" + export nth_atmos_products_gfs="${nth_atmos_products}" + export npe_node_atmos_products_gfs="${npe_node_atmos_products}" export is_exclusive=True elif [[ ${step} = "verfozn" ]]; then diff --git a/parm/config/gfs/config.upp b/parm/config/gfs/config.upp index f439e4ea2e..a1bd0a7d34 100644 --- a/parm/config/gfs/config.upp +++ b/parm/config/gfs/config.upp @@ -11,6 +11,6 @@ echo "BEGIN: config.upp" export UPP_CONFIG="${HOMEgfs}/parm/post/upp.yaml" # No. of forecast hours to process in a single job -export NUPPGRP=3 +export NFHRS_PER_GROUP=3 echo "END: config.upp" diff --git a/parm/post/upp.yaml b/parm/post/upp.yaml index fcac567bcb..651f3c12a8 100644 --- a/parm/post/upp.yaml +++ b/parm/post/upp.yaml @@ -23,8 +23,8 @@ analysis: - ["{{ COM_ATMOS_ANALYSIS }}/{{ RUN }}.t{{ current_cycle | strftime('%H') }}z.sfcanl.nc", "{{ DATA }}/{{ flux_filename }}"] data_out: copy: - - ["{{ DATA }}/GFSPRS.GrbF00", "{{ COM_ATMOS_MASTER }}/{{ RUN }}.t{{ current_cycle | strftime('%H') }}z.masteranl.grb2"] - - ["{{ DATA }}/GFSPRS.GrbF00.idx", "{{ COM_ATMOS_MASTER }}/{{ RUN }}.t{{ current_cycle | strftime('%H') }}z.masteranl.grb2.idx"] + - ["{{ DATA }}/GFSPRS.GrbF00", "{{ COM_ATMOS_MASTER }}/{{ RUN }}.t{{ current_cycle | strftime('%H') }}z.master.grb2anl"] + - ["{{ DATA }}/GFSPRS.GrbF00.idx", "{{ COM_ATMOS_MASTER }}/{{ RUN }}.t{{ current_cycle | strftime('%H') }}z.master.grb2ianl"] forecast: config: @@ -88,4 +88,3 @@ goes: copy: - ["{{ DATA }}/GFSPRS.GrbF{{ '%02d' % forecast_hour }}", "{{ COM_ATMOS_MASTER }}/{{ RUN }}.t{{ current_cycle | strftime('%H') }}z.goesmasterf{{ '%03d' % forecast_hour }}.grb2"] - ["{{ DATA }}/GFSPRS.GrbF{{ '%02d' % forecast_hour }}.idx", "{{ COM_ATMOS_MASTER 
}}/{{ RUN }}.t{{ current_cycle | strftime('%H') }}z.goesmasterf{{ '%03d' % forecast_hour }}.grb2.idx"] - diff --git a/scripts/exgdas_atmos_post.sh b/scripts/exgdas_atmos_post.sh deleted file mode 100755 index 4b23200628..0000000000 --- a/scripts/exgdas_atmos_post.sh +++ /dev/null @@ -1,329 +0,0 @@ -#! /usr/bin/env bash - -##################################################################### -# echo "-----------------------------------------------------" -# echo " exgdas_nceppost.sh" -# echo " Sep 07 - Chuang - Modified script to run unified post" -# echo " July 14 - Carlis - Changed to 0.25 deg grib2 master file" -# echo " Feb 16 - Lin - Modify to use Vertical Structure" -# echo " Aug 17 - Meng - Modify to use 3-digit forecast hour naming" -# echo " master and flux files" -# echo " Dec 17 - Meng - Link sfc data file to flxfile " -# echo " since fv3gfs does not output sfc files any more." -# echo " Dec 17 - Meng - Add fv3gfs_downstream_nems.sh for pgb processing " -# echo " and remove writing data file to /nwges" -# echo " Jan 18 - Meng - For EE2 standard, move IDRT POSTGPVARS setting" -# echo " from j-job script." -# echo " Feb 18 - Meng - Removed legacy setting for generating grib1 data" -# echo " and reading sigio model outputs." -# echo " Aug 20 - Meng - Remove .ecf extentsion per EE2 review." -# echo " Sep 20 - Meng - Update clean up files per EE2 review." -# echo " Mar 21 - Meng - Update POSTGRB2TBL default setting." -# echo " Oct 21 - Meng - Remove jlogfile for wcoss2 transition." -# echo " Feb 22 - Lin - Exception handling if anl input not found." 
-# echo "-----------------------------------------------------" -##################################################################### - -source "${HOMEgfs}/ush/preamble.sh" - -cd "${DATA}" || exit 1 - -export POSTGPSH=${POSTGPSH:-${USHgfs}/gfs_post.sh} -export GFSDOWNSH=${GFSDOWNSH:-${USHgfs}/fv3gfs_downstream_nems.sh} -export GFSDWNSH=${GFSDWNSH:-${USHgfs}/fv3gfs_dwn_nems.sh} -export INLINE_POST=${INLINE_POST:-".false."} - -############################################################ -# Define Variables: -# ----------------- -# fhr is the current forecast hour. -# SLEEP_TIME is the number of seconds to sleep before exiting with error. -# SLEEP_INT is the number of seconds to sleep between restrt file checks. -# restart_file is the name of the file to key off of to kick off post. -############################################################ - -export IO=${LONB:-1440} -export JO=${LATB:-721} -# specify default model output format: 3 for sigio and 4 -# for nemsio -export OUTTYP=${OUTTYP:-4} -export PREFIX=${PREFIX:-${RUN}.t${cyc}z.} -export machine=${machine:-WCOSS2} - -########################### -# Specify Output layers -########################### -export POSTGPVARS="KPO=57,PO=1000.,975.,950.,925.,900.,875.,850.,825.,800.,775.,750.,725.,700.,675.,650.,625.,600.,575.,550.,525.,500.,475.,450.,425.,400.,375.,350.,325.,300.,275.,250.,225.,200.,175.,150.,125.,100.,70.,50.,40.,30.,20.,15.,10.,7.,5.,3.,2.,1.,0.7,0.4,0.2,0.1,0.07,0.04,0.02,0.01,rdaod=.true.," - -########################################################## -# Specify variable to directly output pgrb2 files for GDAS/GFS -########################################################## -export IDRT=${IDRT:-0} # IDRT=0 is setting for outputting grib files on lat/lon grid - -############################################################ -# Post Analysis Files before starting the Forecast Post -############################################################ -# Chuang: modify to process analysis when post_times is 00 
-stime="$(echo "${post_times}" | cut -c1-3)" -export stime -export loganl="${COM_ATMOS_ANALYSIS}/${PREFIX}atmanl.nc" - -if [[ "${stime}" = "anl" ]]; then - if [[ -f "${loganl}" ]]; then - # add new environmental variables for running new ncep post - # Validation date - - export VDATE=${PDY}${cyc} - - # set outtyp to 1 because we need to run chgres in the post before model start running chgres - # otherwise set to 0, then chgres will not be executed in global_nceppost.sh - - export OUTTYP=${OUTTYP:-4} - - # specify output file name from chgres which is input file name to nceppost - # if model already runs gfs io, make sure GFSOUT is linked to the gfsio file - # new imported variable for global_nceppost.sh - - export GFSOUT=${RUN}.${cycle}.gfsioanl - - # specify smaller control file for GDAS because GDAS does not - # produce flux file, the default will be /nwprod/parm/gfs_cntrl.parm - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - export POSTGRB2TBL=${POSTGRB2TBL:-${g2tmpl_ROOT}/share/params_grib2_tbl_new} - export PostFlatFile=${PostFlatFile:-${PARMpost}/postxconfig-NT-GFS-ANL.txt} - export CTLFILE=${PARMpost}/postcntrl_gfs_anl.xml - fi - - [[ -f flxfile ]] && rm flxfile ; [[ -f nemsfile ]] && rm nemsfile - - ln -fs "${COM_ATMOS_ANALYSIS}/${PREFIX}atmanl.nc" nemsfile - export NEMSINP=nemsfile - ln -fs "${COM_ATMOS_ANALYSIS}/${PREFIX}sfcanl.nc" flxfile - export FLXINP=flxfile - export PGBOUT=pgbfile - export PGIOUT=pgifile - export PGBOUT2=pgbfile.grib2 - export PGIOUT2=pgifile.grib2.idx - export IGEN="${IGEN_ANL}" - export FILTER=0 - - # specify fhr even for analysis because postgp uses it - # export fhr=00 - - ${POSTGPSH} - export err=$?; err_chk - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - mv "${PGBOUT}" "${PGBOUT2}" - - #Proces pgb files - export FH=-1 - export downset=${downset:-1} - ${GFSDOWNSH} - export err=$?; err_chk - fi - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - MASTERANL=${PREFIX}master.grb2anl - ##########XXW Accord to Boi, fortran index should 
use *if${fhr}, wgrib index use .idx - #MASTERANLIDX=${RUN}.${cycle}.master.grb2${fhr3}.idx - MASTERANLIDX=${PREFIX}master.grb2ianl - cp "${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERANL}" - ${GRB2INDEX} "${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERANLIDX}" - fi - - if [[ "${SENDDBN}" = 'YES' ]]; then - run="$(echo "${RUN}" | tr '[:lower:]' '[:upper:]')" - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_MSC_sfcanl" "${job}" "${COM_ATMOS_ANALYSIS}/${PREFIX}sfcanl.nc" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_SA" "${job}" "${COM_ATMOS_ANALYSIS}/${PREFIX}atmanl.nc" - "${DBNROOT}/bin/dbn_alert" MODEL "GDAS_PGA_GB2" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.anl" - "${DBNROOT}/bin/dbn_alert" MODEL "GDAS_PGA_GB2_WIDX" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.anl.idx" - fi - fi - rm pgbfile.grib2 - else - #### atmanl file not found need failing job - echo " *** FATAL ERROR: No model anl file output " - export err=9 - err_chk - fi -else ## not_anl if_stimes - SLEEP_LOOP_MAX=$(( SLEEP_TIME / SLEEP_INT )) - - ############################################################ - # Loop Through the Post Forecast Files - ############################################################ - - for fhr in ${post_times}; do - # Enforce decimal math expressions - d_fhr=$((10#${fhr})) - ############################### - # Start Looping for the - # existence of the restart files - ############################### - export pgm="postcheck" - ic=1 - while (( ic <= SLEEP_LOOP_MAX )); do - if [[ -f "${restart_file}${fhr}.txt" ]]; then - break - else - ic=$(( ic + 1 )) - sleep "${SLEEP_INT}" - fi - ############################### - # If we reach this point assume - # fcst job never reached restart - # period and error exit - ############################### - if (( ic == SLEEP_LOOP_MAX )); then - echo " *** FATAL ERROR: No model output for f${fhr} " - export err=9 - err_chk - fi - done - - ############################### - # Put restart files into /nwges 
- # for backup to start Model Fcst - ############################### - [[ -f flxfile ]] && rm flxfile - [[ -f nemsfile ]] && rm nemsfile - ln -sf "${COM_ATMOS_HISTORY}/${PREFIX}atmf${fhr}.nc" nemsfile - export NEMSINP=nemsfile - ln -sf "${COM_ATMOS_HISTORY}/${PREFIX}sfcf${fhr}.nc" flxfile - export FLXINP=flxfile - - if (( d_fhr > 0 )); then - export IGEN=${IGEN_FCST} - else - export IGEN=${IGEN_ANL} - fi - - # add new environmental variables for running new ncep post - # Validation date - - # No shellcheck, NDATE is not a typo - # shellcheck disable=SC2153 - VDATE="$(${NDATE} "+${fhr}" "${PDY}${cyc}")" - # shellcheck disable= - export VDATE - - # set to 3 to output lat/lon grid - - export OUTTYP=${OUTTYP:-4} - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - export POSTGRB2TBL="${POSTGRB2TBL:-${g2tmpl_ROOT}/share/params_grib2_tbl_new}" - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS.txt" - if [[ "${RUN}" = gfs ]]; then - export IGEN="${IGEN_GFS}" - if (( d_fhr > 0 )); then export IGEN="${IGEN_FCST}" ; fi - else - export IGEN="${IGEN_GDAS_ANL}" - if (( d_fhr > 0 )); then export IGEN="${IGEN_FCST}" ; fi - fi - if [[ "${RUN}" = gfs ]]; then - if (( d_fhr == 0 )); then - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS-F00.txt" - export CTLFILE="${PARMpost}/postcntrl_gfs_f00.xml" - else - export CTLFILE="${CTLFILEGFS:-${PARMpost}/postcntrl_gfs.xml}" - fi - else - if (( d_fhr == 0 )); then - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS-F00.txt" - export CTLFILE="${CTLFILEGFS:-${PARMpost}/postcntrl_gfs_f00.xml}" - else - export CTLFILE="${CTLFILEGFS:-${PARMpost}/postcntrl_gfs.xml}" - fi - fi - fi - - export FLXIOUT=flxifile - export PGBOUT=pgbfile - export PGIOUT=pgifile - export PGBOUT2=pgbfile.grib2 - export PGIOUT2=pgifile.grib2.idx - export FILTER=0 - export fhr3=${fhr} - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - MASTERFHR=${PREFIX}master.grb2f${fhr} - MASTERFHRIDX=${PREFIX}master.grb2if${fhr} - fi - - if [[ "${INLINE_POST}" = ".false." 
]]; then - ${POSTGPSH} - else - cp "${COM_ATMOS_MASTER}/${MASTERFHR}" "${PGBOUT}" - fi - export err=$?; err_chk - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - mv "${PGBOUT}" "${PGBOUT2}" - fi - - #wm Process pgb files - export FH=$(( 10#${fhr} + 0 )) - export downset=${downset:-1} - ${GFSDOWNSH} - export err=$?; err_chk - - if [[ "${SENDDBN}" = "YES" ]]; then - run="$(echo "${RUN}" | tr '[:lower:]' '[:upper:]')" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_PGB2_0P25" "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.f${fhr}" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_PGB2_0P25_WIDX ""${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.f${fhr}.idx" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_PGB_GB2" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.f${fhr}" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_PGB_GB2_WIDX" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.f${fhr}.idx" - fi - - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - if [[ "${INLINE_POST}" = ".false." ]]; then - cp "${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERFHR}" - fi - ${GRB2INDEX} "${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERFHRIDX}" - fi - - # Model generated flux files will be in nemsio after FY17 upgrade - # use post to generate Grib2 flux files - - if (( OUTTYP == 4 )) ; then - export NEMSINP=${COM_ATMOS_HISTORY}/${PREFIX}atmf${fhr}.nc - export FLXINP=${COM_ATMOS_HISTORY}/${PREFIX}sfcf${fhr}.nc - if (( d_fhr == 0 )); then - export PostFlatFile=${PARMpost}/postxconfig-NT-GFS-FLUX-F00.txt - export CTLFILE=${PARMpost}/postcntrl_gfs_flux_f00.xml - else - export PostFlatFile=${PARMpost}/postxconfig-NT-GFS-FLUX.txt - export CTLFILE=${PARMpost}/postcntrl_gfs_flux.xml - fi - export PGBOUT=fluxfile - export FILTER=0 - FLUXFL=${PREFIX}sfluxgrbf${fhr}.grib2 - FLUXFLIDX=${PREFIX}sfluxgrbf${fhr}.grib2.idx - - if [[ "${INLINE_POST}" = ".false." 
]]; then - ${POSTGPSH} - export err=$?; err_chk - mv fluxfile "${COM_ATMOS_MASTER}/${FLUXFL}" - fi - ${WGRIB2} -s "${COM_ATMOS_MASTER}/${FLUXFL}" > "${COM_ATMOS_MASTER}/${FLUXFLIDX}" - fi - - if [[ "${SENDDBN}" = 'YES' ]] && [[ "${RUN}" = 'gdas' ]] && (( d_fhr % 3 == 0 )); then - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_SF" "${job}" "${COM_ATMOS_HISTORY}/${PREFIX}atmf${fhr}.nc" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_BF" "${job}" "${COM_ATMOS_HISTORY}/${PREFIX}sfcf${fhr}.nc" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_SGB_GB2" "${job}" "${COM_ATMOS_MASTER}/${PREFIX}sfluxgrbf${fhr}.grib2" - "${DBNROOT}/bin/dbn_alert" MODEL "${run}_SGB_GB2_WIDX ""${job}" "${COM_ATMOS_MASTER}/${PREFIX}sfluxgrbf${fhr}.grib2.idx" - fi - - [[ -f pgbfile.grib2 ]] && rm pgbfile.grib2 - [[ -f flxfile ]] && rm flxfile - done -fi ## end_if_times - -exit 0 - -################## END OF SCRIPT ####################### diff --git a/scripts/exgfs_atmos_post.sh b/scripts/exgfs_atmos_post.sh deleted file mode 100755 index 4100b60437..0000000000 --- a/scripts/exgfs_atmos_post.sh +++ /dev/null @@ -1,410 +0,0 @@ -#! /usr/bin/env bash - -##################################################################### -# echo "-----------------------------------------------------" -# echo " exgfs_nceppost.sh" -# echo " Apr 99 - Michaud - Generated to post global forecast" -# echo " Mar 03 - Zhu - Add post for 0.5x0.5 degree" -# echo " Nov 03 - Gilbert - Modified from exglobal_post.sh.sms" -# echo " to run only one master post job." 
-# echo " Jan 07 - Cooke - Add DBNet Alert for Master files" -# echo " May 07 - Chuang - Modified scripts to run unified post" -# echo " Feb 10 - Carlis - Add 12-hr accum precip bucket at f192" -# echo " Jun 12 - Wang - Add option for grb2" -# echo " Jul 14 - Carlis - Add 0.25 deg master " -# echo " Mar 17 - F Yang - Modified for running fv3gfs" -# echo " Aug 17 - Meng - Add flags for turning on/off flx, gtg " -# echo " and satellite look like file creation" -# echo " and use 3-digit forecast hour naming" -# echo " post output files" -# echo " Dec 17 - Meng - Link sfc data file to flxfile " -# echo " since fv3gfs does not output sfc files any more." -# echo " Dec 17 - Meng - Add fv3gfs_downstream_nems.sh for pgb processing " -# echo " Jan 18 - Meng - Add flag PGBF for truning on/off pgb processing. " -# echo " Jan 18 - Meng - For EE2 standard, move IDRT POSTGPVARS setting" -# echo " from j-job script." -# echo " Feb 18 - Meng - Removed legacy setting for generating grib1 data" -# echo " and reading sigio model outputs." -# echo " Aug 20 - Meng - Remove .ecf extentsion per EE2 review." -# echo " Sep 20 - Meng - Update clean up files per EE2 review." -# echo " Dec 20 - Meng - Add alert for special data file." -# echo " Mar 21 - Meng - Update POSTGRB2TBL default setting." -# echo " Oct 21 - Meng - Remove jlogfile for wcoss2 transition." -# echo " Feb 22 - Lin - Exception handling if anl input not found." 
-# echo "-----------------------------------------------------" -##################################################################### - -source "${HOMEgfs}/ush/preamble.sh" - -cd "${DATA}" || exit 1 - -export POSTGPSH=${POSTGPSH:-${USHgfs}/gfs_post.sh} -export GFSDOWNSH=${GFSDOWNSH:-${USHgfs}/fv3gfs_downstream_nems.sh} -export GFSDOWNSHF=${GFSDOWNSHF:-${USHgfs}/inter_flux.sh} -export GFSDWNSH=${GFSDWNSH:-${USHgfs}/fv3gfs_dwn_nems.sh} -export INLINE_POST=${INLINE_POST:-".false."} - -############################################################ -# Define Variables: -# ----------------- -# FH is the current forecast hour. -# SLEEP_TIME is the number of seconds to sleep before exiting with error. -# SLEEP_INT is the number of seconds to sleep between restrt file checks. -# restart_file is the name of the file to key off of to kick off post. -############################################################ -export IO=${LONB:-1440} -export JO=${LATB:-721} -export OUTTYP=${OUTTYP:-4} -export FLXF=${FLXF:-"YES"} -export FLXGF=${FLXGF:-"YES"} -export GOESF=${GOESF:-"YES"} -export PGBF=${PGBF:-"YES"} -export TCYC=${TCYC:-".t${cyc}z."} -export PREFIX=${PREFIX:-${RUN}${TCYC}} -export machine=${machine:-WCOSS2} - -########################### -# Specify Output layers -########################### -export POSTGPVARS="KPO=57,PO=1000.,975.,950.,925.,900.,875.,850.,825.,800.,775.,750.,725.,700.,675.,650.,625.,600.,575.,550.,525.,500.,475.,450.,425.,400.,375.,350.,325.,300.,275.,250.,225.,200.,175.,150.,125.,100.,70.,50.,40.,30.,20.,15.,10.,7.,5.,3.,2.,1.,0.7,0.4,0.2,0.1,0.07,0.04,0.02,0.01,rdaod=.true.," - -########################################################## -# Specify variable to directly output pgrb2 files for GDAS/GFS -########################################################## -export IDRT=${IDRT:-0} # IDRT=0 is setting for outputting grib files on lat/lon grid - -############################################################ -# Post Analysis Files before starting the Forecast 
Post -############################################################ -# Process analysis when post_times is 00 -stime="$(echo "${post_times}" | cut -c1-3)" -export stime -export loganl="${COM_ATMOS_ANALYSIS}/${PREFIX}atmanl.nc" - -if [[ "${stime}" = "anl" ]]; then - if [[ -f "${loganl}" ]]; then - # add new environmental variables for running new ncep post - # Validation date - export VDATE=${PDY}${cyc} - # specify output file name from chgres which is input file name to nceppost - # if model already runs gfs io, make sure GFSOUT is linked to the gfsio file - # new imported variable for global_nceppost.sh - export GFSOUT=${PREFIX}gfsioanl - - # specify smaller control file for GDAS because GDAS does not - # produce flux file, the default will be /nwprod/parm/gfs_cntrl.parm - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - # use grib2 nomonic table in product g2tmpl directory as default - export POSTGRB2TBL=${POSTGRB2TBL:-${g2tmpl_ROOT}/share/params_grib2_tbl_new} - export PostFlatFile=${PostFlatFile:-${PARMpost}/postxconfig-NT-GFS-ANL.txt} - export CTLFILE=${PARMpost}/postcntrl_gfs_anl.xml - fi - - [[ -f flxfile ]] && rm flxfile ; [[ -f nemsfile ]] && rm nemsfile - ln -fs "${COM_ATMOS_ANALYSIS}/${PREFIX}atmanl.nc" nemsfile - export NEMSINP=nemsfile - ln -fs "${COM_ATMOS_ANALYSIS}/${PREFIX}sfcanl.nc" flxfile - export FLXINP=flxfile - - export PGBOUT=pgbfile - export PGIOUT=pgifile - export PGBOUT2=pgbfile.grib2 - export PGIOUT2=pgifile.grib2.idx - export IGEN=${IGEN_ANL} - export FILTER=0 - - ${POSTGPSH} - export err=$?; err_chk - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - mv "${PGBOUT}" "${PGBOUT2}" - fi - - # Process pgb files - if [[ "${PGBF}" = 'YES' ]]; then - export FH=-1 - export downset=${downset:-2} - ${GFSDOWNSH} - export err=$?; err_chk - fi - - export fhr3=anl - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - MASTERANL=${PREFIX}master.grb2${fhr3} - MASTERANLIDX=${PREFIX}master.grb2i${fhr3} - cp "${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERANL}" - ${GRB2INDEX} 
"${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERANLIDX}" - fi - - if [[ "${SENDDBN}" = 'YES' ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_MSC_sfcanl "${job}" "${COM_ATMOS_ANALYSIS}/${PREFIX}sfcanl.nc" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_SA "${job}" "${COM_ATMOS_ANALYSIS}/${PREFIX}atmanl.nc" - if [[ "${PGBF}" = 'YES' ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P25 "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.anl" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P25_WIDX "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.anl.idx" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P25 "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2b.0p25.anl" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P25_WIDX "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2b.0p25.anl.idx" - - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P5 "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.anl" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P5_WIDX "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.anl.idx" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P5 "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2b.0p50.anl" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P5_WIDX "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2b.0p50.anl.idx" - - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_1P0 "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.anl" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_1P0_WIDX "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.anl.idx" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_1P0 "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2b.1p00.anl" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_1P0_WIDX "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2b.1p00.anl.idx" - fi - fi - [[ -f pgbfile.grib2 ]] && rm pgbfile.grib2 - # ecflow_client --event release_pgrb2_anl - else - #### atmanl file not found need failing job - echo " *** FATAL ERROR: No model anl file output " - export err=9 - err_chk - fi -else ## not_anl if_stime - SLEEP_LOOP_MAX=$(( SLEEP_TIME / SLEEP_INT )) - - 
############################################################ - # Loop Through the Post Forecast Files - ############################################################ - - for fhr3 in ${post_times}; do - echo "Start processing fhr=${fhr3}" - fhr=$(( 10#${fhr3} )) - ############################### - # Start Looping for the - # existence of the restart files - ############################### - export pgm="postcheck" - ic=1 - while (( ic <= SLEEP_LOOP_MAX )); do - if [[ -f "${restart_file}${fhr3}.txt" ]]; then - break - else - ic=$(( ic + 1 )) - sleep "${SLEEP_INT}" - fi - ############################### - # If we reach this point assume - # fcst job never reached restart - # period and error exit - ############################### - if (( ic == SLEEP_LOOP_MAX )); then - echo " *** FATAL ERROR: No model output for f${fhr3} " - export err=9 - err_chk - fi - done - - ############################### - # Put restart files into /nwges - # for backup to start Model Fcst - ############################### - [[ -f flxfile ]] && rm flxfile ; [[ -f nemsfile ]] && rm nemsfile - ln -fs "${COM_ATMOS_HISTORY}/${PREFIX}atmf${fhr3}.nc" nemsfile - export NEMSINP=nemsfile - ln -fs "${COM_ATMOS_HISTORY}/${PREFIX}sfcf${fhr3}.nc" flxfile - export FLXINP=flxfile - - if (( fhr > 0 )); then - export IGEN=${IGEN_FCST} - else - export IGEN=${IGEN_ANL} - fi - - # No shellcheck, NDATE is not a typo - # shellcheck disable=SC2153 - VDATE="$(${NDATE} "+${fhr}" "${PDY}${cyc}")" - # shellcheck disable= - export VDATE - export OUTTYP=${OUTTYP:-4} - export GFSOUT="${PREFIX}gfsio${fhr3}" - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - export POSTGRB2TBL="${POSTGRB2TBL:-${g2tmpl_ROOT}/share/params_grib2_tbl_new}" - export PostFlatFile="${PostFlatFile:-${PARMpost}/postxconfig-NT-GFS.txt}" - - if [[ "${RUN}" = "gfs" ]]; then - export IGEN=${IGEN_GFS} - if (( fhr > 0 )); then export IGEN=${IGEN_FCST} ; fi - else - export IGEN=${IGEN_GDAS_ANL} - if (( fhr > 0 )); then export IGEN=${IGEN_FCST} ; fi - fi - if [[ 
"${RUN}" = "gfs" ]]; then - if (( fhr == 0 )); then - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS-F00.txt" - export CTLFILE="${PARMpost}/postcntrl_gfs_f00.xml" - else - export CTLFILE="${CTLFILEGFS:-${PARMpost}/postcntrl_gfs.xml}" - fi - else - if (( fhr == 0 )); then - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS-F00.txt" - export CTLFILE="${CTLFILEGFS:-${PARMpost}/postcntrl_gfs_f00.xml}" - else - export CTLFILE="${CTLFILEGFS:-${PARMpost}/postcntrl_gfs.xml}" - fi - fi - fi - - export FLXIOUT=flxifile - export PGBOUT=pgbfile - export PGIOUT=pgifile - export PGBOUT2=pgbfile.grib2 - export PGIOUT2=pgifile.grib2.idx - export FILTER=0 - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - MASTERFL=${PREFIX}master.grb2f${fhr3} - MASTERFLIDX=${PREFIX}master.grb2if${fhr3} - fi - - if [[ "${INLINE_POST}" = ".false." ]]; then - ${POSTGPSH} - else - cp -p "${COM_ATMOS_MASTER}/${MASTERFL}" "${PGBOUT}" - fi - export err=$?; err_chk - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - mv "${PGBOUT}" "${PGBOUT2}" - fi - - # Process pgb files - if [[ "${PGBF}" = 'YES' ]]; then - export FH=$(( fhr )) - export downset=${downset:-2} - ${GFSDOWNSH} - export err=$?; err_chk - fi - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - if [[ "${INLINE_POST}" = ".false." 
]]; then - cp "${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERFL}" - fi - ${GRB2INDEX} "${PGBOUT2}" "${COM_ATMOS_MASTER}/${MASTERFLIDX}" - fi - - if [[ "${SENDDBN}" = 'YES' ]]; then - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - if [[ "${PGBF}" = 'YES' ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P25 "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.f${fhr3}" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P25_WIDX "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.f${fhr3}.idx" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P25 "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2b.0p25.f${fhr3}" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P25_WIDX "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2b.0p25.f${fhr3}.idx" - - if [[ -s "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.f${fhr3}" ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P5 "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.f${fhr3}" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_0P5_WIDX "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.f${fhr3}.idx" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P5 "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2b.0p50.f${fhr3}" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_0P5_WIDX "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2b.0p50.f${fhr3}.idx" - fi - - if [[ -s "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.f${fhr3}" ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_1P0 "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.f${fhr3}" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2_1P0_WIDX "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.f${fhr3}.idx" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_1P0 "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2b.1p00.f${fhr3}" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_PGB2B_1P0_WIDX "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2b.1p00.f${fhr3}.idx" - fi - fi - fi - fi - - export fhr - "${USHgfs}/gfs_transfer.sh" - [[ -f pgbfile.grib2 ]] && rm pgbfile.grib2 - - - # use post to generate GFS Grib2 Flux file as model generated Flux 
file - # will be in nemsio format after FY17 upgrade. - if (( OUTTYP == 4 )) && [[ "${FLXF}" == "YES" ]]; then - if (( fhr == 0 )); then - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS-FLUX-F00.txt" - export CTLFILE="${PARMpost}/postcntrl_gfs_flux_f00.xml" - else - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS-FLUX.txt" - export CTLFILE="${PARMpost}/postcntrl_gfs_flux.xml" - fi - export PGBOUT=fluxfile - export FILTER=0 - export FLUXFL=${PREFIX}sfluxgrbf${fhr3}.grib2 - FLUXFLIDX=${PREFIX}sfluxgrbf${fhr3}.grib2.idx - - if [[ "${INLINE_POST}" = ".false." ]]; then - ${POSTGPSH} - export err=$?; err_chk - mv fluxfile "${COM_ATMOS_MASTER}/${FLUXFL}" - fi - ${WGRIB2} -s "${COM_ATMOS_MASTER}/${FLUXFL}" > "${COM_ATMOS_MASTER}/${FLUXFLIDX}" - - #Add extra flux.1p00 file for coupled - if [[ "${FLXGF}" = 'YES' ]]; then - export FH=$(( fhr )) - ${GFSDOWNSHF} - export err=$?; err_chk - fi - - if [[ "${SENDDBN}" = 'YES' ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_SGB_GB2 "${job}" "${COM_ATMOS_MASTER}/${FLUXFL}" - "${DBNROOT}/bin/dbn_alert" MODEL GFS_SGB_GB2_WIDX "${job}" "${COM_ATMOS_MASTER}/${FLUXFLIDX}" - fi - fi - - # process satellite look alike separately so that master pgb gets out in time - # set outtyp to 2 because master post already generates gfs io files - if [[ "${GOESF}" = "YES" ]]; then - export OUTTYP=${OUTTYP:-4} - - # specify output file name from chgres which is input file name to nceppost - # if model already runs gfs io, make sure GFSOUT is linked to the gfsio file - # new imported variable for global_post.sh - - export GFSOUT=${PREFIX}gfsio${fhr3} - - # link satellite coefficients files, use hwrf version as ops crtm 2.0.5 - # does not new coefficient files used by post - export FIXCRTM="${FIXCRTM:-${CRTM_FIX}}" - "${USHgfs}/link_crtm_fix.sh" "${FIXCRTM}" - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - export PostFlatFile="${PARMpost}/postxconfig-NT-GFS-GOES.txt" - export CTLFILE="${PARMpost}/postcntrl_gfs_goes.xml" - fi - export 
FLXINP=flxfile - export FLXIOUT=flxifile - export PGBOUT=goesfile - export PGIOUT=goesifile - export FILTER=0 - export IO=0 - export JO=0 - export IGEN=0 - - if [[ "${NET}" = "gfs" ]]; then - ${POSTGPSH} - export err=$?; err_chk - fi - - if [[ "${GRIBVERSION}" = 'grib2' ]]; then - SPECIALFL="${PREFIX}special.grb2" - SPECIALFLIDX="${PREFIX}special.grb2i" - fi - - mv goesfile "${COM_ATMOS_GOES}/${SPECIALFL}f${fhr3}" - mv goesifile "${COM_ATMOS_GOES}/${SPECIALFLIDX}f${fhr3}" - - if [[ "${SENDDBN}" = "YES" ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_SPECIAL_GB2 "${job}" "${COM_ATMOS_GOES}/${SPECIALFL}f${fhr3}" - fi - fi - # end of satellite processing - done - - #---------------------------------- -fi ## end_if_stime - -exit 0 - -################## END OF SCRIPT ####################### diff --git a/scripts/exglobal_atmos_products.sh b/scripts/exglobal_atmos_products.sh new file mode 100755 index 0000000000..beeea99161 --- /dev/null +++ b/scripts/exglobal_atmos_products.sh @@ -0,0 +1,253 @@ +#! /usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" + +# Programs used +export WGRIB2=${WGRIB2:-${wgrib2_ROOT}/bin/wgrib2} + +# Scripts used +INTERP_ATMOS_MASTERSH=${INTERP_ATMOS_MASTERSH:-"${HOMEgfs}/ush/interp_atmos_master.sh"} +INTERP_ATMOS_SFLUXSH=${INTERP_ATMOS_SFLUXSH:-"${HOMEgfs}/ush/interp_atmos_sflux.sh"} + +# Variables used in this job +downset=${downset:-1} # No. of groups of pressure grib2 products to create +npe_atmos_products=${npe_atmos_products:-8} # no. of processors available to process each group + +cd "${DATA}" || exit 1 + +# Set paramlist files based on FORECAST_HOUR (-1, 0, 3, 6, etc.) 
+# Determine if supplemental products (PGBS) (1-degree and 1/2-degree) should be generated
+if (( FORECAST_HOUR <= 0 )); then
+  if (( FORECAST_HOUR < 0 )); then
+    fhr3="anl"
+    paramlista="${paramlista_anl}"
+    FLXGF="NO"
+  elif (( FORECAST_HOUR == 0 )); then
+    fhr3=$(printf "f%03d" "${FORECAST_HOUR}")
+    paramlista="${paramlista_f000}"
+  fi
+  PGBS="YES"
+else
+  fhr3=$(printf "f%03d" "${FORECAST_HOUR}")
+  if (( FORECAST_HOUR%FHOUT_PGBS == 0 )); then
+    PGBS="YES"
+  fi
+fi
+
+#-----------------------------------------------------
+# Section creating pressure grib2 interpolated products
+
+# Files needed by ${INTERP_ATMOS_MASTERSH}
+MASTER_FILE="${COM_ATMOS_MASTER}/${PREFIX}master.grb2${fhr3}"
+
+# Get inventory from ${MASTER_FILE} that matches patterns from ${paramlista}
+# Extract this inventory from ${MASTER_FILE} into a smaller tmpfile or tmpfileb based on paramlista or paramlistb
+# shellcheck disable=SC2312
+${WGRIB2} "${MASTER_FILE}" | grep -F -f "${paramlista}" | ${WGRIB2} -i -grib "tmpfile_${fhr3}" "${MASTER_FILE}"
+export err=$?; err_chk
+# Do the same as above for ${paramlistb}
+if (( downset == 2 )); then
+  # shellcheck disable=SC2312
+  ${WGRIB2} "${MASTER_FILE}" | grep -F -f "${paramlistb}" | ${WGRIB2} -i -grib "tmpfileb_${fhr3}" "${MASTER_FILE}"
+  export err=$?; err_chk
+fi
+
+# Determine grids once and save them as a string and an array for processing
+grid_string="0p25"
+if [[ "${PGBS:-}" == "YES" ]]; then
+  grid_string="${grid_string}:0p50:1p00"
+else
+  echo "Supplemental product generation is disabled for fhr = ${fhr3}"
+  PGBS="NO"  # Can't generate supplemental products if PGBS is not YES
+fi
+# Also transform the ${grid_string} into an array for processing
+IFS=':' read -ra grids <<< "${grid_string}"
+
+for (( nset=1 ; nset <= downset ; nset++ )); do
+
+  echo "Begin processing nset = ${nset}"
+
+  # Number of processors available to process $nset
+  nproc=${npe_atmos_products}
+
+  # Each set represents a group of files
+  if (( nset == 1 )); then
+    grp=""
# TODO: this should be "a" when we eventually rename the pressure grib2 files per EE2 convention + elif (( nset == 2 )); then + grp="b" + fi + + # process grib2 chunkfiles to interpolate using MPMD + tmpfile="tmpfile${grp}_${fhr3}" + + # shellcheck disable=SC2312 + ncount=$(${WGRIB2} "${tmpfile}" | wc -l) + if (( nproc > ncount )); then + echo "WARNING: Total no. of available processors '${nproc}' exceeds no. of records '${ncount}' in ${tmpfile}" + echo "Reduce nproc to ${ncount} (or less) to not waste resources" + fi + inv=$(( ncount / nproc )) + rm -f "${DATA}/poescript" + + last=0 + for (( iproc = 1 ; iproc <= nproc ; iproc++ )); do + first=$((last + 1)) + last=$((last + inv)) + if (( last > ncount )); then (( last = ncount )); fi + + # if final record of is u-component, add next record v-component + # if final record is land, add next record icec + # grep returns 1 if no match is found, so temporarily turn off exit on non-zero rc + set +e + # shellcheck disable=SC2312 + ${WGRIB2} -d "${last}" "${tmpfile}" | grep -E -i "ugrd|ustm|uflx|u-gwd|land" + rc=$? 
+      set_strict
+      if (( rc == 0 )); then  # Matched the grep
+        last=$(( last + 1 ))
+      fi
+      if (( iproc == nproc )); then
+        last=${ncount}
+      fi
+
+      # Break tmpfile into processor specific chunks in preparation for MPMD
+      ${WGRIB2} "${tmpfile}" -for "${first}":"${last}" -grib "${tmpfile}_${iproc}"
+      export err=$?; err_chk
+      input_file="${tmpfile}_${iproc}"
+      output_file_prefix="pgb2${grp}file_${fhr3}_${iproc}"
+      echo "${INTERP_ATMOS_MASTERSH} ${input_file} ${output_file_prefix} ${grid_string}" >> "${DATA}/poescript"
+
+      # if at final record and have not reached the final processor then write echo's to
+      # poescript for remaining processors
+      if (( last == ncount )); then
+        for (( pproc = iproc+1 ; pproc < nproc ; pproc++ )); do
+          echo "/bin/echo ${pproc}" >> "${DATA}/poescript"
+        done
+        break
+      fi
+  done  # for (( iproc = 1 ; iproc <= nproc ; iproc++ )); do
+
+  # Run with MPMD or serial
+  if [[ "${USE_CFP:-}" = "YES" ]]; then
+    "${HOMEgfs}/ush/run_mpmd.sh" "${DATA}/poescript"
+    export err=$?
+  else
+    chmod 755 "${DATA}/poescript"
+    bash +x "${DATA}/poescript" > mpmd.out 2>&1
+    export err=$?
+ fi + err_chk + + # We are in a loop over downset, save output from mpmd into nset specific output + cat mpmd.out # so we capture output into the main logfile + mv mpmd.out "mpmd_${nset}.out" + + # Concatenate grib files from each processor into a single one + # and clean-up as you go + echo "Concatenating processor-specific grib2 files into a single product file" + for (( iproc = 1 ; iproc <= nproc ; iproc++ )); do + for grid in "${grids[@]}"; do + cat "pgb2${grp}file_${fhr3}_${iproc}_${grid}" >> "pgb2${grp}file_${fhr3}_${grid}" + rm "pgb2${grp}file_${fhr3}_${iproc}_${grid}" + done + # There is no further use of the processor specific tmpfile; delete it + rm "${tmpfile}_${iproc}" + done + + # Move to COM and index the product grib files + for grid in "${grids[@]}"; do + prod_dir="COM_ATMOS_GRIB_${grid}" + ${NCP} "pgb2${grp}file_${fhr3}_${grid}" "${!prod_dir}/${PREFIX}pgrb2${grp}.${grid}.${fhr3}" + ${WGRIB2} -s "pgb2${grp}file_${fhr3}_${grid}" > "${!prod_dir}/${PREFIX}pgrb2${grp}.${grid}.${fhr3}.idx" + done + + echo "Finished processing nset = ${nset}" + +done # for (( nset=1 ; nset <= downset ; nset++ )) + +#--------------------------------------------------------------- + +# Section creating slfux grib2 interpolated products +# Create 1-degree sflux grib2 output +# move to COM and index it +if [[ "${FLXGF:-}" == "YES" ]]; then + + # Files needed by ${INTERP_ATMOS_SFLUXSH} + FLUX_FILE="${COM_ATMOS_MASTER}/${PREFIX}sfluxgrb${fhr3}.grib2" + + input_file="${FLUX_FILE}" + output_file_prefix="sflux_${fhr3}" + grid_string="1p00" + "${INTERP_ATMOS_SFLUXSH}" "${input_file}" "${output_file_prefix}" "${grid_string}" + export err=$?; err_chk + + # Move to COM and index the product sflux file + IFS=':' read -ra grids <<< "${grid_string}" + for grid in "${grids[@]}"; do + prod_dir="COM_ATMOS_GRIB_${grid}" + ${NCP} "sflux_${fhr3}_${grid}" "${!prod_dir}/${PREFIX}flux.${grid}.${fhr3}" + ${WGRIB2} -s "sflux_${fhr3}_${grid}" > "${!prod_dir}/${PREFIX}flux.${grid}.${fhr3}.idx" + 
done
+fi
+
+#---------------------------------------------------------------
+
+# Start sending DBN alerts
+# Everything below this line is for sending files to DBN (SENDDBN=YES)
+if [[ "${SENDDBN:-}" == "YES" ]]; then
+  "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2_0P25" "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.${fhr3}"
+  "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2_0P25_WIDX" "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2.0p25.${fhr3}.idx"
+  if [[ "${RUN}" == "gfs" ]]; then
+    "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2B_0P25" "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2b.0p25.${fhr3}"
+    "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2B_0P25_WIDX" "${job}" "${COM_ATMOS_GRIB_0p25}/${PREFIX}pgrb2b.0p25.${fhr3}.idx"
+    if [[ -s "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.${fhr3}" ]]; then
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2_0P5" "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.${fhr3}"
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2_0P5_WIDX" "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2.0p50.${fhr3}.idx"
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2B_0P5" "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2b.0p50.${fhr3}"
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2B_0P5_WIDX" "${job}" "${COM_ATMOS_GRIB_0p50}/${PREFIX}pgrb2b.0p50.${fhr3}.idx"
+    fi
+    if [[ -s "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.${fhr3}" ]]; then
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2_1P0" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.${fhr3}"
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2_1P0_WIDX" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.${fhr3}.idx"
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2B_1P0" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2b.1p00.${fhr3}"
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB2B_1P0_WIDX" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2b.1p00.${fhr3}.idx"
+    fi
+  fi
+
+  if [[ "${fhr3}" == "anl" ]]; then
+
+    "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_MSC_sfcanl" "${job}" "${COM_ATMOS_ANALYSIS}/${PREFIX}sfc${fhr3}.nc"
+    "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_SA" "${job}" "${COM_ATMOS_ANALYSIS}/${PREFIX}atm${fhr3}.nc"
+
+    "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGA_GB2" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.${fhr3}"
+    "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGA_GB2_WIDX" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.${fhr3}.idx"
+
+  else  # forecast hours f000, f003, f006, etc.
+
+    if [[ "${RUN}" == "gdas" ]]; then
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB_GB2" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.${fhr3}"
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_PGB_GB2_WIDX" "${job}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb2.1p00.${fhr3}.idx"
+      if (( FORECAST_HOUR % 3 == 0 )); then
+        "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_SF" "${job}" "${COM_ATMOS_HISTORY}/${PREFIX}atm${fhr3}.nc"
+        "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_BF" "${job}" "${COM_ATMOS_HISTORY}/${PREFIX}sfc${fhr3}.nc"
+        "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_SGB_GB2" "${job}" "${COM_ATMOS_MASTER}/${PREFIX}sfluxgrb${fhr3}.grib2"
+        "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_SGB_GB2_WIDX" "${job}" "${COM_ATMOS_MASTER}/${PREFIX}sfluxgrb${fhr3}.grib2.idx"
+      fi
+    elif [[ "${RUN}" == "gfs" ]]; then
+
+      "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_SF" "${job}" "${COM_ATMOS_HISTORY}/${PREFIX}atm${fhr3}.nc"
+      if (( FORECAST_HOUR > 0 && FORECAST_HOUR <= 84 )) || (( FORECAST_HOUR == 120 )); then
+        "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_BF" "${job}" "${COM_ATMOS_HISTORY}/${PREFIX}sfc${fhr3}.nc"
+      fi
+
+      if [[ -s "${COM_ATMOS_MASTER}/${PREFIX}sfluxgrb${fhr3}.grib2" ]]; then
+        "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_SGB_GB2" "${job}" "${COM_ATMOS_MASTER}/${PREFIX}sfluxgrb${fhr3}.grib2"
+        "${DBNROOT}/bin/dbn_alert" MODEL "${RUN^^}_SGB_GB2_WIDX" "${job}" "${COM_ATMOS_MASTER}/${PREFIX}sfluxgrb${fhr3}.grib2.idx"
+      fi
+    fi  # end if RUN=gfs
+
+  fi  # end if fhr3=anl
+
+fi  # end if SENDDBN=YES
+
+exit 0
diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh
index
2c37046da1..4558bed4d5 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -41,6 +41,14 @@ while getopts ":ho" option; do done shift $((OPTIND-1)) +# LINK is always ln, LINK_OR_COPY can be ln or cp depending on RUN_ENVIR being emc or nco, respectively +LINK="ln -fs" +if [[ "${RUN_ENVIR}" == "nco" ]]; then + LINK_OR_COPY="cp -rp" +else + LINK_OR_COPY="ln -fs" +fi + # shellcheck disable=SC1091 COMPILER="intel" source "${HOMEgfs}/sorc/gfs_utils.fd/ush/detect_machine.sh" # (sets MACHINE_ID) # shellcheck disable= @@ -49,15 +57,8 @@ machine=$(echo "${MACHINE_ID}" | cut -d. -f1) #------------------------------ #--Set up build.ver and run.ver #------------------------------ -if [[ "${machine}" == "wcoss2" ]]; then - cp "${HOMEgfs}/versions/build.${machine}.ver" "${HOMEgfs}/versions/build.ver" - cp "${HOMEgfs}/versions/run.${machine}.ver" "${HOMEgfs}/versions/run.ver" -else - cp "${HOMEgfs}/versions/build.spack.ver" "${HOMEgfs}/versions/build.ver" - cp "${HOMEgfs}/versions/run.spack.ver" "${HOMEgfs}/versions/run.ver" - cat "${HOMEgfs}/versions/build.${machine}.ver" >> "${HOMEgfs}/versions/build.ver" - cat "${HOMEgfs}/versions/run.${machine}.ver" >> "${HOMEgfs}/versions/run.ver" -fi +${LINK_OR_COPY} "${HOMEgfs}/versions/build.${machine}.ver" "${HOMEgfs}/versions/build.ver" +${LINK_OR_COPY} "${HOMEgfs}/versions/run.${machine}.ver" "${HOMEgfs}/versions/run.ver" #------------------------------ #--model fix fields @@ -77,15 +78,6 @@ esac # Source fix version file source "${HOMEgfs}/versions/fix.ver" -# LINK is always ln, LINK_OR_COPY can be ln or cp depending on RUN_EVNVIR being emc or nco, respectively -LINK="ln -fs" -if [[ "${RUN_ENVIR}" == "nco" ]]; then - LINK_OR_COPY="cp -rp" -else - LINK_OR_COPY="ln -fs" -fi - - # Link wxflow in ush/python, workflow and ci/scripts # TODO: This will be unnecessary when wxflow is part of the virtualenv cd "${HOMEgfs}/ush/python" || exit 1 diff --git a/ush/fv3gfs_downstream_nems.sh b/ush/fv3gfs_downstream_nems.sh deleted file 
mode 100755 index c09b9a7d3f..0000000000 --- a/ush/fv3gfs_downstream_nems.sh +++ /dev/null @@ -1,179 +0,0 @@ -#! /usr/bin/env bash - -source "${HOMEgfs}/ush/preamble.sh" "${FH}" - -# Programs used -export WGRIB2=${WGRIB2:-${wgrib2_ROOT}/bin/wgrib2} -CNVGRIB=${CNVGRIB:-${grib_util_ROOT}/bin/cnvgrib} -GRBINDEX=${GRBINDEX:-${wgrib2_ROOT}/bin/grbindex} - -# Scripts used -GFSDWNSH=${GFSDWNSH:-"${HOMEgfs}/ush/fv3gfs_dwn_nems.sh"} - -# variables used here and in $GFSDWNSH -PGBOUT2=${PGBOUT2:-"master.grib2"} # grib2 file from UPP -FH=$(( ${FH:-0} )) # Forecast hour to process -FHOUT_PGB=${FHOUT_PGB:-3} # Output frequency of GFS PGB file at 1-degree and 0.5 degree -npe_dwn=${npe_dwn:-24} -downset=${downset:-1} -PREFIX=${PREFIX:-"${RUN:-gfs}.t${cyc}z."} -PGBS=${PGBS:-"NO"} # YES - generate 1 and 1/2-degree grib2 data -PGB1F=${PGB1F:-"NO"} # YES - generate 1-degree grib1 data - -# Files used -if (( FH == -1 )); then - fhr3="anl" - PGBS="YES" - paramlista=${paramlist:-"${HOMEgfs}/parm/post/global_1x1_paramlist_g2.anl"} -elif (( FH == 0 )); then - fhr3="f000" - PGBS="YES" - paramlista=${paramlist:-"${HOMEgfs}/parm/post/global_1x1_paramlist_g2.f000"} -else - fhr3=$(printf "f%03d" "${FH}") - if (( FH%FHOUT_PGB == 0 )); then - PGBS="YES" - fi - paramlista=${paramlist:-"${HOMEgfs}/parm/post/global_1x1_paramlist_g2"} -fi -paramlistb=${paramlistb:-"${HOMEgfs}/parm/post/global_master-catchup_parmlist_g2"} - -# Get inventory from ${PGBOUT2} that matches patterns from ${paramlista} -# Extract this inventory from ${PGBOUT2} into a smaller tmpfile or tmpfileb based on paramlista or paramlistb -# shellcheck disable=SC2312 -${WGRIB2} "${PGBOUT2}" | grep -F -f "${paramlista}" | ${WGRIB2} -i -grib "tmpfile_${fhr3}" "${PGBOUT2}" -export err=$?; err_chk -# Do the same as above for ${paramlistb} -if (( downset = 2 )); then - # shellcheck disable=SC2312 - ${WGRIB2} "${PGBOUT2}" | grep -F -f "${paramlistb}" | ${WGRIB2} -i -grib "tmpfileb_${fhr3}" "${PGBOUT2}" - export err=$?; err_chk -fi - -# 
Determine grids once and save them as a string and an array for processing -grid_string="0p25" -if [[ "${PGBS}" = "YES" ]]; then - grid_string="${grid_string}:0p50:1p00" -fi -# Also transform the ${grid_string} into an array for processing -IFS=':' read -ra grids <<< "${grid_string}" - -#----------------------------------------------------- -nproc=${nproc:-${npe_dwn}} - -#.............................................. -for (( nset=1 ; nset <= downset ; nset++ )); do - - echo "Begin processing nset = ${nset}" - - # Each set represents a group of files - if (( nset == 1 )); then - grp="" # TODO: this should be "a" when we eventually rename the pressure grib2 files per EE2 convention - elif (( nset == 2 )); then - grp="b" - fi - - # process Grib files to run downstream jobs using MPMD - tmpfile="tmpfile${grp}_${fhr3}" - - # shellcheck disable=SC2312 - ncount=$(${WGRIB2} "${tmpfile}" | wc -l) - if (( nproc > ncount )); then - echo "FATAL ERROR: Total number of records in ${tmpfile} is not right" # No, the no. of records < no. of processors - export err=8 - err_chk - fi - inv=$(( ncount / nproc )) - rm -f "${DATA}/poescript" - - last=0 - for (( iproc = 1 ; iproc <= nproc ; iproc++ )); do - first=$((last + 1)) - last=$((last + inv)) - if (( last > ncount )); then (( last = ncount )); fi - - # if final record of is u-component, add next record v-component - # if final record is land, add next record icec - # grep returns 1 if no match is found, so temporarily turn off exit on non-zero rc - set +e - # shellcheck disable=SC2312 - ${WGRIB2} -d "${last}" "${tmpfile}" | grep -E -i "ugrd|ustm|uflx|u-gwd|land" - rc=$? 
- set_strict - if (( rc == 0 )); then # Matched the grep - last=$(( last + 1 )) - fi - if (( iproc == nproc )); then - last=${ncount} - fi - - # Break tmpfile into processor specific chunks in preparation for MPMD - ${WGRIB2} "${tmpfile}" -for "${first}":"${last}" -grib "${tmpfile}_${iproc}" - export err=$?; err_chk - input_file="${tmpfile}_${iproc}" - output_file_prefix="pgb2${grp}file_${fhr3}_${iproc}" - echo "${GFSDWNSH} ${input_file} ${output_file_prefix} ${grid_string}" >> "${DATA}/poescript" - - # if at final record and have not reached the final processor then write echo's to - # poescript for remaining processors - if (( last == ncount )); then - for (( pproc = iproc+1 ; pproc < nproc ; pproc++ )); do - echo "/bin/echo ${pproc}" >> "${DATA}/poescript" - done - break - fi - done # for (( iproc = 1 ; iproc <= nproc ; iproc++ )); do - - # Run with MPMD or serial - if [[ "${USE_CFP:-}" = "YES" ]]; then - "${HOMEgfs}/ush/run_mpmd.sh" "${DATA}/poescript" - export err=$? - else - chmod 755 "${DATA}/poescript" - bash +x "${DATA}/poescript" 2>&1 mpmd.out - export err=$? 
- fi - err_chk - - # We are in a loop over downset, save output from mpmd into nset specific output - cat mpmd.out # so we capture output into the main logfile - mv mpmd.out "mpmd_${nset}.out" - - # Concatenate grib files from each processor into a single one - # and clean-up as you go - echo "Concatenating processor specific grib2 files into a single product" - for (( iproc = 1 ; iproc <= nproc ; iproc++ )); do - for grid in "${grids[@]}"; do - cat "pgb2${grp}file_${fhr3}_${iproc}_${grid}" >> "pgb2${grp}file_${fhr3}_${grid}" - rm "pgb2${grp}file_${fhr3}_${iproc}_${grid}" - done - # There is no further use of the processor specific tmpfile; delete it - rm "${tmpfile}_${iproc}" - done - - # Move to COM and index the product grib files - for grid in "${grids[@]}"; do - prod_dir="COM_ATMOS_GRIB_${grid}" - ${NCP} "pgb2${grp}file_${fhr3}_${grid}" "${!prod_dir}/${PREFIX}pgrb2${grp}.${grid}.${fhr3}" - ${WGRIB2} -s "pgb2${grp}file_${fhr3}_${grid}" > "${!prod_dir}/${PREFIX}pgrb2${grp}.${grid}.${fhr3}.idx" - done - - # Create supplemental 1-degree grib1 output TODO: who needs 1-degree grib1 product? - # move to COM and index it - if (( nset == 1 )); then - if [[ "${PGBS}" = "YES" ]]; then - if [[ "${PGB1F}" = "YES" ]]; then - ${CNVGRIB} -g21 "pgb2${grp}file_${fhr3}_1p00" "pgb${grp}file_${fhr3}_1p00" - export err=$?; err_chk - ${NCP} "pgb${grp}file_${fhr3}_1p00" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb${grp}.1p00.${fhr3}" - ${GRBINDEX} "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb${grp}.1p00.${fhr3}" "${COM_ATMOS_GRIB_1p00}/${PREFIX}pgrb${grp}.1p00.${fhr3}.idx" - fi - fi - fi - - echo "Finished processing nset = ${nset}" - - -done # for (( nset=1 ; nset <= downset ; nset++ )) - -exit 0 diff --git a/ush/gfs_post.sh b/ush/gfs_post.sh deleted file mode 100755 index 7249ae7555..0000000000 --- a/ush/gfs_post.sh +++ /dev/null @@ -1,409 +0,0 @@ -#! /usr/bin/env bash - -################################################################################ -#### UNIX Script Documentation Block -# . . 
-# Script name: gfs_post.sh -# Script description: Posts the global pressure GRIB file -# -# Author: Mark Iredell Org: NP23 Date: 1999-05-01 -# -# Abstract: This script reads a single global GFS IO file and (optionally) -# a global flux file and creates a global pressure GRIB file. -# The resolution and generating code of the output GRIB file can also -# be set in the argument list. -# -# Script history log: -# 1999-05-01 Mark Iredell -# 2007-04-04 Huiya Chuang: Modify the script to run unified post -# 2012-06-04 Jun Wang: add grib2 option -# 2015-03-20 Lin Gan: add Perl for Post XML performance upgrade -# 2016-02-08 Lin Gan: Modify to use Vertical Structure -# 2018-02-05 Wen Meng: For EE2 standard, create gfs_post.sh based -# global_post.sh and change EXECglobal to EXECgfs; -# Remove legacy setting for reading non-nemsio model output -# and generating grib1 data -# 2019-06-02 Wen Meng: Remove the links of gfs fix files. -# -# Usage: global_postgp.sh SIGINP FLXINP FLXIOUT PGBOUT PGIOUT IGEN -# -# Input script positional parameters: -# 1 Input sigma file -# defaults to $SIGINP -# 2 Input flux file -# defaults to $FLXINP -# 3 Output flux index file -# defaults to $FLXIOUT -# 4 Output pressure GRIB file -# defaults to $PGBOUT -# 5 Output pressure GRIB index file -# defaults to $PGIOUT, then to none -# 8 Model generating code, -# defaults to $IGEN, then to input sigma generating code -# -# Imported Shell Variables: -# SIGINP Input sigma file -# overridden by $1 -# FLXINP Input flux file -# overridden by $2 -# FLXIOUT Output flux index file -# overridden by $3 -# PGBOUT Output pressure GRIB file -# overridden by $4. 
If not defined, -# post will use the filename specified in -# the control file -# PGIOUT Output pressure GRIB index file -# overridden by $5; defaults to none -# IGEN Model generating code -# overridden by $8; defaults to input sigma generating code -##### Moorthi: Add new imported shell variable for running chgres -# CHGRESSH optional: the script to run chgres -# default to to ${USHglobal}/global_chgres.sh -# SIGLEVEL optional: the coordinate text file -# default to to /nwprod/fix/global_hyblev.l${LEVS}.txt -##### Chuang: Add new imported Shell Variable for post -# OUTTYP Output file type read in by post -# 1: if user has a sigma file and needs post to run chgres to convert to gfs io file -# 2: if user already has a gfs io file -# 3: if user uses post to read sigma file directly -# 0: if user wishes to generate both gfsio and sigma files -# 4: if user uses post to read nemsio file directly -# VDATE Verifying date 10 digits yyyymmddhh -# GFSOUT Optional, output file name from chgres which is input file name to post -# if model already runs gfs io, make sure GFSOUT is linked to the gfsio file -# CTLFILE Optional, Your version of control file if not using operational one -# OVERPARMEXEC Optional, the executable for changing Grib KPDS ID -# default to to ${EXECglobal}/overparm_grib -# CHGRESTHREAD Optional, speed up chgres by using multiple threads -# default to 1 -# FILTER Optional, set to 1 to filter SLP and 500 mb height using copygb -# D3DINP Optional, Inout D3D file, if not defined, post will run -# without processing D3D file -# D3DOUT Optional, output D3D file, if not defined, post will -# use the file name specified in the control file -# IPVOUT Optional, output IPV file, if not defined, post will -# use the file name specified in the control file -# GENPSICHI Optional, set to YES will generate psi and chi and -# append it to the end of PGBOUT. Default to NO -# GENPSICHIEXE Optional, specify where executable is for generating -# psi and chi. 
-######################################################################## -# EXECUTIL Directory for utility executables -# defaults to /nwprod/util/exec -# USHUTIL Directory for utility scripts -# defaults to /nwprod/util/ush -# EXECglobal Directory for global executables -# defaults to /nwprod/exec -# USHglobal Directory for global scripts -# defaults to /nwprod/ush -# DATA working directory -# (if nonexistent will be made, used and deleted) -# defaults to current working directory -# MP Multi-processing type ("p" or "s") -# defaults to "p", or "s" if LOADL_STEP_TYPE is not PARALLEL -# XC Suffix to add to executables -# defaults to none -# POSTGPEXEC Global post executable -# defaults to ${EXECglobal}/upp.x -# GRBINDEX GRIB index maker -# defaults to ${EXECUTIL}/grbindex$XC -# POSTGPLIST File containing further namelist inputs -# defaults to /dev/null -# INISCRIPT Preprocessing script -# defaults to none -# LOGSCRIPT Log posting script -# defaults to none -# ERRSCRIPT Error processing script -# defaults to 'eval [[ $err = 0 ]]' -# ENDSCRIPT Postprocessing script -# defaults to none -# POSTGPVARS Other namelist inputs to the global post executable -# such as IDRT,KO,PO,KTT,KT,PT,KZZ,ZZ, -# NCPUS,MXBIT,IDS,POB,POT,MOO,MOOA,MOW,MOWA, -# ICEN,ICEN2,IENST,IENSI -# defaults to none set -# NTHREADS Number of threads -# defaults to 1 -# NTHSTACK Size of stack per thread -# defaults to 64000000 -# VERBOSE Verbose flag (YES or NO) -# defaults to NO -# PGMOUT Executable standard output -# defaults to $pgmout, then to '&1' -# PGMERR Executable standard error -# defaults to $pgmerr, then to '&1' -# pgmout Executable standard output default -# pgmerr Executable standard error default -# REDOUT standard output redirect ('1>' or '1>>') -# defaults to '1>', or to '1>>' to append if $PGMOUT is a file -# REDERR standard error redirect ('2>' or '2>>') -# defaults to '2>', or to '2>>' to append if $PGMERR is a file -# -# Exported Shell Variables: -# PGM Current program name -# pgm -# 
ERR Last return code -# err -# -# Modules and files referenced: -# scripts : $INISCRIPT -# $LOGSCRIPT -# $ERRSCRIPT -# $ENDSCRIPT -# -# programs : $POSTGPEXEC -# $GRBINDEX -# -# input data : $1 or $SIGINP -# $2 or $SFCINP -# $POSTGPLIST -# -# output data: $3 or $FLXIOUT -# $4 or $PGBOUT -# $5 or $PGIOUT -# $PGMOUT -# $PGMERR -# -# scratch : ${DATA}/postgp.inp.sig -# ${DATA}/postgp.inp.flx -# ${DATA}/postgp.out.pgb -# -# Remarks: -# -# Condition codes -# 0 - no problem encountered -# >0 - some problem encountered -# -# Control variable resolution priority -# 1 Command line argument. -# 2 Environment variable. -# 3 Inline default. -# -# Attributes: -# Language: POSIX shell -# Machine: IBM SP -# -#### -################################################################################ -# Set environment. -source "${HOMEgfs}/ush/preamble.sh" - -# Command line arguments. -export SIGINP=${1:-${SIGINP:-}} -export FLXINP=${2:-${FLXINP:-}} -export FLXIOUT=${3:-${FLXIOUT:-}} -export PGBOUT=${4:-${PGBOUT:-}} -#export PGIOUT=${5:-${PGIOUT}} -export PGIOUT=${PGIOUT:-pgb.idx} -export IO=${6:-${IO:-0}} -export JO=${7:-${JO:-0}} -export IGEN=${8:-${IGEN:-0}} -# Directories. -export NWPROD=${NWPROD:-/nwprod} -#export EXECUTIL=${EXECUTIL:-${NWPROD}/util/exec} -export USHUTIL=${USHUTIL:-${NWPROD}/util/ush} -export EXECgfs=${EXECgfs:-${NWPROD}/exec} -export USHgfs=${USHgfs:-${NWPROD}/ush} -export DATA=${DATA:-$(pwd)} -# Filenames. 
-export MP=${MP:-$([[ ${LOADL_STEP_TYPE:-SERIAL} = PARALLEL ]]&&echo "p"||echo "s")} -export XC=${XC:-} -export POSTGPEXEC=${POSTGPEXEC:-${EXECgfs}/upp.x} -export OVERPARMEXEC=${OVERPARMEXEC:-${EXECgfs}/overparm_grib} -export POSTGPLIST=${POSTGPLIST:-/dev/null} -export INISCRIPT=${INISCRIPT:-} -# Ignore warning about single quote not subtituting now -# shellcheck disable=SC2016 -export ERRSCRIPT=${ERRSCRIPT:-'eval (( err == 0 ))'} -# shellcheck disable= -export LOGSCRIPT=${LOGSCRIPT:-} -export ENDSCRIPT=${ENDSCRIPT:-} -export GFSOUT=${GFSOUT:-gfsout} -export CTLFILE=${CTLFILE:-${NWPROD}/parm/gfs_cntrl.parm} -export GRIBVERSION=${GRIBVERSION:-'grib1'} -# Other variables. -export POSTGPVARS=${POSTGPVARS} -export NTHREADS=${NTHREADS:-1} -export NTHSTACK=${NTHSTACK:-64000000} -export PGMOUT=${PGMOUT:-${pgmout:-'&1'}} -export PGMERR=${PGMERR:-${pgmerr:-'&2'}} -export CHGRESTHREAD=${CHGRESTHREAD:-1} -export FILTER=${FILTER:-0} -export GENPSICHI=${GENPSICHI:-NO} -export GENPSICHIEXE=${GENPSICHIEXE:-${EXECgfs}/genpsiandchi} -export ens=${ens:-NO} -#export D3DINP=${D3DINP:-/dev/null} -l="$(echo "${PGMOUT}" | xargs | cut -c1)" -[[ ${l} = '&' ]]&&a=''||a='>' -export REDOUT=${REDOUT:-'1>'${a}} -l="$(echo "${PGMERR}" | xargs | cut -c1)" -[[ ${l} = '&' ]]&&a=''||a='>' -export REDERR=${REDERR:-'2>'${a}} -################################################################################ - -# Chuang: Run chgres if OUTTYP=1 or 0 - -export APRUN=${APRUNP:-${APRUN:-""}} - -# exit if NEMSINP does not exist -if (( OUTTYP == 4 )) ; then - if [ ! -s "${NEMSINP}" ] || [ ! 
-s "${FLXINP}" ] ; then - echo "model files not found, exitting" - exit 111 - fi -fi - -export SIGHDR=${SIGHDR:-${NWPROD}/exec/global_sighdr} -export IDRT=${IDRT:-4} - -# run post to read file if OUTTYP=4 -if (( OUTTYP == 4 )) ; then - export MODEL_OUT_FORM=${MODEL_OUT_FORM:-netcdfpara} - export GFSOUT=${NEMSINP} -fi - -# allow threads to use threading in Jim's sp lib -# but set default to 1 -export OMP_NUM_THREADS=${OMP_NUM_THREADS:-1} - -pwd=$(pwd) -if [[ -d "${DATA}" ]]; then - mkdata=NO -else - mkdir -p "${DATA}" - mkdata=YES -fi -cd "${DATA}" || exit 99 -################################################################################ -# Post GRIB -export PGM=${POSTGPEXEC} -export pgm=${PGM} -${LOGSCRIPT} -cat <<-EOF >postgp.inp.nml$$ - &NAMPGB - ${POSTGPVARS} -EOF - -cat <<-EOF >>postgp.inp.nml$$ - / -EOF - -if [[ "${VERBOSE}" = "YES" ]]; then - cat postgp.inp.nml$$ -fi - -# making the time stamp format for ncep post -YY=$(echo "${VDATE}" | cut -c1-4) -MM=$(echo "${VDATE}" | cut -c5-6) -DD=$(echo "${VDATE}" | cut -c7-8) -HH=$(echo "${VDATE}" | cut -c9-10) -export YY MM DD HH - -cat > itag <<-EOF - &model_inputs - fileName='${GFSOUT}' - IOFORM='${MODEL_OUT_FORM}' - grib='${GRIBVERSION}' - DateStr='${YY}-${MM}-${DD}_${HH}:00:00' - MODELNAME='GFS' - fileNameFlux='${FLXINP}' - / -EOF - -cat postgp.inp.nml$$ >> itag - -cat itag - -rm -f fort.* - -#ln -sf $SIGINP postgp.inp.sig$$ -#ln -sf $FLXINP postgp.inp.flx$$ -#ln -sf $PGBOUT postgp.out.pgb$$ - -# change model generating Grib number -if [ "${GRIBVERSION}" = "grib2" ]; then - cp "${POSTGRB2TBL}" . 
- cp "${PostFlatFile}" ./postxconfig-NT.txt - if [ "${ens}" = "YES" ] ; then - sed < "${PostFlatFile}" -e "s#negatively_pert_fcst#${ens_pert_type}#" > ./postxconfig-NT.txt - fi - # cp ${CTLFILE} postcntrl.xml -fi -CTL=$(basename "${CTLFILE}") -export CTL - -ln -sf griddef.out fort.110 -cp "${PARMpost}/nam_micro_lookup.dat" ./eta_micro_lookup.dat - -echo "gfs_post.sh OMP_NUM_THREADS= ${OMP_NUM_THREADS}" -${APRUN:-mpirun.lsf} "${POSTGPEXEC}" < itag > "outpost_gfs_${VDATE}_${CTL}" - -export ERR=$? -export err=${ERR} - -${ERRSCRIPT} || exit 2 - -if [ "${FILTER}" = "1" ] ; then - # Filter SLP and 500 mb height using copygb, change GRIB ID, and then - # cat the filtered fields to the pressure GRIB file, from Iredell - - if [ "${GRIBVERSION}" = "grib2" ]; then - if [ "${ens}" = "YES" ] ; then - "${COPYGB2}" -x -i'4,0,80' -k'1 3 0 7*-9999 101 0 0' "${PGBOUT}" tfile - export err=$?; err_chk - else - "${COPYGB2}" -x -i'4,0,80' -k'0 3 0 7*-9999 101 0 0' "${PGBOUT}" tfile - export err=$?; err_chk - fi - ${WGRIB2} tfile -set_byte 4 11 1 -grib prmsl - export err=$?; err_chk - if [ "${ens}" = "YES" ] ; then - "${COPYGB2}" -x -i'4,1,5' -k'1 3 5 7*-9999 100 0 50000' "${PGBOUT}" tfile - export err=$?; err_chk - else - "${COPYGB2}" -x -i'4,1,5' -k'0 3 5 7*-9999 100 0 50000' "${PGBOUT}" tfile - export err=$?; err_chk - fi - ${WGRIB2} tfile -set_byte 4 11 193 -grib h5wav - export err=$?; err_chk - - #cat $PGBOUT prmsl h5wav >> $PGBOUT - #wm - # cat prmsl h5wav >> $PGBOUT - [[ -f prmsl ]] && rm prmsl - [[ -f h5wav ]] && rm h5wav - [[ -f tfile ]] && rm tfile - fi -fi - -################################################################################ -# Make GRIB index file -if [[ -n "${PGIOUT}" ]]; then - if [ "${GRIBVERSION}" = "grib2" ]; then - ${GRB2INDEX} "${PGBOUT}" "${PGIOUT}" - fi -fi -if [[ -r ${FLXINP} && -n ${FLXIOUT} && ${OUTTYP} -le 3 ]]; then - ${GRBINDEX} "${FLXINP}" "${FLXIOUT}" -fi -################################################################################ -# 
generate psi and chi -echo "GENPSICHI = ${GENPSICHI}" -if [ "${GENPSICHI}" = "YES" ] ; then - #echo "PGBOUT PGIOUT=" $PGBOUT $PGIOUT - #echo "YY MM=" $YY $MM - export psichifile=./psichi.grb - ${GENPSICHIEXE} < postgp.inp.nml$$ - rc=$? - if (( rc != 0 )); then - echo "Nonzero return code rc=${rc}" - exit 3 - fi - cat ./psichi.grb >> "${PGBOUT}" -fi -################################################################################ -# Postprocessing -cd "${pwd}" || exit 2 -[[ "${mkdata}" = "YES" ]] && rmdir "${DATA}" - -exit "${err}" diff --git a/ush/gfs_transfer.sh b/ush/gfs_transfer.sh deleted file mode 100755 index fa53c2526b..0000000000 --- a/ush/gfs_transfer.sh +++ /dev/null @@ -1,33 +0,0 @@ -#! /usr/bin/env bash - -##################################################################### -# echo "-----------------------------------------------------" -# echo " Script: gfs_transfer.sh" -# echo " " -# echo " Purpose - Copy GFS Posts to /nwges and /com" -# echo " Alert posted files to DBNet" -# echo " " -# echo " History - " -# echo " Cooke - 04/21/05 - Inital version, based off of" -# echo " global_transfer.sh" -# echo " Meng - 01/04/18 - Remove writing data file to /nwges." 
-# echo " Meng - 09/14/20 - Update model output format to netcdf for GFS V16" -# echo "-----------------------------------------------------" -##################################################################### - -source "${HOMEgfs}/ush/preamble.sh" - -if [[ "${SENDDBN}" = 'YES' && "${RUN}" = 'gfs' ]]; then - fhr3=$(printf "%03d" "${fhr}") - "${DBNROOT}/bin/dbn_alert" MODEL GFS_SF "${job}" "${COMOUT}/${RUN}.t${cyc}z.atmf${fhr3}.nc" - - if (( fhr > 0 && fhr <= 84 )); then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_BF "${job}" "${COMOUT}/${RUN}.t${cyc}z.sfcf${fhr3}.nc" - fi - if (( fhr == 120 )); then - "${DBNROOT}/bin/dbn_alert" MODEL GFS_BF "${job}" "${COMOUT}/${RUN}.t${cyc}z.sfcf${fhr3}.nc" - fi -fi - -exit 0 - diff --git a/ush/inter_flux.sh b/ush/inter_flux.sh deleted file mode 100755 index b1f4475e05..0000000000 --- a/ush/inter_flux.sh +++ /dev/null @@ -1,56 +0,0 @@ -#! /usr/bin/env bash - -source "$HOMEgfs/ush/preamble.sh" "$FH" - -#----------------------------------------------------------------------- -#-Wen Meng, 03/2019: First version. -# This scripts is for interpolating flux file from model native grid -# into lat-lon grids. 
-#----------------------------------------------------------------------- - -export CNVGRIB=${CNVGRIB:-${grib_util_ROOT}/bin/cnvgrib} -export COPYGB2=${COPYGB2:-${grib_util_ROOT}/bin/copygb} -export WGRIB2=${WGRIB2:-${wgrib2_ROOT}/bin/wgrib2} -export GRBINDEX=${GRBINDEX:-${wgrib2_ROOT}/bin/grbindex} -export RUN=${RUN:-"gfs"} -export cycn=$(echo $CDATE |cut -c 9-10) -export TCYC=${TCYC:-".t${cycn}z."} -export PREFIX=${PREFIX:-${RUN}${TCYC}} -export PGB1F=${PGB1F:-"NO"} - -#--wgrib2 regrid parameters -export option1=' -set_grib_type same -new_grid_winds earth ' -export option21=' -new_grid_interpolation bilinear -if ' -export option22=":(LAND|CRAIN|CICEP|CFRZR|CSNOW|ICSEV):" -export option23=' -new_grid_interpolation neighbor -fi ' -export option24=' -set_bitmap 1 -set_grib_max_bits 16 -if ' -export option25=":(APCP|ACPCP|PRATE|CPRAT):" -export option26=' -set_grib_max_bits 25 -fi -if ' -export option27=":(APCP|ACPCP|PRATE|CPRAT|DZDT):" -export option28=' -new_grid_interpolation budget -fi ' -export grid0p25="latlon 0:1440:0.25 90:721:-0.25" -export grid0p5="latlon 0:720:0.5 90:361:-0.5" -export grid1p0="latlon 0:360:1.0 90:181:-1.0" -export grid2p5="latlon 0:144:2.5 90:73:-2.5" - - -if [ $FH -eq 0 ] ; then - export fhr3=000 -else - export fhr3=$(expr $FH + 0 ) - if [ $fhr3 -lt 100 ]; then export fhr3="0$fhr3"; fi - if [ $fhr3 -lt 10 ]; then export fhr3="0$fhr3"; fi -fi - -#--------------------------------------------------------------- - ${WGRIB2} "${COM_ATMOS_MASTER}/${FLUXFL}" ${option1} ${option21} ${option22} ${option23} \ - ${option24} ${option25} ${option26} ${option27} ${option28} \ - -new_grid ${grid1p0} fluxfile_${fhr3}_1p00 - export err=$?; err_chk - - ${WGRIB2} -s "fluxfile_${fhr3}_1p00" > "${COM_ATMOS_GRIB_1p00}/${PREFIX}flux.1p00.f${fhr3}.idx" - cp "fluxfile_${fhr3}_1p00" "${COM_ATMOS_GRIB_1p00}/${PREFIX}flux.1p00.f${fhr3}" -#--------------------------------------------------------------- - - -exit 0 diff --git a/ush/fv3gfs_dwn_nems.sh 
b/ush/interp_atmos_master.sh similarity index 99% rename from ush/fv3gfs_dwn_nems.sh rename to ush/interp_atmos_master.sh index 93f88e2687..0abc6ad185 100755 --- a/ush/fv3gfs_dwn_nems.sh +++ b/ush/interp_atmos_master.sh @@ -4,7 +4,6 @@ # Generate 0.25 / 0.5 / 1 degree interpolated grib2 files for each input grib2 file # trim's RH and tweaks sea-ice cover - source "${HOMEgfs}/ush/preamble.sh" input_file=${1:-"pgb2file_in"} # Input pressure grib2 file diff --git a/ush/interp_atmos_sflux.sh b/ush/interp_atmos_sflux.sh new file mode 100755 index 0000000000..516a2f5e4a --- /dev/null +++ b/ush/interp_atmos_sflux.sh @@ -0,0 +1,49 @@ +#! /usr/bin/env bash + +# This script takes in a master flux file and creates interpolated flux files at various interpolated resolutions +# Generate 0.25 / 0.5 / 1 degree interpolated grib2 flux files for each input sflux grib2 file + +source "${HOMEgfs}/ush/preamble.sh" + +input_file=${1:-"sfluxfile_in"} # Input sflux grib2 file +output_file_prefix=${2:-"sfluxfile_out"} # Prefix for output sflux grib2 file; the prefix is appended by resolution e.g. _0p25 +grid_string=${3:-"1p00"} # Target grids; e.g. 
"0p25" or "0p25:0p50"; If multiple, they need to be ":" seperated + +WGRIB2=${WGRIB2:-${wgrib2_ROOT}/bin/wgrib2} + +# wgrib2 options for regridding +defaults="-set_grib_type same -set_bitmap 1 -set_grib_max_bits 16" +interp_winds="-new_grid_winds earth" +interp_bilinear="-new_grid_interpolation bilinear" +interp_neighbor="-if :(LAND|CSNOW|CRAIN|CFRZR|CICEP|ICSEV): -new_grid_interpolation neighbor -fi" +interp_budget="-if :(APCP|ACPCP|PRATE|CPRAT|DZDT): -new_grid_interpolation budget -fi" +increased_bits="-if :(APCP|ACPCP|PRATE|CPRAT): -set_grib_max_bits 25 -fi" + +# interpolated target grids +# shellcheck disable=SC2034 +grid0p25="latlon 0:1440:0.25 90:721:-0.25" +# shellcheck disable=SC2034 +grid0p50="latlon 0:720:0.5 90:361:-0.5" +# shellcheck disable=SC2034 +grid1p00="latlon 0:360:1.0 90:181:-1.0" + +# Transform the input ${grid_string} into an array for processing +IFS=':' read -ra grids <<< "${grid_string}" + +output_grids="" +for grid in "${grids[@]}"; do + gridopt="grid${grid}" + output_grids="${output_grids} -new_grid ${!gridopt} ${output_file_prefix}_${grid}" +done + +#shellcheck disable=SC2086 +${WGRIB2} "${input_file}" ${defaults} \ + ${interp_winds} \ + ${interp_bilinear} \ + ${interp_neighbor} \ + ${interp_budget} \ + ${increased_bits} \ + ${output_grids} +export err=$?; err_chk + +exit 0 \ No newline at end of file diff --git a/ush/python/pygfs/task/upp.py b/ush/python/pygfs/task/upp.py index b7127483e7..7db50e1582 100644 --- a/ush/python/pygfs/task/upp.py +++ b/ush/python/pygfs/task/upp.py @@ -22,6 +22,8 @@ class UPP(Task): """Unified Post Processor Task """ + VALID_UPP_RUN = ['analysis', 'forecast', 'goes', 'wafs'] + @logit(logger, name="UPP") def __init__(self, config: Dict[str, Any]) -> None: """Constructor for the UPP task @@ -31,6 +33,7 @@ def __init__(self, config: Dict[str, Any]) -> None: analysis: process analysis output forecast: process UFS-weather-model forecast output goes: process UFS-weather-model forecast output for simulated satellite 
imagery + wafs: process UFS-weather-model forecast output for WAFS products Parameters ---------- @@ -43,6 +46,11 @@ def __init__(self, config: Dict[str, Any]) -> None: """ super().__init__(config) + if self.config.UPP_RUN not in self.VALID_UPP_RUN: + raise NotImplementedError(f'{self.config.UPP_RUN} is not a valid UPP run type.\n' + + 'Valid UPP_RUN values are:\n' + + f'{", ".join(self.VALID_UPP_RUN)}') + valid_datetime = add_to_datetime(self.runtime_config.current_cycle, to_timedelta(f"{self.config.FORECAST_HOUR}H")) localdict = AttrDict( diff --git a/versions/build.hera.ver b/versions/build.hera.ver index cba2bb5a76..ff85b1a801 100644 --- a/versions/build.hera.ver +++ b/versions/build.hera.ver @@ -1,2 +1,3 @@ export stack_intel_ver=2021.5.0 export stack_impi_ver=2021.5.1 +source "${HOMEgfs:-}/versions/build.spack.ver" diff --git a/versions/build.jet.ver b/versions/build.jet.ver index cba2bb5a76..ff85b1a801 100644 --- a/versions/build.jet.ver +++ b/versions/build.jet.ver @@ -1,2 +1,3 @@ export stack_intel_ver=2021.5.0 export stack_impi_ver=2021.5.1 +source "${HOMEgfs:-}/versions/build.spack.ver" diff --git a/versions/build.orion.ver b/versions/build.orion.ver index cba2bb5a76..ff85b1a801 100644 --- a/versions/build.orion.ver +++ b/versions/build.orion.ver @@ -1,2 +1,3 @@ export stack_intel_ver=2021.5.0 export stack_impi_ver=2021.5.1 +source "${HOMEgfs:-}/versions/build.spack.ver" diff --git a/versions/build.s4.ver b/versions/build.s4.ver index a0aed0ee24..a0aae51d87 100644 --- a/versions/build.s4.ver +++ b/versions/build.s4.ver @@ -1,2 +1,3 @@ export stack_intel_ver=2021.5.0 export stack_impi_ver=2021.5.0 +source "${HOMEgfs:-}/versions/build.spack.ver" diff --git a/versions/build.spack.ver b/versions/build.spack.ver index 679da91116..28c3a10185 100644 --- a/versions/build.spack.ver +++ b/versions/build.spack.ver @@ -1,7 +1,5 @@ export spack_stack_ver=1.5.1 export spack_env=gsi-addon -export stack_intel_ver=2021.5.0 -export stack_impi_ver=2021.5.1 export 
python_ver=3.10.8 export cmake_ver=3.23.1 diff --git a/versions/run.hera.ver b/versions/run.hera.ver index d82e9fba62..43443ba715 100644 --- a/versions/run.hera.ver +++ b/versions/run.hera.ver @@ -9,3 +9,5 @@ export gempak_ver=7.4.2 #For metplus jobs, not currently working with spack-stack #export met_ver=9.1.3 #export metplus_ver=3.1.1 + +source "${HOMEgfs:-}/versions/run.spack.ver" diff --git a/versions/run.jet.ver b/versions/run.jet.ver index ab350da770..18a82cab4f 100644 --- a/versions/run.jet.ver +++ b/versions/run.jet.ver @@ -5,3 +5,5 @@ export hpss_ver= export ncl_ver=6.6.2 export R_ver=4.0.2 export gempak_ver=7.4.2 + +source "${HOMEgfs:-}/versions/run.spack.ver" diff --git a/versions/run.orion.ver b/versions/run.orion.ver index ca3258ba8e..ee2f65523b 100644 --- a/versions/run.orion.ver +++ b/versions/run.orion.ver @@ -3,3 +3,5 @@ export stack_impi_ver=2021.5.1 export ncl_ver=6.6.2 export gempak_ver=7.5.1 + +source "${HOMEgfs:-}/versions/run.spack.ver" diff --git a/versions/run.s4.ver b/versions/run.s4.ver index 72206b21d6..56817ef439 100644 --- a/versions/run.s4.ver +++ b/versions/run.s4.ver @@ -2,3 +2,5 @@ export stack_intel_ver=2021.5.0 export stack_impi_ver=2021.5.0 export ncl_ver=6.4.0-precompiled + +source "${HOMEgfs:-}/versions/run.spack.ver" diff --git a/workflow/applications/applications.py b/workflow/applications/applications.py index 2a7b8d249f..9d9bf52595 100644 --- a/workflow/applications/applications.py +++ b/workflow/applications/applications.py @@ -54,7 +54,6 @@ def __init__(self, conf: Configuration) -> None: self.do_bufrsnd = _base.get('DO_BUFRSND', False) self.do_gempak = _base.get('DO_GEMPAK', False) self.do_awips = _base.get('DO_AWIPS', False) - self.do_wafs = _base.get('WAFSF', False) self.do_verfozn = _base.get('DO_VERFOZN', True) self.do_verfrad = _base.get('DO_VERFRAD', True) self.do_vminmon = _base.get('DO_VMINMON', True) @@ -63,6 +62,7 @@ def __init__(self, conf: Configuration) -> None: self.do_genesis_fsu = 
_base.get('DO_GENESIS_FSU', False) self.do_metp = _base.get('DO_METP', False) self.do_npoess = _base.get('DO_NPOESS', False) + self.do_upp = not _base.get('WRITE_DOPOST', True) self.do_hpssarch = _base.get('HPSSARCH', False) diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 4e0897f739..894a551721 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -47,7 +47,7 @@ def _get_app_configs(self): if self.do_ocean: configs += ['ocnpost'] - configs += ['sfcanl', 'analcalc', 'fcst', 'post', 'arch', 'cleanup'] + configs += ['sfcanl', 'analcalc', 'fcst', 'upp', 'atmos_products', 'arch', 'cleanup'] if self.do_hybvar: if self.do_jediatmens: @@ -101,9 +101,6 @@ def _get_app_configs(self): if self.do_awips: configs += ['waveawipsbulls', 'waveawipsgridded'] - if self.do_wafs: - configs += ['wafs', 'wafsgrib2', 'wafsblending', 'wafsgcip', 'wafsgrib20p25', 'wafsblending0p25'] - if self.do_aero: configs += ['aeroanlinit', 'aeroanlrun', 'aeroanlfinal'] @@ -125,10 +122,6 @@ def get_task_names(self): """ gdas_gfs_common_tasks_before_fcst = ['prep'] - gdas_gfs_common_tasks_after_fcst = ['postanl', 'post'] - # if self.do_ocean: # TODO: uncomment when ocnpost is fixed in cycled mode - # gdas_gfs_common_tasks_after_fcst += ['ocnpost'] - gdas_gfs_common_cleanup_tasks = ['arch', 'cleanup'] if self.do_jediatmvar: @@ -164,15 +157,18 @@ def get_task_names(self): # Collect all "gdas" cycle tasks gdas_tasks = gdas_gfs_common_tasks_before_fcst.copy() + if not self.do_jediatmvar: gdas_tasks += ['analdiag'] if self.do_wave and 'gdas' in self.wave_cdumps: gdas_tasks += wave_prep_tasks - gdas_tasks += ['fcst'] + gdas_tasks += ['atmanlupp', 'atmanlprod', 'fcst'] - gdas_tasks += gdas_gfs_common_tasks_after_fcst + if self.do_upp: + gdas_tasks += ['atmupp'] + gdas_tasks += ['atmprod'] if self.do_wave and 'gdas' in self.wave_cdumps: if self.do_wave_bnd: @@ -194,14 +190,16 @@ def get_task_names(self): gdas_tasks += 
gdas_gfs_common_cleanup_tasks # Collect "gfs" cycle tasks - gfs_tasks = gdas_gfs_common_tasks_before_fcst + gfs_tasks = gdas_gfs_common_tasks_before_fcst.copy() if self.do_wave and 'gfs' in self.wave_cdumps: gfs_tasks += wave_prep_tasks - gfs_tasks += ['fcst'] + gfs_tasks += ['atmanlupp', 'atmanlprod', 'fcst'] - gfs_tasks += gdas_gfs_common_tasks_after_fcst + if self.do_upp: + gfs_tasks += ['atmupp'] + gfs_tasks += ['atmprod'] if self.do_vminmon: gfs_tasks += ['vminmon'] @@ -234,16 +232,11 @@ def get_task_names(self): gfs_tasks += ['gempak'] if self.do_awips: - gfs_tasks += ['awips_20km_1p0deg'] - gfs_tasks += ['awips_g2'] - gfs_tasks += ['fbwinds'] + gfs_tasks += ['awips_20km_1p0deg', 'awips_g2', 'fbwinds'] if self.do_npoess: gfs_tasks += ['npoess'] - if self.do_wafs: - gfs_tasks += ['wafs', 'wafsgcip', 'wafsgrib2', 'wafsgrib20p25', 'wafsblending', 'wafsblending0p25'] - gfs_tasks += gdas_gfs_common_cleanup_tasks tasks = dict() diff --git a/workflow/applications/gfs_forecast_only.py b/workflow/applications/gfs_forecast_only.py index 1790c86a2c..6de6cbe79e 100644 --- a/workflow/applications/gfs_forecast_only.py +++ b/workflow/applications/gfs_forecast_only.py @@ -18,34 +18,38 @@ def _get_app_configs(self): configs = ['stage_ic', 'fcst', 'arch', 'cleanup'] if self.do_atm: - configs += ['post'] - if self.do_aero: - configs += ['aerosol_init'] + if self.do_upp: + configs += ['upp'] - if self.do_ocean or self.do_ice: - configs += ['ocnpost'] + configs += ['atmos_products'] - if self.do_atm and self.do_tracker: - configs += ['tracker'] + if self.do_aero: + configs += ['aerosol_init'] - if self.do_atm and self.do_genesis: - configs += ['genesis'] + if self.do_tracker: + configs += ['tracker'] - if self.do_atm and self.do_genesis_fsu: - configs += ['genesis_fsu'] + if self.do_genesis: + configs += ['genesis'] - if self.do_atm and self.do_metp: - configs += ['metp'] + if self.do_genesis_fsu: + configs += ['genesis_fsu'] - if self.do_bufrsnd: - configs += ['postsnd'] + if 
self.do_metp: + configs += ['metp'] + + if self.do_bufrsnd: + configs += ['postsnd'] + + if self.do_gempak: + configs += ['gempak'] - if self.do_gempak: - configs += ['gempak'] + if self.do_awips: + configs += ['awips'] - if self.do_awips: - configs += ['awips'] + if self.do_ocean or self.do_ice: + configs += ['ocnpost'] if self.do_wave: configs += ['waveinit', 'waveprep', 'wavepostsbs', 'wavepostpnt'] @@ -86,22 +90,35 @@ def get_task_names(self): tasks += ['fcst'] if self.do_atm: - tasks += ['post'] - if self.do_ocean: - tasks += ['ocnpost'] + if self.do_upp: + tasks += ['atmupp'] + + tasks += ['atmprod'] + + if self.do_tracker: + tasks += ['tracker'] - if self.do_atm and self.do_tracker: - tasks += ['tracker'] + if self.do_genesis: + tasks += ['genesis'] - if self.do_atm and self.do_genesis: - tasks += ['genesis'] + if self.do_genesis_fsu: + tasks += ['genesis_fsu'] - if self.do_atm and self.do_genesis_fsu: - tasks += ['genesis_fsu'] + if self.do_metp: + tasks += ['metp'] - if self.do_atm and self.do_metp: - tasks += ['metp'] + if self.do_bufrsnd: + tasks += ['postsnd'] + + if self.do_gempak: + tasks += ['gempak'] + + if self.do_awips: + tasks += ['awips_20km_1p0deg', 'awips_g2', 'fbwinds'] + + if self.do_ocean or self.do_ice: + tasks += ['ocnpost'] if self.do_wave: if self.do_wave_bnd: @@ -112,20 +129,6 @@ def get_task_names(self): if self.do_awips: tasks += ['waveawipsbulls', 'waveawipsgridded'] - if self.do_bufrsnd: - tasks += ['postsnd'] - - if self.do_gempak: - tasks += ['gempak'] - - if self.do_awips: - tasks += ['awips_20km_1p0deg'] - tasks += ['awips_g2'] - tasks += ['fbwinds'] - - if self.do_wafs: - tasks += ['wafs', 'wafsgcip', 'wafsgrib2', 'wafsgrib20p25', 'wafsblending', 'wafsblending0p25'] - tasks += ['arch', 'cleanup'] # arch and cleanup **must** be the last tasks return {f"{self._base['CDUMP']}": tasks} diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index f69579b068..9c69240a0e 100644 --- a/workflow/rocoto/gfs_tasks.py +++ 
b/workflow/rocoto/gfs_tasks.py @@ -87,7 +87,7 @@ def prep(self): gfs_enkf = True if self.app_config.do_hybvar and 'gfs' in self.app_config.eupd_cdumps else False deps = [] - dep_dict = {'type': 'metatask', 'name': 'gdaspost', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} + dep_dict = {'type': 'metatask', 'name': 'gdasatmprod', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} deps.append(rocoto.add_dependency(dep_dict)) data = f'{atm_hist_path}/gdas.t@Hz.atmf009.nc' dep_dict = {'type': 'data', 'data': data, 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} @@ -552,93 +552,163 @@ def _fcst_cycled(self): return task - def post(self): - return self._post_task('post') - - def postanl(self): + def atmanlupp(self): postenvars = self.envars.copy() - postenvar_dict = {'FHRLST': 'anl', - 'ROTDIR': self._base.get('ROTDIR')} - + postenvar_dict = {'FHRLST': 'f000', + 'UPP_RUN': 'analysis'} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) - deps = [] atm_anl_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_ANALYSIS_TMPL"]) + deps = [] + data = f'{atm_anl_path}/{self.cdump}.t@Hz.atmanl.nc' + dep_dict = {'type': 'data', 'data': data, 'age': 120} + deps.append(rocoto.add_dependency(dep_dict)) + data = f'{atm_anl_path}/{self.cdump}.t@Hz.sfcanl.nc' + dep_dict = {'type': 'data', 'data': data, 'age': 120} + deps.append(rocoto.add_dependency(dep_dict)) data = f'{atm_anl_path}/{self.cdump}.t@Hz.loganl.txt' - dep_dict = {'type': 'data', 'data': data} + dep_dict = {'type': 'data', 'data': data, 'age': 60} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') + resources = self.get_resource('upp') + task = create_wf_task('atmanlupp', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, + cycledef=self.cdump, command='&JOBS_DIR;/upp.sh') + + return task + + def atmanlprod(self): + 
postenvars = self.envars.copy() + postenvar_dict = {'FHRLST': '-f001'} + for key, value in postenvar_dict.items(): + postenvars.append(rocoto.create_envar(name=key, value=str(value))) + + atm_master_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_MASTER_TMPL"]) + deps = [] + data = f'{atm_master_path}/{self.cdump}.t@Hz.master.grb2anl' + dep_dict = {'type': 'data', 'data': data, 'age': 120} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) - resources = self.get_resource('post') - task = create_wf_task('postanl', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, - cycledef=self.cdump) + resources = self.get_resource('atmos_products') + task = create_wf_task('atmanlprod', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, + cycledef=self.cdump, command='&JOBS_DIR;/atmos_products.sh') return task - def ocnpost(self): - if self.app_config.mode in ['forecast-only']: # TODO: fix ocnpost in cycled mode - return self._post_task('ocnpost') - - def _post_task(self, task_name): - if task_name not in ['post', 'ocnpost']: - raise KeyError(f'Invalid post-processing task: {task_name}') - - def _get_postgroups(cdump, config): - - fhmin = config['FHMIN'] - fhmax = config['FHMAX'] - fhout = config['FHOUT'] - - # Get a list of all forecast hours - fhrs = [] - if cdump in ['gdas']: - fhrs = range(fhmin, fhmax + fhout, fhout) - elif cdump in ['gfs']: - fhmax = np.max( - [config['FHMAX_GFS_00'], config['FHMAX_GFS_06'], config['FHMAX_GFS_12'], config['FHMAX_GFS_18']]) - fhout = config['FHOUT_GFS'] - fhmax_hf = config['FHMAX_HF_GFS'] - fhout_hf = config['FHOUT_HF_GFS'] - fhrs_hf = range(fhmin, fhmax_hf + fhout_hf, fhout_hf) - fhrs = list(fhrs_hf) + list(range(fhrs_hf[-1] + fhout, fhmax + fhout, fhout)) - - npostgrp = config['NPOSTGRP'] - ngrps = npostgrp if len(fhrs) > npostgrp else len(fhrs) + @staticmethod + def _get_ufs_postproc_grps(cdump, config): - fhrs = [f'f{fhr:03d}' for fhr in 
fhrs] - fhrs = np.array_split(fhrs, ngrps) - fhrs = [fhr.tolist() for fhr in fhrs] + fhmin = config['FHMIN'] + fhmax = config['FHMAX'] + fhout = config['FHOUT'] + + # Get a list of all forecast hours + fhrs = [] + if cdump in ['gdas']: + fhrs = range(fhmin, fhmax + fhout, fhout) + elif cdump in ['gfs']: + fhmax = np.max( + [config['FHMAX_GFS_00'], config['FHMAX_GFS_06'], config['FHMAX_GFS_12'], config['FHMAX_GFS_18']]) + fhout = config['FHOUT_GFS'] + fhmax_hf = config['FHMAX_HF_GFS'] + fhout_hf = config['FHOUT_HF_GFS'] + fhrs_hf = range(fhmin, fhmax_hf + fhout_hf, fhout_hf) + fhrs = list(fhrs_hf) + list(range(fhrs_hf[-1] + fhout, fhmax + fhout, fhout)) - grp = ' '.join(f'_{fhr[0]}-{fhr[-1]}' if len(fhr) > 1 else f'_{fhr[0]}' for fhr in fhrs) - dep = ' '.join([fhr[-1] for fhr in fhrs]) - lst = ' '.join(['_'.join(fhr) for fhr in fhrs]) + nfhrs_per_grp = config.get('NFHRS_PER_GROUP', 1) + ngrps = len(fhrs) // nfhrs_per_grp if len(fhrs) % nfhrs_per_grp == 0 else len(fhrs) // nfhrs_per_grp + 1 - return grp, dep, lst + fhrs = [f'f{fhr:03d}' for fhr in fhrs] + fhrs = np.array_split(fhrs, ngrps) + fhrs = [fhr.tolist() for fhr in fhrs] + + grp = ' '.join(f'_{fhr[0]}-{fhr[-1]}' if len(fhr) > 1 else f'_{fhr[0]}' for fhr in fhrs) + dep = ' '.join([fhr[-1] for fhr in fhrs]) + lst = ' '.join(['_'.join(fhr) for fhr in fhrs]) + + return grp, dep, lst + + def atmupp(self): + + varname1, varname2, varname3 = 'grp', 'dep', 'lst' + varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['upp']) + vardict = {varname2: varval2, varname3: varval3} + + postenvars = self.envars.copy() + postenvar_dict = {'FHRLST': '#lst#', + 'UPP_RUN': 'forecast'} + for key, value in postenvar_dict.items(): + postenvars.append(rocoto.create_envar(name=key, value=str(value))) - deps = [] atm_hist_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_HISTORY_TMPL"]) - data = f'{atm_hist_path}/{self.cdump}.t@Hz.atm.log#dep#.txt' - dep_dict = {'type': 'data', 'data': data} + 
deps = [] + data = f'{atm_hist_path}/{self.cdump}.t@Hz.atm#dep#.nc' + dep_dict = {'type': 'data', 'data': data, 'age': 120} deps.append(rocoto.add_dependency(dep_dict)) - dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst'} + data = f'{atm_hist_path}/{self.cdump}.t@Hz.sfc#dep#.nc' + dep_dict = {'type': 'data', 'data': data, 'age': 120} deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep_condition='or', dep=deps) + data = f'{atm_hist_path}/{self.cdump}.t@Hz.atm.log#dep#.txt' + dep_dict = {'type': 'data', 'data': data, 'age': 60} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') + cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump + resources = self.get_resource('upp') + task = create_wf_task('atmupp', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, + metatask='atmupp', varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef, + command='&JOBS_DIR;/upp.sh') + + return task + + def atmprod(self): + + varname1, varname2, varname3 = 'grp', 'dep', 'lst' + varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['atmos_products']) + vardict = {varname2: varval2, varname3: varval3} postenvars = self.envars.copy() - postenvar_dict = {'FHRLST': '#lst#', - 'ROTDIR': self._base.get('ROTDIR')} + postenvar_dict = {'FHRLST': '#lst#'} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) + atm_master_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_MASTER_TMPL"]) + deps = [] + data = f'{atm_master_path}/{self.cdump}.t@Hz.master.grb2#dep#' + dep_dict = {'type': 'data', 'data': data, 'age': 120} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep=deps) + cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump + resources = self.get_resource('atmos_products') + task = 
create_wf_task('atmprod', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, + metatask='atmprod', varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef, + command='&JOBS_DIR;/atmos_products.sh') + + return task + + def ocnpost(self): + varname1, varname2, varname3 = 'grp', 'dep', 'lst' - varval1, varval2, varval3 = _get_postgroups(self.cdump, self._configs[task_name]) + varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['ocnpost']) vardict = {varname2: varval2, varname3: varval3} - cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump + postenvars = self.envars.copy() + postenvar_dict = {'FHRLST': '#lst#', + 'ROTDIR': self._base.get('ROTDIR')} + for key, value in postenvar_dict.items(): + postenvars.append(rocoto.create_envar(name=key, value=str(value))) - resources = self.get_resource(task_name) - task = create_wf_task(task_name, resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, - metatask=task_name, varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef) + deps = [] + atm_hist_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_HISTORY_TMPL"]) + data = f'{atm_hist_path}/{self.cdump}.t@Hz.atm.log#dep#.txt' + dep_dict = {'type': 'data', 'data': data} + deps.append(rocoto.add_dependency(dep_dict)) + dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep_condition='or', dep=deps) + cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump + resources = self.get_resource('ocnpost') + task = create_wf_task('ocnpost', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, + metatask='ocnpost', varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef) return task @@ -731,60 +801,6 @@ def waveawipsgridded(self): return task - def wafs(self): - return self._wafs_task('wafs') - - def wafsgcip(self): - return 
self._wafs_task('wafsgcip') - - def wafsgrib2(self): - return self._wafs_task('wafsgrib2') - - def wafsgrib20p25(self): - return self._wafs_task('wafsgrib20p25') - - def _wafs_task(self, task_name): - if task_name not in ['wafs', 'wafsgcip', 'wafsgrib2', 'wafsgrib20p25']: - raise KeyError(f'Invalid WAFS task: {task_name}') - - wafs_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_WAFS_TMPL"]) - - deps = [] - fhrlst = [6] + [*range(12, 36 + 3, 3)] - for fhr in fhrlst: - data = f'{wafs_path}/{self.cdump}.t@Hz.wafs.grb2if{fhr:03d}' - dep_dict = {'type': 'data', 'data': data} - deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) - - resources = self.get_resource(task_name) - task = create_wf_task(task_name, resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) - - return task - - def wafsblending(self): - deps = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}wafsgrib2'} - deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep=deps) - - resources = self.get_resource('wafsblending') - task = create_wf_task('wafsblending', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) - - return task - - def wafsblending0p25(self): - deps = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}wafsgrib20p25'} - deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep=deps) - - resources = self.get_resource('wafsblending0p25') - task = create_wf_task('wafsblending0p25', resources, cdump=self.cdump, envar=self.envars, - dependency=dependencies) - - return task - def postsnd(self): deps = [] dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst'} @@ -862,7 +878,7 @@ def _get_awipsgroups(cdump, config): def awips_20km_1p0deg(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} 
deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -886,7 +902,7 @@ def awips_20km_1p0deg(self): def awips_g2(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -910,7 +926,7 @@ def awips_g2(self): def gempak(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -922,7 +938,7 @@ def gempak(self): def npoess(self): deps = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}postanl'} + dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlprod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -966,7 +982,7 @@ def vminmon(self): def tracker(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -977,7 +993,7 @@ def tracker(self): def genesis(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -988,7 +1004,7 @@ def genesis(self): def genesis_fsu(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -999,7 +1015,7 @@ def genesis_fsu(self): def fit2obs(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + 
dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1037,14 +1053,14 @@ def arch(self): dependencies = [] if self.app_config.mode in ['cycled']: if self.cdump in ['gfs']: - dep_dict = {'type': 'task', 'name': f'{self.cdump}postanl'} + dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlprod'} deps.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_vminmon: dep_dict = {'type': 'task', 'name': f'{self.cdump}vminmon'} deps.append(rocoto.add_dependency(dep_dict)) elif self.cdump in ['gdas']: # Block for handling half cycle dependencies deps2 = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}postanl'} + dep_dict = {'type': 'task', 'name': f'{self.cdump}atmanlprod'} deps2.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_fit2obs: dep_dict = {'type': 'task', 'name': f'{self.cdump}fit2obs'} @@ -1072,7 +1088,7 @@ def arch(self): dep_dict = {'type': 'task', 'name': f'{self.cdump}genesis_fsu'} deps.append(rocoto.add_dependency(dep_dict)) # Post job dependencies - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}post'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} deps.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_wave: dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostsbs'} diff --git a/workflow/rocoto/rocoto.py b/workflow/rocoto/rocoto.py index 153ff71475..c4240622d4 100644 --- a/workflow/rocoto/rocoto.py +++ b/workflow/rocoto/rocoto.py @@ -115,8 +115,6 @@ def create_task(task_dict: Dict[str, Any]) -> List[str]: strings.append(f'\t\t{d}\n') strings.append('\t\n') strings.append('\n') - elif taskname != "gfswaveinit": - print("WARNING: No dependencies for task " + taskname) strings.append('\n') diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py index aa52e4fa9b..7fb42b73e8 100644 --- a/workflow/rocoto/tasks.py +++ b/workflow/rocoto/tasks.py @@ -19,8 +19,11 @@ class Tasks: 
'atmensanlinit', 'atmensanlrun', 'atmensanlfinal', 'aeroanlinit', 'aeroanlrun', 'aeroanlfinal', 'preplandobs', 'landanl', - 'fcst', 'post', 'ocnpost', - 'verfozn', 'verfrad', 'vminmon', 'metp', + 'fcst', + 'atmanlupp', 'atmanlprod', 'atmupp', 'atmprod', + 'ocnpost', + 'verfozn', 'verfrad', 'vminmon', + 'metp', 'tracker', 'genesis', 'genesis_fsu', 'postsnd', 'awips_g2', 'awips_20km_1p0deg', 'fbwinds', 'gempak', 'waveawipsbulls', 'waveawipsgridded', 'wavegempak', 'waveinit', @@ -189,7 +192,7 @@ def get_task(self, task_name, *args, **kwargs): def create_wf_task(task_name, resources, cdump='gdas', cycledef=None, envar=None, dependency=None, metatask=None, varname=None, varval=None, vardict=None, - final=False): + final=False, command=None): tasknamestr = f'{cdump}{task_name}' metatask_dict = None if metatask is not None: @@ -204,7 +207,7 @@ def create_wf_task(task_name, resources, task_dict = {'taskname': f'{tasknamestr}', 'cycledef': f'{cycledefstr}', 'maxtries': '&MAXTRIES;', - 'command': f'&JOBS_DIR;/{task_name}.sh', + 'command': f'&JOBS_DIR;/{task_name}.sh' if command is None else command, 'jobname': f'&PSLOT;_{tasknamestr}_@H', 'resources': resources, 'log': f'&ROTDIR;/logs/@Y@m@d@H/{tasknamestr}.log',