diff --git a/.gitignore b/.gitignore index 83706de085..8fc6d0b20b 100644 --- a/.gitignore +++ b/.gitignore @@ -79,6 +79,7 @@ parm/ufs/MOM_input_*.IN parm/ufs/MOM6_data_table.IN parm/ufs/ice_in.IN parm/ufs/ufs.configure.*.IN +parm/ufs/post_itag_gfs parm/wafs # Ignore sorc and logs folders from externals diff --git a/env/AZUREPW.env b/env/AZUREPW.env index 9e246a9cb4..c2faeb2bf6 100755 --- a/env/AZUREPW.env +++ b/env/AZUREPW.env @@ -43,6 +43,13 @@ if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then export APRUN_UFS="${launcher} -n ${ufs_ntasks}" unset nnodes ufs_ntasks +elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then + + export CFP_MP="YES" + if [[ "${step}" = "waveprep" ]]; then export MP_PULSE=0 ; fi + export wavempexec=${launcher} + export wave_mpmd=${mpmd_opt} + elif [[ "${step}" = "post" ]]; then export NTHREADS_NP=${NTHREADS1} @@ -52,4 +59,45 @@ elif [[ "${step}" = "post" ]]; then [[ ${NTHREADS_DWN} -gt ${max_threads_per_task} ]] && export NTHREADS_DWN=${max_threads_per_task} export APRUN_DWN="${launcher} -n ${ntasks_dwn}" +elif [[ "${step}" = "atmos_products" ]]; then + + export USE_CFP="YES" # Use MPMD for downstream product generation + +elif [[ "${step}" = "oceanice_products" ]]; then + + export NTHREADS_OCNICEPOST=${NTHREADS1} + export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}" + +elif [[ "${step}" = "ecen" ]]; then + + export NTHREADS_ECEN=${NTHREADSmax} + export APRUN_ECEN="${APRUN}" + + export NTHREADS_CHGRES=${threads_per_task_chgres:-12} + [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node} + export APRUN_CHGRES="time" + + export NTHREADS_CALCINC=${threads_per_task_calcinc:-1} + [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task} + 
export APRUN_CALCINC="${APRUN}" + +elif [[ "${step}" = "esfc" ]]; then + + export NTHREADS_ESFC=${NTHREADSmax} + export APRUN_ESFC="${APRUN}" + + export NTHREADS_CYCLE=${threads_per_task_cycle:-14} + [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node} + export APRUN_CYCLE="${APRUN}" + +elif [[ "${step}" = "epos" ]]; then + + export NTHREADS_EPOS=${NTHREADSmax} + export APRUN_EPOS="${APRUN}" + +elif [[ "${step}" = "fit2obs" ]]; then + + export NTHREADS_FIT2OBS=${NTHREADS1} + export MPIRUN="${APRUN}" + fi diff --git a/env/WCOSS2.env b/env/WCOSS2.env index d2dae3ba93..cea24fb26b 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -272,12 +272,12 @@ elif [[ "${step}" = "postsnd" ]]; then export OMP_NUM_THREADS=1 export NTHREADS_POSTSND=${NTHREADS1} - export mpmd_opt="-ppn 21 ${mpmd_opt}" - export NTHREADS_POSTSNDCFP=${threads_per_task_postsndcfp:-1} [[ ${NTHREADS_POSTSNDCFP} -gt ${max_threads_per_task} ]] && export NTHREADS_POSTSNDCFP=${max_threads_per_task} export APRUN_POSTSNDCFP="${launcher} -np ${ntasks_postsndcfp} ${mpmd_opt}" + export mpmd_opt="-ppn ${tasks_per_node} ${mpmd_opt}" + elif [[ "${step}" = "awips" ]]; then export NTHREADS_AWIPS=${NTHREADS1} diff --git a/parm/config/gefs/config.base b/parm/config/gefs/config.base index 47474fb108..883957ed0c 100644 --- a/parm/config/gefs/config.base +++ b/parm/config/gefs/config.base @@ -342,7 +342,7 @@ export DELETE_COM_IN_ARCHIVE_JOB="YES" # NO=retain ROTDIR. YES default in arc export NUM_SND_COLLECTIVES=${NUM_SND_COLLECTIVES:-9} # The tracker, genesis, and METplus jobs are not supported on CSPs yet -# TODO: we should place these in workflow/hosts/[csp]pw.yaml as part of AWS/AZURE/GOOGLE setup, not for general. +# TODO: we should place these in workflow/hosts/[aws|azure|google]pw.yaml as part of CSP's setup, not for general. 
if [[ "${machine}" =~ "PW" ]]; then export DO_WAVE="NO" fi diff --git a/parm/config/gefs/config.cleanup b/parm/config/gefs/config.cleanup new file mode 120000 index 0000000000..839ed6c194 --- /dev/null +++ b/parm/config/gefs/config.cleanup @@ -0,0 +1 @@ +../gfs/config.cleanup \ No newline at end of file diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources index 79f3426f56..690fdf919a 100644 --- a/parm/config/gefs/config.resources +++ b/parm/config/gefs/config.resources @@ -43,6 +43,10 @@ case ${machine} in export PARTITION_BATCH="compute" max_tasks_per_node=36 ;; + "AZUREPW") + export PARTITION_BATCH="compute" + max_tasks_per_node=24 + ;; "GOOGLEPW") export PARTITION_BATCH="compute" max_tasks_per_node=32 @@ -291,6 +295,14 @@ case ${step} in export threads_per_task=1 export memory="4096M" ;; + + "cleanup") + export walltime="00:15:00" + export ntasks=1 + export tasks_per_node=1 + export threads_per_task=1 + export memory="4096M" + ;; *) echo "FATAL ERROR: Invalid job ${step} passed to ${BASH_SOURCE[0]}" exit 1 diff --git a/parm/config/gefs/config.resources.AZUREPW b/parm/config/gefs/config.resources.AZUREPW new file mode 100644 index 0000000000..96303139d8 --- /dev/null +++ b/parm/config/gefs/config.resources.AZUREPW @@ -0,0 +1,11 @@ +#! /usr/bin/env bash + +# AZURE-specific job resources + +export is_exclusive="True" +unset memory + +# shellcheck disable=SC2312 +for mem_var in $(env | grep '^memory_' | cut -d= -f1); do + unset "${mem_var}" +done diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs index bfc11e3c5a..bc3950490e 100644 --- a/parm/config/gefs/config.ufs +++ b/parm/config/gefs/config.ufs @@ -80,8 +80,8 @@ case "${fv3_res}" in export nthreads_fv3_gfs=1 export nthreads_ufs=1 export nthreads_ufs_gfs=1 - export xr_cnvcld=.false. # Do not pass conv. clouds to Xu-Randall cloud fraction - export cdmbgwd="0.071,2.1,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling + export xr_cnvcld=.false. 
# Do not pass conv. clouds to Xu-Randall cloud fraction + export cdmbgwd="0.071,2.1,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="40.0,1.77,1.0,1.0" # settings for GSL drag suite export k_split=1 export n_split=4 @@ -104,8 +104,8 @@ case "${fv3_res}" in export nthreads_fv3_gfs=1 export nthreads_ufs=1 export nthreads_ufs_gfs=1 - export xr_cnvcld=".false." # Do not pass conv. clouds to Xu-Randall cloud fraction - export cdmbgwd="0.14,1.8,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling + export xr_cnvcld=".false." # Do not pass conv. clouds to Xu-Randall cloud fraction + export cdmbgwd="0.14,1.8,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="20.0,2.5,1.0,1.0" # settings for GSL drag suite export knob_ugwp_tauamp=3.0e-3 # setting for UGWPv1 non-stationary GWD export k_split=1 @@ -254,40 +254,33 @@ export ntasks_fv3_gfs export ntasks_quilt export ntasks_quilt_gfs -# Determine whether to use compression in the write grid component based on resolution +# Determine whether to use compression in the write grid component +# and whether to use parallel NetCDF based on resolution case ${fv3_res} in - "C48" | "C96" | "C192" | "C384") + "C48" | "C96" | "C192") zstandard_level=0 ideflate=0 quantize_nsd=0 + OUTPUT_FILETYPE_ATM="netcdf" + OUTPUT_FILETYPE_SFC="netcdf" ;; - "C768" | "C1152" | "C3072") + "C384" | "C768" | "C1152" | "C3072") zstandard_level=0 ideflate=1 quantize_nsd=5 - ;; - *) - echo "FATAL ERROR: Unrecognized FV3 resolution ${fv3_res}" - exit 15 - ;; -esac -export zstandard_level ideflate quantize_nsd - -# Determine whether to use parallel NetCDF based on resolution -case ${fv3_res} in - "C48" | "C96" | "C192" | "C384") - OUTPUT_FILETYPE_ATM="netcdf" - OUTPUT_FILETYPE_SFC="netcdf" - ;; - "C768" | "C1152" | "C3072") OUTPUT_FILETYPE_ATM="netcdf_parallel" - OUTPUT_FILETYPE_SFC="netcdf_parallel" + if [[ "${fv3_res}" == "C384" ]]; then + OUTPUT_FILETYPE_SFC="netcdf" # For C384, the write grid 
component is better off with serial netcdf + else + OUTPUT_FILETYPE_SFC="netcdf_parallel" + fi ;; *) echo "FATAL ERROR: Unrecognized FV3 resolution ${fv3_res}" exit 15 ;; esac +export zstandard_level ideflate quantize_nsd export OUTPUT_FILETYPE_ATM OUTPUT_FILETYPE_SFC # cpl defaults diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index b50e1c5fbb..afc5939fcd 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -527,6 +527,7 @@ case ${step} in case ${OCNRES} in "025") ntasks=480;; "050") ntasks=16;; + "100") ntasks=16;; "500") ntasks=16;; *) echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}" @@ -550,6 +551,10 @@ case ${step} in ntasks=16 memory="96GB" ;; + "100") + ntasks=16 + memory="96GB" + ;; "500") ntasks=16 memory="24GB" @@ -576,6 +581,10 @@ case ${step} in ntasks=16 memory="96GB" ;; + "100") + ntasks=16 + memory="96GB" + ;; "500") ntasks=16 memory="24GB" @@ -602,6 +611,10 @@ case ${step} in ntasks=16 memory="96GB" ;; + "100") + ntasks=16 + memory="96GB" + ;; "500") ntasks=16 memory="24GB" @@ -630,6 +643,9 @@ case ${step} in "050") memory="32GB" ntasks=16;; + "100") + memory="32GB" + ntasks=16;; "500") memory="32GB" ntasks=8;; @@ -1192,9 +1208,23 @@ case ${step} in "postsnd") walltime="02:00:00" export ntasks=141 - threads_per_task=6 - export tasks_per_node=21 export ntasks_postsndcfp=9 + case ${CASE} in + "C768") + tasks_per_node=21 + threads_per_task=6 + memory="23GB" + ;; + "C1152") + tasks_per_node=9 + threads_per_task=14 + memory="50GB" + ;; + *) + tasks_per_node=21 + threads_per_task=6 + ;; + esac export tasks_per_node_postsndcfp=1 postsnd_req_cores=$(( tasks_per_node * threads_per_task )) if (( postsnd_req_cores > max_tasks_per_node )); then diff --git a/parm/config/gfs/config.resources.GAEA b/parm/config/gfs/config.resources.GAEA index 51007b5b4f..a4d4ddfece 100644 --- a/parm/config/gfs/config.resources.GAEA +++ b/parm/config/gfs/config.resources.GAEA @@ 
-21,6 +21,7 @@ case ${step} in esac +unset memory # shellcheck disable=SC2312 for mem_var in $(env | grep '^memory_' | cut -d= -f1); do unset "${mem_var}" diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs index 0aed935553..6309c4073b 100644 --- a/parm/config/gfs/config.ufs +++ b/parm/config/gfs/config.ufs @@ -356,40 +356,33 @@ export ntasks_fv3_gfs export ntasks_quilt_gdas export ntasks_quilt_gfs -# Determine whether to use compression in the write grid component based on resolution +# Determine whether to use compression in the write grid component +# and whether to use parallel NetCDF based on resolution case ${fv3_res} in - "C48" | "C96" | "C192" | "C384") + "C48" | "C96" | "C192") zstandard_level=0 ideflate=0 quantize_nsd=0 + OUTPUT_FILETYPE_ATM="netcdf" + OUTPUT_FILETYPE_SFC="netcdf" ;; - "C768" | "C1152" | "C3072") + "C384" | "C768" | "C1152" | "C3072") zstandard_level=0 ideflate=1 quantize_nsd=5 - ;; - *) - echo "FATAL ERROR: Unrecognized FV3 resolution ${fv3_res}" - exit 15 - ;; -esac -export zstandard_level ideflate quantize_nsd - -# Determine whether to use parallel NetCDF based on resolution -case ${fv3_res} in - "C48" | "C96" | "C192" | "C384") - OUTPUT_FILETYPE_ATM="netcdf" - OUTPUT_FILETYPE_SFC="netcdf" - ;; - "C768" | "C1152" | "C3072") OUTPUT_FILETYPE_ATM="netcdf_parallel" - OUTPUT_FILETYPE_SFC="netcdf_parallel" + if [[ "${fv3_res}" == "C384" ]]; then + OUTPUT_FILETYPE_SFC="netcdf" # For C384, the write grid component is better off with serial netcdf + else + OUTPUT_FILETYPE_SFC="netcdf_parallel" + fi ;; *) echo "FATAL ERROR: Unrecognized FV3 resolution ${fv3_res}" exit 15 ;; esac +export zstandard_level ideflate quantize_nsd export OUTPUT_FILETYPE_ATM OUTPUT_FILETYPE_SFC # cpl defaults diff --git a/sorc/build_all.sh b/sorc/build_all.sh index 79ae3c937f..b2f4e6ce0e 100755 --- a/sorc/build_all.sh +++ b/sorc/build_all.sh @@ -145,7 +145,7 @@ build_opts["ww3prepost"]="${_wave_opt} ${_verbose_opt} ${_build_ufs_opt} ${_buil # Optional 
DA builds if [[ "${_build_ufsda}" == "YES" ]]; then - if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" && "${MACHINE_ID}" != "hercules" && "${MACHINE_ID}" != "wcoss2" && "${MACHINE_ID}" != "noaacloud" ]]; then + if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" && "${MACHINE_ID}" != "hercules" && "${MACHINE_ID}" != "wcoss2" && "${MACHINE_ID}" != "noaacloud" && "${MACHINE_ID}" != "gaea" ]]; then echo "NOTE: The GDAS App is not supported on ${MACHINE_ID}. Disabling build." else build_jobs["gdas"]=8 diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index 92404afc01..270a8bb1c9 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -213,12 +213,7 @@ declare -a ufs_templates=("model_configure.IN" "input_global_nest.nml.IN"\ "ufs.configure.s2swa_esmf.IN" \ "ufs.configure.leapfrog_atm_wav.IN" \ "ufs.configure.leapfrog_atm_wav_esmf.IN" \ - "post_itag_gfs" \ - "postxconfig-NT-gfs.txt" \ - "postxconfig-NT-gfs_FH00.txt") - # TODO: The above postxconfig files in the UFSWM are not the same as the ones in UPP - # TODO: GEFS postxconfig files also need to be received from UFSWM - # See forecast_predet.sh where the UPP versions are used. They will need to be replaced with these. + "post_itag_gfs") for file in "${ufs_templates[@]}"; do [[ -s "${file}" ]] && rm -f "${file}" ${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_model.fd/tests/parm/${file}" . 
diff --git a/ush/forecast_postdet.sh b/ush/forecast_postdet.sh index d13cb0df0c..58755d41d9 100755 --- a/ush/forecast_postdet.sh +++ b/ush/forecast_postdet.sh @@ -233,14 +233,14 @@ EOF ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.atmf${FH3}.nc" "atmf${f_hhmmss}.nc" ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.sfcf${FH3}.nc" "sfcf${f_hhmmss}.nc" ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.atm.logf${FH3}.txt" "log.atm.f${f_hhmmss}" - ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.cubed_sphere_grid_atmf${FH3}.nc" "cubed_sphere_grid_atmf${f_hhmmss}.nc" - ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.cubed_sphere_grid_sfcf${FH3}.nc" "cubed_sphere_grid_sfcf${f_hhmmss}.nc" else ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.atmf${FH3}.nc" "atmf${FH3}.nc" ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.sfcf${FH3}.nc" "sfcf${FH3}.nc" ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.atm.logf${FH3}.txt" "log.atm.f${FH3}" - ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.cubed_sphere_grid_atmf${FH3}.nc" "cubed_sphere_grid_atmf${FH3}.nc" - ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.cubed_sphere_grid_sfcf${FH3}.nc" "cubed_sphere_grid_sfcf${FH3}.nc" + if [[ "${DO_JEDIATMVAR:-}" == "YES" ]]; then + ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.cubed_sphere_grid_atmf${FH3}.nc" "cubed_sphere_grid_atmf${FH3}.nc" + ${NLN} "${COMOUT_ATMOS_HISTORY}/${RUN}.t${cyc}z.cubed_sphere_grid_sfcf${FH3}.nc" "cubed_sphere_grid_sfcf${FH3}.nc" + fi fi if [[ "${WRITE_DOPOST}" == ".true." 
]]; then ${NLN} "${COMOUT_ATMOS_MASTER}/${RUN}.t${cyc}z.master.grb2f${FH3}" "GFSPRS.GrbF${FH2}" diff --git a/ush/parsing_model_configure_FV3.sh b/ush/parsing_model_configure_FV3.sh index 8f102fe298..7cf7bf8662 100755 --- a/ush/parsing_model_configure_FV3.sh +++ b/ush/parsing_model_configure_FV3.sh @@ -31,7 +31,11 @@ local WRITE_GROUP=${WRITE_GROUP:-1} local WRTTASK_PER_GROUP=${WRTTASK_PER_GROUP:-24} local ITASKS=1 local OUTPUT_HISTORY=${OUTPUT_HISTORY:-".true."} -local HISTORY_FILE_ON_NATIVE_GRID=".true." +if [[ "${DO_JEDIATMVAR:-}" == "YES" ]]; then + local HISTORY_FILE_ON_NATIVE_GRID=".true." +else + local HISTORY_FILE_ON_NATIVE_GRID=".false." +fi local WRITE_DOPOST=${WRITE_DOPOST:-".false."} local WRITE_NSFLIP=${WRITE_NSFLIP:-".false."} local NUM_FILES=${NUM_FILES:-2} diff --git a/workflow/applications/gefs.py b/workflow/applications/gefs.py index 1db3c51287..afb4072596 100644 --- a/workflow/applications/gefs.py +++ b/workflow/applications/gefs.py @@ -17,7 +17,7 @@ def _get_app_configs(self): """ Returns the config_files that are involved in gefs """ - configs = ['stage_ic', 'fcst', 'atmos_products', 'arch'] + configs = ['stage_ic', 'fcst', 'atmos_products', 'arch', 'cleanup'] if self.nens > 0: configs += ['efcs', 'atmos_ensstat'] @@ -82,6 +82,6 @@ def get_task_names(self): if self.do_extractvars: tasks += ['extractvars'] - tasks += ['arch'] + tasks += ['arch', 'cleanup'] return {f"{self.run}": tasks} diff --git a/workflow/hosts/azurepw.yaml b/workflow/hosts/azurepw.yaml index 2155c67dea..d59736e653 100644 --- a/workflow/hosts/azurepw.yaml +++ b/workflow/hosts/azurepw.yaml @@ -18,7 +18,7 @@ CHGRP_RSTPROD: 'YES' CHGRP_CMD: 'chgrp rstprod' # TODO: This is not yet supported. HPSSARCH: 'NO' HPSS_PROJECT: emc-global #TODO: See `ATARDIR` below. -BASE_CPLIC: '/bucket/global-workflow-shared-data/ICSDIR/prototype_ICs' +BASE_IC: '/bucket/global-workflow-shared-data/ICSDIR' LOCALARCH: 'NO' ATARDIR: '' # TODO: This will not yet work from AZURE. 
MAKE_NSSTBUFR: 'NO' diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py index 70a39cea5a..8a4f148f24 100644 --- a/workflow/rocoto/gefs_tasks.py +++ b/workflow/rocoto/gefs_tasks.py @@ -546,6 +546,9 @@ def arch(self): deps.append(rocoto.add_dependency(dep_dict)) dep_dict = {'type': 'metatask', 'name': 'wave_post_bndpnt_bull'} deps.append(rocoto.add_dependency(dep_dict)) + if self.app_config.do_extractvars: + dep_dict = {'type': 'metatask', 'name': 'extractvars'} + deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') resources = self.get_resource('arch') @@ -564,3 +567,27 @@ def arch(self): task = rocoto.create_task(task_dict) return task + + def cleanup(self): + deps = [] + dep_dict = {'type': 'task', 'name': 'arch'} + deps.append(rocoto.add_dependency(dep_dict)) + + dependencies = rocoto.create_dependency(dep=deps) + + resources = self.get_resource('cleanup') + task_name = 'cleanup' + task_dict = {'task_name': task_name, + 'resources': resources, + 'envars': self.envars, + 'cycledef': 'gefs', + 'dependency': dependencies, + 'command': f'{self.HOMEgfs}/jobs/rocoto/cleanup.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) + + return task