diff --git a/bld/build-namelist b/bld/build-namelist index 0c56fff055..5a58b8cc50 100755 --- a/bld/build-namelist +++ b/bld/build-namelist @@ -4103,6 +4103,7 @@ if ($dyn =~ /se/) { se_kmax_jet se_molecular_diff se_pgf_formulation + se_dribble_in_rsplit_loop ); my %opts; diff --git a/bld/namelist_files/namelist_defaults_cam.xml b/bld/namelist_files/namelist_defaults_cam.xml index 953631b9e2..06cf238a7f 100644 --- a/bld/namelist_files/namelist_defaults_cam.xml +++ b/bld/namelist_files/namelist_defaults_cam.xml @@ -320,16 +320,11 @@ atm/cam/topo/se/ne3pg3_gmted2010_modis_bedmachine_nc0540_Laplace1000_noleak_20230209.nc atm/cam/topo/se/ne5pg3_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw064_20170516.nc atm/cam/topo/se/ne16pg3_nc3000_Co120_Fi001_PF_nullRR_Nsw084_20171012.nc -atm/cam/topo/se/ne30pg3_gmted2010_modis_bedmachine_nc3000_Laplace0100_20230105.nc +atm/cam/topo/se/ne30pg3_gmted2010_modis_bedmachine_nc3000_Laplace0100_noleak_20240117.nc atm/cam/topo/se/ne60pg3_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171012.nc atm/cam/topo/se/ne120pg3_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171014.nc atm/cam/topo/se/ne240pg3_nc3000_Co008_Fi001_PF_nullRR_Nsw005_20171015.nc -atm/cam/topo/se/ne5pg4_nc3000_Co360_Fi001_MulG_PF_nullRR_Nsw060_20170707.nc -atm/cam/topo/se/ne30pg4_nc3000_Co060_Fi001_PF_nullRR_Nsw042_20171014.nc -atm/cam/topo/se/ne60pg4_nc3000_Co030_Fi001_PF_nullRR_Nsw021_20171018.nc -atm/cam/topo/se/ne120pg4_nc3000_Co015_Fi001_PF_nullRR_Nsw010_20171014.nc - atm/cam/topo/se/ne30x8_CONUS_nc3000_Co060_Fi001_MulG_PF_RR_Nsw042_c200428.nc atm/cam/topo/se/ne30x4_ARCTIC_nc3000_Co060_Fi001_MulG_PF_RR_Nsw042_c200428.nc atm/cam/topo/se/ne30x8_ARCTICGRIS_nc3000_Co060_Fi001_MulG_PF_RR_Nsw042_c200428.nc @@ -3129,6 +3124,10 @@ 1 3 + + 0 + 1 + 2 .true. @@ -3138,15 +3137,21 @@ 3 2 4 - 4 + 9 + 8 + 2 + 3 3 1 - 1 - 20 - 4 - 2 - 4 + 1 + 3 + 2 + 4 + 20 + 4 + 2 + 4 1 2 @@ -3160,9 +3165,11 @@ 1.9 -1 +6.e15 5.e15 -1 +6.e15 10.e15 -1 @@ -3170,12 +3177,14 @@ 1.25e5 1.0e6 1.0e6 + 1.0e6 0.0 1.0 -1 -1 + 7.5 -1 1 @@ -3190,17 +3199,17 @@ 1 3 - 5 - 3 - 5 - 3 + 5 + 2 + 4 + 2 10 7 3 - 4 - 6 + 2 + 4 3 -1 diff --git a/bld/namelist_files/namelist_definition.xml b/bld/namelist_files/namelist_definition.xml index 9d7c323b59..04f6772562 100644 --- a/bld/namelist_files/namelist_definition.xml +++ b/bld/namelist_files/namelist_definition.xml @@ -8255,6 +8255,20 @@ Default: Set by build-namelist. Default: Set by build-namelist. + + + 0: physics tendencies will be added every vertical remapping time-step (dt_phys/se_nsplit) + for se_ftype=0,2 + + 1: physics tendencies will be added every dynamics time-step (dt_phys/se_nsplit*se_rsplit) + for se_ftype=0,2 + + If se_ftype=1 then se_dribble_in_rsplit_loop has no effect since physics tendencies are added as an adjustment + + Default: Set by build-namelist. 
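For reference, a minimal sketch of how the new variable could be set from a case directory (hedged example: only the variable name and its 0/1 meanings come from the definition above; the user_nl_cam override mechanism is the standard CAM way to change namelist values, and the surrounding comment is illustrative):

    ! user_nl_cam (illustrative)
    se_dribble_in_rsplit_loop = 1   ! add se_ftype=0,2 tendencies every dynamics time-step
                                    ! instead of every vertical remapping time-step

The actual default is chosen by build-namelist, as noted above, and the setting has no effect when se_ftype=1.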
+ + diff --git a/cime_config/buildlib b/cime_config/buildlib index 73db5db3dd..172953ea40 100755 --- a/cime_config/buildlib +++ b/cime_config/buildlib @@ -6,7 +6,7 @@ create the cam library # pylint: disable=multiple-imports, wrong-import-position, wildcard-import # pylint: disable=unused-wildcard-import, bad-whitespace, too-many-locals # pylint: disable=invalid-name -import sys, os, filecmp, shutil, imp +import sys, os, filecmp, shutil _CIMEROOT = os.environ.get("CIMEROOT") @@ -19,6 +19,7 @@ sys.path.append(_LIBDIR) from standard_script_setup import * from CIME.case import Case from CIME.utils import run_sub_or_cmd, expect, run_cmd +from CIME.utils import import_from_file from CIME.buildlib import parse_input from CIME.build import get_standard_makefile_args @@ -41,10 +42,10 @@ def _build_cam(caseroot, libroot, bldroot): cmd = os.path.join(os.path.join(srcroot, "cime_config", "buildcpp")) logger.info(" ...calling cam buildcpp to set build time options") try: - mod = imp.load_source("buildcpp", cmd) - cam_cppdefs = mod.buildcpp(case) + buildcpp = import_from_file("buildcpp", cmd) + cam_cppdefs = buildcpp.buildcpp(case) except: - raise + raise RuntimeError("CAM's 'buildcpp' script failed to run properly.") with Case(caseroot) as case: diff --git a/cime_config/buildnml b/cime_config/buildnml index 97563bf59a..a077e13fcd 100755 --- a/cime_config/buildnml +++ b/cime_config/buildnml @@ -4,7 +4,7 @@ CAM namelist creator """ # pylint: disable=multiple-imports -import sys, os, shutil, filecmp, imp +import sys, os, shutil, filecmp _CIMEROOT = os.environ.get("CIMEROOT") if _CIMEROOT is None: @@ -19,7 +19,7 @@ from standard_script_setup import * from CIME.XML.standard_module_setup import * from CIME.buildnml import create_namelist_infile, parse_input from CIME.case import Case -from CIME.utils import expect, run_cmd +from CIME.utils import expect, run_cmd, import_from_file logger = logging.getLogger(__name__) @@ -75,10 +75,10 @@ def buildnml(case, caseroot, compname): cmd = os.path.join(os.path.join(srcroot,"cime_config","buildcpp")) logger.info(" ...calling cam buildcpp to set build time options") try: - mod = imp.load_source("buildcpp", cmd) - mod.buildcpp(case) + buildcpp = import_from_file("buildcpp", cmd) + _ = buildcpp.buildcpp(case) except: - raise + raise RuntimeError("CAM's 'buildcpp' script failed to run properly.") # Verify that we have a config_cache file (generated by the call to buildcpp) expect(os.path.isfile(filename), diff --git a/cime_config/testdefs/testlist_cam.xml b/cime_config/testdefs/testlist_cam.xml index 0061d5c9ce..7cd5767648 100644 --- a/cime_config/testdefs/testlist_cam.xml +++ b/cime_config/testdefs/testlist_cam.xml @@ -984,7 +984,7 @@ - + diff --git a/doc/ChangeLog b/doc/ChangeLog index 676bcae485..72e73c6a79 100644 --- a/doc/ChangeLog +++ b/doc/ChangeLog @@ -1,4 +1,240 @@ =============================================================== +Tag name: cam6_3_152 +Originator(s): pel +Date: Jan 30, 2024 +One-line Summary: "science optimization" for SE-CSLAM and stabilize WACCM +Github PR URL: https://github.com/ESCOMP/CAM/pull/968 + +Increase computational throughput of the SE-CSLAM dynamical core by: + + - Reducing se_nsplit to 2 (from 3) in FMT: CSLAM now runs with ~30% longer time-step compared to baseline + - No double advection of thermodynamic active tracers when using CSLAM. Overwrite GLL values of Q, CLDLIQ, + etc. 
every vertical remapping time-step with CSLAM values (interpolated from physics grid to GLL grid) + - Vertical sponge layer diffusion in physics for WACCM and WACCM-x + - No increased hyperdiffusion in sponge for FLT and FMT + +Provide stable configuration for WACCM with spectral-elements (ne30-pg3 and ne16pg3): namelist changes + +Resolve qneg issue 864 +Resolve issue 552 (read in topo file on GLL grid if available) +Resolve issue 951 (remove namelist defaults for pg4 grids) +Resolve issue 970 (remove deprecated 'imp' module from buildnml and buildlib) + +Describe any changes made to build system: + + - added namelist variable + - modified 'buildnml' and 'buildlib' python scripts + to remove deprecated 'imp' python module. + +Describe any changes made to the namelist: + + - changed bnd_topo file for ne30-pg3 for reading in topography + on the GLL grid (if available) (issue #552) + - remove namelist defaults for pg4 topo files (issue #951) + - added namelist se_dribble_in_rsplit_loop to stabilize ne16pg3 WACCM + - change se_nsplit, se_rsplit and se_hypervis_subcycle for efficiency/stability + - se_hypervis_subcycle_sponge for efficiency/stability + - change se_nu, se_nu_div and se_sponge_del4_nu_div_fac to stabilize + ne16pg3 WACCM + + +List any changes to the defaults for the boundary datasets: + - new default topo file for ne30pg3 + +Describe any substantial timing or memory changes: + + - approximately 30% speed-up of entire CAM model using COMPSET FLTHIST or FMTHIST + +Code reviewed by: nusbaume, fvitt + +List all existing files that have been modified, and describe the changes: + +M bld/build-namelist + - add namelist variable + +M bld/namelist_files/namelist_defaults_cam.xml + - change defaults (see above) + +M bld/namelist_files/namelist_definition.xml + - add namelist variable + +M cime_config/buildlib +M cime_config/buildnml + - remove deprecated "imp" python module + +M cime_config/testdefs/testlist_cam.xml + - replace ne5pg4 FADIAB test with ne5pg3 test + +M src/dynamics/se/dp_coupling.F90 +M src/dynamics/se/dycore/control_mod.F90 +M src/dynamics/se/dycore/fvm_control_volume_mod.F90 +M src/dynamics/se/dycore/fvm_mapping.F90 +M src/dynamics/se/dycore/fvm_mod.F90 +M src/dynamics/se/dycore/fvm_reconstruction_mod.F90 +M src/dynamics/se/dycore/global_norms_mod.F90 +M src/dynamics/se/dycore/prim_advance_mod.F90 +M src/dynamics/se/dycore/prim_advection_mod.F90 +M src/dynamics/se/dycore/prim_driver_mod.F90 +M src/dynamics/se/dyn_comp.F90 +M src/dynamics/se/dyn_grid.F90 +M src/dynamics/se/dycore_budget.F90 + - implement SE dycore improvements + +M src/dynamics/se/gravity_waves_sources.F90 + - fix model top pressure bug + +M src/physics/cam/vertical_diffusion.F90 + - add vertical sponge layer diffusion + +If there were any failures reported from running test_driver.sh on any test +platform, and checkin with these failures has been OK'd by the gatekeeper, +then copy the lines from the td.*.status files for the failed tests to the +appropriate machine below. All failed tests must be justified. 
+ +derecho/intel/aux_cam: + ERP_Ln9.C96_C96_mg17.F2000climo.derecho_intel.cam-outfrq9s_mg3 (Overall: PEND) + ERP_Ln9.f09_f09_mg17.FCSD_HCO.derecho_intel.cam-outfrq9s (Overall: FAIL) + SMS_Ld1.f09_f09_mg17.FCHIST_GC.derecho_intel.cam-outfrq1d (Overall: FAIL) + - pre-existing failures + + ERC_D_Ln9.ne16_ne16_mg17.FADIAB.derecho_intel.cam-terminator (Overall: DIFF) + ERC_D_Ln9.ne16_ne16_mg17.QPC5HIST.derecho_intel.cam-outfrq3s_usecase (Overall: DIFF) + ERC_D_Ln9_P144x1.ne16pg3_ne16pg3_mg17.QPC6HIST.derecho_intel.cam-outfrq3s_ttrac_usecase (Overall: DIFF) + ERP_D_Ln9.ne30pg3_ne30pg3_mg17.F2000dev.derecho_intel.cam-outfrq9s (Overall: DIFF) + ERP_D_Ln9.ne30pg3_ne30pg3_mg17.FLTHIST.derecho_intel.cam-outfrq9s (Overall: DIFF) + ERP_D_Ln9.ne30pg3_ne30pg3_mg17.FLTHIST.derecho_intel.cam-outfrq9s_rrtmgp (Overall: DIFF) + ERP_Ln9.ne30_ne30_mg17.FCnudged.derecho_intel.cam-outfrq9s (Overall: DIFF) + ERP_Ln9.ne30pg3_ne30pg3_mg17.FW2000climo.derecho_intel.cam-outfrq9s (Overall: DIFF) + ERS_Ln9.ne0TESTONLYne5x4_ne0TESTONLYne5x4_mg37.FADIAB.derecho_intel.cam-outfrq3s_refined (Overall: DIFF) + SMS_D_Ln9.ne16_ne16_mg17.FX2000.derecho_intel.cam-outfrq9s (Overall: DIFF) + SMS_D_Ln9.ne16_ne16_mg17.QPX2000.derecho_intel.cam-outfrq9s (Overall: DIFF) + SMS_D_Ln9.ne30pg3_ne30pg3_mg17.FMTHIST.derecho_intel.cam-outfrq9s (Overall: DIFF) + SMS_D_Ln9_P1280x1.ne0ARCTICne30x4_ne0ARCTICne30x4_mt12.FHIST.derecho_intel.cam-outfrq9s (Overall: DIFF) + SMS_D_Ln9_P1280x1.ne0CONUSne30x8_ne0CONUSne30x8_mt12.FCHIST.derecho_intel.cam-outfrq9s (Overall: DIFF) + SMS_D_Ln9_P1280x1.ne30pg3_ne30pg3_mg17.FCLTHIST.derecho_intel.cam-outfrq9s (Overall: DIFF) + SMS_D_Ln9.T42_T42.FSCAM.derecho_intel.cam-outfrq9s (Overall: DIFF) + SMS_Ld1.ne30pg3_ne30pg3_mg17.FC2010climo.derecho_intel.cam-outfrq1d (Overall: DIFF) + SMS_Ln9.ne30pg3_ne30pg3_mg17.FW2000climo.derecho_intel.cam-outfrq9s_rrtmgp (Overall: DIFF) + - expected answer changes + +izumi/nag/aux_cam: + DAE.f45_f45_mg37.FHS94.izumi_nag.cam-dae (Overall: FAIL) + - pre-existing failure + + ERC_D_Ln9.ne16_ne16_mg17.QPC4.izumi_nag.cam-outfrq3s_usecase (Overall: DIFF) + ERC_D_Ln9.ne16pg3_ne16pg3_mg17.QPC4.izumi_nag.cam-outfrq3s_usecase (Overall: DIFF) + ERC_D_Ln9.ne5_ne5_mg37.QPC5.izumi_nag.cam-outfrq3s_ttrac (Overall: DIFF) + ERI_D_Ln18.ne5_ne5_mg37.FADIAB.izumi_nag.cam-outfrq3s_bwic (Overall: DIFF) + ERI_D_Ln18.ne5pg3_ne5pg3_mg37.FADIAB.izumi_nag.cam-outfrq3s_bwic (Overall: DIFF) + ERP_Ln9.ne5pg3_ne5pg3_mg37.QPC6.izumi_nag.cam-outfrq9s_clubbmf (Overall: DIFF) + ERS_Ln27.ne5pg3_ne5pg3_mg37.FKESSLER.izumi_nag.cam-outfrq9s (Overall: DIFF) + ERS_Ln9.ne5_ne5_mg37.FADIAB.izumi_nag.cam-outfrq9s (Overall: DIFF) + PEM_D_Ln9.ne5_ne5_mg37.FADIAB.izumi_nag.cam-outfrq3s (Overall: DIFF) + PLB_D_Ln9.ne5_ne5_mg37.QPC5.izumi_nag.cam-ttrac_loadbal0 (Overall: DIFF) + PLB_D_Ln9.ne5_ne5_mg37.QPC5.izumi_nag.cam-ttrac_loadbal1 (Overall: DIFF) + PLB_D_Ln9.ne5_ne5_mg37.QPC5.izumi_nag.cam-ttrac_loadbal3 (Overall: DIFF) + SMS_D_Ln3.ne5pg3_ne5pg3_mg37.QPX2000.izumi_nag.cam-outfrq3s (Overall: DIFF) + SMS_D_Ln6.ne5_ne5_mg37.QPWmaC4.izumi_nag.cam-outfrq3s_physgrid_tem (Overall: DIFF) + SMS_D_Ln9_P1x1.ne5_ne5_mg37.FADIAB.izumi_nag.cam-outfrq3s (Overall: DIFF) + - expected answer changes + +izumi/gnu/aux_cam: + + ERC_D_Ln9.ne5_ne5_mg37.QPC4.izumi_gnu.cam-outfrq3s_nudging_ne5_L26 (Overall: DIFF) + ERC_D_Ln9.ne5_ne5_mg37.QPC5.izumi_gnu.cam-outfrq3s_ba (Overall: DIFF) + ERC_D_Ln9.ne5pg2_ne5pg2_mg37.FADIAB.izumi_gnu.cam-outfrq3s (Overall: DIFF) + ERC_D_Ln9.ne5pg3_ne5pg3_mg37.FADIAB.izumi_gnu.cam-outfrq3s (Overall: DIFF) + 
ERP_D_Ln9.ne3pg3_ne3pg3_mg37.QPC6.izumi_gnu.cam-outfrq9s_rrtmgp (Overall: DIFF) + ERP_Ln9.ne5_ne5_mg37.FHS94.izumi_gnu.cam-outfrq9s (Overall: DIFF) + ERP_Ln9.ne5_ne5_mg37.QPC5.izumi_gnu.cam-outfrq9s (Overall: DIFF) + PEM_D_Ln9.ne5pg3_ne5pg3_mg37.FADIAB.izumi_gnu.cam-outfrq3s (Overall: DIFF) + PLB_D_Ln9.ne5pg3_ne5pg3_mg37.QPC5.izumi_gnu.cam-ttrac_loadbal0 (Overall: DIFF) + PLB_D_Ln9.ne5pg3_ne5pg3_mg37.QPC5.izumi_gnu.cam-ttrac_loadbal1 (Overall: DIFF) + PLB_D_Ln9.ne5pg3_ne5pg3_mg37.QPC5.izumi_gnu.cam-ttrac_loadbal3 (Overall: DIFF) + SMS_D_Ln9.ne5pg3_ne5pg3_mg37.QPC5.izumi_gnu.cam-outfrq3s_ttrac (Overall: DIFF) + - expected answer changes + +Summarize any changes to answers: +All spectral-element tests fail due to baseline differences. + + The SE-CSLAM tests fail because of no double-advection + change as well as default hyperviscosity change + The SE (not CSLAM) tests fail because default + hyperviscosity has changed + All WACCM tests fail due to added sponge layer + vertical diffusion + +=============================================================== +=============================================================== + +Tag name: cam6_3_151 +Originator(s): eaton +Date: Thu 21 Mar 2024 +One-line Summary: Bugfix to allow multiple monthly avg history files +Github PR URL: https://github.com/ESCOMP/CAM/pull/1003 + +Purpose of changes (include the issue number and title text for each relevant GitHub issue): + +#1000 - Output of more than 1 monthly average history file is broken. + +. resolves #1000 + +Describe any changes made to build system: none + +Describe any changes made to the namelist: none + +List any changes to the defaults for the boundary datasets: none + +Describe any substantial timing or memory changes: none + +Code reviewed by: cacraigucar, peverwhee + +List all files eliminated: none + +List all files added and what they do: none + +List all existing files that have been modified, and describe the changes: + +src/control/cam_history.F90 +. subroutine wshist + - add new local variables to store the year, month, and day components of + the time interval midpoint date. + +If there were any failures reported from running test_driver.sh on any test +platform, and checkin with these failures has been OK'd by the gatekeeper, +then copy the lines from the td.*.status files for the failed tests to the +appropriate machine below. All failed tests must be justified. + +All tests have a MEMCOMP failure which we are ignoring. +Several tests have a TPUTCOMP failure which we are also ignoring. + +derecho/intel/aux_cam: + ERP_Ln9.C96_C96_mg17.F2000climo.derecho_intel.cam-outfrq9s_mg3 (Overall: FAIL) details: + ERP_Ln9.f09_f09_mg17.FCSD_HCO.derecho_intel.cam-outfrq9s (Overall: FAIL) details: + SMS_Ld1.f09_f09_mg17.FCHIST_GC.derecho_intel.cam-outfrq1d (Overall: FAIL) details: + SMS_Lh12.f09_f09_mg17.FCSD_HCO.derecho_intel.cam-outfrq3h (Overall: DIFF) details: + - pre-existing failures + +izumi/nag/aux_cam: + DAE.f45_f45_mg37.FHS94.izumi_nag.cam-dae (Overall: FAIL) details: + - pre-existing failure + +izumi/gnu/aux_cam: + All PASS. + +TESTING NOTE: None of our regression tests use multiple monthly output +files. The fix was tested in a low res FHS94 compset that specified +monthly output for h0, h1, h2, and h3. The 'T' field was output in each +file. A 1 month test was run and all files had identical output. This is +the same configuration that I used to debug the problem. 
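For clarity, a hedged sketch of the sort of user_nl_cam settings exercised by the test described above (monthly-average output on h0-h3 with 'T' in each file; the exact namelist used for the debug run is not recorded here, so these lines are illustrative only):

    nhtfrq = 0, 0, 0, 0      ! 0 => monthly-average output for h0, h1, h2, h3
    fincl1 = 'T'
    fincl2 = 'T'
    fincl3 = 'T'
    fincl4 = 'T'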
+ +CAM tag used for the baseline comparison tests if different than previous +tag: + +Summarize any changes to answers: BFB + +=============================================================== +=============================================================== Tag name: cam6_3_150 Originator(s): megandevlan, peverwhee @@ -194,6 +430,7 @@ izumi/gnu/aux_cam: - failures due to So_ugustOut in cpl.hi and answer changes for cam_dev tests =============================================================== +=============================================================== Tag name: cam6_3_149 Originator(s): cacraig, fischer, jedwards diff --git a/src/control/cam_history.F90 b/src/control/cam_history.F90 index 28e1d848f2..a0b35e5a1d 100644 --- a/src/control/cam_history.F90 +++ b/src/control/cam_history.F90 @@ -5516,6 +5516,7 @@ subroutine wshist (rgnht_in) #endif integer :: yr, mon, day ! year, month, and day components of a date + integer :: yr_mid, mon_mid, day_mid ! year, month, and day components of midpoint date integer :: nstep ! current timestep number integer :: ncdate(maxsplitfiles) ! current (or midpoint) date in integer format [yyyymmdd] integer :: ncsec(maxsplitfiles) ! current (or midpoint) time of day [seconds] @@ -5529,7 +5530,6 @@ subroutine wshist (rgnht_in) logical :: prev ! Label file with previous date rather than current logical :: duplicate ! Flag for duplicate file name integer :: ierr - integer :: ncsec_temp #if ( defined BFB_CAM_SCAM_IOP ) integer :: tsec ! day component of current time integer :: dtime ! seconds component of current time @@ -5583,6 +5583,7 @@ subroutine wshist (rgnht_in) end if end if end if + time = ndcur + nscur/86400._r8 if (is_initfile(file_index=t)) then tdata = time ! Inithist file is always instantanious data @@ -5590,10 +5591,12 @@ subroutine wshist (rgnht_in) tdata(1) = beg_time(t) tdata(2) = time end if + ! Set midpoint date/datesec for accumulated file - call set_date_from_time_float((tdata(1) + tdata(2)) / 2._r8, yr, mon, day, ncsec_temp) - ncsec(accumulated_file_index) = ncsec_temp - ncdate(accumulated_file_index) = yr*10000 + mon*100 + day + call set_date_from_time_float((tdata(1) + tdata(2)) / 2._r8, & + yr_mid, mon_mid, day_mid, ncsec(accumulated_file_index) ) + ncdate(accumulated_file_index) = yr_mid*10000 + mon_mid*100 + day_mid + if (hstwr(t) .or. (restart .and. rgnht(t))) then if(masterproc) then if(is_initfile(file_index=t)) then @@ -5609,7 +5612,7 @@ subroutine wshist (rgnht_in) if (f == instantaneous_file_index) then write(iulog,200) nfils(t),'instantaneous',t,yr,mon,day,ncsec(f) else - write(iulog,200) nfils(t),'accumulated',t,yr,mon,day,ncsec(f) + write(iulog,200) nfils(t),'accumulated',t,yr_mid,mon_mid,day_mid,ncsec(f) end if 200 format('WSHIST: writing time sample ',i3,' to ', a, ' h-file ', & i1,' DATE=',i4.4,'/',i2.2,'/',i2.2,' NCSEC=',i6) diff --git a/src/dynamics/se/dp_coupling.F90 b/src/dynamics/se/dp_coupling.F90 index 61b7fc54e9..beba3d3611 100644 --- a/src/dynamics/se/dp_coupling.F90 +++ b/src/dynamics/se/dp_coupling.F90 @@ -312,7 +312,7 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) use fvm_mapping, only: phys2dyn_forcings_fvm use test_fvm_mapping, only: test_mapping_overwrite_tendencies use test_fvm_mapping, only: test_mapping_output_mapped_tendencies - + use dimensions_mod, only: use_cslam ! 
arguments type(physics_state), intent(inout), dimension(begchunk:endchunk) :: phys_state type(physics_tend), intent(inout), dimension(begchunk:endchunk) :: phys_tend @@ -427,8 +427,9 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) !JMD hybrid = config_thread_region(par,'horizontal') hybrid = config_thread_region(par,'serial') call get_loop_ranges(hybrid,ibeg=nets,iend=nete) - - ! high-order mapping of ft and fm (and fq if no cslam) using fvm technology + ! + ! high-order mapping of ft and fm using fvm technology + ! call t_startf('phys2dyn') call phys2dyn_forcings_fvm(elem, dyn_in%fvm, hybrid,nets,nete,ntrac==0, tl_f, tl_qdp) call t_stopf('phys2dyn') @@ -474,19 +475,20 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) dyn_in%elem(ie)%derived%FT(:,:,k) = & dyn_in%elem(ie)%derived%FT(:,:,k) * & dyn_in%elem(ie)%spheremp(:,:) - do m = 1, qsize - dyn_in%elem(ie)%derived%FQ(:,:,k,m) = & - dyn_in%elem(ie)%derived%FQ(:,:,k,m) * & - dyn_in%elem(ie)%spheremp(:,:) - end do end do end if kptr = 0 call edgeVpack(edgebuf, dyn_in%elem(ie)%derived%FM(:,:,:,:), 2*nlev, kptr, ie) kptr = kptr + 2*nlev call edgeVpack(edgebuf, dyn_in%elem(ie)%derived%FT(:,:,:), nlev, kptr, ie) - kptr = kptr + nlev - call edgeVpack(edgebuf, dyn_in%elem(ie)%derived%FQ(:,:,:,:), nlev*qsize, kptr, ie) + if (.not. use_cslam) then + ! + ! if using CSLAM qdp is being overwritten with CSLAM values in the dynamics + ! so no need to do boundary exchange of tracer tendency on GLL grid here + ! + kptr = kptr + nlev + call edgeVpack(edgebuf, dyn_in%elem(ie)%derived%FQ(:,:,:,:), nlev*qsize, kptr, ie) + end if end do if (iam < par%nprocs) then @@ -499,7 +501,9 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) kptr = kptr + 2*nlev call edgeVunpack(edgebuf, dyn_in%elem(ie)%derived%FT(:,:,:), nlev, kptr, ie) kptr = kptr + nlev - call edgeVunpack(edgebuf, dyn_in%elem(ie)%derived%FQ(:,:,:,:), nlev*qsize, kptr, ie) + if (.not. use_cslam) then + call edgeVunpack(edgebuf, dyn_in%elem(ie)%derived%FQ(:,:,:,:), nlev*qsize, kptr, ie) + end if if (fv_nphys > 0) then do k = 1, nlev dyn_in%elem(ie)%derived%FM(:,:,1,k) = & @@ -511,11 +515,6 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in, tl_f, tl_qdp) dyn_in%elem(ie)%derived%FT(:,:,k) = & dyn_in%elem(ie)%derived%FT(:,:,k) * & dyn_in%elem(ie)%rspheremp(:,:) - do m = 1, qsize - dyn_in%elem(ie)%derived%FQ(:,:,k,m) = & - dyn_in%elem(ie)%derived%FQ(:,:,k,m) * & - dyn_in%elem(ie)%rspheremp(:,:) - end do end do end if end do @@ -691,23 +690,21 @@ subroutine derived_phys_dry(phys_state, phys_tend, pbuf2d) end if end do + ! Ensure tracers are all positive + call qneg3('D_P_COUPLING',lchnk ,ncol ,pcols ,pver , & + 1, pcnst, qmin ,phys_state(lchnk)%q) + ! Compute initial geopotential heights - based on full pressure call geopotential_t(phys_state(lchnk)%lnpint, phys_state(lchnk)%lnpmid , phys_state(lchnk)%pint, & phys_state(lchnk)%pmid , phys_state(lchnk)%pdel , phys_state(lchnk)%rpdel , & phys_state(lchnk)%t , phys_state(lchnk)%q(:,:,:), rairv(:,:,lchnk), gravit, zvirv , & phys_state(lchnk)%zi , phys_state(lchnk)%zm , ncol) - ! Compute initial dry static energy, include surface geopotential call update_dry_static_energy_run(pver, gravit, phys_state(lchnk)%t(1:ncol,:), & phys_state(lchnk)%zm(1:ncol,:), & phys_state(lchnk)%phis(1:ncol), & phys_state(lchnk)%s(1:ncol,:), & cpairv(1:ncol,:,lchnk), errflg, errmsg) - - ! 
Ensure tracers are all positive - call qneg3('D_P_COUPLING',lchnk ,ncol ,pcols ,pver , & - 1, pcnst, qmin ,phys_state(lchnk)%q) - ! Compute energy and water integrals of input state pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) call check_energy_timestep_init(phys_state(lchnk), phys_tend(lchnk), pbuf_chnk) diff --git a/src/dynamics/se/dycore/control_mod.F90 b/src/dynamics/se/dycore/control_mod.F90 index 053f478c6a..6d92e66d7d 100644 --- a/src/dynamics/se/dycore/control_mod.F90 +++ b/src/dynamics/se/dycore/control_mod.F90 @@ -16,6 +16,7 @@ module control_mod integer, public :: rk_stage_user = 0 ! number of RK stages to use integer, public :: ftype = 2 ! Forcing Type integer, public :: ftype_conserve = 1 !conserve momentum (dp*u) + integer, public :: dribble_in_rsplit_loop = 0 integer, public :: statediag_numtrac = 3 integer, public :: qsplit = 1 ! ratio of dynamics tsteps to tracer tsteps diff --git a/src/dynamics/se/dycore/element_mod.F90 b/src/dynamics/se/dycore/element_mod.F90 index 6ba2b36e02..2e758727db 100644 --- a/src/dynamics/se/dycore/element_mod.F90 +++ b/src/dynamics/se/dycore/element_mod.F90 @@ -25,9 +25,8 @@ module element_mod real (kind=r8) :: T (np,np,nlev,timelevels) ! temperature real (kind=r8) :: dp3d (np,np,nlev,timelevels) ! dry delta p on levels real (kind=r8) :: psdry (np,np) ! dry surface pressure - real (kind=r8) :: phis (np,np) ! surface geopotential (prescribed) - real (kind=r8) :: Qdp (np,np,nlev,qsize_d,2) ! Tracer mass - + real (kind=r8) :: phis (np,np) ! surface geopotential (prescribed) + real (kind=r8), allocatable :: Qdp(:,:,:,:,:) ! Tracer mass end type elem_state_t !___________________________________________________________________ @@ -43,20 +42,16 @@ module element_mod real (kind=r8) :: phi(np,np,nlev) ! geopotential real (kind=r8) :: omega(np,np,nlev) ! vertical velocity - ! semi-implicit diagnostics: computed in explict-component, reused in Helmholtz-component. - real (kind=r8) :: zeta(np,np,nlev) ! relative vorticity - real (kind=r8) :: div(np,np,nlev,timelevels) ! divergence - ! tracer advection fields used for consistency and limiters real (kind=r8) :: dp(np,np,nlev) ! for dp_tracers at physics timestep - real (kind=r8) :: divdp(np,np,nlev) ! divergence of dp - real (kind=r8) :: divdp_proj(np,np,nlev) ! DSSed divdp + real (kind=r8), allocatable :: divdp(:,:,:) ! divergence of dp + real (kind=r8), allocatable :: divdp_proj(:,:,:) ! DSSed divdp real (kind=r8) :: mass(MAX(qsize_d,ntrac_d)+9) ! total tracer mass for diagnostics ! forcing terms for CAM - real (kind=r8) :: FQ(np,np,nlev,qsize_d) ! tracer forcing + real (kind=r8), allocatable :: FQ(:,:,:,:) ! tracer forcing real (kind=r8) :: FM(np,np,2,nlev) ! momentum forcing - real (kind=r8) :: FDP(np,np,nlev) ! save full updated dp right after physics + real (kind=r8), allocatable :: FDP(:,:,:) ! save full updated dp right after physics real (kind=r8) :: FT(np,np,nlev) ! temperature forcing real (kind=r8) :: etadot_prescribed(np,np,nlevp) ! prescribed vertical tendency real (kind=r8) :: u_met(np,np,nlev) ! zonal component of prescribed meteorology winds diff --git a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 index c1b3c6fc15..e3208c86cd 100644 --- a/src/dynamics/se/dycore/fvm_control_volume_mod.F90 +++ b/src/dynamics/se/dycore/fvm_control_volume_mod.F90 @@ -128,7 +128,6 @@ module fvm_control_volume_mod ! !****************************************** ! 
- real (kind=r8) , allocatable :: phis_physgrid(:,:) real (kind=r8) , allocatable :: vtx_cart_physgrid(:,:,:,:) real (kind=r8) , allocatable :: flux_orient_physgrid(:,:,:) integer , allocatable :: ifct_physgrid(:,:) @@ -280,7 +279,6 @@ subroutine allocate_physgrid_vars(fvm,par) end if do ie=1,nelemd - allocate(fvm(ie)%phis_physgrid (fv_nphys,fv_nphys)) allocate(fvm(ie)%vtx_cart_physgrid (4,2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) allocate(fvm(ie)%flux_orient_physgrid (2,1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) allocate(fvm(ie)%ifct_physgrid (1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys)) diff --git a/src/dynamics/se/dycore/fvm_mapping.F90 b/src/dynamics/se/dycore/fvm_mapping.F90 index f52d961be5..0f090ebe9e 100644 --- a/src/dynamics/se/dycore/fvm_mapping.F90 +++ b/src/dynamics/se/dycore/fvm_mapping.F90 @@ -18,13 +18,14 @@ module fvm_mapping use dimensions_mod, only: irecons_tracer use element_mod, only: element_t use fvm_control_volume_mod, only: fvm_struct - use perf_mod, only: t_startf, t_stopf - + use perf_mod, only: t_startf, t_stopf + use cam_abortutils, only: endrun + use cam_logfile, only: iulog implicit none private public :: phys2dyn_forcings_fvm, dyn2phys, dyn2phys_vector, dyn2phys_all_vars,dyn2fvm_mass_vars - public :: phys2dyn,fvm2dyn,dyn2fvm + public :: phys2dyn,fvm2dyn,dyn2fvm,cslam2gll save integer :: save_max_overlap real(kind=r8), allocatable, dimension(:,:,:,:,:) :: save_air_mass_overlap @@ -48,7 +49,6 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ use dimensions_mod, only: np, nc,nlev use dimensions_mod, only: fv_nphys, nhc_phys,ntrac,nhc,ksponge_end, nu_scale_top use hybrid_mod, only: hybrid_t - use cam_abortutils, only: endrun use air_composition, only: thermodynamic_active_species_num, thermodynamic_active_species_idx type (element_t), intent(inout):: elem(:) type(fvm_struct), intent(inout):: fvm(:) @@ -58,8 +58,7 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ integer, intent(in) :: nets, nete, tl_f, tl_qdp integer :: ie,i,j,k,m_cnst,nq - real (kind=r8), dimension(:,:,:,:,:) , allocatable :: fld_phys, fld_gll, fld_fvm - real (kind=r8), allocatable, dimension(:,:,:,:,:) :: qgll + real (kind=r8), dimension(:,:,:,:,:) , allocatable :: fld_phys, fld_gll real (kind=r8) :: element_ave ! ! for tensor product Lagrange interpolation @@ -67,13 +66,7 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ integer :: nflds logical, allocatable :: llimiter(:) - allocate(qgll(np,np,nlev,thermodynamic_active_species_num,nets:nete)) - - do ie=nets,nete - do nq=1,thermodynamic_active_species_num - qgll(:,:,:,nq,ie) = elem(ie)%state%Qdp(:,:,:,nq,tl_qdp)/elem(ie)%state%dp3d(:,:,:,tl_f) - end do - end do + integer :: ierr if (no_cslam) then call endrun("phys2dyn_forcings_fvm: no cslam case: NOT SUPPORTED") @@ -87,9 +80,21 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! 
call t_startf('p2d-pg2:copying') nflds = 4+ntrac - allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)) - allocate(fld_gll(np,np,nlev,3,nets:nete)) - allocate(llimiter(nflds)) + allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete), stat=ierr) + if( ierr /= 0 ) then + write(iulog,*) 'phys2dyn_forcings_fvm: fld_phys allocation error = ',ierr + call endrun('phys2dyn_forcings_fvm: failed to allocate fld_phys array') + end if + allocate(fld_gll(np,np,nlev,3,nets:nete), stat=ierr) + if( ierr /= 0 ) then + write(iulog,*) 'phys2dyn_forcings_fvm: fld_gll allocation error = ',ierr + call endrun('phys2dyn_forcings_fvm: failed to allocate fld_gll array') + end if + allocate(llimiter(3), stat=ierr) + if( ierr /= 0 ) then + write(iulog,*) 'phys2dyn_forcings_fvm: llimiter allocation error = ',ierr + call endrun('phys2dyn_forcings_fvm: failed to allocate llimiter array') + end if fld_phys = -9.99E99_r8!xxx necessary? llimiter = .false. @@ -113,7 +118,8 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ ! ! do mapping of fu,fv,ft ! - call phys2dyn(hybrid,elem,fld_phys(:,:,:,1:3,:),fld_gll(:,:,:,1:3,:),nets,nete,nlev,3,fvm,llimiter(1:3),2,.true.) + call phys2dyn(hybrid,elem,fld_phys(:,:,:,1:3,:),fld_gll,nets,nete,nlev,3,fvm,llimiter, & + istart_vector=2,halo_filled=.true.) do ie=nets,nete elem(ie)%derived%fT(:,:,:) = fld_gll(:,:,:,1,ie) elem(ie)%derived%fM(:,:,1,:) = fld_gll(:,:,:,2,ie) @@ -134,38 +140,7 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ end do end do call t_stopf('p2d-pg2:phys2fvm') - - ! - ! overwrite SE Q with cslam Q - ! - nflds = thermodynamic_active_species_num - allocate(fld_gll(np,np,nlev,nflds,nets:nete)) - allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete)) - do ie=nets,nete - ! - ! compute cslam updated Q value - do m_cnst=1,thermodynamic_active_species_num - fld_fvm(1:nc,1:nc,:,m_cnst,ie) = fvm(ie)%c(1:nc,1:nc,:,thermodynamic_active_species_idx(m_cnst))+& - fvm(ie)%fc(1:nc,1:nc,:,thermodynamic_active_species_idx(m_cnst))/fvm(ie)%dp_fvm(1:nc,1:nc,:) - enddo - end do - call t_startf('p2d-pg2:fvm2dyn') - llimiter(1:nflds) = .false. - call fvm2dyn(fld_fvm,fld_gll(:,:,:,1:nflds,:),hybrid,nets,nete,nlev,nflds,fvm,llimiter(1:nflds)) - call t_stopf('p2d-pg2:fvm2dyn') - ! - ! fld_gll now holds q cslam value on gll grid - ! - ! convert fld_gll to increment (q_new-q_old) - ! - do ie=nets,nete - do m_cnst=1,thermodynamic_active_species_num - elem(ie)%derived%fq(:,:,:,m_cnst) =& - fld_gll(:,:,:,m_cnst,ie)-qgll(:,:,:,m_cnst,ie) - end do - end do - deallocate(fld_fvm) - !deallocate arrays allocated in dyn2phys_all_vars + !deallocate arrays allocated in dyn2phys_all_vars deallocate(save_air_mass_overlap,save_q_phys,save_q_overlap,& save_overlap_area,save_num_overlap,save_overlap_idx,save_dp_phys) else @@ -178,7 +153,7 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ !***************************************************************************************** ! ! 
nflds is ft, fu, fv, + thermo species - nflds = 3+thermodynamic_active_species_num + nflds = 3 allocate(fld_phys(1-nhc_phys:fv_nphys+nhc_phys,1-nhc_phys:fv_nphys+nhc_phys,nlev,nflds,nets:nete)) allocate(fld_gll(np,np,nlev,nflds,nets:nete)) allocate(llimiter(nflds)) @@ -190,18 +165,8 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ fld_phys(1:fv_nphys,1:fv_nphys,:,1,ie) = fvm(ie)%ft(1:fv_nphys,1:fv_nphys,:) fld_phys(1:fv_nphys,1:fv_nphys,:,2,ie) = fvm(ie)%fm(1:fv_nphys,1:fv_nphys,1,:) fld_phys(1:fv_nphys,1:fv_nphys,:,3,ie) = fvm(ie)%fm(1:fv_nphys,1:fv_nphys,2,:) - ! - ! compute cslam mixing ratio with physics update - ! - do m_cnst=1,thermodynamic_active_species_num - do k=1,nlev - fld_phys(1:fv_nphys,1:fv_nphys,k,m_cnst+3,ie) = & - fvm(ie)%c(1:fv_nphys,1:fv_nphys,k,thermodynamic_active_species_idx(m_cnst))+& - fvm(ie)%fc_phys(1:fv_nphys,1:fv_nphys,k,thermodynamic_active_species_idx(m_cnst)) - end do - end do - end do - ! + end do + ! ! do mapping ! call phys2dyn(hybrid,elem,fld_phys,fld_gll,nets,nete,nlev,nflds,fvm,llimiter,2) @@ -210,24 +175,18 @@ subroutine phys2dyn_forcings_fvm(elem, fvm, hybrid,nets,nete,no_cslam, tl_f, tl_ elem(ie)%derived%fM(:,:,1,:) = fld_gll(:,:,:,2,ie) elem(ie)%derived%fM(:,:,2,:) = fld_gll(:,:,:,3,ie) end do + deallocate(fld_gll) do ie=nets,nete - do m_cnst=1,thermodynamic_active_species_num - ! - ! convert fq so that it will effectively overwrite SE q with CSLAM q - ! - elem(ie)%derived%fq(:,:,:,m_cnst) = fld_gll(:,:,:,m_cnst+3,ie)-& - qgll(:,:,:,m_cnst,ie) - end do do m_cnst = 1,ntrac fvm(ie)%fc(1:nc,1:nc,:,m_cnst) = fvm(ie)%fc_phys(1:nc,1:nc,:,m_cnst)*fvm(ie)%dp_fvm(1:nc,1:nc,:) end do end do end if - deallocate(fld_phys,llimiter,fld_gll,qgll) + deallocate(fld_phys,llimiter) end subroutine phys2dyn_forcings_fvm ! for multiple fields - subroutine fvm2dyntn(fld_fvm,fld_gll,hybrid,nets,nete,numlev,num_flds,fvm,llimiter) + subroutine fvm2dyntn(fld_fvm,fld_gll,hybrid,nets,nete,numlev,num_flds,fvm,llimiter,halo_filled) use dimensions_mod, only: np, nhc, nc use hybrid_mod , only: hybrid_t use bndry_mod , only: ghost_exchange @@ -240,7 +199,10 @@ subroutine fvm2dyntn(fld_fvm,fld_gll,hybrid,nets,nete,numlev,num_flds,fvm,llimit type (hybrid_t) , intent(in) :: hybrid type(fvm_struct) , intent(in) :: fvm(nets:nete) logical , intent(in) :: llimiter(num_flds) + logical, optional , intent(in) :: halo_filled !optional if boundary exchange for fld_fvm has already been called + integer :: ie, iwidth + logical :: fill_halo ! !********************************************* ! @@ -248,13 +210,20 @@ subroutine fvm2dyntn(fld_fvm,fld_gll,hybrid,nets,nete,numlev,num_flds,fvm,llimit ! !********************************************* ! - do ie=nets,nete - call ghostpack(ghostBufQnhc_s, fld_fvm(:,:,:,:,ie),numlev*num_flds,0,ie) - end do - call ghost_exchange(hybrid,ghostbufQnhc_s,location='fvm2dyntn') - do ie=nets,nete - call ghostunpack(ghostbufQnhc_s, fld_fvm(:,:,:,:,ie),numlev*num_flds,0,ie) - end do + fill_halo = .true. + if (present(halo_filled)) then + fill_halo = .not. halo_filled + end if + + if (fill_halo) then + do ie=nets,nete + call ghostpack(ghostBufQnhc_s, fld_fvm(:,:,:,:,ie),numlev*num_flds,0,ie) + end do + call ghost_exchange(hybrid,ghostbufQnhc_s,location='fvm2dyntn') + do ie=nets,nete + call ghostunpack(ghostbufQnhc_s, fld_fvm(:,:,:,:,ie),numlev*num_flds,0,ie) + end do + end if ! ! mapping ! @@ -267,7 +236,7 @@ subroutine fvm2dyntn(fld_fvm,fld_gll,hybrid,nets,nete,numlev,num_flds,fvm,llimit end subroutine fvm2dyntn ! 
for single field - subroutine fvm2dynt1(fld_fvm,fld_gll,hybrid,nets,nete,numlev,fvm,llimiter) + subroutine fvm2dynt1(fld_fvm,fld_gll,hybrid,nets,nete,numlev,fvm,llimiter,halo_filled) use dimensions_mod, only: np, nhc, nc use hybrid_mod , only: hybrid_t use bndry_mod , only: ghost_exchange @@ -280,7 +249,10 @@ subroutine fvm2dynt1(fld_fvm,fld_gll,hybrid,nets,nete,numlev,fvm,llimiter) type (hybrid_t) , intent(in) :: hybrid type(fvm_struct) , intent(in) :: fvm(nets:nete) logical , intent(in) :: llimiter(1) + logical, optional , intent(in) :: halo_filled!optional if boundary exchange for fld_fvm has already been called + integer :: ie, iwidth + logical :: fill_halo ! !********************************************* ! @@ -288,13 +260,20 @@ subroutine fvm2dynt1(fld_fvm,fld_gll,hybrid,nets,nete,numlev,fvm,llimiter) ! !********************************************* ! - do ie=nets,nete - call ghostpack(ghostBufQnhc_t1, fld_fvm(:,:,:,1,ie),numlev,0,ie) - end do - call ghost_exchange(hybrid,ghostbufQnhc_t1,location='fvm2dynt1') - do ie=nets,nete - call ghostunpack(ghostbufQnhc_t1, fld_fvm(:,:,:,1,ie),numlev,0,ie) - end do + fill_halo = .true. + if (present(halo_filled)) then + fill_halo = .not. halo_filled + end if + + if (fill_halo) then + do ie=nets,nete + call ghostpack(ghostBufQnhc_t1, fld_fvm(:,:,:,1,ie),numlev,0,ie) + end do + call ghost_exchange(hybrid,ghostbufQnhc_t1,location='fvm2dynt1') + do ie=nets,nete + call ghostunpack(ghostbufQnhc_t1, fld_fvm(:,:,:,1,ie),numlev,0,ie) + end do + end if ! ! mapping ! @@ -305,7 +284,6 @@ subroutine fvm2dynt1(fld_fvm,fld_gll,hybrid,nets,nete,numlev,fvm,llimiter) end do end subroutine fvm2dynt1 - subroutine fill_halo_phys(fld_phys,hybrid,nets,nete,num_lev,num_flds) use dimensions_mod, only: nhc_phys, fv_nphys use hybrid_mod , only: hybrid_t @@ -354,7 +332,7 @@ subroutine phys2dyn(hybrid,elem,fld_phys,fld_gll,nets,nete,num_lev,num_flds,fvm, type(fvm_struct) , intent(in) :: fvm(:) integer, optional , intent(in) :: istart_vector logical , intent(in) :: llimiter(num_flds) - logical, optional , intent(in) :: halo_filled + logical, optional , intent(in) :: halo_filled!optional if boundary exchange for fld_fvm has already been called integer :: i, j, ie, k, iwidth real (kind=r8) :: v1,v2 @@ -503,7 +481,6 @@ subroutine dyn2phys_all_vars(nets,nete,elem,fvm,& do k=1,nlev inv_darea_dp_fvm = dyn2fvm(elem(ie)%state%dp3d(:,:,k,tl),elem(ie)%metdet(:,:)) inv_darea_dp_fvm = 1.0_r8/inv_darea_dp_fvm - T_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%state%T(:,:,k,tl),elem(ie)%metdet(:,:),inv_area),SHAPE(T_phys(:,k,ie))) Omega_phys(:,k,ie) = RESHAPE(dyn2phys(elem(ie)%derived%omega(:,:,k),elem(ie)%metdet(:,:),inv_area), & SHAPE(Omega_phys(:,k,ie))) @@ -1317,6 +1294,87 @@ subroutine get_q_overlap_save(ie,k,fvm,q_fvm,num_trac,q_phys) save_q_phys(:,:,k,m_cnst,ie) = q_phys(:,:,m_cnst) end do end subroutine get_q_overlap_save + ! + ! Routine to overwrite thermodynamic active tracers on the GLL grid with CSLAM values + ! by Lagrange interpolation from 3x3 CSLAM grid to GLL grid. + ! + subroutine cslam2gll(elem, fvm, hybrid,nets,nete, tl_f, tl_qdp) + use dimensions_mod, only: nc,nlev,np,nhc + use hybrid_mod, only: hybrid_t + use air_composition, only: thermodynamic_active_species_num, thermodynamic_active_species_idx + use fvm_mod, only: ghostBuf_cslam2gll + use bndry_mod, only: ghost_exchange + use edge_mod, only: ghostpack,ghostunpack + + type (element_t), intent(inout):: elem(:) + type(fvm_struct), intent(inout):: fvm(:) + + type (hybrid_t), intent(in) :: hybrid ! 
distributed parallel structure (shared) + integer, intent(in) :: nets, nete, tl_f, tl_qdp + + integer :: ie,i,j,k,m_cnst,nq,ierr + real (kind=r8), dimension(:,:,:,:,:) , allocatable :: fld_fvm, fld_gll + ! + ! for tensor product Lagrange interpolation + ! + integer :: nflds + logical, allocatable :: llimiter(:) + call t_startf('cslam2gll') + nflds = thermodynamic_active_species_num + + !Allocate variables + !------------------ + allocate(fld_fvm(1-nhc:nc+nhc,1-nhc:nc+nhc,nlev,nflds,nets:nete), stat=ierr) + if( ierr /= 0 ) then + write(iulog,*) 'cslam2gll: fld_fvm allocation error = ', ierr + call endrun('cslam2gll: failed to allocate fld_fvm array') + end if + + allocate(fld_gll(np,np,nlev,thermodynamic_active_species_num,nets:nete),stat=ierr) + if( ierr /= 0 ) then + write(iulog,*) 'cslam2gll: fld_gll allocation error = ', ierr + call endrun('cslam2gll: failed to allocate fld_gll array') + end if + allocate(llimiter(nflds), stat=ierr) + if( ierr /= 0 ) then + write(iulog,*) 'cslam2gll: llimiter allocation error = ', ierr + call endrun('cslam2gll: failed to allocate llimiter array') + end if + !------------------ + llimiter(1:nflds) = .false. + do ie=nets,nete + do m_cnst=1,thermodynamic_active_species_num + do k=1,nlev + fld_fvm(1:nc,1:nc,k,m_cnst,ie) = & + fvm(ie)%c(1:nc,1:nc,k,thermodynamic_active_species_idx(m_cnst)) + end do + end do + end do + call t_startf('fvm:fill_halo_cslam2gll') + do ie=nets,nete + call ghostpack(ghostBuf_cslam2gll, fld_fvm(:,:,:,:,ie),nlev*nflds,0,ie) + end do + + call ghost_exchange(hybrid,ghostBuf_cslam2gll,location='cslam2gll') + + do ie=nets,nete + call ghostunpack(ghostBuf_cslam2gll, fld_fvm(:,:,:,:,ie),nlev*nflds,0,ie) + end do + call t_stopf('fvm:fill_halo_cslam2gll') + ! + ! do mapping + ! + call fvm2dyn(fld_fvm,fld_gll,hybrid,nets,nete,nlev,nflds,fvm,llimiter,halo_filled=.true.) + + do ie=nets,nete + do m_cnst=1,thermodynamic_active_species_num + elem(ie)%state%qdp(:,:,:,m_cnst,tl_qdp) = fld_gll(:,:,:,m_cnst,ie)*& + elem(ie)%state%dp3d(:,:,:,tl_f) + end do + end do + deallocate(fld_fvm, fld_gll, llimiter) + call t_stopf('cslam2gll') + end subroutine cslam2gll end module fvm_mapping diff --git a/src/dynamics/se/dycore/fvm_mod.F90 b/src/dynamics/se/dycore/fvm_mod.F90 index 309a101ba2..e2f311ee81 100644 --- a/src/dynamics/se/dycore/fvm_mod.F90 +++ b/src/dynamics/se/dycore/fvm_mod.F90 @@ -36,6 +36,7 @@ module fvm_mod type (EdgeBuffer_t), public :: ghostBufQnhcJet_h type (EdgeBuffer_t), public :: ghostBufFluxJet_h type (EdgeBuffer_t), public :: ghostBufPG_s + type (EdgeBuffer_t), public :: ghostBuf_cslam2gll interface fill_halo_fvm module procedure fill_halo_fvm_noprealloc @@ -496,13 +497,14 @@ subroutine fvm_init2(elem,fvm,hybrid,nets,nete) call initghostbuffer(hybrid%par,ghostBufQ1_vh,elem,klev*(ntrac+1),1,nc,nthreads=vert_num_threads*horz_num_threads) ! call initghostbuffer(hybrid%par,ghostBufFlux_h,elem,4*nlev,nhe,nc,nthreads=horz_num_threads) call initghostbuffer(hybrid%par,ghostBufFlux_vh,elem,4*nlev,nhe,nc,nthreads=vert_num_threads*horz_num_threads) + call initghostbuffer(hybrid%par,ghostBuf_cslam2gll,elem,nlev*thermodynamic_active_species_num,nhc,nc,nthreads=1) ! ! preallocate buffers for physics-dynamics coupling ! 
if (fv_nphys.ne.nc) then call initghostbuffer(hybrid%par,ghostBufPG_s,elem,nlev*(4+ntrac),nhc_phys,fv_nphys,nthreads=1) else - call initghostbuffer(hybrid%par,ghostBufPG_s,elem,nlev*(3+thermodynamic_active_species_num),nhc_phys,fv_nphys,nthreads=1) + call initghostbuffer(hybrid%par,ghostBufPG_s,elem,nlev*3,nhc_phys,fv_nphys,nthreads=1) end if if (fvm_supercycling.ne.fvm_supercycling_jet) then diff --git a/src/dynamics/se/dycore/fvm_reconstruction_mod.F90 b/src/dynamics/se/dycore/fvm_reconstruction_mod.F90 index b7310ad477..b4708dfd3b 100644 --- a/src/dynamics/se/dycore/fvm_reconstruction_mod.F90 +++ b/src/dynamics/se/dycore/fvm_reconstruction_mod.F90 @@ -105,7 +105,6 @@ subroutine reconstruction(fcube,nlev_in,k_in,recons,irecons,llimiter,ntrac_in,& if(FVM_TIMERS) call t_startf('FVM:reconstruction:part#1') if (nhe>0) then do itr=1,ntrac_in - ! f=-9e9_r8 call extend_panel_interpolate(nc,nhc,nhr,nht,ns,nh,& fcube(:,:,k_in,itr),cubeboundary,halo_interp_weight,ibase,f(:,:,1),f(:,:,2:3)) call get_gradients(f(:,:,:),jx,jy,irecons,recons(:,:,:,itr),& @@ -113,8 +112,6 @@ subroutine reconstruction(fcube,nlev_in,k_in,recons,irecons,llimiter,ntrac_in,& end do else do itr=1,ntrac_in - ! f=-9e9_r8!to avoid floating point exception for uninitialized variables - ! !in non-existent cells (corners of cube) call extend_panel_interpolate(nc,nhc,nhr,nht,ns,nh,& fcube(:,:,k_in,itr),cubeboundary,halo_interp_weight,ibase,f(:,:,1)) call get_gradients(f(:,:,:),jx,jy,irecons,recons(:,:,:,itr),& diff --git a/src/dynamics/se/dycore/global_norms_mod.F90 b/src/dynamics/se/dycore/global_norms_mod.F90 index 17e773d99c..5290017c8e 100644 --- a/src/dynamics/se/dycore/global_norms_mod.F90 +++ b/src/dynamics/se/dycore/global_norms_mod.F90 @@ -577,7 +577,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& deallocate(gp%weights) call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu_p ,1.0_r8 ,'_p ') - call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu ,0.5_r8,' ') + call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu ,1.0_r8,' ') call automatically_set_viscosity_coefficients(hybrid,ne,max_min_dx,min_min_dx,nu_div,2.5_r8 ,'_div') if (nu_q<0) nu_q = nu_p ! necessary for consistency @@ -600,29 +600,34 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& lev_set = sponge_del4_lev < 0 if (ptop>1000.0_r8) then ! - ! low top (~1000 Pa) + ! low top; usually idealized test cases ! top_000_032km = .true. + if (hybrid%masterthread) write(iulog,* )"Model top damping configuration: top_000_032km" else if (ptop>100.0_r8) then ! - ! CAM6 top (~225 Pa) + ! CAM6 top (~225 Pa) or CAM7 low top ! top_032_042km = .true. + if (hybrid%masterthread) write(iulog,* )"Model top damping configuration: top_032_042km" else if (ptop>1e-1_r8) then ! ! CAM7 top (~4.35e-1 Pa) ! top_042_090km = .true. + if (hybrid%masterthread) write(iulog,* )"Model top damping configuration: top_042_090km" else if (ptop>1E-4_r8) then ! ! WACCM top (~4.5e-4 Pa) ! top_090_140km = .true. + if (hybrid%masterthread) write(iulog,* )"Model top damping configuration: top_090_140km" else ! ! WACCM-x - geospace (~4e-7 Pa) ! top_140_600km = .true. + if (hybrid%masterthread) write(iulog,* )"Model top damping configuration: top_140_600km" end if ! ! Logging text for sponge layer configuration @@ -634,28 +639,24 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& ! ! if user or namelist is not specifying sponge del4 settings here are best guesses (empirically determined) ! 
- if (top_000_032km) then + if (top_090_140km.or.top_140_600km) then ! defaults for waccm(x) + if (sponge_del4_lev <0) sponge_del4_lev = 20 + if (sponge_del4_nu_fac <0) sponge_del4_nu_fac = 5.0_r8 + if (sponge_del4_nu_div_fac<0) sponge_del4_nu_div_fac = 10.0_r8 + else if (sponge_del4_lev <0) sponge_del4_lev = 1 if (sponge_del4_nu_fac <0) sponge_del4_nu_fac = 1.0_r8 if (sponge_del4_nu_div_fac<0) sponge_del4_nu_div_fac = 1.0_r8 end if - if (top_032_042km) then - if (sponge_del4_lev <0) sponge_del4_lev = 3 - if (sponge_del4_nu_fac <0) sponge_del4_nu_fac = 1.0_r8 - if (sponge_del4_nu_div_fac<0) sponge_del4_nu_div_fac = 4.5_r8 - end if - + ! set max wind speed for diagnostics + umax = 120.0_r8 if (top_042_090km) then - if (sponge_del4_lev <0) sponge_del4_lev = 3 - if (sponge_del4_nu_fac <0) sponge_del4_nu_fac = 5.0_r8 - if (sponge_del4_nu_div_fac<0) sponge_del4_nu_div_fac = 7.5_r8 - end if - - if (top_090_140km.or.top_140_600km) then - if (sponge_del4_lev <0) sponge_del4_lev = 10 - if (sponge_del4_nu_fac <0) sponge_del4_nu_fac = 5.0_r8 - if (sponge_del4_nu_div_fac<0) sponge_del4_nu_div_fac = 7.5_r8 + umax = 240._r8 + else if (top_090_140km) then + umax = 300._r8 + else if (top_140_600km) then + umax = 800._r8 end if ! ! Log sponge layer configuration @@ -672,7 +673,6 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& if (lev_set) then write(iulog, '(a,i0)') ' sponge_del4_lev = ',sponge_del4_lev end if - write(iulog,* )"" end if @@ -689,6 +689,7 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& nu_t_lev(k) = (1.0_r8-scale1)*nu_p +scale1*nu_max end if end do + if (hybrid%masterthread)then write(iulog,*) "z computed from barometric formula (using US std atmosphere)" call std_atm_height(pmid(:),z(:)) @@ -696,8 +697,16 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& do k=1,nlev write(iulog,'(i3,5e11.4)') k,pmid(k),z(k),nu_lev(k),nu_t_lev(k),nu_div_lev(k) end do - end if + if (nu_top>0) then + write(iulog,*) ": ksponge_end = ",ksponge_end + write(iulog,*) ": sponge layer Laplacian damping" + write(iulog,*) "k, p, z, nu_scale_top, nu (actual Laplacian damping coefficient)" + do k=1,ksponge_end + write(iulog,'(i3,4e11.4)') k,pmid(k),z(k),nu_scale_top(k),nu_scale_top(k)*nu_top + end do + end if + end if !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! @@ -732,16 +741,6 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& S_laplacian = 2.0_r8 !using forward Euler for sponge diffusion S_hypervis = 2.0_r8 !using forward Euler for hyperviscosity S_rk_tracer = 2.0_r8 - ! - ! estimate max winds - ! 
- if (ptop>100.0_r8) then - umax = 120.0_r8 - else if (ptop>10.0_r8) then - umax = 400.0_r8 - else - umax = 800.0_r8 - end if ugw = 342.0_r8 !max gravity wave speed @@ -778,13 +777,14 @@ subroutine print_cfl(elem,hybrid,nets,nete,dtnu,ptop,pmid,& write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_dyn_vis (hyperviscosity) ; u,v,T,dM) < ',dt_max_hypervis,& 's ',dt_dyn_visco_actual,'s' if (dt_dyn_visco_actual>dt_max_hypervis) write(iulog,*) 'WARNING: dt_dyn_vis theoretically unstable' - write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_se (time-stepping tracers ; q ) < ',dt_max_tracer_se,'s ',& + if (.not.use_cslam) then + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_se (time-stepping tracers ; q ) < ',dt_max_tracer_se,'s ',& dt_tracer_se_actual,'s' - if (dt_tracer_se_actual>dt_max_tracer_se) write(iulog,*) 'WARNING: dt_tracer_se theoretically unstable' - write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_vis (hyperviscosity tracers; q ) < ',dt_max_hypervis_tracer,'s',& - dt_tracer_visco_actual,'s' - if (dt_tracer_visco_actual>dt_max_hypervis_tracer) write(iulog,*) 'WARNING: dt_tracer_hypervis theoretically unstable' - + if (dt_tracer_se_actual>dt_max_tracer_se) write(iulog,*) 'WARNING: dt_tracer_se theoretically unstable' + write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_vis (hyperviscosity tracers; q ) < ',dt_max_hypervis_tracer,'s',& + dt_tracer_visco_actual,'s' + if (dt_tracer_visco_actual>dt_max_hypervis_tracer) write(iulog,*) 'WARNING: dt_tracer_hypervis theoretically unstable' + end if if (use_cslam) then write(iulog,'(a,f10.2,a,f10.2,a)') '* dt_tracer_fvm (time-stepping tracers ; q ) < ',dt_max_tracer_fvm,& 's ',dt_tracer_fvm_actual diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90 index ed7a627ec4..b9b6b746e0 100644 --- a/src/dynamics/se/dycore/prim_advance_mod.F90 +++ b/src/dynamics/se/dycore/prim_advance_mod.F90 @@ -14,7 +14,6 @@ module prim_advance_mod type (EdgeBuffer_t) :: edge3,edgeOmega,edgeSponge real (kind=r8), allocatable :: ur_weights(:) - contains subroutine prim_advance_init(par, elem) @@ -28,7 +27,9 @@ subroutine prim_advance_init(par, elem) integer :: i call initEdgeBuffer(par,edge3 ,elem,4*nlev ,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) - call initEdgeBuffer(par,edgeSponge,elem,4*ksponge_end,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) + if (ksponge_end>0) then + call initEdgeBuffer(par,edgeSponge,elem,4*ksponge_end,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) + end if call initEdgeBuffer(par,edgeOmega ,elem,nlev ,bndry_type=HME_BNDRY_P2P, nthreads=horz_num_threads) if(.not. allocated(ur_weights)) allocate(ur_weights(qsplit)) @@ -112,6 +113,7 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net ! ================================== ! Take timestep ! 
================================== + call t_startf('prim_adv_prep') do nq=1,thermodynamic_active_species_num qidx(nq) = nq end do @@ -134,7 +136,7 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net do ie=nets,nete call get_kappa_dry(qwater(:,:,:,:,ie), qidx, kappa(:,:,:,ie)) end do - + call t_stopf('prim_adv_prep') dt_vis = dt @@ -280,7 +282,7 @@ subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsu real (kind=r8) :: pdel(np,np,nlev) real (kind=r8), allocatable :: ftmp_fvm(:,:,:,:,:) !diagnostics - + call t_startf('applyCAMforc') if (use_cslam) allocate(ftmp_fvm(nc,nc,nlev,ntrac,nets:nete)) if (ftype==0) then @@ -333,7 +335,7 @@ subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsu ! ! tracers ! - if (qsize>0.and.dt_local_tracer>0) then + if (.not.use_cslam.and.dt_local_tracer>0) then #if (defined COLUMN_OPENMP) !$omp parallel do num_threads(tracer_num_threads) private(q,k,i,j,v1) #endif @@ -389,7 +391,7 @@ subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsu if (use_cslam) ftmp_fvm(:,:,:,:,ie) = 0.0_r8 end if - if (ftype_conserve==1) then + if (ftype_conserve==1.and..not.use_cslam) then call get_dp(elem(ie)%state%Qdp(:,:,:,1:qsize,np1_qdp), MASS_MIXING_RATIO, & thermodynamic_active_species_idx_dycore, elem(ie)%state%dp3d(:,:,:,np1), pdel) do k=1,nlev @@ -422,6 +424,7 @@ subroutine applyCAMforcing(elem,fvm,np1,np1_qdp,dt_dribble,dt_phys,nets,nete,nsu end if if (ftype==1.and.nsubstep==1) call tot_energy_dyn(elem,fvm,nets,nete,np1,np1_qdp,'p2d') if (use_cslam) deallocate(ftmp_fvm) + call t_stopf('applyCAMforc') end subroutine applyCAMforcing diff --git a/src/dynamics/se/dycore/prim_advection_mod.F90 b/src/dynamics/se/dycore/prim_advection_mod.F90 index 17ad85ba61..6ee6d2586c 100644 --- a/src/dynamics/se/dycore/prim_advection_mod.F90 +++ b/src/dynamics/se/dycore/prim_advection_mod.F90 @@ -45,7 +45,7 @@ module prim_advection_mod public :: prim_advec_tracers_fvm public :: vertical_remap - type (EdgeBuffer_t) :: edgeAdv, edgeAdvp1, edgeAdvQminmax, edgeAdv1, edgeveloc + type (EdgeBuffer_t) :: edgeAdv, edgeAdvp1, edgeAdvQminmax, edgeveloc integer,parameter :: DSSeta = 1 integer,parameter :: DSSomega = 2 @@ -63,7 +63,7 @@ module prim_advection_mod subroutine Prim_Advec_Init1(par, elem) - use dimensions_mod, only: nlev, qsize, nelemd,ntrac + use dimensions_mod, only: nlev, qsize, nelemd,ntrac,use_cslam use parallel_mod, only: parallel_t, boundaryCommMethod type(parallel_t) :: par type (element_t) :: elem(:) @@ -80,7 +80,7 @@ subroutine Prim_Advec_Init1(par, elem) ! ! Set the number of threads used in the subroutine Prim_Advec_tracers_remap() ! - if (ntrac>0) then + if (use_cslam) then advec_remap_num_threads = 1 else advec_remap_num_threads = tracer_num_threads @@ -89,17 +89,17 @@ subroutine Prim_Advec_Init1(par, elem) ! allocate largest one first ! Currently this is never freed. If it was, only this first one should ! be freed, as only it knows the true size of the buffer. - call initEdgeBuffer(par,edgeAdvp1,elem,qsize*nlev + nlev,bndry_type=boundaryCommMethod,& - nthreads=horz_num_threads*advec_remap_num_threads) - call initEdgeBuffer(par,edgeAdv,elem,qsize*nlev,bndry_type=boundaryCommMethod, & - nthreads=horz_num_threads*advec_remap_num_threads) - ! This is a different type of buffer pointer allocation - ! used for determine the minimum and maximum value from - ! 
neighboring elements - call initEdgeSBuffer(par,edgeAdvQminmax,elem,qsize*nlev*2,bndry_type=boundaryCommMethod, & - nthreads=horz_num_threads*advec_remap_num_threads) - - call initEdgeBuffer(par,edgeAdv1,elem,nlev,bndry_type=boundaryCommMethod) + if (.not.use_cslam) then + call initEdgeBuffer(par,edgeAdvp1,elem,qsize*nlev + nlev,bndry_type=boundaryCommMethod,& + nthreads=horz_num_threads*advec_remap_num_threads) + call initEdgeBuffer(par,edgeAdv,elem,qsize*nlev,bndry_type=boundaryCommMethod, & + nthreads=horz_num_threads*advec_remap_num_threads) + ! This is a different type of buffer pointer allocation + ! used for determine the minimum and maximum value from + ! neighboring elements + call initEdgeSBuffer(par,edgeAdvQminmax,elem,qsize*nlev*2,bndry_type=boundaryCommMethod, & + nthreads=horz_num_threads*advec_remap_num_threads) + end if call initEdgeBuffer(par,edgeveloc,elem,2*nlev,bndry_type=boundaryCommMethod) diff --git a/src/dynamics/se/dycore/prim_driver_mod.F90 b/src/dynamics/se/dycore/prim_driver_mod.F90 index 6cfb52e356..dc012e2d12 100644 --- a/src/dynamics/se/dycore/prim_driver_mod.F90 +++ b/src/dynamics/se/dycore/prim_driver_mod.F90 @@ -19,7 +19,6 @@ module prim_driver_mod private public :: prim_init2, prim_run_subcycle, prim_finalize public :: prim_set_dry_mass - contains !=============================================================================! @@ -61,9 +60,10 @@ subroutine prim_init2(elem, fvm, hybrid, nets, nete, tl, hvcoord) ! variables used to calculate CFL real (kind=r8) :: dtnu ! timestep*viscosity parameter - real (kind=r8) :: dt_dyn_vis ! viscosity timestep used in dynamics - real (kind=r8) :: dt_dyn_del2_sponge, dt_remap + real (kind=r8) :: dt_dyn_del2_sponge real (kind=r8) :: dt_tracer_vis ! viscosity timestep used in tracers + real (kind=r8) :: dt_dyn_vis ! viscosity timestep + real (kind=r8) :: dt_remap ! remapping timestep real (kind=r8) :: dp,dp0,T1,T0,pmid_ref(np,np) real (kind=r8) :: ps_ref(np,np,nets:nete) @@ -219,7 +219,7 @@ subroutine prim_run_subcycle(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,nsubst ! use hybvcoord_mod, only : hvcoord_t use se_dyn_time_mod, only: TimeLevel_t, timelevel_update, timelevel_qdp, nsplit - use control_mod, only: statefreq,qsplit, rsplit, variable_nsplit + use control_mod, only: statefreq,qsplit, rsplit, variable_nsplit, dribble_in_rsplit_loop use prim_advance_mod, only: applycamforcing use prim_advance_mod, only: tot_energy_dyn,compute_omega use prim_state_mod, only: prim_printstate, adjust_nsplit @@ -227,8 +227,8 @@ subroutine prim_run_subcycle(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,nsubst use thread_mod, only: omp_get_thread_num use perf_mod , only: t_startf, t_stopf use fvm_mod , only: fill_halo_fvm, ghostBufQnhc_h - use dimensions_mod, only: use_cslam,fv_nphys, ksponge_end - + use dimensions_mod, only: use_cslam,fv_nphys + use fvm_mapping, only: cslam2gll type (element_t) , intent(inout) :: elem(:) type(fvm_struct), intent(inout) :: fvm(:) type (hybrid_t), intent(in) :: hybrid ! distributed parallel structure (shared) @@ -245,7 +245,6 @@ subroutine prim_run_subcycle(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,nsubst real (kind=r8) :: dp_np1(np,np) real (kind=r8) :: dp_start(np,np,nlev+1,nets:nete),dp_end(np,np,nlev,nets:nete) logical :: compute_diagnostics - ! =================================== ! Main timestepping loop ! 
===================================
@@ -282,12 +281,33 @@ subroutine prim_run_subcycle(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,nsubst
     call TimeLevel_Qdp( tl, qsplit, n0_qdp)
-    call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dAF')
-    call ApplyCAMForcing(elem,fvm,tl%n0,n0_qdp,dt_remap,dt_phys,nets,nete,nsubstep)
-    call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dBD')
+    if (dribble_in_rsplit_loop==0) then
+      call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dAF')
+      call ApplyCAMForcing(elem,fvm,tl%n0,n0_qdp,dt_remap,dt_phys,nets,nete,nsubstep)
+      call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dBD')
+    end if
     do r=1,rsplit
       if (r.ne.1) call TimeLevel_update(tl,"leapfrog")
-      call prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,r)
+      !
+      ! if nsplit==1 and the physics time-step is long then there will be noise in the
+      ! pressure field; hence "dribble" in the tendencies
+      !
+      if (dribble_in_rsplit_loop==1) then
+        call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dAF')
+        call ApplyCAMForcing(elem,fvm,tl%n0,n0_qdp,dt,dt_phys,nets,nete,MAX(nsubstep,r))
+        call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dBD')
+      end if
+      !
+      ! right after physics, overwrite Qdp with CSLAM values
+      !
+      if (use_cslam.and.nsubstep==1.and.r==1) then
+        call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dAF')
+        call cslam2gll(elem, fvm, hybrid,nets,nete, tl%n0, n0_qdp)
+        call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dBD')
+      end if
+      call tot_energy_dyn(elem,fvm,nets,nete,tl%n0,n0_qdp,'dBL')
+      call prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,r,nsubstep==nsplit,dt_remap)
+      call tot_energy_dyn(elem,fvm,nets,nete,tl%np1,n0_qdp,'dAL')
     enddo
@@ -363,7 +383,6 @@ subroutine prim_run_subcycle(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,nsubst
           end do
        end do
     end do
-
     if (nsubstep==nsplit.and.variable_nsplit) then
       call t_startf('adjust_nsplit')
       call adjust_nsplit(elem, tl, hybrid,nets,nete, fvm, omega_cn)
@@ -389,7 +408,7 @@ subroutine prim_run_subcycle(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord,nsubst
   end subroutine prim_run_subcycle
 
-  subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep)
+  subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep, last_step,dt_remap)
   !
   !   Take qsplit dynamics steps and one tracer step
   !   for vertically lagrangian option, this subroutine does only the horizontal step
@@ -418,7 +437,8 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep)
     use dimensions_mod, only: kmin_jet, kmax_jet
     use fvm_mod, only: ghostBufQnhc_vh,ghostBufQ1_vh, ghostBufFlux_vh
     use fvm_mod, only: ghostBufQ1_h,ghostBufQnhcJet_h, ghostBufFluxJet_h
-
+    use se_dyn_time_mod, only: timelevel_qdp
+    use fvm_mapping, only: cslam2gll
 #ifdef waccm_debug
     use cam_history, only: outfld
 #endif
@@ -433,6 +453,8 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep)
     real(kind=r8), intent(in)         :: dt  ! "timestep dependent" timestep
     type (TimeLevel_t), intent(inout) :: tl
     integer, intent(in)               :: rstep ! vertical remap subcycling step
+    logical, intent(in)               :: last_step!
last step before d_p_coupling + real(kind=r8), intent(in) :: dt_remap type (hybrid_t):: hybridnew,hybridnew2 real(kind=r8) :: st, st1, dp, dt_q @@ -440,6 +462,7 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep) integer :: ithr integer :: region_num_threads integer :: kbeg,kend + integer :: n0_qdp, np1_qdp real (kind=r8) :: tempdp3d(np,np), x real (kind=r8) :: tempmass(nc,nc) @@ -517,7 +540,6 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep) end do end if #endif - ! current dynamics state variables: ! derived%dp = dp at start of timestep ! derived%vn0 = mean horiz. flux: U*dp @@ -537,32 +559,19 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep) ! special case in CAM: if CSLAM tracers are turned on , qsize=1 but this tracer should ! not be advected. This will be cleaned up when the physgrid is merged into CAM trunk ! Currently advecting all species - if (qsize > 0) then - + if (.not.use_cslam) then call t_startf('prim_advec_tracers_remap') - if(use_cslam) then - ! Deactivate threading in the tracer dimension if this is a CSLAM run - region_num_threads = 1 - else - region_num_threads=tracer_num_threads - endif + region_num_threads=tracer_num_threads call omp_set_nested(.true.) !$OMP PARALLEL NUM_THREADS(region_num_threads), DEFAULT(SHARED), PRIVATE(hybridnew) - if(use_cslam) then - ! Deactivate threading in the tracer dimension if this is a CSLAM run - hybridnew = config_thread_region(hybrid,'serial') - else - hybridnew = config_thread_region(hybrid,'tracer') - endif + hybridnew = config_thread_region(hybrid,'tracer') call Prim_Advec_Tracers_remap(elem, deriv,hvcoord,hybridnew,dt_q,tl,nets,nete) !$OMP END PARALLEL call omp_set_nested(.false.) call t_stopf('prim_advec_tracers_remap') - end if - ! - ! only run fvm transport every fvm_supercycling rstep - ! - if (use_cslam) then + else + ! + ! only run fvm transport every fvm_supercycling rstep ! ! FVM transport ! @@ -594,7 +603,9 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep) fvm(ie)%psc(i,j) = sum(fvm(ie)%dp_fvm(i,j,:)) + hvcoord%hyai(1)*hvcoord%ps0 end do end do - end do + end do + call TimeLevel_Qdp( tl, qsplit, n0_qdp, np1_qdp) + if (.not.last_step) call cslam2gll(elem, fvm, hybrid,nets,nete, tl%np1, np1_qdp) else if ((mod(rstep,fvm_supercycling_jet) == 0)) then ! ! 
shorter fvm time-step in jet region @@ -609,7 +620,7 @@ subroutine prim_step(elem, fvm, hybrid,nets,nete, dt, tl, hvcoord, rstep) (/nc*nc,nlev/)), nc*nc, ie) end do #endif - endif + endif end subroutine prim_step diff --git a/src/dynamics/se/dycore/prim_init.F90 b/src/dynamics/se/dycore/prim_init.F90 index 42a336f65c..930b887107 100644 --- a/src/dynamics/se/dycore/prim_init.F90 +++ b/src/dynamics/se/dycore/prim_init.F90 @@ -1,7 +1,7 @@ module prim_init use shr_kind_mod, only: r8=>shr_kind_r8 - use dimensions_mod, only: nc + use dimensions_mod, only: nc, use_cslam use reduction_mod, only: reductionbuffer_ordered_1d_t use quadrature_mod, only: quadrature_t, gausslobatto @@ -22,7 +22,7 @@ subroutine prim_init1(elem, fvm, par, Tl) use cam_logfile, only: iulog use shr_sys_mod, only: shr_sys_flush use thread_mod, only: max_num_threads - use dimensions_mod, only: np, nlev, nelem, nelemd, nelemdmax + use dimensions_mod, only: np, nlev, nelem, nelemd, nelemdmax, qsize_d use dimensions_mod, only: GlobalUniqueCols, fv_nphys,irecons_tracer use control_mod, only: topology, partmethod use element_mod, only: element_t, allocate_element_desc @@ -56,6 +56,7 @@ subroutine prim_init1(elem, fvm, par, Tl) use shr_reprosum_mod, only: repro_sum => shr_reprosum_calc use fvm_analytic_mod, only: compute_basic_coordinate_vars use fvm_control_volume_mod, only: fvm_struct, allocate_physgrid_vars + use air_composition, only: thermodynamic_active_species_num type(element_t), pointer :: elem(:) type(fvm_struct), pointer :: fvm(:) @@ -70,7 +71,7 @@ subroutine prim_init1(elem, fvm, par, Tl) integer :: ie integer :: nets, nete integer :: nelem_edge - integer :: ierr, j + integer :: ierr=0, j logical, parameter :: Debug = .FALSE. real(r8), allocatable :: aratio(:,:) @@ -165,9 +166,49 @@ subroutine prim_init1(elem, fvm, par, Tl) end if call mpi_allreduce(nelemd, nelemdmax, 1, MPI_INTEGER, MPI_MAX, par%comm, ierr) + !Allocate elements: if (nelemd > 0) then - allocate(elem(nelemd)) - call allocate_element_desc(elem) + allocate(elem(nelemd)) + call allocate_element_desc(elem) + !Allocate Qdp and derived FQ arrays: + if(fv_nphys > 0) then !SE-CSLAM + do ie=1,nelemd + allocate(elem(ie)%state%Qdp(np,np,nlev,thermodynamic_active_species_num,1), stat=ierr) + if( ierr /= 0 ) then + call endrun('prim_init1: failed to allocate Qdp array') + end if + allocate(elem(ie)%derived%FQ(np,np,nlev,thermodynamic_active_species_num), stat=ierr) + if( ierr /= 0 ) then + call endrun('prim_init1: failed to allocate fq array') + end if + end do + else !Regular SE + do ie=1,nelemd + allocate(elem(ie)%state%Qdp(np,np,nlev,qsize_d,2), stat=ierr) + if( ierr /= 0 ) then + call endrun('prim_init1: failed to allocate Qdp array') + end if + allocate(elem(ie)%derived%FQ(np,np,nlev,qsize_d), stat=ierr) + if( ierr /= 0 ) then + call endrun('prim_init1: failed to allocate fq array') + end if + end do + end if + !Allocate remaining derived quantity arrays: + do ie=1,nelemd + allocate(elem(ie)%derived%FDP(np,np,nlev), stat=ierr) + if( ierr /= 0 ) then + call endrun('prim_init1: failed to allocate fdp array') + end if + allocate(elem(ie)%derived%divdp(np,np,nlev), stat=ierr) + if( ierr /= 0 ) then + call endrun('prim_init1: failed to allocate divdp array') + end if + allocate(elem(ie)%derived%divdp_proj(np,np,nlev), stat=ierr) + if( ierr /= 0 ) then + call endrun('prim_init1: failed to allocate divdp_proj array') + end if + end do end if if (fv_nphys > 0) then @@ -306,7 +347,7 @@ subroutine prim_init1(elem, fvm, par, Tl) elem(ie)%derived%FM=0.0_r8 
elem(ie)%derived%FQ=0.0_r8 elem(ie)%derived%FT=0.0_r8 - elem(ie)%derived%FDP=0.0_r8 + elem(ie)%derived%FDP=0.0_r8 elem(ie)%derived%pecnd=0.0_r8 elem(ie)%derived%Omega=0 diff --git a/src/dynamics/se/dycore/se_dyn_time_mod.F90 b/src/dynamics/se/dycore/se_dyn_time_mod.F90 index 4dfd981661..cfe7ad2323 100644 --- a/src/dynamics/se/dycore/se_dyn_time_mod.F90 +++ b/src/dynamics/se/dycore/se_dyn_time_mod.F90 @@ -80,6 +80,7 @@ end subroutine TimeLevel_init_specific !locations for nm1 and n0 for Qdp - because !it only has 2 levels for storage subroutine TimeLevel_Qdp(tl, qsplit, n0, np1) + use dimensions_mod, only: use_cslam type (TimeLevel_t) :: tl integer, intent(in) :: qsplit integer, intent(inout) :: n0 @@ -87,22 +88,26 @@ subroutine TimeLevel_Qdp(tl, qsplit, n0, np1) integer :: i_temp - i_temp = tl%nstep/qsplit - - if (mod(i_temp,2) ==0) then + if (use_cslam) then n0 = 1 - if (present(np1)) then - np1 = 2 - endif + if (present(np1)) np1 = 1 else - n0 = 2 - if (present(np1)) then - np1 = 1 - end if - endif + i_temp = tl%nstep/qsplit + + if (mod(i_temp,2) ==0) then + n0 = 1 + if (present(np1)) then + np1 = 2 + endif + else + n0 = 2 + if (present(np1)) then + np1 = 1 + end if + endif !print * ,'nstep = ', tl%nstep, 'qsplit= ', qsplit, 'i_temp = ', i_temp, 'n0 = ', n0 - + endif end subroutine TimeLevel_Qdp subroutine TimeLevel_update(tl,uptype) diff --git a/src/dynamics/se/dycore_budget.F90 b/src/dynamics/se/dycore_budget.F90 index d2bfe0fceb..14f1d65167 100644 --- a/src/dynamics/se/dycore_budget.F90 +++ b/src/dynamics/se/dycore_budget.F90 @@ -63,7 +63,7 @@ subroutine print_budget(hstwr) ! ! mass budgets dynamics ! - real(r8) :: dMdt_floating_dyn ! mass tendency floating dynamics (dAD-dBD) + real(r8) :: dMdt_floating_dyn ! mass tendency floating dynamics (dAL-dBL) real(r8) :: dMdt_vert_remap ! mass tendency vertical remapping (dAR-dAD) real(r8) :: dMdt_del4_fric_heat ! mass tendency del4 frictional heating (dAH-dCH) real(r8) :: dMdt_del4_tot ! mass tendency del4 + del4 frictional heating (dAH-dBH) @@ -73,7 +73,7 @@ subroutine print_budget(hstwr) ! ! energy budgets dynamics ! - real(r8) :: dEdt_floating_dyn ! dE/dt floating dynamics (dAD-dBD) + real(r8) :: dEdt_floating_dyn ! dE/dt floating dynamics (dAL-dBL) real(r8) :: dEdt_vert_remap ! dE/dt vertical remapping (dAR-dAD) real(r8) :: dEdt_del4 ! dE/dt del4 (dCH-dBH) real(r8) :: dEdt_del4_fric_heat ! dE/dt del4 frictional heating (dAH-dCH) @@ -132,7 +132,7 @@ subroutine print_budget(hstwr) call cam_budget_get_global('dBF' ,idx(i),E_dBF(i)) !state passed to physics end do - call cam_budget_get_global('dAD-dBD',teidx,dEdt_floating_dyn) + call cam_budget_get_global('dAL-dBL',teidx,dEdt_floating_dyn) call cam_budget_get_global('dAR-dAD',teidx,dEdt_vert_remap) dEdt_dycore_dyn = dEdt_floating_dyn+dEdt_vert_remap @@ -459,7 +459,7 @@ subroutine print_budget(hstwr) ! detailed mass budget in dynamical core ! 
if (is_cam_budget('dAD').and.is_cam_budget('dBD').and.is_cam_budget('dAR').and.is_cam_budget('dCH')) then - call cam_budget_get_global('dAD-dBD',m_cnst,dMdt_floating_dyn) + call cam_budget_get_global('dAL-dBL',m_cnst,dMdt_floating_dyn) call cam_budget_get_global('dAR-dAD',m_cnst,dMdt_vert_remap) tmp = dMdt_floating_dyn+dMdt_vert_remap diff = abs_diff(tmp,0.0_r8,pf=pf) @@ -472,7 +472,7 @@ subroutine print_budget(hstwr) write(iulog,*) "Error: mass non-conservation in dynamical core" write(iulog,*) "(detailed budget below)" write(iulog,*) " " - write(iulog,*)"dMASS/dt 2D dynamics (dAD-dBD) ",dMdt_floating_dyn," Pa/m^2/s" + write(iulog,*)"dMASS/dt 2D dynamics (dAL-dBL) ",dMdt_floating_dyn," Pa/m^2/s" write(iulog,*)"dE/dt vertical remapping (dAR-dAD) ",dMdt_vert_remap write(iulog,*)" " write(iulog,*)"Breakdown of 2D dynamics:" diff --git a/src/dynamics/se/dyn_comp.F90 b/src/dynamics/se/dyn_comp.F90 index 312349eb44..5dcffe7347 100644 --- a/src/dynamics/se/dyn_comp.F90 +++ b/src/dynamics/se/dyn_comp.F90 @@ -110,7 +110,7 @@ subroutine dyn_readnl(NLFileName) use control_mod, only: topology, variable_nsplit use control_mod, only: fine_ne, hypervis_power, hypervis_scaling use control_mod, only: max_hypervis_courant, statediag_numtrac,refined_mesh - use control_mod, only: molecular_diff, pgf_formulation + use control_mod, only: molecular_diff, pgf_formulation, dribble_in_rsplit_loop use control_mod, only: sponge_del4_nu_div_fac, sponge_del4_nu_fac, sponge_del4_lev use dimensions_mod, only: ne, npart use dimensions_mod, only: large_Courant_incr @@ -168,7 +168,7 @@ subroutine dyn_readnl(NLFileName) integer :: se_kmax_jet real(r8) :: se_molecular_diff integer :: se_pgf_formulation - + integer :: se_dribble_in_rsplit_loop namelist /dyn_se_inparm/ & se_fine_ne, & ! For refined meshes se_ftype, & ! forcing type @@ -213,8 +213,8 @@ subroutine dyn_readnl(NLFileName) se_kmin_jet, & se_kmax_jet, & se_molecular_diff, & - se_pgf_formulation - + se_pgf_formulation, & + se_dribble_in_rsplit_loop !-------------------------------------------------------------------------- ! defaults for variables not set by build-namelist @@ -288,7 +288,7 @@ subroutine dyn_readnl(NLFileName) call MPI_bcast(se_kmax_jet, 1, mpi_integer, masterprocid, mpicom, ierr) call MPI_bcast(se_molecular_diff, 1, mpi_real8, masterprocid, mpicom, ierr) call MPI_bcast(se_pgf_formulation, 1, mpi_integer, masterprocid, mpicom, ierr) - + call MPI_bcast(se_dribble_in_rsplit_loop, 1, mpi_integer, masterprocid, mpicom, ierr) if (se_npes <= 0) then call endrun('dyn_readnl: ERROR: se_npes must be > 0') end if @@ -356,7 +356,7 @@ subroutine dyn_readnl(NLFileName) variable_nsplit = .false. molecular_diff = se_molecular_diff pgf_formulation = se_pgf_formulation - + dribble_in_rsplit_loop = se_dribble_in_rsplit_loop if (fv_nphys > 0) then ! Use finite volume physics grid and CSLAM for tracer advection nphys_pts = fv_nphys*fv_nphys @@ -472,7 +472,7 @@ subroutine dyn_readnl(NLFileName) end if end if - if (fv_nphys > 0) then + if (use_cslam) then write(iulog, '(a)') 'dyn_readnl: physics will run on FVM points; advection by CSLAM' write(iulog,'(a,i0)') 'dyn_readnl: se_fv_nphys = ', fv_nphys else @@ -618,12 +618,14 @@ subroutine dyn_init(dyn_in, dyn_out) integer :: m_cnst, m ! 
variables for initializing energy and axial angular momentum diagnostics - integer, parameter :: num_stages = 12 - character (len = 4), dimension(num_stages) :: stage = (/"dED","dAF","dBD","dAD","dAR","dBF","dBH","dCH","dAH","dBS","dAS","p2d"/) + integer, parameter :: num_stages = 14 + character (len = 4), dimension(num_stages) :: stage = (/"dED","dAF","dBD","dBL","dAL","dAD","dAR","dBF","dBH","dCH","dAH","dBS","dAS","p2d"/) character (len = 70),dimension(num_stages) :: stage_txt = (/& " end of previous dynamics ",& !dED " from previous remapping or state passed to dynamics",& !dAF - state in beginning of nsplit loop " state after applying CAM forcing ",& !dBD - state after applyCAMforcing + " before floating dynamics ",& !dBL + " after floating dynamics ",& !dAL " before vertical remapping ",& !dAD - state before vertical remapping " after vertical remapping ",& !dAR - state at end of nsplit loop " state passed to parameterizations ",& !dBF @@ -799,28 +801,49 @@ subroutine dyn_init(dyn_in, dyn_out) ! nu_scale_top(:) = 0.0_r8 if (nu_top>0) then - ptop = hvcoord%hyai(1)*hvcoord%ps0 - if (ptop>300.0_r8) then - ! - ! for low tops the tanh formulae below makes the sponge excessively deep - ! - nu_scale_top(1) = 4.0_r8 - nu_scale_top(2) = 2.0_r8 - nu_scale_top(3) = 1.0_r8 - ksponge_end = 3 - else - do k=1,nlev - press = hvcoord%hyam(k)*hvcoord%ps0+hvcoord%hybm(k)*pstd - nu_scale_top(k) = 8.0_r8*(1.0_r8+tanh(1.0_r8*log(ptop/press(1)))) ! tau will be maximum 8 at model top - if (nu_scale_top(k).ge.0.15_r8) then - ksponge_end = k - else - nu_scale_top(k) = 0.0_r8 - end if - end do - end if + ptop = hvcoord%hyai(1)*hvcoord%ps0 + if (ptop>300.0_r8) then + ! + ! for low tops the tanh formulae below makes the sponge excessively deep + ! + nu_scale_top(1) = 4.0_r8 + nu_scale_top(2) = 2.0_r8 + nu_scale_top(3) = 1.0_r8 + ksponge_end = 3 + else if (ptop>100.0_r8) then + ! + ! CAM6 top (~225 Pa) or CAM7 low top + ! + ! For backwards compatibility numbers below match tanh profile + ! used in FV + ! + nu_scale_top(1) = 4.4_r8 + nu_scale_top(2) = 1.3_r8 + nu_scale_top(3) = 3.9_r8 + ksponge_end = 3 + else if (ptop>1e-1_r8) then + ! + ! CAM7 FMT + ! + nu_scale_top(1) = 3.0_r8 + nu_scale_top(2) = 1.0_r8 + nu_scale_top(3) = 0.1_r8 + nu_scale_top(4) = 0.05_r8 + ksponge_end = 4 + else if (ptop>1e-4_r8) then + ! + ! WACCM and WACCM-x + ! + nu_scale_top(1) = 5.0_r8 + nu_scale_top(2) = 5.0_r8 + nu_scale_top(3) = 5.0_r8 + nu_scale_top(4) = 2.0_r8 + nu_scale_top(5) = 1.0_r8 + nu_scale_top(6) = 0.1_r8 + ksponge_end = 6 + end if else - ksponge_end = 0 + ksponge_end = 0 end if ksponge_end = MAX(MAX(ksponge_end,1),kmol_end) if (masterproc) then @@ -906,8 +929,8 @@ subroutine dyn_init(dyn_in, dyn_out) ! ! Register tendency (difference) budgets ! 
- call cam_budget_em_register('dEdt_floating_dyn' ,'dAD','dBD','dyn','dif', & - longname="dE/dt floating dynamics (dAD-dBD)" ) + call cam_budget_em_register('dEdt_floating_dyn' ,'dAL','dBL','dyn','dif', & + longname="dE/dt floating dynamics (dAL-dBL)" ) call cam_budget_em_register('dEdt_vert_remap' ,'dAR','dAD','dyn','dif', & longname="dE/dt vertical remapping (dAR-dAD)" ) call cam_budget_em_register('dEdt_phys_tot_in_dyn','dBD','dAF','dyn','dif', & @@ -963,11 +986,10 @@ subroutine dyn_run(dyn_state) use air_composition, only: thermodynamic_active_species_idx_dycore use prim_driver_mod, only: prim_run_subcycle use dimensions_mod, only: cnst_name_gll - use se_dyn_time_mod, only: tstep, nsplit, timelevel_qdp + use se_dyn_time_mod, only: tstep, nsplit, timelevel_qdp, tevolve use hybrid_mod, only: config_thread_region, get_loop_ranges use control_mod, only: qsplit, rsplit, ftype_conserve use thread_mod, only: horz_num_threads - use se_dyn_time_mod, only: tevolve type(dyn_export_t), intent(inout) :: dyn_state @@ -1042,24 +1064,23 @@ subroutine dyn_run(dyn_state) end if end do - - ! convert elem(ie)%derived%fq to mass tendency - do ie = nets, nete - do m = 1, qsize + if (.not.use_cslam) then + do ie = nets, nete + do m = 1, qsize do k = 1, nlev - do j = 1, np - do i = 1, np - dyn_state%elem(ie)%derived%FQ(i,j,k,m) = dyn_state%elem(ie)%derived%FQ(i,j,k,m)* & - rec2dt*dyn_state%elem(ie)%state%dp3d(i,j,k,tl_f) - end do - end do + do j = 1, np + do i = 1, np + dyn_state%elem(ie)%derived%FQ(i,j,k,m) = dyn_state%elem(ie)%derived%FQ(i,j,k,m)* & + rec2dt*dyn_state%elem(ie)%state%dp3d(i,j,k,tl_f) + end do + end do end do - end do - end do - + end do + end do + end if - if (ftype_conserve>0) then + if (ftype_conserve>0.and..not.use_cslam) then do ie = nets, nete do k=1,nlev do j=1,np @@ -1076,7 +1097,6 @@ subroutine dyn_run(dyn_state) end do end if - if (use_cslam) then do ie = nets, nete do m = 1, ntrac @@ -1795,6 +1815,7 @@ subroutine set_phis(dyn_in) integer :: ierr, pio_errtype character(len=max_fieldname_len) :: fieldname + character(len=max_fieldname_len) :: fieldname_gll character(len=max_hcoordname_len):: grid_name integer :: dims(2) integer :: dyn_cols @@ -1828,7 +1849,7 @@ subroutine set_phis(dyn_in) allocate(phis_tmp(npsq,nelemd)) phis_tmp = 0.0_r8 - if (fv_nphys > 0) then + if (use_cslam) then allocate(phis_phys_tmp(fv_nphys**2,nelemd)) phis_phys_tmp = 0.0_r8 do ie=1,nelemd @@ -1853,7 +1874,7 @@ subroutine set_phis(dyn_in) ! Set name of grid object which will be used to read data from file ! into internal data structure via PIO. - if (fv_nphys == 0) then + if (.not.use_cslam) then grid_name = 'GLL' else grid_name = 'physgrid_d' @@ -1878,13 +1899,38 @@ subroutine set_phis(dyn_in) end if fieldname = 'PHIS' - if (dyn_field_exists(fh_topo, trim(fieldname))) then - if (fv_nphys == 0) then - call read_dyn_var(fieldname, fh_topo, 'ncol', phis_tmp) + fieldname_gll = 'PHIS_gll' + if (use_cslam.and.dyn_field_exists(fh_topo, trim(fieldname_gll),required=.false.)) then + ! + ! If physgrid it is recommended to read in PHIS on the GLL grid and then + ! map to the physgrid in d_p_coupling + ! + ! This requires a topo file with PHIS_gll on it ... + ! 
+ if (masterproc) then + write(iulog, *) "Reading in PHIS on GLL grid (mapped to physgrid in d_p_coupling)" + end if + call read_dyn_var(fieldname_gll, fh_topo, 'ncol_gll', phis_tmp) + else if (dyn_field_exists(fh_topo, trim(fieldname))) then + if (.not.use_cslam) then + if (masterproc) then + write(iulog, *) "Reading in PHIS" + end if + call read_dyn_var(fieldname, fh_topo, 'ncol', phis_tmp) else - call read_phys_field_2d(fieldname, fh_topo, 'ncol', phis_phys_tmp) - call map_phis_from_physgrid_to_gll(dyn_in%fvm, elem, phis_phys_tmp, & - phis_tmp, pmask) + ! + ! For backwards compatibility we allow reading in PHIS on the physgrid + ! which is then mapped to the GLL grid and back to the physgrid in d_p_coupling + ! (the latter is to avoid noise in derived quantities such as PSL) + ! + if (masterproc) then + write(iulog, *) "Reading in PHIS on physgrid" + write(iulog, *) "Recommended to read in PHIS on GLL grid" + end if + call read_phys_field_2d(fieldname, fh_topo, 'ncol', phis_phys_tmp) + call map_phis_from_physgrid_to_gll(dyn_in%fvm, elem, phis_phys_tmp, & + phis_tmp, pmask) + deallocate(phis_phys_tmp) end if else call endrun(sub//': Could not find PHIS field on input datafile') @@ -1916,44 +1962,6 @@ subroutine set_phis(dyn_in) PHIS_OUT=phis_tmp, mask=pmask(:)) deallocate(glob_ind) - if (fv_nphys > 0) then - - ! initialize PHIS on physgrid - allocate(latvals_phys(fv_nphys*fv_nphys*nelemd)) - allocate(lonvals_phys(fv_nphys*fv_nphys*nelemd)) - indx = 1 - do ie = 1, nelemd - do j = 1, fv_nphys - do i = 1, fv_nphys - latvals_phys(indx) = dyn_in%fvm(ie)%center_cart_physgrid(i,j)%lat - lonvals_phys(indx) = dyn_in%fvm(ie)%center_cart_physgrid(i,j)%lon - indx = indx + 1 - end do - end do - end do - - allocate(pmask_phys(fv_nphys*fv_nphys*nelemd)) - pmask_phys(:) = .true. - allocate(glob_ind(fv_nphys*fv_nphys*nelemd)) - - j = 1 - do ie = 1, nelemd - do i = 1, fv_nphys*fv_nphys - ! Create a global(ish) column index - glob_ind(j) = elem(ie)%GlobalId - j = j + 1 - end do - end do - - call analytic_ic_set_ic(vcoord, latvals_phys, lonvals_phys, glob_ind, & - PHIS_OUT=phis_phys_tmp, mask=pmask_phys) - - deallocate(latvals_phys) - deallocate(lonvals_phys) - deallocate(pmask_phys) - deallocate(glob_ind) - end if - end if deallocate(pmask) @@ -1969,16 +1977,7 @@ subroutine set_phis(dyn_in) end do end do end do - if (fv_nphys > 0) then - do ie = 1, nelemd - dyn_in%fvm(ie)%phis_physgrid = RESHAPE(phis_phys_tmp(:,ie),(/fv_nphys,fv_nphys/)) - end do - end if - deallocate(phis_tmp) - if (fv_nphys > 0) then - deallocate(phis_phys_tmp) - end if ! boundary exchange to update the redundent columns in the element objects do ie = 1, nelemd diff --git a/src/dynamics/se/dyn_grid.F90 b/src/dynamics/se/dyn_grid.F90 index 293f7402dd..aa3ec8027a 100644 --- a/src/dynamics/se/dyn_grid.F90 +++ b/src/dynamics/se/dyn_grid.F90 @@ -177,12 +177,12 @@ subroutine dyn_grid_init() if (iam < par%nprocs) then call prim_init1(elem, fvm, par, TimeLevel) - if (fv_nphys > 0) then + if (use_cslam) then call dp_init(elem, fvm) end if if (fv_nphys > 0) then - qsize_local = thermodynamic_active_species_num + 3 + qsize_local = 3 else qsize_local = pcnst + 3 end if diff --git a/src/dynamics/se/gravity_waves_sources.F90 b/src/dynamics/se/gravity_waves_sources.F90 index 9adffc001b..a19733b465 100644 --- a/src/dynamics/se/gravity_waves_sources.F90 +++ b/src/dynamics/se/gravity_waves_sources.F90 @@ -141,7 +141,7 @@ subroutine compute_frontogenesis(frontgf,frontga,tl,tlq,elem,ederiv,hybrid,nets, do ie=nets,nete ! 
pressure at model top
-     pint(:,:) = hvcoord%hyai(1)
+     pint(:,:) = hvcoord%hyai(1)*hvcoord%ps0
      do k=1,nlev
        ! moist pressure at mid points
        sum_water(:,:) = 1.0_r8
diff --git a/src/physics/cam/vertical_diffusion.F90 b/src/physics/cam/vertical_diffusion.F90
index 224d87e3a0..12c50b4234 100644
--- a/src/physics/cam/vertical_diffusion.F90
+++ b/src/physics/cam/vertical_diffusion.F90
@@ -141,6 +141,8 @@ module vertical_diffusion
 logical :: waccmx_mode = .false.
 logical :: do_hb_above_clubb = .false.
 
+real(r8),allocatable :: kvm_sponge(:)
+
 contains
 
 ! =============================================================================== !
@@ -280,6 +282,7 @@ subroutine vertical_diffusion_init(pbuf2d)
   use beljaars_drag_cam, only : beljaars_drag_init
   use upper_bc, only : ubc_init
   use phys_control, only : waccmx_is, fv_am_correction
+  use ref_pres, only : ptop_ref
 
   type(physics_buffer_desc), pointer :: pbuf2d(:,:)
   character(128) :: errstring ! Error status for init_vdiff
@@ -289,7 +292,7 @@ subroutine vertical_diffusion_init(pbuf2d)
   real(r8), parameter :: ntop_eddy_pres = 1.e-7_r8 ! Pressure below which eddy diffusion is not done in WACCM-X. (Pa)
 
-  integer :: im, l, m, nmodes, nspec
+  integer :: im, l, m, nmodes, nspec, ierr
 
   logical :: history_amwg  ! output the variables used by the AMWG diag package
   logical :: history_eddy  ! output the eddy variables
@@ -297,10 +300,48 @@ subroutine vertical_diffusion_init(pbuf2d)
   integer :: history_budget_histfile_num ! output history file number for budget fields
   logical :: history_waccm ! output variables of interest for WACCM runs
 
-  ! ----------------------------------------------------------------- !
+  !
+  ! add sponge layer vertical diffusion
+  !
+  if (ptop_ref>1e-1_r8.and.ptop_ref<100.0_r8) then
+    !
+    ! CAM7 FMT (but not CAM6 top (~225 Pa) or CAM7 low top or lower)
+    !
+    allocate(kvm_sponge(4), stat=ierr)
+    if( ierr /= 0 ) then
+      write(iulog,*) 'vertical_diffusion_init: kvm_sponge allocation error = ',ierr
+      call endrun('vertical_diffusion_init: failed to allocate kvm_sponge array')
+    end if
+    kvm_sponge(1) = 2E6_r8
+    kvm_sponge(2) = 2E6_r8
+    kvm_sponge(3) = 0.5E6_r8
+    kvm_sponge(4) = 0.1E6_r8
+  else if (ptop_ref>1e-4_r8) then
+    !
+    ! WACCM and WACCM-x
+    !
+    allocate(kvm_sponge(6), stat=ierr)
+    if( ierr /= 0 ) then
+      write(iulog,*) 'vertical_diffusion_init: kvm_sponge allocation error = ',ierr
+      call endrun('vertical_diffusion_init: failed to allocate kvm_sponge array')
+    end if
+    kvm_sponge(1) = 2E6_r8
+    kvm_sponge(2) = 2E6_r8
+    kvm_sponge(3) = 1.5E6_r8
+    kvm_sponge(4) = 1.0E6_r8
+    kvm_sponge(5) = 0.5E6_r8
+    kvm_sponge(6) = 0.1E6_r8
+  end if
 
   if (masterproc) then
     write(iulog,*)'Initializing vertical diffusion (vertical_diffusion_init)'
+    if (allocated(kvm_sponge)) then
+      write(iulog,*)'Artificial sponge layer vertical diffusion added:'
+      do k=1,size(kvm_sponge(:),1)
+        write(iulog,'(a44,i2,a17,e9.2,a8)') 'vertical diffusion coefficient at interface',k,' is increased by ', &
+             kvm_sponge(k),' m2 s-1'
+      end do
+    end if !allocated
   end if
 
   ! Check to see if WACCM-X is on (currently we don't care whether the
@@ -633,7 +674,6 @@ subroutine vertical_diffusion_init(pbuf2d)
        call pbuf_set_field(pbuf2d, qti_flx_idx, 0.0_r8)
     end if
   end if
-
 end subroutine vertical_diffusion_init
 
 ! =============================================================================== !
@@ -695,6 +735,7 @@ subroutine vertical_diffusion_tend( &
   use upper_bc, only : ubc_get_flxs
   use coords_1d, only : Coords1D
   use phys_control, only : cam_physpkg_is
+  use ref_pres, only : ptop_ref
 
   ! --------------- !
   ! Input Arguments !
@@ -1067,6 +1108,14 @@
   call outfld( 'ustar', ustar(:), pcols, lchnk )
   call outfld( 'obklen', obklen(:), pcols, lchnk )
 
+  !
+  ! add sponge layer vertical diffusion
+  !
+  if (allocated(kvm_sponge)) then
+    do k=1,size(kvm_sponge(:),1)
+      kvm(:ncol,1) = kvm(:ncol,1)+kvm_sponge(k)
+    end do
+  end if
 
   ! kvh (in pbuf) is used by other physics parameterizations, and as an initial guess in compute_eddy_diff
   ! on the next timestep.  It is not updated by the compute_vdiff call below.
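For illustration, a minimal standalone Fortran sketch (separate from the patch itself) of what the sponge-layer code in the vertical_diffusion_tend hunk above does at run time: each kvm_sponge entry is accumulated onto the top-interface eddy diffusivity. The array dimensions, background diffusivity value, and program structure are assumptions made for the example; the 4-entry profile mirrors the CAM7 FMT branch of vertical_diffusion_init shown above.

! Illustrative sketch only; mirrors the kvm_sponge application shown in the hunk above.
program kvm_sponge_demo
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  integer, parameter :: ncol = 4, pverp = 8            ! assumed (illustrative) dimensions
  real(r8) :: kvm(ncol,pverp)                          ! interface eddy diffusivity (m2 s-1)
  real(r8) :: kvm_sponge(4)                            ! CAM7 FMT-like profile from the init hunk
  integer  :: k

  kvm        = 10.0_r8                                 ! assumed background value
  kvm_sponge = [2.0e6_r8, 2.0e6_r8, 0.5e6_r8, 0.1e6_r8]

  do k = 1, size(kvm_sponge)
     ! as coded in the hunk above: the increments accumulate on the top interface
     kvm(:ncol,1) = kvm(:ncol,1) + kvm_sponge(k)
  end do

  write(*,'(a,es10.3)') 'top-interface diffusivity after sponge: ', kvm(1,1)
end program kvm_sponge_demo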