diff --git a/lib/adf_diag.py b/lib/adf_diag.py index 47383d0ff..2de865323 100644 --- a/lib/adf_diag.py +++ b/lib/adf_diag.py @@ -485,179 +485,186 @@ def call_ncrcat(cmd): print(wmsg) vert_coord_type = None - # End if (long name) - # End if (vert_coord) - else: - # No level dimension found, so assume there is no vertical coordinate: - vert_coord_type = None - # End if (lev existence) - # ------------------------ - - # Check if time series directory exists, and if not, then create it: - # Use pathlib to create parent directories, if necessary. - Path(ts_dir[case_idx]).mkdir(parents=True, exist_ok=True) - - # INPUT NAME TEMPLATE: $CASE.$scomp.[$type.][$string.]$date[$ending] - first_file_split = str(hist_files[0]).split(".") - if first_file_split[-1] == "nc": - time_string_start = first_file_split[-2].replace("-", "") - else: - time_string_start = first_file_split[-1].replace("-", "") - last_file_split = str(hist_files[-1]).split(".") - if last_file_split[-1] == "nc": - time_string_finish = last_file_split[-2].replace("-", "") - else: - time_string_finish = last_file_split[-1].replace("-", "") - time_string = "-".join([time_string_start, time_string_finish]) - - # Loop over CAM history variables: - list_of_commands = [] - vars_to_derive = [] - # create copy of var list that can be modified for derivable variables - diag_var_list = self.diag_var_list - for var in diag_var_list: - if var not in hist_file_var_list: - vres = res.get(var, {}) - if "derivable_from" in vres: - constit_list = vres["derivable_from"] - for constit in constit_list: - if constit not in diag_var_list: - diag_var_list.append(constit) - vars_to_derive.append(var) - continue - # INPUT NAME TEMPLATE: $CASE.$scomp.[$type.][$string.]$date[$ending] - first_file_split = str(hist_files[0]).split(".") - if first_file_split[-1] == "nc": - time_string_start = first_file_split[-2].replace("-", "") - else: - time_string_start = first_file_split[-1].replace("-", "") - last_file_split = str(hist_files[-1]).split(".") - if last_file_split[-1] == "nc": - time_string_finish = last_file_split[-2].replace("-", "") - else: - time_string_finish = last_file_split[-1].replace("-", "") - time_string = "-".join([time_string_start, time_string_finish]) - - # Loop over CAM history variables: - list_of_commands = [] - vars_to_derive = [] - # create copy of var list that can be modified for derivable variables - diag_var_list = self.diag_var_list - - # Aerosol Calcs - #-------------- - #Always make sure PMID is made if aerosols are desired in config file - if "PMID" not in diag_var_list: - if any(item in res["aerosol_zonal_list"] for item in diag_var_list): - diag_var_list += ["PMID"] - if "T" not in diag_var_list: - if any(item in res["aerosol_zonal_list"] for item in diag_var_list): - diag_var_list += ["T"] - #End aerosol calcs - - for var in diag_var_list: - if var not in hist_file_var_list: - vres = res.get(var, {}) - if "derivable_from" in vres: - constit_list = vres["derivable_from"] - for constit in constit_list: - if constit not in diag_var_list: - diag_var_list.append(constit) - vars_to_derive.append(var) - continue - else: - msg = f"WARNING: {var} is not in the file {hist_files[0]}." - msg += " No time series will be generated." 
- print(msg) - continue - - # Check if variable has a "lev" dimension according to first file: - has_lev = bool("lev" in hist_file_ds[var].dims) - - # Create full path name, file name template: - # $cam_case_name.$hist_str.$variable.YYYYMM-YYYYMM.nc - - ts_outfil_str = ( - ts_dir[case_idx] - + os.sep - + ".".join([case_name, hist_str, var, time_string, "nc"]) - ) - - # Check if files already exist in time series directory: - ts_file_list = glob.glob(ts_outfil_str) - - # If files exist, then check if over-writing is allowed: - if ts_file_list: - if not overwrite_ts[case_idx]: - # If not, then simply skip this variable: - continue - - # Notify user of new time series file: - print(f"\t - time series for {var}") - - # Variable list starts with just the variable - ncrcat_var_list = f"{var}" - - # Determine "ncrcat" command to generate time series file: - if "date" in hist_file_ds[var].dims: - ncrcat_var_list = ncrcat_var_list + ",date" - if "datesec" in hist_file_ds[var].dims: - ncrcat_var_list = ncrcat_var_list + ",datesec" - - if has_lev and vert_coord_type: - # For now, only add these variables if using CAM: - if "cam" in hist_str: - # PS might be in a different history file. If so, continue without error. - ncrcat_var_list = ncrcat_var_list + ",hyam,hybm,hyai,hybi" - - if "PS" in hist_file_var_list: - ncrcat_var_list = ncrcat_var_list + ",PS" - print("Adding PS to file") - else: - wmsg = "WARNING: PS not found in history file." - wmsg += " It might be needed at some point." - print(wmsg) - # End if - - if vert_coord_type == "height": - # Adding PMID here works, but significantly increases - # the storage (disk usage) requirements of the ADF. - # This can be alleviated in the future by figuring out - # a way to determine all of the regridding targets at - # the start of the ADF run, and then regridding a single - # PMID file to each one of those targets separately. -JN - if "PMID" in hist_file_var_list: - ncrcat_var_list = ncrcat_var_list + ",PMID" - print("Adding PMID to file") - else: - wmsg = "WARNING: PMID not found in history file." - wmsg += " It might be needed at some point." - print(wmsg) - # End if PMID - # End if height - # End if cam - # End if has_lev - - cmd = ( - ["ncrcat", "-O", "-4", "-h", "--no_cll_mth", "-v", ncrcat_var_list] - + hist_files - + ["-o", ts_outfil_str] - ) - - # Add to command list for use in multi-processing pool: - list_of_commands.append(cmd) - - # End variable loop - # Now run the "ncrcat" subprocesses in parallel: - with mp.Pool(processes=self.num_procs) as mpool: - _ = mpool.map(call_ncrcat, list_of_commands) + # End if (long name) + # End if (vert_coord) + else: + # No level dimension found, so assume there is no vertical coordinate: + vert_coord_type = None + # End if (lev existence) + # ------------------------ + + # Check if time series directory exists, and if not, then create it: + # Use pathlib to create parent directories, if necessary. 
+ Path(ts_dir[case_idx]).mkdir(parents=True, exist_ok=True)
+
+ # INPUT NAME TEMPLATE: $CASE.$scomp.[$type.][$string.]$date[$ending]
+ first_file_split = str(hist_files[0]).split(".")
+ if first_file_split[-1] == "nc":
+ time_string_start = first_file_split[-2].replace("-", "")
+ else:
+ time_string_start = first_file_split[-1].replace("-", "")
+ last_file_split = str(hist_files[-1]).split(".")
+ if last_file_split[-1] == "nc":
+ time_string_finish = last_file_split[-2].replace("-", "")
+ else:
+ time_string_finish = last_file_split[-1].replace("-", "")
+ time_string = "-".join([time_string_start, time_string_finish])
+
+ # Loop over CAM history variables:
+ list_of_commands = []
+ vars_to_derive = []
+ # create copy of var list that can be modified for derivable variables
+ diag_var_list = self.diag_var_list
+
+ # Aerosol Calcs
+ #--------------
+ #Always make sure PMID is made if aerosols are desired in config file
+ if "PMID" not in diag_var_list:
+ if any(item in res["aerosol_zonal_list"] for item in diag_var_list):
+ diag_var_list += ["PMID"]
+ if "T" not in diag_var_list:
+ if any(item in res["aerosol_zonal_list"] for item in diag_var_list):
+ diag_var_list += ["T"]
+ #End aerosol calcs
+
+ for var in diag_var_list:
+ if var not in hist_file_var_list:
+ vres = res.get(var, {})
+ if "derivable_from" in vres:
+ constit_list = vres["derivable_from"]
+ for constit in constit_list:
+ if constit not in diag_var_list:
+ diag_var_list.append(constit)
+ vars_to_derive.append(var)
+ continue
+ else:
+ msg = f"WARNING: {var} is not in the file {hist_files[0]}."
+ msg += " No time series will be generated."
+ print(msg)
+ continue
+
+ # Check if variable has a "lev" dimension according to first file:
+ has_lev = bool("lev" in hist_file_ds[var].dims)
+
+ # Create full path name, file name template:
+ # $cam_case_name.$hist_str.$variable.YYYYMM-YYYYMM.nc
+
+ ts_outfil_str = (
+ ts_dir[case_idx]
+ + os.sep
+ + ".".join([case_name, hist_str, var, time_string, "nc"])
+ )
+
+ # Check if files already exist in time series directory:
+ ts_file_list = glob.glob(ts_outfil_str)
+
+ # If files exist, then check if over-writing is allowed:
+ if ts_file_list:
+ if not overwrite_ts[case_idx]:
+ # If not, then simply skip this variable:
+ continue
+
+ # Notify user of new time series file:
+ print(f"\t - time series for {var}")
+
+ # Variable list starts with just the variable
+ ncrcat_var_list = f"{var}"
+
+ # Determine "ncrcat" command to generate time series file:
+ if "date" in hist_file_ds[var].dims:
+ ncrcat_var_list = ncrcat_var_list + ",date"
+ if "datesec" in hist_file_ds[var].dims:
+ ncrcat_var_list = ncrcat_var_list + ",datesec"
+
+ if has_lev and vert_coord_type:
+ # For now, only add these variables if using CAM:
+ if "cam" in hist_str:
+ # PS might be in a different history file. If so, continue without error.
+ ncrcat_var_list = ncrcat_var_list + ",hyam,hybm,hyai,hybi"
+
+ if "PS" in hist_file_var_list:
+ ncrcat_var_list = ncrcat_var_list + ",PS"
+ print("Adding PS to file")
+ else:
+ wmsg = "WARNING: PS not found in history file."
+ wmsg += " It might be needed at some point."
+ print(wmsg)
+ # End if
+
+ if vert_coord_type == "height":
+ # Adding PMID here works, but significantly increases
+ # the storage (disk usage) requirements of the ADF.
+ # This can be alleviated in the future by figuring out
+ # a way to determine all of the regridding targets at
+ # the start of the ADF run, and then regridding a single
+ # PMID file to each one of those targets separately. -JN
+ if "PMID" in hist_file_var_list:
+ ncrcat_var_list = ncrcat_var_list + ",PMID"
+ print("Adding PMID to file")
+ else:
+ wmsg = "WARNING: PMID not found in history file."
+ wmsg += " It might be needed at some point."
+ print(wmsg)
+ # End if PMID
+ # End if height
+ # End if cam
+ # End if has_lev
+
+ cmd = (
+ ["ncrcat", "-O", "-4", "-h", "--no_cll_mth", "-v", ncrcat_var_list]
+ + hist_files
+ + ["-o", ts_outfil_str]
+ )
+
+ # Add to command list for use in multi-processing pool:
+ list_of_commands.append(cmd)
+
+ # End variable loop
+
+ # Now run the "ncrcat" subprocesses in parallel:
+ with mp.Pool(processes=self.num_procs) as mpool:
+ _ = mpool.map(call_ncrcat, list_of_commands)
+
+ if vars_to_derive:
+ self.derive_variables(
+ res=res, vars_to_derive=vars_to_derive, ts_dir=ts_dir[case_idx]
+ )
+ # End with
- if vars_to_derive:
- self.derive_variables(
- res=res, vars_to_derive=vars_to_derive, ts_dir=ts_dir[case_idx]
- )
- # End with
# End for hist_str
# End cases loop
@@ -1166,6 +1173,99 @@ def my_formatwarning(msg, *args, **kwargs):
#End def
########
+ print(
+ f"[{__name__}] Warning: '{var}' file was found and overwrite is False. Will use existing file."
+ )
+ continue
+
+ #NOTE: this will need to be changed when derived equations are more complex! - JR
+ if var == "RESTOM":
+ der_val = ds["FSNT"]-ds["FLNT"]
+ else:
+ #Loop through all constituents and sum
+ der_val = 0
+ for v in constit_list:
+ der_val += ds[v]
+
+ #Set derived variable name and add to dataset
+ der_val.name = var
+ ds[var] = der_val
+
+ #Aerosol Calculations - used for zonal plots
+ #These will be multiplied by rho (density of dry air)
+ ds_pmid_done = False
+ ds_t_done = False
+ if var in res["aerosol_zonal_list"]:
+
+ #Only calculate once for all aerosol vars
+ if not ds_pmid_done:
+ ds_pmid = _load_dataset(glob.glob(os.path.join(ts_dir, "*.PMID.*")))
+ ds_pmid_done = True
+ if ds_pmid is None:
+ errmsg = "Missing necessary files for dry air density (rho) calculation.\n"
+ errmsg += "Please make sure 'PMID' is in the CAM run for aerosol calculations"
+ print(errmsg)
+ continue
+ if not ds_t_done:
+ ds_t = _load_dataset(glob.glob(os.path.join(ts_dir, "*.T.*")))
+ ds_t_done = True
+ if ds_t is None:
+ errmsg = "Missing necessary files for dry air density (rho) calculation.\n"
+ errmsg += "Please make sure 'T' is in the CAM run for aerosol calculations"
+ print(errmsg)
+ continue
+
+ #Multiply aerosol by dry air density (rho): P/(Rd*T)
+ ds[var] = ds[var]*(ds_pmid["PMID"]/(res["Rgas"]*ds_t["T"]))
+
+ #Sulfate conversion factor
+ if var == "SO4":
+ ds[var] = ds[var]*(96./115.)
+
+ #Drop all constituents from final saved dataset
+ #These are not necessary because they have their own time series files
+ ds_final = ds.drop_vars(constit_list)
+ ds_final.to_netcdf(derived_file, unlimited_dims='time', mode='w')
+
+########
+
+#Helper Function(s)
+def _load_dataset(fils):
+ """
+ This method exists to get an xarray Dataset from input file information that can be passed into the plotting methods.
+
+ Parameters
+ ----------
+ fils : list
+ strings or paths to input file(s)
+
+ Returns
+ -------
+ xr.Dataset
+
+ Notes
+ -----
+ When just one entry is provided, use `open_dataset`, otherwise `open_mfdataset`
+ """
+ import warnings # use to warn user about missing files.
+
+ #Format warning messages:
+ def my_formatwarning(msg, *args, **kwargs):
+ """Issue `msg` as warning."""
+ return str(msg) + '\n'
+ warnings.formatwarning = my_formatwarning
+
+ if len(fils) == 0:
+ warnings.warn("Input file list is empty.")
+ return None
+ elif len(fils) > 1:
+ return xr.open_mfdataset(fils, combine='by_coords')
+ else:
+ return xr.open_dataset(fils[0])
+ #End if
+#End def
+
######### MDTF functions #########
def move_tsfiles_for_mdtf(self, verbose):
diff --git a/lib/adf_variable_defaults.yaml b/lib/adf_variable_defaults.yaml
index 139158b51..b8ecaa1ee 100644
--- a/lib/adf_variable_defaults.yaml
+++ b/lib/adf_variable_defaults.yaml
@@ -54,6 +54,9 @@
# derivable_from -> If not present in the available output files, the variable can be derived from
# other variables that are present (e.g. PRECT can be derived from PRECC and PRECL),
# which are specified in this list
+# NOTE: this is not very flexible at the moment! It can only handle variables that
+# are sums of the constituents. Further flexibility is being explored.
+#
#
# Final Note: Please do not modify this file unless you plan to push your changes back to the ADF repo.
# If you would like to modify this file for your personal ADF runs then it is recommended
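The `derivable_from` note above is backed by the new `derive_variables()` logic in `lib/adf_diag.py`: for now, a derived variable is simply the sum of its constituent time series. A minimal sketch of that pattern, using synthetic data and a hypothetical helper name (`derive_by_sum` is illustrative, not part of the ADF):

```python
import xarray as xr

def derive_by_sum(ds, var, constit_list):
    """Build `var` by summing its constituents (e.g. BC = bc_a1 + bc_a4)."""
    der_val = 0
    for v in constit_list:
        der_val = der_val + ds[v]
    der_val.name = var
    ds[var] = der_val
    # Constituents keep their own time-series files, so drop them here:
    return ds.drop_vars(constit_list)

# Synthetic stand-ins for bc_a1/bc_a4 time series:
ds = xr.Dataset({
    "bc_a1": ("time", [1.0, 2.0, 3.0]),
    "bc_a4": ("time", [0.5, 0.5, 0.5]),
})
print(derive_by_sum(ds, "BC", ["bc_a1", "bc_a4"])["BC"].values)  # [1.5 2.5 3.5]
```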
@@ -62,6 +65,19 @@
#
#+++++++++++
+#+++++++++++++
+# Available ADF Default Plot Types
+#+++++++++++++
+default_ptypes: ["Tables","LatLon","LatLon_Vector","Zonal","Meridional",
+ "NHPolar","SHPolar","Special"]
+
+#+++++++++++++
+# Constants
+#+++++++++++++
+
+#Dry Air Gas Constant:
+Rgas: 287.04 #[J/kg/K]=8.314/0.028965
+
#+++++++++++++
# Category: Microphysics
#+++++++++++++
@@ -181,6 +197,87 @@ SO2:
SOAG:
category: "Aerosols"
+BC:
+ colormap: "RdBu_r"
+ diff_colormap: "BrBG"
+ scale_factor: 1000000000
+ add_offset: 0
+ new_unit: '$\mu$g/m3'
+ mpl:
+ colorbar:
+ label : '$\mu$g/m3'
+ category: "Aerosols"
+ derivable_from: ["bc_a1", "bc_a4"]
+
+POM:
+ colormap: "RdBu_r"
+ diff_colormap: "BrBG"
+ scale_factor: 1000000000
+ add_offset: 0
+ new_unit: '$\mu$g/m3'
+ mpl:
+ colorbar:
+ label : '$\mu$g/m3'
+ category: "Aerosols"
+ derivable_from: ["pom_a1", "pom_a4"]
+
+SO4:
+ colormap: "RdBu_r"
+ diff_colormap: "BrBG"
+ scale_factor: 1000000000
+ add_offset: 0
+ new_unit: '$\mu$g/m3'
+ mpl:
+ colorbar:
+ label : '$\mu$g/m3'
+ category: "Aerosols"
+ derivable_from: ["so4_a1", "so4_a2", "so4_a3", "so4_a5"]
+
+SOA:
+ colormap: "RdBu_r"
+ diff_colormap: "BrBG"
+ scale_factor: 1000000000
+ add_offset: 0
+ new_unit: '$\mu$g/m3'
+ mpl:
+ colorbar:
+ label : '$\mu$g/m3'
+ category: "Aerosols"
+ derivable_from: ["soa_a1", "soa_a2"]
+
+DUST:
+ colormap: "RdBu_r"
+ contour_levels: [0,0.1,0.25,0.4,0.6,0.8,1.4,2,3,4,8,12,30,48,114,180]
+ non_linear: True
+ diff_colormap: "BrBG"
+ scale_factor: 1000000000
+ add_offset: 0
+ new_unit: '$\mu$g/m3'
+ mpl:
+ colorbar:
+ label : '$\mu$g/m3'
+ category: "Aerosols"
+ derivable_from: ["dst_a1", "dst_a2", "dst_a3"]
+
+SeaSalt:
+ colormap: "RdBu_r"
+ contour_levels: [0,0.05,0.075,0.2,0.3,0.4,0.7,1,1.5,2,4,6,15,24,57,90]
+ non_linear: True
+ diff_colormap: "BrBG"
+ scale_factor: 1000000000
+ add_offset: 0
+ new_unit: '$\mu$g/m3'
+ mpl:
+ colorbar:
+ label : '$\mu$g/m3'
+ ticks: [0.05,0.2,0.4,1,2,6,24,90]
+ diff_colorbar:
+ label : '$\mu$g/m3'
+ ticks: [10,8,6,4,2,0,-2,-4,-6,-8,-10]
+ category: "Aerosols"
+ derivable_from: ["ncl_a1", "ncl_a2", "ncl_a3"]
+
+
#+++++++++++++++++
# Category: Budget
@@ -497,7 +594,6 @@ QFLX:
# Category: Surface variables
#+++++++++++++++++
-
PBLH:
category: "Surface variables"
obs_file: "PBLH_ERA5_monthly_climo_197901-202112.nc"
@@ -1093,4 +1189,4 @@ utendwtem:
obs_var_name: "utendwtem"
#-----------
-#End of File
+#End of File
\ No newline at end of file
diff --git a/lib/adf_web.py b/lib/adf_web.py
index 9e3ecaae7..f7b3e0d3d 100644
--- a/lib/adf_web.py
+++ b/lib/adf_web.py
@@ -353,6 +353,9 @@ def jinja_list(seas_list):
main_site_path = "" #Set main_site_path to blank value
#End if
+ #Access variable defaults yaml file
+ res = self.variable_defaults
+
#Extract needed variables from yaml file:
case_names = self.get_cam_info('cam_case_name', required=True)
@@ -650,23 +653,21 @@ def jinja_list(seas_list):
ofil.write(rndr)
#End with
- #Check if the mean plot type page exists for this case:
+ #Mean plot type html file name
mean_ptype_file = img_pages_dir / f"mean_diag_{web_data.plot_type}.html"
- if not mean_ptype_file.exists():
- #Construct individual plot type mean_diag html files, if they don't
- #already exist:
- mean_tmpl = jinenv.get_template('template_mean_diag.html')
-
- #Remove keys from main dictionary for this html page
- templ_rend_kwarg_dict = {k: rend_kwarg_dict[k] for k in rend_kwarg_dict.keys() - {'imgs', 'var_title', 'season_title'}}
-
- templ_rend_kwarg_dict["list"] = jinja_list
- mean_rndr = mean_tmpl.render(templ_rend_kwarg_dict)
-
- #Write mean diagnostic plots HTML file:
- with open(mean_ptype_file,'w', encoding='utf-8') as ofil:
- ofil.write(mean_rndr)
- #End with
- #End if (mean_ptype exists)
+
+ #Construct individual plot type mean_diag html files
+ mean_tmpl = jinenv.get_template('template_mean_diag.html')
+
+ #Remove keys from main dictionary for this html page
+ templ_rend_kwarg_dict = {k: rend_kwarg_dict[k] for k in rend_kwarg_dict.keys() - {'imgs', 'var_title', 'season_title'}}
+ templ_rend_kwarg_dict["list"] = jinja_list
+ mean_rndr = mean_tmpl.render(templ_rend_kwarg_dict)
+
+ #Write mean diagnostic plots HTML file:
+ with open(mean_ptype_file,'w', encoding='utf-8') as ofil:
+ ofil.write(mean_rndr)
+ #End with
#End if (data frame)
#Also check if index page exists for this case:
@@ -681,6 +682,15 @@ def jinja_list(seas_list):
plot_types = plot_type_html
#End if
+ #List of ADF default plot types
+ avail_plot_types = res["default_ptypes"]
+
+ #Check if current plot type is in ADF default.
+ #If not, add it so the index.html file can include it
+ for ptype in plot_types.keys():
+ if ptype not in avail_plot_types:
+ avail_plot_types.append(ptype)
+
#Construct index.html
index_title = "AMP Diagnostics Prototype"
index_tmpl = jinenv.get_template('template_index.html')
@@ -689,7 +699,8 @@ def jinja_list(seas_list):
base_name=data_name,
case_yrs=case_yrs,
baseline_yrs=baseline_yrs,
- plot_types=plot_types)
+ plot_types=plot_types,
+ avail_plot_types=avail_plot_types)
#Write Mean diagnostics index HTML file:
with open(index_html_file, 'w', encoding='utf-8') as ofil:
@@ -748,4 +759,4 @@ def jinja_list(seas_list):
print(" ...Webpages have been generated successfully.")
#++++++++++++++++++++
#End Class definition
-#++++++++++++++++++++
+#++++++++++++++++++++
\ No newline at end of file
diff --git a/lib/plotting_functions.py b/lib/plotting_functions.py
index f84743aa3..03f7b6ae1 100644
--- a/lib/plotting_functions.py
+++ b/lib/plotting_functions.py
@@ -3,6 +3,8 @@
Functions
---------
+load_dataset()
+ generalized load dataset method used for plotting/analysis functions
use_this_norm()
switches matplotlib color normalization method
get_difference_colors(values)
@@ -96,10 +98,19 @@
import geocat.comp as gcomp
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.lines import Line2D
+import matplotlib.cm as cm
from adf_diag import AdfDiag
from adf_base import AdfError
+import warnings # use to warn user about missing files.
+
+#Format warning messages:
+def my_formatwarning(msg, *args, **kwargs):
+ """Issue `msg` as warning."""
+ return str(msg) + '\n'
+warnings.formatwarning = my_formatwarning
+
#Set non-X-window backend for matplotlib:
mpl.use('Agg')
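For context on the module-level `formatwarning` override added above: it trims Python's default "file:line: Category:" banner down to just the message text. A tiny self-contained demo of the behavior (not ADF code):

```python
import warnings

def my_formatwarning(msg, *args, **kwargs):
    """Issue `msg` as warning."""
    return str(msg) + '\n'

warnings.formatwarning = my_formatwarning
warnings.warn("Input file list is empty.")  # stderr shows only the message text
```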
@@ -123,6 +134,33 @@
#HELPER FUNCTIONS
#################
+def load_dataset(fils):
+ """
+ This method exists to get an xarray Dataset from input file information that can be passed into the plotting methods.
+
+ Parameters
+ ----------
+ fils : list
+ strings or paths to input file(s)
+
+ Returns
+ -------
+ xr.Dataset
+
+ Notes
+ -----
+ When just one entry is provided, use `open_dataset`, otherwise `open_mfdataset`
+ """
+ if len(fils) == 0:
+ warnings.warn("Input file list is empty.")
+ return None
+ elif len(fils) > 1:
+ return xr.open_mfdataset(fils, combine='by_coords')
+ else:
+ return xr.open_dataset(fils[0])
+ #End if
+#End def
+
def use_this_norm():
"""Just use the right normalization; avoids a deprecation warning."""
@@ -723,20 +761,20 @@ def make_polar_plot(wks, case_nickname, base_nickname,
levs_diff = np.unique(np.array(levelsdiff))
if len(levs) < 2:
- img1 = ax1.contourf(lons, lats, d1_cyclic, transform=ccrs.PlateCarree(), transform_first=True, colors="w", norm=norm1)
+ img1 = ax1.contourf(lons, lats, d1_cyclic, transform=ccrs.PlateCarree(), colors="w", norm=norm1)
ax1.text(0.4, 0.4, empty_message, transform=ax1.transAxes, bbox=props)
- img2 = ax2.contourf(lons, lats, d2_cyclic, transform=ccrs.PlateCarree(), transform_first=True, colors="w", norm=norm1)
+ img2 = ax2.contourf(lons, lats, d2_cyclic, transform=ccrs.PlateCarree(), colors="w", norm=norm1)
ax2.text(0.4, 0.4, empty_message, transform=ax2.transAxes, bbox=props)
else:
- img1 = ax1.contourf(lons, lats, d1_cyclic, transform=ccrs.PlateCarree(), transform_first=True, cmap=cmap1, norm=norm1, levels=levels1)
- img2 = ax2.contourf(lons, lats, d2_cyclic, transform=ccrs.PlateCarree(), transform_first=True, cmap=cmap1, norm=norm1, levels=levels1)
+ img1 = ax1.contourf(lons, lats, d1_cyclic, transform=ccrs.PlateCarree(), cmap=cmap1, norm=norm1, levels=levels1)
+ img2 = ax2.contourf(lons, lats, d2_cyclic, transform=ccrs.PlateCarree(), cmap=cmap1, norm=norm1, levels=levels1)
if len(levs_diff) < 2:
- img3 = ax3.contourf(lons, lats, dif_cyclic, transform=ccrs.PlateCarree(), transform_first=True, colors="w", norm=dnorm)
+ img3 = ax3.contourf(lons, lats, dif_cyclic, transform=ccrs.PlateCarree(), colors="w", norm=dnorm)
ax3.text(0.4, 0.4, empty_message, transform=ax3.transAxes, bbox=props)
else:
- img3 = ax3.contourf(lons, lats, dif_cyclic, transform=ccrs.PlateCarree(), transform_first=True, cmap=cmapdiff, norm=dnorm, levels=levelsdiff)
+ img3 = ax3.contourf(lons, lats, dif_cyclic, transform=ccrs.PlateCarree(), cmap=cmapdiff, norm=dnorm, levels=levelsdiff)
#Set Main title for subplots:
st = fig.suptitle(wks.stem[:-5].replace("_"," - "), fontsize=18)
@@ -1716,6 +1754,7 @@ def prep_contour_plot(adata, bdata, diffdata, **kwargs):
- 'subplots_opt': mpl kwargs for subplots
- 'contourf_opt': mpl kwargs for contourf
- 'colorbar_opt': mpl kwargs for colorbar
+ - 'diff_colorbar_opt' : mpl kwargs for difference colorbar
- 'normdiff': color normalization for difference panel
- 'cmapdiff': colormap for difference panel
- 'levelsdiff': contour levels for difference panel
@@ -1739,16 +1778,28 @@ def prep_contour_plot(adata, bdata, diffdata, **kwargs):
if 'contour_levels' in kwargs:
levels1 = kwargs['contour_levels']
- norm1 = mpl.colors.Normalize(vmin=min(levels1), vmax=max(levels1))
+ if ('non_linear' in kwargs) and (kwargs['non_linear']):
+ cmap_obj = cm.get_cmap(cmap1)
+ norm1 = mpl.colors.BoundaryNorm(levels1, cmap_obj.N)
+ else:
+ norm1 = mpl.colors.Normalize(vmin=min(levels1), vmax=max(levels1))
elif 'contour_levels_range' in kwargs:
assert len(kwargs['contour_levels_range']) == 3, \
"contour_levels_range must have exactly three entries: min, max, step"
levels1 = np.arange(*kwargs['contour_levels_range'])
- norm1 = mpl.colors.Normalize(vmin=min(levels1), vmax=max(levels1))
+ if ('non_linear' in kwargs) and (kwargs['non_linear']):
+ cmap_obj = cm.get_cmap(cmap1)
+ norm1 = mpl.colors.BoundaryNorm(levels1, cmap_obj.N)
+ else:
+ norm1 = mpl.colors.Normalize(vmin=min(levels1), vmax=max(levels1))
else:
levels1 = np.linspace(minval, maxval, 12)
- norm1 = mpl.colors.Normalize(vmin=minval, vmax=maxval)
+ if ('non_linear' in kwargs) and (kwargs['non_linear']):
+ cmap_obj = cm.get_cmap(cmap1)
+ norm1 = mpl.colors.BoundaryNorm(levels1, cmap_obj.N)
+ else:
+ norm1 = mpl.colors.Normalize(vmin=minval, vmax=maxval)
#End if
#Check if the minval and maxval are actually different. If not,
@@ -1803,16 +1854,19 @@ def prep_contour_plot(adata, bdata, diffdata, **kwargs):
subplots_opt = {}
contourf_opt = {}
colorbar_opt = {}
+ diff_colorbar_opt = {}
# extract any MPL kwargs that should be passed on:
if 'mpl' in kwargs:
subplots_opt.update(kwargs['mpl'].get('subplots',{}))
contourf_opt.update(kwargs['mpl'].get('contourf',{}))
colorbar_opt.update(kwargs['mpl'].get('colorbar',{}))
+ diff_colorbar_opt.update(kwargs['mpl'].get('diff_colorbar',{}))
#End if
return {'subplots_opt': subplots_opt,
'contourf_opt': contourf_opt,
'colorbar_opt': colorbar_opt,
+ 'diff_colorbar_opt': diff_colorbar_opt,
'normdiff': normdiff,
'cmapdiff': cmapdiff,
'levelsdiff': levelsdiff,
@@ -1899,7 +1953,6 @@ def plot_zonal_mean_and_save(wks, case_nickname, base_nickname,
levs_diff = np.unique(np.array(cp_info['levelsdiff']))
-
if len(levs) < 2:
img0, ax[0] = zonal_plot(adata['lat'], azm, ax=ax[0])
ax[0].text(0.4, 0.4, empty_message, transform=ax[0].transAxes, bbox=props)
@@ -1917,7 +1970,7 @@ def plot_zonal_mean_and_save(wks, case_nickname, base_nickname,
ax[2].text(0.4, 0.4, empty_message, transform=ax[2].transAxes, bbox=props)
else:
img2, ax[2] = zonal_plot(adata['lat'], diff, ax=ax[2], norm=cp_info['normdiff'],cmap=cp_info['cmapdiff'],levels=cp_info['levelsdiff'],**cp_info['contourf_opt'])
- fig.colorbar(img2, ax=ax[2], location='right',**cp_info['colorbar_opt'])
+ fig.colorbar(img2, ax=ax[2], location='right',**cp_info['diff_colorbar_opt'])
ax[0].set_title(case_title, loc='left', fontsize=tiFontSize)
ax[1].set_title(base_title, loc='left', fontsize=tiFontSize)
diff --git a/lib/test/unit_tests/test_adf_config.py b/lib/test/unit_tests/test_adf_config.py
index abd32d0e5..29fd8b4cb 100644
--- a/lib/test/unit_tests/test_adf_config.py
+++ b/lib/test/unit_tests/test_adf_config.py
@@ -61,7 +61,7 @@ def test_AdfConfig_create(self):
obs_data_loc = adf_test.read_config_var("obs_data_loc",
conf_dict=basic_diag_dict)
- self.assertEqual(obs_data_loc, "/glade/work/nusbaume/SE_projects/model_diagnostics/ADF_obs")
+ self.assertEqual(obs_data_loc, "/glade/campaign/cgd/amp/amwg/ADF_obs")
#####
diff --git a/lib/website_templates/adf_diag.css b/lib/website_templates/adf_diag.css
index 20d9b6543..291f227aa 100644
--- a/lib/website_templates/adf_diag.css
+++ b/lib/website_templates/adf_diag.css
@@ -232,8 +232,9 @@ table.dataframe thead th{
.grid-item-blocked {
background-color: rgba(192, 192, 192, 0.6);
- border: 1px solid rgba(0, 0, 0, 0.8);
- padding: 10px;
+ font-family: Tahoma, Geneva, Verdana, sans-serif;
+ border-collapse: collapse;
+ border-radius: 15px; padding: 10px;
font-size: 16px;
text-align: center;
}
@@ -260,7 +261,7 @@ table.dataframe thead th{
display: grid;
column-gap: 50px;
row-gap: 50px;
- grid-template-columns: repeat(3, auto);
+ grid-template-columns: repeat(4, auto);
background-color: #e4eef0;
padding: 85px;
}
@@ -315,4 +316,4 @@ table.dataframe thead th{
width: 50%;
float: left;
padding: 20px;
-}
+}
\ No newline at end of file
diff --git a/lib/website_templates/template_index.html b/lib/website_templates/template_index.html
index 747487e1c..4b2659cf8 100644
--- a/lib/website_templates/template_index.html
+++ b/lib/website_templates/template_index.html
@@ -35,10 +35,16 @@

Plot Types

- {% for type, html_file in plot_types.items() %} -
-   {{ type }} -
+ {% for avail_type in avail_plot_types %} + {% if avail_type in plot_types.keys() %} +
+   {{ avail_type }} +
+ {% else %} +
+ {{ avail_type }} +
+ {% endif %} {% endfor %}
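The reworked loop above renders every entry in `avail_plot_types`, linking it only when a generated page exists in `plot_types` and otherwise emitting an inert "blocked" grid item. A rough sketch of the behavior, with a simplified template string standing in for the real markup in `template_index.html`:

```python
from jinja2 import Template

# Brackets mark a live link, parentheses a blocked entry (illustrative only):
tmpl = Template(
    "{% for t in avail_plot_types %}"
    "{% if t in plot_types %}[{{ t }}]{% else %}({{ t }}){% endif %} "
    "{% endfor %}"
)
print(tmpl.render(
    avail_plot_types=["Tables", "LatLon", "Zonal"],
    plot_types={"LatLon": "plot_types_LatLon.html"},
))
# -> (Tables) [LatLon] (Zonal)
```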
diff --git a/scripts/analysis/amwg_table.py b/scripts/analysis/amwg_table.py index 535fbc0cd..794d5a678 100644 --- a/scripts/analysis/amwg_table.py +++ b/scripts/analysis/amwg_table.py @@ -150,6 +150,8 @@ def amwg_table(adf): #----------------------------------------- #Loop over CAM cases: + #Initialize list of case name csv files for case comparison check later + csv_list = [] for case_idx, case_name in enumerate(case_names): #Convert output location string to a Path object: @@ -206,8 +208,9 @@ def amwg_table(adf): continue #End if - #Load model data from file: - data = _load_data(ts_files[0], var) + #Load model variable data from file: + ds = pf.load_dataset(ts_files) + data = ds[var] #Extract units string, if available: if hasattr(data, 'units'): @@ -305,6 +308,9 @@ def amwg_table(adf): print(f"\n\tAMWG table for '{case_name}' not created.\n") #End try/except + #Keep track of case csv files for comparison table check later + csv_list.extend(sorted(output_location.glob(f"amwg_table_{case_name}.csv"))) + #End of model case loop #---------------------- @@ -313,7 +319,7 @@ def amwg_table(adf): #Check if observations are being compared to, if so skip table comparison... if not adf.get_basic_info("compare_obs"): #Check if all tables were created to compare against, if not, skip table comparison... - if len(sorted(output_location.glob("*.csv"))) != len(case_names): + if len(csv_list) != len(case_names): print("\tNot enough cases to compare, skipping comparison table...") else: #Create comparison table for both cases @@ -333,13 +339,6 @@ def amwg_table(adf): # Helper functions ################## -def _load_data(dataloc, varname): - import xarray as xr - ds = xr.open_dataset(dataloc) - return ds[varname] - -##### - def _get_row_vals(data): # Now that data is (time,), we can do our simple stats: diff --git a/scripts/averaging/create_TEM_files.py b/scripts/averaging/create_TEM_files.py index e09350feb..d947838d5 100644 --- a/scripts/averaging/create_TEM_files.py +++ b/scripts/averaging/create_TEM_files.py @@ -14,9 +14,13 @@ def create_TEM_files(adf): """ + #Notify user that script has started: + print("\n Generating CAM TEM diagnostics files...") + #Special ADF variables #CAM simulation variables (these quantities are always lists): case_names = adf.get_cam_info("cam_case_name", required=True) + base_name = adf.get_baseline_info("cam_case_name") #Grab h4 history files locations cam_hist_locs = adf.get_cam_info("cam_hist_loc", required=True) @@ -25,16 +29,44 @@ def create_TEM_files(adf): start_years = adf.climo_yrs["syears"] end_years = adf.climo_yrs["eyears"] + res = adf.variable_defaults # will be dict of variable-specific plot preferences + + if "qbo" in adf.plotting_scripts: + var_list = ['uzm','epfy','epfz','vtem','wtem', + 'psitem','utendepfd','utendvtem','utendwtem'] + else: + var_list = ['uzm','epfy','epfz','vtem','wtem','psitem','utendepfd'] + + tem_locs = [] + #Grab TEM diagnostics options - tem_opts = adf.read_config_var("tem_info") + #---------------------------- + #Extract TEM file save locations + tem_base_loc = adf.get_baseline_info("cam_tem_loc") + tem_case_locs = adf.get_cam_info("cam_tem_loc") + + #If path not specified, skip TEM calculation? 
+ if tem_case_locs is None:
+ print("\t 'cam_tem_loc' not found in 'diag_cam_climo', so no TEM files/diagnostics will be generated.")
+ return
+ else:
+ for tem_case_loc in tem_case_locs:
+ tem_case_loc = Path(tem_case_loc)
+ #Check if TEM directory exists, and if not, then create it:
+ if not tem_case_loc.is_dir():
+ print(f" {tem_case_loc} not found, making new directory")
+ tem_case_loc.mkdir(parents=True)
+ #End if
+ tem_locs.append(tem_case_loc)
+ #End for
- if not tem_opts:
- print("\n No TEM options provided, skipping TEM file creation." \
- "\nSee documentation or config_cam_baseline_example.yaml for options to add to configuration file.")
- return
+ #Set default to h4
+ hist_nums = adf.get_cam_info("tem_hist_str")
+ if hist_nums is None:
+ hist_nums = ["h4"]*len(case_names)
#Get test case(s) tem over-write boolean and force to list if not by default
- overwrite_tem_cases = tem_opts.get("overwrite_tem_case")
+ overwrite_tem_cases = adf.get_cam_info("overwrite_tem")
#If overwrite argument is missing, then default to False:
if overwrite_tem_cases is None:
@@ -47,109 +79,57 @@ def create_TEM_files(adf):
#Check if comparing to observations
if adf.get_basic_info("compare_obs"):
var_obs_dict = adf.var_obs_dict
+
#If dictionary is empty, then there are no observations, so quit here:
if not var_obs_dict:
- print("No observations found to plot against, so no TEM will be generated.")
- return
+ print("No observations found to plot against, so no obs-based TEM plot will be generated.")
+ pass
+
+ print("\t Processing TEM for observations:")
base_name = "Obs"
- else:
- base_name = adf.get_baseline_info("cam_case_name", required=True)
- cam_hist_locs.append(adf.get_baseline_info("cam_hist_loc", required=True))
-
- #Extract baseline years (which may be empty strings if using Obs):
- syear_baseline = adf.climo_yrs["syear_baseline"]
- eyear_baseline = adf.climo_yrs["eyear_baseline"]
- case_names.append(base_name)
- start_years.append(syear_baseline)
- end_years.append(eyear_baseline)
- overwrite_tem_cases.append(tem_opts.get("overwrite_tem_base", False))
- #End if
+ #Save Obs TEM file to first test case location
+ output_loc_idx = tem_locs[0]
- #Set default to h4
- hist_num = tem_opts.get("hist_num")
- if hist_num is None:
- hist_num = "h4"
-
- #Extract TEM file save location
- output_loc = tem_opts["tem_loc"]
-
- #If path not specified, skip TEM calculation?
- if output_loc is None: - print("\t 'tem_loc' not found in config file, so no TEM files will be generated.") - return - else: - #Notify user that script has started: - print("\n Generating CAM TEM diagnostics files...") - - output_loc = Path(output_loc) - #Check if re-gridded directory exists, and if not, then create it: - if not output_loc.is_dir(): - print(f" {output_loc} not found, making new directory") - output_loc.mkdir(parents=True) - #End if - - res = adf.variable_defaults # will be dict of variable-specific plot preferences - - if "qbo" in adf.plotting_scripts: - var_list = ['uzm','epfy','epfz','vtem','wtem', - 'psitem','utendepfd','utendvtem','utendwtem'] - else: - var_list = ['uzm','epfy','epfz','vtem','wtem','psitem','utendepfd'] - - #Check if comparing against observations - if adf.get_basic_info("compare_obs"): - print(f"\t Processing TEM for observations :") - - output_loc_idx = output_loc / base_name #Check if re-gridded directory exists, and if not, then create it: if not output_loc_idx.is_dir(): print(f" {output_loc_idx} not found, making new directory") output_loc_idx.mkdir(parents=True) #End if + print(f"\t NOTE: Observation TEM file being saved to '{output_loc_idx}'") + #Set baseline file name as full path tem_fil = output_loc_idx / f'{base_name}.TEMdiag.nc' - #Get baseline case tem over-write boolean - overwrite_tem = tem_opts.get("overwrite_tem_base") - - #If files exist, then check if over-writing is allowed: - if (tem_fil.is_file()) and (not overwrite_tem): - print(f"\t INFO: Found TEM file and clobber is False, so moving to next case.") - pass - else: - if tem_fil.is_file(): - print(f"\t INFO: Found TEM file but clobber is True, so over-writing file.") - - #Group all TEM observation files together - tem_obs_fils = [] - for var in var_list: - if var in res: - #Gather from variable defaults file - obs_file_path = Path(res[var]["obs_file"]) - if not obs_file_path.is_file(): - obs_data_loc = adf.get_basic_info("obs_data_loc") - obs_file_path = Path(obs_data_loc)/obs_file_path - - #It's likely multiple TEM vars will come from one file, so check - #to see if it already exists from other vars. - if obs_file_path not in tem_obs_fils: - tem_obs_fils.append(obs_file_path) - - ds = xr.open_mfdataset(tem_obs_fils,combine="nested") - start_year = str(ds.time[0].values)[0:4] - end_year = str(ds.time[-1].values)[0:4] - - #Update the attributes - ds.attrs['created'] = str(date.today()) - ds['lev']=ds['level'] - ds['zalat']=ds['lat'] - - #Make a copy of obs data so we don't do anything bad - ds_obs = ds.copy() - ds_base = xr.Dataset({'uzm': xr.Variable(('time', 'lev', 'zalat'), ds_obs.uzm.data), + #Group all TEM observation files together + tem_obs_fils = [] + for var in var_list: + if var in res: + #Gather from variable defaults file + obs_file_path = Path(res[var]["obs_file"]) + if not obs_file_path.is_file(): + obs_data_loc = adf.get_basic_info("obs_data_loc") + obs_file_path = Path(obs_data_loc)/obs_file_path + + #It's likely multiple TEM vars will come from one file, so check + #to see if it already exists from other vars. 
+ if obs_file_path not in tem_obs_fils: + tem_obs_fils.append(obs_file_path) + + ds = xr.open_mfdataset(tem_obs_fils,combine="nested") + start_year = str(ds.time[0].values)[0:4] + end_year = str(ds.time[-1].values)[0:4] + + #Update the attributes + ds.attrs['created'] = str(date.today()) + ds['lev']=ds['level'] + ds['zalat']=ds['lat'] + + #Make a copy of obs data so we don't do anything bad + ds_obs = ds.copy() + ds_base = xr.Dataset({'uzm': xr.Variable(('time', 'lev', 'zalat'), ds_obs.uzm.data), 'epfy': xr.Variable(('time', 'lev', 'zalat'), ds_obs.epfy.data), 'epfz': xr.Variable(('time', 'lev', 'zalat'), ds_obs.epfz.data), 'vtem': xr.Variable(('time', 'lev', 'zalat'), ds_obs.vtem.data), @@ -163,11 +143,41 @@ def create_TEM_files(adf): 'time': xr.Variable('time', ds_obs.time.values) }) - # write output to a netcdf file - ds_base.to_netcdf(tem_fil, unlimited_dims='time', mode='w') + # write output to a netcdf file + ds_base.to_netcdf(tem_fil, unlimited_dims='time', mode='w') - #End if (file creation or over-write file) - #End if baseline case + else: + if tem_base_loc: + cam_hist_locs.append(adf.get_baseline_info("cam_hist_loc", required=True)) + + #Set default to h4 + hist_num = adf.get_baseline_info("tem_hist_str") + if hist_num is None: + hist_num = "h4" + + #Extract baseline years (which may be empty strings if using Obs): + syear_baseline = adf.climo_yrs["syear_baseline"] + eyear_baseline = adf.climo_yrs["eyear_baseline"] + + case_names.append(base_name) + start_years.append(syear_baseline) + end_years.append(eyear_baseline) + + tem_base_loc = Path(tem_base_loc) + #Check if TEM directory exists, and if not, then create it: + if not tem_base_loc.is_dir(): + print(f" {tem_base_loc} not found, making new directory") + tem_base_loc.mkdir(parents=True) + #End if + + tem_locs.append(tem_base_loc) + overwrite_tem_cases.append(adf.get_baseline_info("overwrite_tem", False)) + + hist_nums.append(hist_num) + else: + print("\t 'cam_tem_loc' not found in 'diag_cam_baseline_climo', so no baseline files/diagnostics will be generated.") + + #End if (check for obs) #Loop over cases: for case_idx, case_name in enumerate(case_names): @@ -189,7 +199,7 @@ def create_TEM_files(adf): #End if #Check if history files actually exist. If not then kill script: - hist_str = '*.cam.'+hist_num + hist_str = f"*{hist_nums[case_idx]}" if not list(starting_location.glob(hist_str+'.*.nc')): emsg = f"No CAM history {hist_str} files found in '{starting_location}'." emsg += " Script is ending here." 
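The hunk above replaces the single hard-coded `*.cam.h4` pattern with a per-case history stream taken from `tem_hist_str` (defaulting to "h4"). A short sketch of how the glob is now assembled, with placeholder case names and paths:

```python
from pathlib import Path

case_names = ["case_A", "case_B"]          # placeholders
hist_nums = None                           # e.g. adf.get_cam_info("tem_hist_str") returned nothing
if hist_nums is None:
    hist_nums = ["h4"] * len(case_names)   # default stream for every case

for case_idx, case_name in enumerate(case_names):
    hist_str = f"*{hist_nums[case_idx]}"   # e.g. "*h4"
    hist_files = sorted(Path("/path/to/hist").glob(hist_str + ".*.nc"))
    print(case_name, hist_str, len(hist_files))
```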
@@ -197,7 +207,7 @@ def create_TEM_files(adf): #End if #Get full path and file for file name - output_loc_idx = output_loc / case_name + output_loc_idx = tem_locs[case_idx] #Check if re-gridded directory exists, and if not, then create it: if not output_loc_idx.is_dir(): @@ -459,4 +469,4 @@ def calc_tem(ds): utendwtem = utendwtem )) - return dstem + return dstem \ No newline at end of file diff --git a/scripts/plotting/global_latlon_map.py b/scripts/plotting/global_latlon_map.py index f55fed5b2..e04e1b91e 100644 --- a/scripts/plotting/global_latlon_map.py +++ b/scripts/plotting/global_latlon_map.py @@ -8,9 +8,6 @@ my_formatwarning(msg, *args, **kwargs) format warning messages (private method) -_load_dataset(fils) - load files into dataset - (private methd) """ #Import standard modules: from pathlib import Path @@ -242,7 +239,7 @@ def global_latlon_map(adfobj): else: oclim_fils = sorted(dclimo_loc.glob(f"{data_src}_{var}_baseline.nc")) - oclim_ds = _load_dataset(oclim_fils) + oclim_ds = pf.load_dataset(oclim_fils) if oclim_ds is None: print("WARNING: Did not find any oclim_fils. Will try to skip.") print(f"INFO: Data Location, dclimo_loc is {dclimo_loc}") @@ -266,7 +263,7 @@ def global_latlon_map(adfobj): #Load re-gridded model files: mclim_fils = sorted(mclimo_rg_loc.glob(f"{data_src}_{case_name}_{var}_*.nc")) - mclim_ds = _load_dataset(mclim_fils) + mclim_ds = pf.load_dataset(mclim_fils) #Skip this variable/case if the regridded climo file doesn't exist: if mclim_ds is None: @@ -484,33 +481,6 @@ def global_latlon_map(adfobj): # Helpers ######### -def _load_dataset(fils): - """ - loads datasets from input file information - - Parameters - ---------- - fils : list - strings or paths to input file(s) - - Returns - ------- - xr.Dataset - - Notes - ----- - When just one entry is provided, use `open_dataset`, otherwise `open_mfdatset` - """ - if len(fils) == 0: - warnings.warn(f"Input file list is empty.") - return None - elif len(fils) > 1: - return xr.open_mfdataset(fils, combine='by_coords') - else: - sfil = str(fils[0]) - return xr.open_dataset(sfil) - #End if -#End def ############## -#END OF SCRIPT +#END OF SCRIPT \ No newline at end of file diff --git a/scripts/plotting/meridional_mean.py b/scripts/plotting/meridional_mean.py index f365c039c..7e423bfb7 100644 --- a/scripts/plotting/meridional_mean.py +++ b/scripts/plotting/meridional_mean.py @@ -147,7 +147,7 @@ def meridional_mean(adfobj): else: oclim_fils = sorted(dclimo_loc.glob(f"{data_src}_{var}_baseline.nc")) #End if - oclim_ds = _load_dataset(oclim_fils) + oclim_ds = pf.load_dataset(oclim_fils) #Loop over model cases: for case_idx, case_name in enumerate(case_names): @@ -165,7 +165,7 @@ def meridional_mean(adfobj): # load re-gridded model files: mclim_fils = sorted(mclimo_rg_loc.glob(f"{data_src}_{case_name}_{var}_*.nc")) - mclim_ds = _load_dataset(mclim_fils) + mclim_ds = pf.load_dataset(mclim_fils) # stop if data is invalid: if (oclim_ds is None) or (mclim_ds is None): @@ -256,17 +256,6 @@ def meridional_mean(adfobj): # Helpers ######### -def _load_dataset(fils): - if len(fils) == 0: - warnings.warn(f"Input file list is empty.") - return None - elif len(fils) > 1: - return xr.open_mfdataset(fils, combine='by_coords') - else: - sfil = str(fils[0]) - return xr.open_dataset(sfil) - #End if -#End def ############## -#END OF SCRIPT +#END OF SCRIPT \ No newline at end of file diff --git a/scripts/plotting/polar_map.py b/scripts/plotting/polar_map.py index db4cd1b7e..f1e9f6805 100644 --- a/scripts/plotting/polar_map.py +++ 
b/scripts/plotting/polar_map.py @@ -161,13 +161,9 @@ def polar_map(adfobj): data_name = data_src else: oclim_fils = sorted(dclimo_loc.glob(f"{data_src}_{var}_baseline.nc")) - - if len(oclim_fils) > 1: - oclim_ds = xr.open_mfdataset(oclim_fils, combine='by_coords') - elif len(oclim_fils) == 1: - sfil = str(oclim_fils[0]) - oclim_ds = xr.open_dataset(sfil) - else: + + oclim_ds = pf.load_dataset(oclim_fils) + if oclim_ds is None: print("WARNING: Did not find any oclim_fils. Will try to skip.") print(f"INFO: Data Location, dclimo_loc is {dclimo_loc}") print(f"INFO: The glob is: {data_src}_{var}_*.nc") @@ -190,11 +186,8 @@ def polar_map(adfobj): # load re-gridded model files: mclim_fils = sorted(mclimo_rg_loc.glob(f"{data_src}_{case_name}_{var}_*.nc")) - if len(mclim_fils) > 1: - mclim_ds = xr.open_mfdataset(mclim_fils, combine='by_coords') - elif len(mclim_fils) == 1: - mclim_ds = xr.open_dataset(mclim_fils[0]) - else: + mclim_ds = pf.load_dataset(mclim_fils) + if mclim_ds is None: print("WARNING: Did not find any regridded climo files. Will try to skip.") print(f"INFO: Data Location, mclimo_rg_loc, is {mclimo_rg_loc}") print(f"INFO: The glob is: {data_src}_{case_name}_{var}_*.nc") @@ -396,4 +389,4 @@ def polar_map(adfobj): #END OF `polar_map` function ############## -# END OF FILE +# END OF FILE \ No newline at end of file diff --git a/scripts/plotting/qbo.py b/scripts/plotting/qbo.py index da15311b1..a5ab9dddf 100644 --- a/scripts/plotting/qbo.py +++ b/scripts/plotting/qbo.py @@ -6,6 +6,7 @@ from matplotlib.colors import ListedColormap ## used to create custom colormaps import matplotlib.colors as mcolors import matplotlib as mpl +import plotting_functions as pf def my_formatwarning(msg, *args, **kwargs): # ignore everything except the message @@ -98,7 +99,7 @@ def qbo(adfobj): #----Read in the case data and baseline ncases = len(case_loc) - casedat = [ _load_dataset(case_loc[i], case_names[i],'U') for i in range(0,ncases,1) ] + casedat = [pf.load_dataset(sorted(Path(case_loc[i]).glob(f"{case_names[i]}.*.U.*.nc"))) for i in range(0,ncases,1)] #Find indices for all case datasets that don't contain a zonal wind field (U): bad_idxs = [] @@ -190,35 +191,6 @@ def qbo(adfobj): #End QBO plotting script: return -#-------------------For Reading Data------------------------ - -def _load_dataset(data_loc, case_name, variable, other_name=None): - """ - This method exists to get an xarray Dataset that can be passed into the plotting methods. - - This could (should) be changed to use an intake-esm catalog if (when) that is available. - * At some point, we hope ADF will provide functions that can be used directly to replace this step, - so the user will not need to know how the data gets saved. - - In this example, assume timeseries files are available via the ADF api. 
- - """ - - dloc = Path(data_loc) - - # a hack here: ADF uses different file names for "reference" case and regridded model data, - # - try the longer name first (regridded), then try the shorter name - - fils = sorted(dloc.glob(f"{case_name}.*.{variable}.*.nc")) - if (len(fils) == 0): - warnings.warn("QBO: Input file list is empty.") - return None - elif (len(fils) > 1): - return xr.open_mfdataset(fils, combine='by_coords') - else: - sfil = str(fils[0]) - return xr.open_dataset(sfil) - #-----------------For Calculating----------------------------- def cosweightlat(darray, lat1, lat2): @@ -337,4 +309,4 @@ def blue2red_cmap(n, nowhite = False): colors = np.vstack((colors1, colorsw, colors2)) mymap = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors) - return mymap + return mymap \ No newline at end of file diff --git a/scripts/plotting/tape_recorder.py b/scripts/plotting/tape_recorder.py index 5d747b0b1..fe048dc5d 100644 --- a/scripts/plotting/tape_recorder.py +++ b/scripts/plotting/tape_recorder.py @@ -9,6 +9,7 @@ from dateutil.relativedelta import relativedelta import glob from pathlib import Path +import plotting_functions as pf def tape_recorder(adfobj): """ @@ -90,14 +91,14 @@ def tape_recorder(adfobj): #----------------------------------------- #This may have to change if other variables are desired in this plot type? - plot_name = plot_loc / f"Q_ANN_TapeRecorder_Mean.{plot_type}" + plot_name = plot_loc / f"Q_TapeRecorder_ANN_Special_Mean.{plot_type}" print(f"\t - Plotting annual tape recorder for Q") # Check redo_plot. If set to True: remove old plot, if it already exists: if (not redo_plot) and plot_name.is_file(): #Add already-existing plot to website (if enabled): adfobj.debug_log(f"'{plot_name}' exists and clobber is false.") - adfobj.add_website_data(plot_name, "tape_recorder", None, season="ANN", multi_case=True) + adfobj.add_website_data(plot_name, "Q_TapeRecorder", None, season="ANN", multi_case=True) return elif (redo_plot) and plot_name.is_file(): @@ -125,7 +126,8 @@ def tape_recorder(adfobj): alldat=[] runname_LT=[] for idx,key in enumerate(runs_LT2): - dat = xr.open_dataset(glob.glob(runs_LT2[key]+'/*h0.Q.*.nc')[0]) + fils= sorted(Path(runs_LT2[key]).glob('*h0.Q.*.nc')) + dat = pf.load_dataset(fils) dat = fixcesmtime(dat,start_years[idx],end_years[idx]) datzm = dat.mean('lon') dat_tropics = cosweightlat(datzm.Q, -10, 10) @@ -187,7 +189,7 @@ def tape_recorder(adfobj): fig.savefig(plot_name, bbox_inches='tight', facecolor='white') #Add plot to website (if enabled): - adfobj.add_website_data(plot_name, "tape_recorder", None, season="ANN", multi_case=True) + adfobj.add_website_data(plot_name, "Q_TapeRecorder", None, season="ANN", multi_case=True) #Notify user that script has ended: print(" ...Tape recorder plots have been generated successfully.") @@ -503,4 +505,4 @@ def plot_pre_mon(fig, data, ci, cmin, cmax, expname, x1=None, x2=None, y1=None, ######### -############### +############### \ No newline at end of file diff --git a/scripts/plotting/tem.py b/scripts/plotting/tem.py index 6abd21c0c..7b5f1e4b1 100644 --- a/scripts/plotting/tem.py +++ b/scripts/plotting/tem.py @@ -19,11 +19,17 @@ def tem(adf): Steps: - loop through TEM variables - calculate all-time fields (from individual months) - - Take difference, calculate statistics - - make plot + - take difference, calculate statistics + - make plots + + Notes: + - If any of the TEM cases are missing, the ADF skips this plotting script and moves on. 
""" + #Notify user that script has started: + print("\n Generating TEM plots...") + #Special ADF variable which contains the output paths for #all generated plots and tables for each case: plot_location = Path(adf.plot_location[0]) @@ -38,15 +44,6 @@ def tem(adf): res = adf.variable_defaults # will be dict of variable-specific plot preferences - #Check if comparing against observations - if adf.compare_obs: - obs = True - base_name = "Obs" - else: - obs = False - base_name = adf.get_baseline_info("cam_case_name", required=True) - #End if - #Extract test case years syear_cases = adf.climo_yrs["syears"] eyear_cases = adf.climo_yrs["eyears"] @@ -71,26 +68,29 @@ def tem(adf): redo_plot = adf.get_basic_info('redo_plot') print(f"\t NOTE: redo_plot is set to {redo_plot}") #----------------------------------------- + + #Initialize list of input TEM file locations + tem_locs = [] - #Grab TEM diagnostics options - tem_opts = adf.read_config_var("tem_info") - - if not tem_opts: - print("\n No TEM options provided, skipping TEM plots." \ - "\nSee documentation or config_cam_baseline_example.yaml for options to add to configuration file.") - return - - #Location of saved TEM netCDF files - tem_loc = tem_opts.get("tem_loc") + #Extract TEM file save locations + tem_case_locs = adf.get_cam_info("cam_tem_loc",required=True) + tem_base_loc = adf.get_baseline_info("cam_tem_loc") - #If path not specified, skip TEM calculation - if tem_loc is None: - print("'tem_loc' not found in config file, so TEM plots will be skipped.") + #If path not specified, skip TEM calculation? + if tem_case_locs is None: + print("\t 'cam_tem_loc' not found for test case(s) in config file, so no TEM plots will be generated.") return else: - #Notify user that script has started: - print("\n Generating TEM plots...") - + for tem_case_loc in tem_case_locs: + tem_case_loc = Path(tem_case_loc) + #Check if TEM directory exists, and if not, then create it: + if not tem_case_loc.is_dir(): + print(f" {tem_case_loc} not found, making new directory") + tem_case_loc.mkdir(parents=True) + #End if + tem_locs.append(tem_case_loc) + #End for + #Set seasonal ranges: seasons = {"ANN": np.arange(1,13,1), "DJF": [12, 1, 2], @@ -107,20 +107,28 @@ def tem(adf): else: var_list = ['uzm','epfy','epfz','vtem','wtem','psitem','utendepfd'] - #Baseline TEM location - input_loc_idx = Path(tem_loc) / base_name - #Check if comparing against obs - if obs: + if adf.compare_obs: + obs = True #Set TEM file for observations - base_file_name = f'{base_name}.TEMdiag.nc' + base_file_name = 'Obs.TEMdiag.nc' + input_loc_idx = Path(tem_locs[0]) else: - #Set TEM file for baseline - base_file_name = f'{base_name}.TEMdiag_{syear_baseline}-{eyear_baseline}.nc' + base_name = adf.get_baseline_info("cam_case_name", required=True) + + #If path not specified, skip TEM calculation? + if tem_base_loc is None: + print(f"\t 'cam_tem_loc' not found for '{base_name}' in config file, so no TEM plots will be generated.") + return + else: + obs = False + input_loc_idx = Path(tem_base_loc) + #Set TEM file for baseline + base_file_name = f'{base_name}.TEMdiag_{syear_baseline}-{eyear_baseline}.nc' #Set full path for baseline/obs file tem_base = input_loc_idx / base_file_name - + #Check to see if baseline/obs TEM file exists if tem_base.is_file(): ds_base = xr.open_dataset(tem_base) @@ -140,6 +148,17 @@ def tem(adf): for s in seasons: #Location to save plots plot_name = plot_location / f"{s}_TEM_Mean.png" + + # Check redo_plot. 
If set to True: remove old plot, if it already exists: + if (not redo_plot) and plot_name.is_file(): + #Add already-existing plot to website (if enabled): + adf.debug_log(f"'{plot_name}' exists and clobber is false.") + adf.add_website_data(plot_name, "TEM", None, season=s, multi_case=True) + + #Continue to next iteration: + continue + elif (redo_plot) and plot_name.is_file(): + plot_name.unlink() fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(fig_width,fig_height), facecolor='w', edgecolor='k') @@ -147,23 +166,12 @@ def tem(adf): #Loop over model cases: for idx,case_name in enumerate(case_names): - # Check redo_plot. If set to True: remove old plot, if it already exists: - if (not redo_plot) and plot_name.is_file(): - #Add already-existing plot to website (if enabled): - adf.debug_log(f"'{plot_name}' exists and clobber is false.") - adf.add_website_data(plot_name, "TEM", case_name, season=s) - - #Continue to next iteration: - continue - elif (redo_plot) and plot_name.is_file(): - plot_name.unlink() - #Extract start and end year values: start_year = syear_cases[idx] end_year = eyear_cases[idx] #Open the TEM file - output_loc_idx = Path(tem_loc) / case_name + output_loc_idx = tem_locs[idx] case_file_name = f'{case_name}.TEMdiag_{start_year}-{end_year}.nc' tem = output_loc_idx / case_file_name @@ -187,7 +195,7 @@ def tem(adf): fig.savefig(plot_name, bbox_inches='tight', dpi=300) #Add plot to website (if enabled): - adf.add_website_data(plot_name, "TEM", case_name, season=s) + adf.add_website_data(plot_name, "TEM", None, season=s, multi_case=True) print(" ...TEM plots have been generated successfully.") diff --git a/scripts/plotting/zonal_mean.py b/scripts/plotting/zonal_mean.py index 8631256f0..944683752 100644 --- a/scripts/plotting/zonal_mean.py +++ b/scripts/plotting/zonal_mean.py @@ -145,7 +145,7 @@ def zonal_mean(adfobj): for var in var_list: for s in seasons: #Check zonal log-p: - plot_name_log = plot_loc / f"{var}_{s}_Zonal_logp_Mean.{plot_type}" + plot_name_log = plot_loc / f"{var}_logp_{s}_Zonal_Mean.{plot_type}" # Check redo_plot. 
If set to True: remove old plot, if it already exists: if (not redo_plot) and plot_name_log.is_file(): @@ -230,7 +230,7 @@ def zonal_mean(adfobj): else: oclim_fils = sorted(dclimo_loc.glob(f"{data_src}_{var}_baseline.nc")) #End if - oclim_ds = _load_dataset(oclim_fils) + oclim_ds = pf.load_dataset(oclim_fils) #Loop over model cases: for case_idx, case_name in enumerate(case_names): @@ -243,7 +243,7 @@ def zonal_mean(adfobj): # load re-gridded model files: mclim_fils = sorted(mclimo_rg_loc.glob(f"{data_src}_{case_name}_{var}_*.nc")) - mclim_ds = _load_dataset(mclim_fils) + mclim_ds = pf.load_dataset(mclim_fils) # stop if data is invalid: if (oclim_ds is None) or (mclim_ds is None): @@ -320,7 +320,7 @@ def zonal_mean(adfobj): #Create new plot with log-p: if has_lev: - plot_name_log = plot_loc / f"{var}_{s}_Zonal_logp_Mean.{plot_type}" + plot_name_log = plot_loc / f"{var}_logp_{s}_Zonal_Mean.{plot_type}" if plot_name_log not in logp_zonal_skip: pf.plot_zonal_mean_and_save(plot_name_log, case_nickname, base_nickname, @@ -344,18 +344,6 @@ def zonal_mean(adfobj): # Helpers ######### -def _load_dataset(fils): - if len(fils) == 0: - warnings.warn(f"Input file list is empty.") - return None - elif len(fils) > 1: - return xr.open_mfdataset(fils, combine='by_coords') - else: - sfil = str(fils[0]) - return xr.open_dataset(sfil) - #End if -#End def ############## -#END OF SCRIPT - +#END OF SCRIPT \ No newline at end of file diff --git a/scripts/regridding/regrid_and_vert_interp.py b/scripts/regridding/regrid_and_vert_interp.py index 37aa5b345..a501de16d 100644 --- a/scripts/regridding/regrid_and_vert_interp.py +++ b/scripts/regridding/regrid_and_vert_interp.py @@ -532,7 +532,7 @@ def _regrid_and_interpolate_levs(model_dataset, var_name, regrid_dataset=None, r #Regrid model data to match target grid: rgdata = regrid_data(mdata, tgrid, method=1) if mdat_ofrac: - rgofrac = regrid_data(mdat_ofrac, tgrd, method=1) + rgofrac = regrid_data(mdat_ofrac, tgrid, method=1) #Regrid surface pressure if need be: if has_lev: if not regridded_ps:
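Finally, a self-contained illustration of the new `non_linear` option wired through `prep_contour_plot()` above: with unevenly spaced contour levels (such as the DUST entry in `adf_variable_defaults.yaml`), `BoundaryNorm` gives each interval its own color band, where `Normalize` would stretch the colormap linearly. Synthetic data, and the same `cm.get_cmap` call the diff itself uses:

```python
import matplotlib
matplotlib.use("Agg")  # non-interactive backend, as in the ADF
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np

levels = [0, 0.1, 0.25, 0.4, 0.6, 0.8, 1.4, 2, 3, 4, 8, 12, 30, 48, 114, 180]
cmap_obj = cm.get_cmap("RdBu_r")
norm = mcolors.BoundaryNorm(levels, cmap_obj.N)  # one color band per interval

data = np.random.default_rng(0).uniform(0, 180, size=(40, 40))
fig, ax = plt.subplots()
img = ax.contourf(data, levels=levels, cmap=cmap_obj, norm=norm)
fig.colorbar(img, ax=ax, ticks=levels)
fig.savefig("boundarynorm_demo.png")
```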