diff --git a/CHANGES.rst b/CHANGES.rst
index 9d4cdc1a5..01be6e18f 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,5 +1,21 @@
 ## What's Changed

+1.2.5 (2024-03-19)
+==================
+
+Web Application
+~~~~~~~~~~~~~~~
+- Fix Bokeh `file_html` Call by @mfixstsci
+- Update Bad Pix Exclude Line by @mfixstsci
+- Interactive preview image - updates for Bokeh 3 by @bhilbert4
+
+Project & API Documentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- Allow creation of pngs from 3D and 4D arrays by @bhilbert4
+- Add max length to charfield by @BradleySappington
+- Header fix by @BradleySappington
+
+
 1.2.4 (2024-03-11)
 ==================
diff --git a/jwql/edb/engineering_database.py b/jwql/edb/engineering_database.py
index b4812d81b..d58b32123 100644
--- a/jwql/edb/engineering_database.py
+++ b/jwql/edb/engineering_database.py
@@ -45,7 +45,7 @@
 """
 import calendar
 from collections import OrderedDict
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 from numbers import Number
 import os
 import warnings
@@ -1265,8 +1265,13 @@ def timed_stats(self, sigma=3):
         good = ((date_arr >= min_date) & (date_arr < max_date))
         if self.meta['TlmMnemonics'][0]['AllPoints'] != 0:
             avg, med, dev = sigma_clipped_stats(self.data["euvalues"][good], sigma=sigma)
-            maxval = np.max(self.data["euvalues"][good])
-            minval = np.min(self.data["euvalues"][good])
+            # If self.data is empty, or good is empty, then calculating the max and
+            # min values will not work. Fall back to NaN so the names are always defined.
+            try:
+                maxval = np.max(self.data["euvalues"][good])
+                minval = np.min(self.data["euvalues"][good])
+            except ValueError:
+                maxval = minval = np.nan
         else:
             avg, med, dev, maxval, minval = change_only_stats(self.data["dates"][good], self.data["euvalues"][good], sigma=sigma)
         if np.isfinite(avg):
@@ -1412,9 +1417,15 @@ def change_only_bounding_points(date_list, value_list, starttime, endtime):
     if isinstance(starttime, Time):
         starttime = starttime.datetime

+    if starttime.tzinfo is None or starttime.tzinfo.utcoffset(starttime) is None:
+        starttime = starttime.replace(tzinfo=timezone.utc)
+
     if isinstance(endtime, Time):
         endtime = endtime.datetime

+    if endtime.tzinfo is None or endtime.tzinfo.utcoffset(endtime) is None:
+        endtime = endtime.replace(tzinfo=timezone.utc)
+
     valid_idx = np.where((date_list_arr <= endtime) & (date_list_arr >= starttime))[0]
     before_startime = np.where(date_list_arr < starttime)[0]
     before_endtime = np.where(date_list_arr < endtime)[0]
@@ -1601,7 +1612,7 @@ def get_mnemonic(mnemonic_identifier, start_time, end_time):
     data = service.get_values(mnemonic_identifier, start_time, end_time, include_obstime=True, include_bracket_values=bracket)

-    dates = [datetime.strptime(row.obstime.iso, "%Y-%m-%d %H:%M:%S.%f") for row in data]
+    dates = [datetime.strptime(row.obstime.iso, "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=timezone.utc) for row in data]
     values = [row.value for row in data]

     if bracket:
diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
index f07e48875..e23778b8e 100755
--- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
@@ -374,6 +374,7 @@
 from requests.exceptions import HTTPError
 import urllib

+from astropy.modeling import models
 from astropy.stats import sigma_clipped_stats
 from astropy.table import Table
 from astropy.time import Time, TimeDelta
@@ -381,16 +382,9 @@
 from bokeh.embed import components, json_item
 from bokeh.layouts import gridplot
 from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d
-from bokeh.models.layouts import Tabs
+from bokeh.models.layouts import TabPanel, Tabs
 from bokeh.plotting import figure, output_file, save, show
 from bokeh.palettes import Turbo256
-from jwql.database import database_interface
-from jwql.database.database_interface import NIRCamEDBDailyStats, NIRCamEDBBlockStats, \
-    NIRCamEDBTimeIntervalStats, NIRCamEDBEveryChangeStats, NIRISSEDBDailyStats, NIRISSEDBBlockStats, \
-    NIRISSEDBTimeIntervalStats, NIRISSEDBEveryChangeStats, MIRIEDBDailyStats, MIRIEDBBlockStats, \
-    MIRIEDBTimeIntervalStats, MIRIEDBEveryChangeStats, FGSEDBDailyStats, FGSEDBBlockStats, \
-    FGSEDBTimeIntervalStats, FGSEDBEveryChangeStats, NIRSpecEDBDailyStats, NIRSpecEDBBlockStats, \
-    NIRSpecEDBTimeIntervalStats, NIRSpecEDBEveryChangeStats, session, engine
 from jwql.edb import engineering_database as ed
 from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import condition
 from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import utils
@@ -398,9 +392,20 @@
 from jwql.utils import monitor_utils
 from jwql.utils.logging_functions import log_info, log_fail
 from jwql.utils.constants import EDB_DEFAULT_PLOT_RANGE, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, MIRI_POS_RATIO_VALUES
+from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS
 from jwql.utils.permissions import set_permissions
-from jwql.utils.utils import ensure_dir_exists, get_config
+from jwql.utils.utils import add_timezone_to_datetime, ensure_dir_exists, get_config

+if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS:
+    # Need to set up django apps before we can access the models
+    import django  # noqa: E402 (module level import not at top of file)
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings")
+    django.setup()
+
+    # Import * is okay here because this module specifically only contains database models
+    # for this monitor
+    from jwql.website.apps.jwql.monitor_models.edb import *  # noqa: E402 (module level import not at top of file)

 ALLOWED_COMBINATION_TYPES = ['all+daily_means', 'all+block_means', 'all+every_change', 'all+time_interval']
@@ -510,13 +515,35 @@ def add_new_block_db_entry(self, mnem, query_time):
         logging.info(f"Adding new entry for {mnem.mnemonic_identifier} to history table.")
         times = mnem.data["dates"].data
         data = mnem.data["euvalues"].data
-        stdevs = mnem.stdev
         times = ensure_list(times)
         data = ensure_list(data)
-        stdevs = ensure_list(stdevs)
+        stdevs = ensure_list(mnem.stdev)
         medians = ensure_list(mnem.median)
         maxs = ensure_list(mnem.max)
         mins = ensure_list(mnem.min)
+
+        # Make sure the max and min values are floats rather than ints
+        if len(maxs) > 0:
+            if isinstance(maxs[0], (int, np.integer)):
+                maxs = [float(v) for v in maxs]
+        else:
+            logging.warning(f'len of maxs is zero! {mnem.mnemonic_identifier}, {maxs}, {mins}, {medians}')
+        if len(mins) > 0:
+            if isinstance(mins[0], (int, np.integer)):
+                mins = [float(v) for v in mins]
+        if len(medians) > 0:
+            if isinstance(medians[0], (int, np.integer)):
+                medians = [float(v) for v in medians]
+
         db_entry = {'mnemonic': mnem.mnemonic_identifier,
                     'latest_query': query_time,
                     'times': times,
                     'data': data,
@@ -525,10 +552,10 @@
                     'median': medians,
                     'max': maxs,
                     'min': mins,
-                    'entry_date': datetime.datetime.now()
+                    'entry_date': datetime.datetime.now(datetime.timezone.utc)
                     }
-        with engine.begin() as connection:
-            connection.execute(self.history_table.__table__.insert(), db_entry)
+        entry = self.history_table(**db_entry)
+        entry.save()

     def add_new_every_change_db_entry(self, mnem, mnem_dict, dependency_name, query_time):
         """Add new entries to the database table for "every change"
@@ -556,19 +583,20 @@
             times = ensure_list(times)
             values = ensure_list(values)

+            # medians and stdevs will be single-element lists, so provide the
+            # 0th element to the database entry
             db_entry = {'mnemonic': mnem,
                         'dependency_mnemonic': dependency_name,
                         'dependency_value': key,
                         'mnemonic_value': values,
                         'time': times,
-                        'median': medians,
-                        'stdev': stdevs,
+                        'median': medians[0],
+                        'stdev': stdevs[0],
                         'latest_query': query_time,
-                        'entry_date': datetime.datetime.now()
+                        'entry_date': datetime.datetime.now(datetime.timezone.utc)
                         }
-            with engine.begin() as connection:
-                connection.execute(
-                    self.history_table.__table__.insert(), db_entry)
+            entry = self.history_table(**db_entry)
+            entry.save()

     def calc_timed_stats(self, mnem_data, bintime, sigma=3):
         """Not currently used.
@@ -1054,10 +1082,10 @@ def get_history(self, mnemonic, start_date, end_date, info={}, meta={}):
         hist : jwql.edb.engineering_database.EdbMnemonic
             Retrieved data
         """
-        data = session.query(self.history_table) \
-            .filter(self.history_table.mnemonic == mnemonic,
-                    self.history_table.latest_query > start_date,
-                    self.history_table.latest_query < end_date)
+        filters = {"mnemonic__iexact": mnemonic,
+                   "latest_query__range": (start_date, end_date)
+                   }
+        data = self.history_table.objects.filter(**filters).order_by("latest_query")

         all_dates = []
         all_values = []
@@ -1069,8 +1097,13 @@
         # outside of the plot range. Return only the points inside the desired
         # plot range
         for row in data:
-            good = np.where((np.array(row.times) > self._plot_start) & (np.array(row.times) < self._plot_end))[0]
-            times = list(np.array(row.times)[good])
+            # Make sure the data from the database has timezone info
+            time_vals = row.times
+            if time_vals[0].tzinfo is None or time_vals[0].tzinfo.utcoffset(time_vals[0]) is None:
+                time_vals = [val.replace(tzinfo=datetime.timezone.utc) for val in time_vals]
+
+            good = np.where((np.array(time_vals) > self._plot_start) & (np.array(time_vals) < self._plot_end))[0]
+            times = list(np.array(time_vals)[good])
             data = list(np.array(row.data)[good])
             medians = list(np.array(row.median)[good])
             maxs = list(np.array(row.max)[good])
@@ -1112,14 +1145,15 @@ def get_history_every_change(self, mnemonic, start_date, end_date):
         -------
         hist : dict
             Retrieved data. Keys are the value of the dependency mnemonic,
-            and each value is a 3-tuple. The tuple contains the times, values,
-            and mean value of the primary mnemonic corresponding to the times
-            that they dependency mnemonic has the value of the key.
+            and each value is a 4-tuple. The tuple contains the times, values,
+            mean value, and standard deviation of the primary mnemonic corresponding
+            to the times that the dependency mnemonic has the value of the key.
+            Values are lists of lists.
         """
-        data = session.query(self.history_table) \
-            .filter(self.history_table.mnemonic == mnemonic,
-                    self.history_table.latest_query > start_date,
-                    self.history_table.latest_query < end_date)
+        filters = {"mnemonic__iexact": mnemonic,
+                   "latest_query__range": (start_date, end_date)
+                   }
+        data = self.history_table.objects.filter(**filters).order_by("latest_query")

         # Set up the dictionary to contain the data
         hist = {}
@@ -1129,8 +1163,6 @@
             if row.dependency_value in hist:
                 if len(hist[row.dependency_value]) > 0:
                     times, values, medians, devs = hist[row.dependency_value]
-                    medians = [medians]
-                    devs = [devs]
                 else:
                     times = []
                     values = []
@@ -1140,15 +1172,26 @@
                 # Keep only data that fall at least partially within the plot range
                 if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end))
                         | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))):
-                    times.extend(row.time)
-                    values.extend(row.mnemonic_value)
-                    medians.append(row.median)
-                    devs.append(row.stdev)
+                    times.append(row.time)
+                    values.append(row.mnemonic_value)
+                    medians.append([row.median])
+                    devs.append([row.stdev])
                     hist[row.dependency_value] = (times, values, medians, devs)
             else:
                 if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end))
                         | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))):
-                    hist[row.dependency_value] = (row.time, row.mnemonic_value, row.median, row.stdev)
+                    hist[row.dependency_value] = ([row.time], [row.mnemonic_value], [[row.median]], [[row.stdev]])

         return hist
@@ -1243,11 +1286,16 @@ def identify_tables(self, inst, tel_type):
             Examples include "every_change", "daily", "all", etc
         """
         mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst]
         if '_means' in tel_type:  # covers both daily_means and block_means
             tel_type = tel_type.strip('_means')
         tel_type = tel_type.title().replace('_', '')
-        self.history_table_name = f'{mixed_case_name}EDB{tel_type}Stats'
-        self.history_table = getattr(database_interface, f'{mixed_case_name}EDB{tel_type}Stats')
+        self.history_table_name = f'{mixed_case_name}Edb{tel_type}Stats'
+
+        # temporary fix
+        if 'BlockStats' in self.history_table_name:
+            self.history_table_name = self.history_table_name.replace('BlockStats', 'BlocksStats')
+
+        self.history_table = eval(self.history_table_name)

     def most_recent_search(self, telem_name):
         """Query the database and return the information
@@ -1264,14 +1312,18 @@
         query_result : datetime.datetime
             Date of the ending range of the previous query
         """
-        query = session.query(self.history_table).filter(self.history_table.mnemonic == telem_name).order_by(self.history_table.latest_query).all()
+        filters = {"mnemonic__iexact": telem_name}
+        query = self.history_table.objects.filter(**filters).order_by("latest_query")

         if len(query) == 0:
             base_time = '2022-11-15 00:00:0.0'
-            query_result = datetime.datetime.strptime(base_time, '%Y-%m-%d %H:%M:%S.%f')
+            query_result = datetime.datetime.strptime(base_time, '%Y-%m-%d %H:%M:%S.%f').replace(tzinfo=datetime.timezone.utc)
             logging.info(f'\tNo query history for {telem_name}. Returning default "previous query" date of {base_time}.')
         else:
-            query_result = query[-1].latest_query
+            # Negative indexing not allowed in QuerySet
+            query_result = query[len(query) - 1].latest_query
+            if query_result.tzinfo is None or query_result.tzinfo.utcoffset(query_result) is None:
+                query_result = query_result.replace(tzinfo=datetime.timezone.utc)

         logging.info(f'For {telem_name}, the previous query time is {query_result}')
         return query_result
@@ -1431,7 +1483,12 @@ def multiday_mnemonic_query(self, mnemonic_dict, starting_time_list, ending_time
         # Combine the mean values and median time data from multiple days into a single EdbMnemonic
         # instance.
         multiday_table["dates"] = multiday_median_times
-        multiday_table["euvalues"] = multiday_median_vals
+
+        if telemetry_type != 'all':
+            multiday_table["euvalues"] = multiday_median_vals
+        else:
+            multiday_table["euvalues"] = multiday_mean_vals
+
         all_data = ed.EdbMnemonic(identifier, starting_time_list[0], ending_time_list[-1],
                                   multiday_table, meta, info)
         all_data.stdev = multiday_stdev_vals
@@ -1476,7 +1533,7 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None):
         # Container to hold and organize all plots
         self.figures = {}
         self.instrument = instrument
-        self._today = datetime.datetime.now()
+        self._today = datetime.datetime.now(datetime.timezone.utc)

         # Set the limits for the telemetry plots if necessary
         if plot_start is None:
@@ -1485,13 +1542,25 @@
         if plot_end is None:
             plot_end = self._today

         # Only used as fall-back plot range for cases where there is no data
         self._plot_start = plot_start
         self._plot_end = plot_end

         # At the top level, we loop over the different types of telemetry. These types
         # largely control if/how the data will be averaged.
         for telemetry_kind in mnemonic_dict:
             telem_type = telemetry_kind
             logging.info(f'Working on telemetry_type: {telem_type}')
@@ -1517,6 +1586,16 @@
             # Work on one mnemonic at a time
             for mnemonic in mnemonic_dict[telemetry_kind]:
                 logging.info(f'Working on {mnemonic["name"]}')
+
+                # It seems that some mnemonics that were previously in the EDB are no longer
+                # present. Check for the existence of the mnemonic before proceeding. If the
+                # mnemonic is not present in the EDB, make a note in the log and move on to
+                # the next one.
+                present_in_edb = ed.get_mnemonic_info(mnemonic["name"])
+                if not present_in_edb:
+                    logging.info(f'WARNING: {mnemonic["name"]} is not present in the EDB. Skipping')
+                    continue  # Move on to the next mnemonic
+
                 create_new_history_entry = True

                 # Only two types of plots are currently supported. Plotting the data in the EdbMnemonic
@@ -1550,7 +1629,7 @@
                 # For daily_means mnemonics, we force the search to always start at noon, and
                 # have a 1 day cadence
                 if telem_type == 'daily_means':
-                    most_recent_search = datetime.datetime.combine(most_recent_search.date(), datetime.time(hour=12))
+                    most_recent_search = datetime.datetime.combine(most_recent_search.date(), datetime.time(hour=12)).replace(tzinfo=datetime.timezone.utc)

                 logging.info(f'Most recent search is {most_recent_search}.')
                 logging.info(f'Query cadence is {self.query_cadence}')
@@ -1570,6 +1649,14 @@
                         create_new_history_entry = False
                         starttime = None
                 else:
                     # In the case where telemetry data have no averaging done, we do not store the data
                     # in the JWQL database, in order to save space. So in this case, we will retrieve
@@ -1646,13 +1733,27 @@
                     # Before we can add the every-change data to the database, organize it to make it
                     # easier to access. Note that every_change_data is now a dict rather than an EdbMnemonic instance
                     every_change_data = organize_every_change(new_data)

                     # Add new data to JWQLDB.
                     # If no new data were retrieved from the EDB, then there is no need to add an entry to the JWQLDB
                     if create_new_history_entry:
                         self.add_new_every_change_db_entry(new_data.mnemonic_identifier, every_change_data, mnemonic['dependency'][0]["name"],
                                                            query_start_times[-1])
                     else:
                         logging.info("No new data retrieved from EDB, so no new entry added to JWQLDB")
@@ -1664,6 +1765,12 @@
                     # Note that the line below will change mnemonic_info into a dictionary
                     mnemonic_info = add_every_change_history(historical_data, every_change_data)
                     logging.info('Combined new data plus historical data. Number of data points per key:')
                     for key in mnemonic_info:
                         logging.info(f'Key: {key}, Num of Points: {len(mnemonic_info[key][0])}')
@@ -1760,10 +1867,10 @@ def tabbed_figure(self, ncols=2):
             grid = gridplot(plot_list, ncols=ncols, merge_tools=False)

             # Create one panel for each plot category
-            panel_list.append(Panel(child=grid, title=key))
+            panel_list.append(TabPanel(child=grid, title=key))

         # Assign the panels to Tabs
-        tabbed = Tabs(tabs=panel_list)
+        tabbed = Tabs(tabs=panel_list, tabs_location='left')

         # Save the tabbed plot to a json file
         item_text = json.dumps(json_item(tabbed, "tabbed_edb_plot"))
@@ -1778,13 +1885,27 @@ def add_every_change_history(dict1, dict2):
     """Combine two dictionaries that contain every change data. For keys that are
     present in both dictionaries, remove any duplicate entries based on date.

+    For the every change data at the moment (MIRI), the key values
+    are filter names, and the values are data corresponding to those
+    filters. The median and stdev values for each filter come from
+    MIRI_POS_RATIO_VALUES in constants.py. So for a given filter, it
+    is safe (and in fact necessary for plotting purposes) to have only
+    a single value for the median, and the same for stdev. So in combining
+    the dictionaries, we combine dates and data values, but keep only a
+    single value for median and stdev.
+
     Parameters
     ----------
     dict1 : dict
-        First dictionary to combine
+        First dictionary to combine. The intention is that dict1 will contain
+        the historical data for the mnemonic, retrieved from the database. This
+        means the values will be tuples of nested lists. Also allow for this to be an
+        empty dictionary, if there are no historical data.

     dict2 : dict
-        Second dictionary to combine
+        Second dictionary to combine. The intention is that dict2 will contain
+        the new mnemonic data from the latest EDB query. This means the values
+        will be tuples of lists.

     Returns
     -------
@@ -1793,19 +1914,111 @@
     """
     combined = defaultdict(list)

     for key, value in dict1.items():
+        all_dates = []
+        all_values = []
+        all_medians = []
+        all_devs = []
         if key in dict2:
-            if np.min(value[0]) < np.min(dict2[key][0]):
-                all_dates = np.append(value[0], dict2[key][0])
-                all_data = np.append(value[1], dict2[key][1])
-                all_medians = np.append(value[2], dict2[key][2])
-                all_devs = np.append(value[3], dict2[key][3])
+            min_time_dict1 = min(min(m) for m in value[0])
+            if min_time_dict1 < np.min(dict2[key][0]):
+                # Here, the minimum date in the history (dict1) is earlier
+                # than the minimum date in the new EDB query data (dict2).
+                # This is where we expect to be most of the time.
+                all_dates = deepcopy(value[0])
+                all_dates.append(list(dict2[key][0]))
+
+                all_values = deepcopy(value[1])
+                all_values.append(list(dict2[key][1]))
+
+                all_medians = deepcopy(value[2])
+                all_medians.append(list(dict2[key][2]))
+
+                all_devs = deepcopy(value[3])
+                all_devs.append(list(dict2[key][3]))
             else:
-                all_dates = np.append(dict2[key][0], value[0])
-                all_data = np.append(dict2[key][1], value[1])
-                all_medians = np.append(dict2[key][2], value[2])
-                all_devs = np.append(dict2[key][3], value[3])
+                # Seems unlikely we'll ever want to be here. This would be
+                # for a case where a given set of values has an earliest date
+                # that is earlier than anything in the database.
+                all_dates = [list(dict2[key][0])]
+                all_dates.extend(value[0])
+
+                all_values = [list(dict2[key][1])]
+                all_values.extend(value[1])
+
+                all_medians = [list(dict2[key][2])]
+                all_medians.extend(value[2])
+
+                all_devs = [list(dict2[key][3])]
+                all_devs.extend(value[3])

             # Not sure how to treat duplicates here. If we remove duplicates, then
             # the mean values may not be valid any more. For example, if there is a
@@ -1813,19 +2026,60 @@
             # those 4 hours of entries, but then what would we do with the mean values
             # that cover those times. Let's instead warn the user if there are duplicate
             # entries, but don't take any action
-            unique_dates = np.unique(all_dates, return_index=False)
-            if len(unique_dates) != len(all_dates):
-                logging.info(("WARNING - There are duplicate entries in the every-change history "
-                              "and the new entry. Keeping and plotting all values, but be sure the "
-                              "data look ok."))
-            updated_value = (all_dates, all_data, all_medians, all_devs)
+            flat_dates = [t for sublist in all_dates for t in sublist]
+            n_duplicates = len(flat_dates) - len(set(flat_dates))
+            if n_duplicates > 0:
+                logging.info((f"WARNING - There are {n_duplicates} duplicate entries in the "
+                              "every-change history and the new entry. Keeping and plotting "
+                              "all values, but be sure the data look ok."))
+            updated_value = (all_dates, all_values, all_medians, all_devs)
             combined[key] = updated_value
         else:
-            combined[key] = value
+            # In this case, a given key is present in the historical data from the database,
+            # but not in the data from the new EDB query.
+            # value here is already a nested list, so we can transfer that directly to the new dictionary
+            combined[key] = deepcopy(value)
+
+        logging.info(f'In add_every_change_history: key: {key}, len data: {len(combined[key][0])}, median: {combined[key][2]}, dev: {combined[key][3]}')

-    # Add entries for keys that are in dict2 but not dict1
+    # Add entries for keys that are in dict2 (recent query) but not dict1 (historical data)
     for key, value in dict2.items():
         if key not in dict1:
-            combined[key] = value
+            dates = []
+            vals = []
+            meds = []
+            devs = []
+            dates.append(list(value[0]))
+            vals.append(list(value[1]))
+            meds.append(list(value[2]))
+            devs.append(list(value[3]))
+            combined[key] = (dates, vals, meds, devs)
+
+            logging.info(f'dict2 only add_every_change_history: key: {key}, len data: {len(value[0])}, median: {dict2[key][2]}, dev: {dict2[key][3]}')

     return combined

@@ -1916,8 +2170,13 @@ def ensure_list(var):
     var : list
         var, translated into a list if necessary
     """
-    if not isinstance(var, list) and not isinstance(var, np.ndarray):
-        return [var]
+    if not isinstance(var, list):
+        if not isinstance(var, np.ndarray):
+            # Here we assume var is a single float, int, str, etc.
+            return [var]
+        else:
+            # Here we convert a numpy array to a list
+            return var.tolist()
     else:
         return var

@@ -1943,8 +2202,9 @@ def organize_every_change(mnemonic):
     all_data : dict
         Dictionary of organized results. Keys are the dependency values,
         and values are tuples. The first element of each tuple is a list
-        of dates, the second element is a list of data values, and the third
-        is a the sigma-clipped mean value of the data.
+        of dates, the second element is a list of data values, the third is
+        a single element list of the sigma-clipped mean value of the data,
+        and the fourth is a single element list of the stdev of the data.
     """
     all_data = {}
@@ -1968,7 +2228,7 @@
         # Normalize by the expected value
         medianval, stdevval = MIRI_POS_RATIO_VALUES[mnemonic.mnemonic_identifier.split('_')[2]][val]

-        all_data[val] = (val_times, val_data, medianval, stdevval)
+        all_data[val] = (val_times, val_data, [medianval], [stdevval])

     return all_data

@@ -2079,22 +2339,39 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True
     # Find the min and max values in the x-range. These may be used for plotting
     # the nominal_value line later. Initialize here, and then dial them in based
     # on the data.
-    min_time = datetime.datetime.today()
-    max_time = datetime.datetime(2021, 12, 25)
+    min_time = datetime.datetime.now(datetime.timezone.utc)
+    max_time = datetime.datetime(2021, 12, 25, tzinfo=datetime.timezone.utc)

     logging.info('In plot_every_change_data:')
     for (key, value), color in zip(data.items(), colors):
         if len(value) > 0:
             val_times, val_data, normval, stdevval = value
-            val_data = np.array(val_data)
-            dependency_val = np.repeat(key, len(val_times))
+
+            # At this point, val_times and val_data will be lists of numpy arrays,
+            # and normval and stdevval will be lists. First, iterate through the lists
+            # and normalize the data values in each element by the corresponding
+            # normval (expected value)
+            all_val_data = []
+            all_val_times = []
+            for time_ele, data_ele, norm_ele in zip(val_times, val_data, normval):
+                if type(data_ele[0]) not in [np.str_, str]:
+                    data_ele_arr = np.array(data_ele) / norm_ele[0]
+                    all_val_data.extend(list(data_ele_arr))
+                    all_val_times.extend(time_ele)
+                    logging.info(f'key: {key}, len_data: {len(data_ele)}, firstentry: {data_ele[0]}, stats: {norm_ele}')
+
+            all_val_data = np.array(all_val_data)
+            all_val_times = np.array(all_val_times)
+            dependency_val = np.repeat(key, len(all_val_times))

             # Normalize by normval (the expected value) so that all data will fit on one plot easily
-            if type(val_data[0]) not in [np.str_, str]:
-                logging.info(f'key: {key}, len_data: {len(val_data)}, firstentry: {val_data[0]}, stats: {normval}, {stdevval}')
-                val_data /= normval

-            source = ColumnDataSource(data={'x': val_times, 'y': val_data, 'dep': dependency_val})
+            source = ColumnDataSource(data={'x': all_val_times, 'y': all_val_data, 'dep': dependency_val})
             ldata = fig.line(x='x', y='y', line_width=1, line_color=Turbo256[color], source=source, legend_label=key)
             cdata = fig.circle(x='x', y='y', fill_color=Turbo256[color], size=8, source=source, legend_label=key)
@@ -2106,10 +2383,10 @@
             hover_tool.formatters = {'@x': 'datetime'}
             fig.tools.append(hover_tool)

-            if np.min(val_times) < min_time:
-                min_time = np.min(val_times)
-            if np.max(val_times) > max_time:
-                max_time = np.max(val_times)
+            if np.min(all_val_times) < min_time:
+                min_time = np.min(all_val_times)
+            if np.max(all_val_times) > max_time:
+                max_time = np.max(all_val_times)

     # If the input dictionary is empty, then create an empty plot with reasonable
     # x range
diff --git a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py
old mode 100644
new mode 100755
index b73d916c0..9978ec182
--- a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py
+++ b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py
@@ -192,6 +192,7 @@ def make_background_plots(self, plot_type='bkg'):
             df = df[df['stddev'] != 0]  # older data has no accurate stddev measures
             plot_data = df['stddev'].values
         if plot_type == 'model':
+            df = df[np.isfinite(df['total_bkg'])]  # the claw monitor did not track model measurements at first
             plot_data = df['median'].values / df['total_bkg'].values
         plot_expstarts = df['expstart_mjd'].values
@@ -300,7 +301,11 @@ def process(self):

             # Get predicted background level using JWST background tool
             ra, dec = hdu[1].header['RA_V1'], hdu[1].header['DEC_V1']
-            wv = self.filter_wave[self.fltr.upper()]
+            if ('N' in self.pupil.upper()) or ('M' in self.pupil.upper()):
+                fltr_wv = self.pupil.upper()
+            else:
+                fltr_wv = self.fltr.upper()
+            wv = self.filter_wave[fltr_wv]
             date = hdu[0].header['DATE-BEG']
             doy = int(Time(date).yday.split(':')[1])
             try:
@@ -332,7 +337,7 @@
                 'skyflat_filename': os.path.basename(self.outfile),
                 'doy': float(doy),
                 'total_bkg': float(total_bkg),
-                'entry_date': datetime.datetime.now()
+                'entry_date': datetime.datetime.now(datetime.timezone.utc)
             }
             entry = self.stats_table(**claw_db_entry)
             entry.save()
@@ -423,11 +428,13 @@ def run(self):
         mast_table = self.query_mast()
         logging.info('{} files found between {} and {}.'.format(len(mast_table), self.query_start_mjd, self.query_end_mjd))

-        # Define pivot wavelengths
-        self.filter_wave = {'F070W': 0.704, 'F090W': 0.902, 'F115W': 1.154, 'F150W': 1.501, 'F150W2': 1.659,
-                            'F200W': 1.989, 'F212N': 2.121, 'F250M': 2.503, 'F277W': 2.762, 'F300M': 2.989,
-                            'F322W2': 3.232, 'F356W': 3.568, 'F410M': 4.082, 'F430M': 4.281, 'F444W': 4.408,
-                            'F480M': 4.874}
+        # Define pivot wavelengths - last downloaded March 8 2024 from:
+        # https://jwst-docs.stsci.edu/jwst-near-infrared-camera/nircam-instrumentation/nircam-filters
+        self.filter_wave = {'F070W': 0.704, 'F090W': 0.901, 'F115W': 1.154, 'F140M': 1.404, 'F150W': 1.501, 'F162M': 1.626, 'F164N': 1.644,
+                            'F150W2': 1.671, 'F182M': 1.845, 'F187N': 1.874, 'F200W': 1.99, 'F210M': 2.093, 'F212N': 2.12, 'F250M': 2.503,
+                            'F277W': 2.786, 'F300M': 2.996, 'F322W2': 3.247, 'F323N': 3.237, 'F335M': 3.365, 'F356W': 3.563, 'F360M': 3.621,
+                            'F405N': 4.055, 'F410M': 4.092, 'F430M': 4.28, 'F444W': 4.421, 'F460M': 4.624, 'F466N': 4.654, 'F470N': 4.707,
+                            'F480M': 4.834}

         # Create observation-level median stacks for each filter/pupil combo, in pixel-space
         combos = np.array(['{}_{}_{}_{}'.format(str(row['program']), row['observtn'], row['filter'], row['pupil']).lower() for row in mast_table])
@@ -469,7 +476,7 @@
                      'start_time_mjd': self.query_start_mjd,
                      'end_time_mjd': self.query_end_mjd,
                      'run_monitor': monitor_run,
-                     'entry_date': datetime.datetime.now()}
+                     'entry_date': datetime.datetime.now(datetime.timezone.utc)}
         entry = self.query_table(**new_entry)
         entry.save()
diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh
index 5544124d7..ed0dceeaa 100644
--- a/jwql/pull_jwql_branch.sh
+++ b/jwql/pull_jwql_branch.sh
@@ -62,6 +62,7 @@
 echo "Reset: $reset";
 echo "Notify: $notify $recipient";

 # 1. Pull updated code from GitHub deployment branch (keep second checkout in case it's already defined for some weird reason)
+git fetch origin
 git checkout -b $branch_name --track origin/$branch_name
 git checkout $branch_name
 git fetch origin $branch_name
diff --git a/jwql/tests/test_data_containers.py b/jwql/tests/test_data_containers.py
index 7c4f68401..4b1f1c4ba 100644
--- a/jwql/tests/test_data_containers.py
+++ b/jwql/tests/test_data_containers.py
@@ -31,7 +31,7 @@
 import pandas as pd
 import pytest

-from jwql.utils.constants import ON_GITHUB_ACTIONS
+from jwql.utils.constants import ON_GITHUB_ACTIONS, DEFAULT_MODEL_CHARFIELD

 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings")
@@ -199,7 +199,6 @@
     (['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'], {'bad'})),
     (False, ['rate', 'uncal', 'bad', 'o006_crfints', 'o001_crf'],
      ['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'])])
-
 def test_get_available_suffixes(untracked, input_suffixes, expected):
     result = data_containers.get_available_suffixes(
         input_suffixes, return_untracked=untracked)
@@ -339,6 +338,7 @@
     assert update_mock.call_count == 2
     """

+
 @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.')
 def test_get_dashboard_components():
     request = MockPostRequest()
@@ -607,42 +607,42 @@ def test_mast_query_by_rootname():
     instrument = 'NIRCam'
     rootname1 = 'jw02767002001_02103_00005_nrcb4'
     dict_stuff = data_containers.mast_query_by_rootname(instrument, rootname1)
-    defaults = dict(filter=dict_stuff.get('filter', ''),
-                    detector=dict_stuff.get('detector', ''),
-                    exp_type=dict_stuff.get('exp_type', ''),
-                    read_pat=dict_stuff.get('readpatt', ''),
-                    grating=dict_stuff.get('grating', ''),
+    defaults = dict(filter=dict_stuff.get('filter', DEFAULT_MODEL_CHARFIELD),
+                    detector=dict_stuff.get('detector', DEFAULT_MODEL_CHARFIELD),
+                    exp_type=dict_stuff.get('exp_type', DEFAULT_MODEL_CHARFIELD),
+                    read_pat=dict_stuff.get('readpatt', DEFAULT_MODEL_CHARFIELD),
+                    grating=dict_stuff.get('grating', DEFAULT_MODEL_CHARFIELD),
                     patt_num=dict_stuff.get('patt_num', 0),
-                    aperture=dict_stuff.get('apername', ''),
-                    subarray=dict_stuff.get('subarray', ''),
-                    pupil=dict_stuff.get('pupil', ''))
+                    aperture=dict_stuff.get('apername', DEFAULT_MODEL_CHARFIELD),
+                    subarray=dict_stuff.get('subarray', DEFAULT_MODEL_CHARFIELD),
+                    pupil=dict_stuff.get('pupil', DEFAULT_MODEL_CHARFIELD))
     assert isinstance(defaults, dict)

     rootname2 = 'jw02084001001_04103_00001-seg003_nrca3'
     dict_stuff = data_containers.mast_query_by_rootname(instrument, rootname2)
-    defaults = dict(filter=dict_stuff.get('filter', ''),
-                    detector=dict_stuff.get('detector', ''),
-                    exp_type=dict_stuff.get('exp_type', ''),
-                    read_pat=dict_stuff.get('readpatt', ''),
-                    grating=dict_stuff.get('grating', ''),
+    defaults = dict(filter=dict_stuff.get('filter', DEFAULT_MODEL_CHARFIELD),
+                    detector=dict_stuff.get('detector', DEFAULT_MODEL_CHARFIELD),
+                    exp_type=dict_stuff.get('exp_type', DEFAULT_MODEL_CHARFIELD),
+                    read_pat=dict_stuff.get('readpatt', DEFAULT_MODEL_CHARFIELD),
+                    grating=dict_stuff.get('grating', DEFAULT_MODEL_CHARFIELD),
                     patt_num=dict_stuff.get('patt_num', 0),
-                    aperture=dict_stuff.get('apername', ''),
-                    subarray=dict_stuff.get('subarray', ''),
-                    pupil=dict_stuff.get('pupil', ''))
+                    aperture=dict_stuff.get('apername', DEFAULT_MODEL_CHARFIELD),
+                    subarray=dict_stuff.get('subarray', DEFAULT_MODEL_CHARFIELD),
+                    pupil=dict_stuff.get('pupil', DEFAULT_MODEL_CHARFIELD))
     assert isinstance(defaults, dict)

     instrument2 = 'FGS'
     rootname3 = 'jw01029003001_06201_00001_guider2'
     dict_stuff = data_containers.mast_query_by_rootname(instrument2, rootname3)
-    defaults = dict(filter=dict_stuff.get('filter', ''),
-                    detector=dict_stuff.get('detector', ''),
-                    exp_type=dict_stuff.get('exp_type', ''),
-                    read_pat=dict_stuff.get('readpatt', ''),
-                    grating=dict_stuff.get('grating', ''),
+    defaults = dict(filter=dict_stuff.get('filter', DEFAULT_MODEL_CHARFIELD),
+                    detector=dict_stuff.get('detector', DEFAULT_MODEL_CHARFIELD),
+                    exp_type=dict_stuff.get('exp_type', DEFAULT_MODEL_CHARFIELD),
+                    read_pat=dict_stuff.get('readpatt', DEFAULT_MODEL_CHARFIELD),
+                    grating=dict_stuff.get('grating', DEFAULT_MODEL_CHARFIELD),
                     patt_num=dict_stuff.get('patt_num', 0),
-                    aperture=dict_stuff.get('apername', ''),
-                    subarray=dict_stuff.get('subarray', ''),
-                    pupil=dict_stuff.get('pupil', ''))
+                    aperture=dict_stuff.get('apername', DEFAULT_MODEL_CHARFIELD),
+                    subarray=dict_stuff.get('subarray', DEFAULT_MODEL_CHARFIELD),
+                    pupil=dict_stuff.get('pupil', DEFAULT_MODEL_CHARFIELD))
     assert isinstance(defaults, dict)
diff --git a/jwql/tests/test_edb.py b/jwql/tests/test_edb.py
index bdbb4b179..167b99345 100644
--- a/jwql/tests/test_edb.py
+++ b/jwql/tests/test_edb.py
@@ -17,7 +17,7 @@
     pytest -s test_edb.py
 """

-from datetime import datetime
+from datetime import datetime, timezone
 import os

 from astropy.table import Table
@@ -65,10 +65,10 @@ def test_change_only_bounding_points():
     """Make sure we correctly add starting and ending time entries to a
     set of change-only data
     """
-    dates = [datetime(2022, 3, 2, 12, i) for i in range(10)]
+    dates = [datetime(2022, 3, 2, 12, i, tzinfo=timezone.utc) for i in range(10)]
     values = np.arange(10)
-    starting_time = datetime(2022, 3, 2, 12, 3, 3)
-    ending_time = datetime(2022, 3, 2, 12, 8, 4)
+    starting_time = datetime(2022, 3, 2, 12, 3, 3, tzinfo=timezone.utc)
+    ending_time = datetime(2022, 3, 2, 12, 8, 4, tzinfo=timezone.utc)

     new_dates, new_values = ed.change_only_bounding_points(dates, values, starting_time, ending_time)
diff --git a/jwql/tests/test_edb_telemetry_monitor.py b/jwql/tests/test_edb_telemetry_monitor.py
index 8ddd21f6e..b2f17cf31 100644
--- a/jwql/tests/test_edb_telemetry_monitor.py
+++ b/jwql/tests/test_edb_telemetry_monitor.py
@@ -43,40 +43,46 @@ def test_add_every_change_history():
     """Test that every_change data is correctly combined with an existing
     set of every_change data
     """
-    dates1 = np.array([datetime.datetime(2022, 3, 4, 1, 5, i) for i in range(10)])
-    data1 = np.array([0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2])
-    means1 = 0.15
-    devs1 = 0.07
-    dates2 = np.array([dates1[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)])
-    data2 = np.array([0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4])
-    means2 = 0.35
-    devs2 = 0.07
-    ec1 = {'0.15': (dates1, data1, means1, devs1),
-           '0.35': (dates2, data2, means2, devs2)
-           }
-    ec2 = {'0.15': (dates1, data1, means1, devs1)}
-    combine1 = etm.add_every_change_history(ec1, ec2)
-    expected1 = defaultdict(list)
-    expected1['0.15'] = (np.append(dates1, dates1), np.append(data1, data1), np.append(means1, means1), np.append(devs1, devs1))
-    expected1['0.35'] = (dates2, data2, means2, devs2)
-
-    for key in combine1:
-        print('compare ', key)
-        for i, cele in enumerate(combine1[key]):
-            assert np.all(cele == expected1[key][i])
+    dates1 = [datetime.datetime(2022, 3, 4, 1, 5, i) for i in range(10)]
+    data1 = [0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2]
+    means1 = [0.15]
+    devs1 = [0.07]
+    dates2 = [dates1[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)]
+    data2 = [0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4]
+    means2 = [0.35]
+    devs2 = [0.07]
+    history = {'0.15': ([dates1], [data1], [means1], [devs1]),
+               '0.35': ([dates2], [data2], [means2], [devs2])
+               }
+    ec2 = {'0.15': (dates2, data2, means2, devs2)}
+    combine1 = etm.add_every_change_history(history, ec2)

-    dates3 = np.array([dates2[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)])
-    ec3 = {'0.55': (dates3, data2 + 0.2, means2 + 0.2, devs2)}
-    combine2 = etm.add_every_change_history(ec1, ec3)
+    expected1 = defaultdict(list)
+    expected_dates = [dates1]
+    expected_dates.append(dates2)
+    expected_data = [data1]
+    expected_data.append(data2)
+    expected_means = [means1]
+    expected_means.append(means2)
+    expected_devs = [devs1]
+    expected_devs.append(devs2)
+    expected1['0.15'] = (expected_dates, expected_data, expected_means, expected_devs)
+    expected1['0.35'] = ([dates2], [data2], [means2], [devs2])
+    assert combine1 == expected1
+
+    dates3 = [dates2[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)]
+    data3 = [e + 0.2 for e in data2]
+    means3 = [0.55]
+    ec3 = {'0.55': (dates3, data3, means3, devs2)}
+    combine2 = etm.add_every_change_history(history, ec3)

     expected2 = defaultdict(list)
-    expected2['0.15'] = (dates1, data1, means1, devs1)
-    expected2['0.35'] = (dates2, data2, means2, devs2)
-    expected2['0.55'] = (dates3, data2 + 0.2, means2 + 0.2, devs2)
-
-    for key in combine2:
-        print('compare ', key)
-        for i, cele in enumerate(combine2[key]):
-            assert np.all(cele == expected2[key][i])
+    expected2['0.15'] = ([dates1], [data1], [means1], [devs1])
+    expected2['0.35'] = ([dates2], [data2], [means2], [devs2])
+    expected2['0.55'] = ([dates3], [data3], [means3], [devs2])
+    assert combine2 == expected2


 def test_change_only_add_points():
@@ -299,11 +305,11 @@ def test_organize_every_change():
     f770mean, _, _ = sigma_clipped_stats(f770_vals, sigma=3)
     f1000mean, _, _ = sigma_clipped_stats(f1000_vals, sigma=3)
     f1500mean, _, _ = sigma_clipped_stats(f1500_vals, sigma=3)
-    expected = {'F2550W': (np.array(dates[f2550_idx]), f2550_vals, MIRI_POS_RATIO_VALUES['FW']['F2550W'][0]),
-                'F560W': (np.array(dates[f560_idx]), f560_vals, MIRI_POS_RATIO_VALUES['FW']['F560W'][0]),
-                'F770W': (np.array(dates[f770_idx]), f770_vals, MIRI_POS_RATIO_VALUES['FW']['F770W'][0]),
-                'F1000W': (np.array(dates[f1000_idx]), f1000_vals, MIRI_POS_RATIO_VALUES['FW']['F1000W'][0]),
-                'F1500W': (np.array(dates[f1500_idx]), f1500_vals, MIRI_POS_RATIO_VALUES['FW']['F1500W'][0])}
+    expected = {'F2550W': (np.array(dates[f2550_idx]), f2550_vals, [MIRI_POS_RATIO_VALUES['FW']['F2550W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F2550W'][1]]),
+                'F560W': (np.array(dates[f560_idx]), f560_vals, [MIRI_POS_RATIO_VALUES['FW']['F560W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F560W'][1]]),
+                'F770W': (np.array(dates[f770_idx]), f770_vals, [MIRI_POS_RATIO_VALUES['FW']['F770W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F770W'][1]]),
+                'F1000W': (np.array(dates[f1000_idx]), f1000_vals, [MIRI_POS_RATIO_VALUES['FW']['F1000W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F1000W'][1]]),
+                'F1500W': (np.array(dates[f1500_idx]), f1500_vals, [MIRI_POS_RATIO_VALUES['FW']['F1500W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F1500W'][1]])}

     for key, val in expected.items():
         assert np.all(val[0] == data[key][0])
diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py
index 92e2774d5..5114542d6 100644
--- a/jwql/utils/constants.py
+++ b/jwql/utils/constants.py
@@ -364,6 +364,9 @@
     "wfscmb",
 ]

+# Default Model Values
+DEFAULT_MODEL_CHARFIELD = "empty"
+
 # Filename Component Lengths
 FILE_AC_CAR_ID_LEN = 4
 FILE_AC_O_ID_LEN = 3
diff --git a/jwql/utils/interactive_preview_image.py b/jwql/utils/interactive_preview_image.py
index 1ffd2ab7b..1845e0d91 100644
--- a/jwql/utils/interactive_preview_image.py
+++ b/jwql/utils/interactive_preview_image.py
@@ -554,7 +554,7 @@ def add_interactive_controls(self, images, color_bars):
         # JS callbacks for client side controls

         # set alternate image visibility when scale selection changes
-        scale_group.js_on_click(CustomJS(args={'i1': images[0], 'c1': color_bars[0],
+        scale_group.js_on_change('active', CustomJS(args={'i1': images[0], 'c1': color_bars[0],
                                                'i2': images[1], 'c2': color_bars[1]},
                                          code="""
                 if (i1.visible == true) {
@@ -594,10 +594,10 @@
             limit_high.js_link('value', color_bars[i].color_mapper, 'high')

         # reset boxes to preset range on button click
-        reset.js_on_click(limit_reset)
+        reset.js_on_event('button_click', limit_reset)

         # also reset when swapping limit style
-        scale_group.js_on_click(limit_reset)
+        scale_group.js_on_change('active', limit_reset)

         # return widgets
         spacer = Spacer(height=20)
diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py
index e0794cc1b..120a34e11 100644
--- a/jwql/utils/utils.py
+++ b/jwql/utils/utils.py
@@ -27,6 +27,7 @@
     - JWST TR JWST-STScI-004800, SM-12
 """

+import datetime
 import getpass
 import glob
 import itertools
@@ -153,6 +154,25 @@ def _validate_config(config_file_dict):
     )


+def add_timezone_to_datetime(dt):
+    """Check to see if timezone information is present in the given
+    datetime.datetime object. If not, set it to UTC.
+
+    Parameters
+    ----------
+    dt : datetime.datetime
+        Datetime object
+
+    Returns
+    -------
+    dt : datetime.datetime
+        Datetime object with UTC timezone info added
+    """
+    if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
+        dt = dt.replace(tzinfo=datetime.timezone.utc)
+    return dt
+
+
 def create_png_from_fits(filename, outdir):
     """Create and save a png file of the provided file. The
     file will be saved with the same filename as the input file, but
diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py
index 071df24a8..bb7ffe481 100755
--- a/jwql/website/apps/jwql/archive_database_update.py
+++ b/jwql/website/apps/jwql/archive_database_update.py
@@ -26,7 +26,7 @@
     Use the '--fill_empty' argument to provide a model and field. Updates ALL fields for any model with empty/null/0 specified field
         $ python archive_database_update.py --fill_empty rootfileinfo expstart
         WARNING: Not all fields will be populated by all model objects. This will result in updates that may not be necessary.
-                 While this will not disturb the data, it has the potential to increase run time.
+                 While this will not disturb the data, it has the potential to increase run time.
+                 Select the field that is most pertinent to the models you need updated, to minimize run time.

     Use the 'update' argument to update every rootfileinfo data model with the most complete information from MAST
@@ -49,6 +49,7 @@
 from django.apps import apps

 from jwql.utils.protect_module import lock_module
+from jwql.utils.constants import DEFAULT_MODEL_CHARFIELD

 # These lines are needed in order to use the Django models in a standalone
 # script (as opposed to code run as a result of a webpage request). If these
@@ -160,6 +161,16 @@ def get_updates(update_database):
         create_archived_proposals_context(inst)


+@log_info
+@log_fail
+def cleanup_past_runs():
+    logging.info("Starting cleanup_past_runs")
+    rootfileinfo_field_set = ["filter", "detector", "exp_type", "read_patt", "grating", "read_patt_num", "aperture", "subarray", "pupil", "expstart"]
+    # Consume iterator created in map with list in order to make it run
+    list(map(lambda x: fill_empty_model("rootfileinfo", x), rootfileinfo_field_set))
+    logging.info("Finished cleanup_past_runs")
+
+
 def get_all_possible_filenames_for_proposal(instrument, proposal_num):
     """Wrapper around a MAST query for filenames from a given instrument/proposal
@@ -332,15 +343,15 @@ def update_database_table(update, instrument, prop, obs, thumbnail, obsfiles, ty
             # Updating defaults only on update or creation to prevent call to mast_query_by_rootname on every file name.
             defaults_dict = mast_query_by_rootname(instrument, file)

-            defaults = dict(filter=defaults_dict.get('filter', ''),
-                            detector=defaults_dict.get('detector', ''),
-                            exp_type=defaults_dict.get('exp_type', ''),
-                            read_patt=defaults_dict.get('readpatt', ''),
-                            grating=defaults_dict.get('grating', ''),
-                            read_patt_num=defaults_dict.get('patt_num', 0),
-                            aperture=defaults_dict.get('apername', ''),
-                            subarray=defaults_dict.get('subarray', ''),
-                            pupil=defaults_dict.get('pupil', ''),
+            defaults = dict(filter=defaults_dict.get('filter', DEFAULT_MODEL_CHARFIELD),
+                            detector=defaults_dict.get('detector', DEFAULT_MODEL_CHARFIELD),
+                            exp_type=defaults_dict.get('exp_type', DEFAULT_MODEL_CHARFIELD),
+                            read_patt=defaults_dict.get('readpatt', DEFAULT_MODEL_CHARFIELD),
+                            grating=defaults_dict.get('grating', DEFAULT_MODEL_CHARFIELD),
+                            read_patt_num=defaults_dict.get('patt_num', 1),
+                            aperture=defaults_dict.get('apername', DEFAULT_MODEL_CHARFIELD),
+                            subarray=defaults_dict.get('subarray', DEFAULT_MODEL_CHARFIELD),
+                            pupil=defaults_dict.get('pupil', DEFAULT_MODEL_CHARFIELD),
                             expstart=defaults_dict.get('expstart', 0.0))

             for key, value in defaults.items():
@@ -369,10 +380,14 @@ def fill_empty_model(model_name, model_field):

     '''
+    is_proposal = (model_name == "proposal")
+    is_rootfileinfo = (model_name == "rootfileinfo")
+    rootfile_info_fields_default_ok = ["filter", "grating", "pupil"]
+
     model_field_null = model_field + "__isnull"
     model_field_empty = model_field + "__exact"

-    model = apps.get_model('jwql', model_name)
+    model = apps.get_model("jwql", model_name)
     null_models = empty_models = zero_models = model.objects.none()

     # filter(field__isnull=True)
@@ -387,6 +402,13 @@
     except ValueError:
         pass

+    # filter(field__exact=DEFAULT_MODEL_CHARFIELD)
+    try:
+        if is_proposal or model_field not in rootfile_info_fields_default_ok:
+            empty_models = model.objects.filter(**{model_field_empty: DEFAULT_MODEL_CHARFIELD})
+    except ValueError:
+        pass
+
     # filter(field=0)
     try:
         zero_models = model.objects.filter(**{model_field: 0})
@@ -396,9 +418,9 @@
     model_set = null_models | empty_models | zero_models
     if model_set.exists():
         logging.info(f'{model_set.count()} models to be updated')
-        if model_name == 'proposal':
+        if is_proposal:
             fill_empty_proposals(model_set)
-        elif model_name == 'rootfileinfo':
+        elif is_rootfileinfo:
             fill_empty_rootfileinfo(model_set)
         else:
             logging.warning(f'Filling {model_name} model is not currently implemented')
@@ -458,18 +480,21 @@ def fill_empty_rootfileinfo(rootfileinfo_set):
     for rootfileinfo_mod in rootfileinfo_set:
         defaults_dict = mast_query_by_rootname(rootfileinfo_mod.instrument, rootfileinfo_mod.root_name)

-        defaults = dict(filter=defaults_dict.get('filter', ''),
-                        detector=defaults_dict.get('detector', ''),
-                        exp_type=defaults_dict.get('exp_type', ''),
-                        read_patt=defaults_dict.get('readpatt', ''),
-                        grating=defaults_dict.get('grating', ''),
-                        read_patt_num=defaults_dict.get('patt_num', 0),
-                        aperture=defaults_dict.get('apername', ''),
-                        subarray=defaults_dict.get('subarray', ''),
-                        pupil=defaults_dict.get('pupil', ''),
+        defaults = dict(filter=defaults_dict.get('filter', DEFAULT_MODEL_CHARFIELD),
+                        detector=defaults_dict.get('detector', DEFAULT_MODEL_CHARFIELD),
+                        exp_type=defaults_dict.get('exp_type', DEFAULT_MODEL_CHARFIELD),
+                        read_patt=defaults_dict.get('readpatt', DEFAULT_MODEL_CHARFIELD),
+                        grating=defaults_dict.get('grating', DEFAULT_MODEL_CHARFIELD),
+                        read_patt_num=defaults_dict.get('patt_num', 1),
+                        aperture=defaults_dict.get('apername', DEFAULT_MODEL_CHARFIELD),
+                        subarray=defaults_dict.get('subarray', DEFAULT_MODEL_CHARFIELD),
+                        pupil=defaults_dict.get('pupil', DEFAULT_MODEL_CHARFIELD),
                         expstart=defaults_dict.get('expstart', 0.0))

         for key, value in defaults.items():
+            # Final check to verify no None exists
+            if value is None:
+                value = DEFAULT_MODEL_CHARFIELD
             setattr(rootfileinfo_mod, key, value)
         try:
             rootfileinfo_mod.save()
@@ -496,6 +521,7 @@ def protected_code(update_database, fill_empty_list):
         fill_empty_model(fill_empty_list[0], fill_empty_list[1])
     else:
         get_updates(update_database)
+        cleanup_past_runs()


 if __name__ == '__main__':
diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py
index 5c162d4bf..1300a6b6a 100644
--- a/jwql/website/apps/jwql/data_containers.py
+++ b/jwql/website/apps/jwql/data_containers.py
@@ -475,7 +475,6 @@ def get_additional_exposure_info(root_file_infos, image_info):
         additional_info['TITLE'] = header.get('TITLE', 'N/A')
         additional_info['PI_NAME'] = header.get('PI_NAME', 'N/A')
         additional_info['TARGNAME'] = header.get('TARGPROP', 'N/A')
-        additional_info['BRADTEST'] = header.get('BRADTEST', 'N/A')

         # For the exposure level (i.e. multiple files) present the target
         # RA and Dec. For the image level, give RA_REF, DEC_REF, since those
diff --git a/jwql/website/apps/jwql/migrations/0020_alter_proposal_category_and_more.py b/jwql/website/apps/jwql/migrations/0020_alter_proposal_category_and_more.py
new file mode 100644
index 000000000..afdc7686c
--- /dev/null
+++ b/jwql/website/apps/jwql/migrations/0020_alter_proposal_category_and_more.py
@@ -0,0 +1,63 @@
+# Generated by Django 4.1.7 on 2024-03-20 14:34
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('jwql', '0019_alter_fgsreadnoisequeryhistory_aperture_and_more'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='proposal',
+            name='category',
+            field=models.CharField(default='empty', help_text='Category Type', max_length=10),
+        ),
+        migrations.AlterField(
+            model_name='proposal',
+            name='thumbnail_path',
+            field=models.CharField(default='empty', help_text='Path to the proposal thumbnail', max_length=1000),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='aperture',
+            field=models.CharField(blank=True, default='empty', help_text='Aperture', max_length=40, null=True),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='detector',
+            field=models.CharField(blank=True, default='empty', help_text='Detector', max_length=40, null=True),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='exp_type',
+            field=models.CharField(blank=True, default='empty', help_text='Exposure Type', max_length=40, null=True),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='filter',
+            field=models.CharField(blank=True, default='empty', help_text='Instrument name', max_length=7, null=True),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='grating',
+            field=models.CharField(blank=True, default='empty', help_text='Grating', max_length=40, null=True),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='pupil',
+            field=models.CharField(blank=True, default='empty', help_text='Pupil', max_length=40, null=True),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='read_patt',
+            field=models.CharField(blank=True, default='empty', help_text='Read Pattern', max_length=40, null=True),
+        ),
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='subarray',
+            field=models.CharField(blank=True, default='empty', help_text='Subarray', max_length=40, null=True),
+        ),
+    ]
diff --git a/jwql/website/apps/jwql/migrations/0021_alter_rootfileinfo_read_patt_num.py b/jwql/website/apps/jwql/migrations/0021_alter_rootfileinfo_read_patt_num.py
new file mode 100644
index 000000000..96f6e6d58
--- /dev/null
+++ b/jwql/website/apps/jwql/migrations/0021_alter_rootfileinfo_read_patt_num.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.1.7 on 2024-04-05 18:08
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('jwql', '0020_alter_proposal_category_and_more'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='rootfileinfo',
+            name='read_patt_num',
+            field=models.IntegerField(default=1, help_text='Read Pattern Number'),
+        ),
+    ]
diff --git a/jwql/website/apps/jwql/models.py b/jwql/website/apps/jwql/models.py
index 5a51b7428..cd770cb18 100644
--- a/jwql/website/apps/jwql/models.py
+++ b/jwql/website/apps/jwql/models.py
@@ -31,6 +31,7 @@
 from django.db import models

 from jwql.utils.constants import (
+    DEFAULT_MODEL_CHARFIELD,
     MAX_LEN_APERTURE,
     MAX_LEN_DETECTOR,
     MAX_LEN_FILTER,
@@ -74,9 +75,9 @@ class Proposal(models.Model):
     """A class defining the model used to hold information about a given proposal"""
     # Fields
     prop_id = models.CharField(max_length=5, help_text="5-digit proposal ID string")
-    thumbnail_path = models.CharField(max_length=MAX_LEN_PATH, help_text='Path to the proposal thumbnail', default='')
+    thumbnail_path = models.CharField(max_length=MAX_LEN_PATH, help_text='Path to the proposal thumbnail', default=DEFAULT_MODEL_CHARFIELD)
     archive = models.ForeignKey(Archive, blank=False, null=False, on_delete=models.CASCADE)
-    category = models.CharField(max_length=10, help_text="Category Type", default='')
+    category = models.CharField(max_length=10, help_text="Category Type", default=DEFAULT_MODEL_CHARFIELD)

     # Metadata
     class Meta:
@@ -119,15 +120,15 @@ class RootFileInfo(models.Model):
     proposal = models.CharField(max_length=MAX_LEN_PROPOSAL, help_text="5-digit proposal ID string")
     root_name = models.TextField(primary_key=True, max_length=300)
     viewed = models.BooleanField(default=False)
-    filter = models.CharField(max_length=MAX_LEN_FILTER, help_text="Instrument name", default='', null=True, blank=True)
-    aperture = models.CharField(max_length=MAX_LEN_APERTURE, help_text="Aperture", default='', null=True, blank=True)
-    detector = models.CharField(max_length=MAX_LEN_DETECTOR, help_text="Detector", default='', null=True, blank=True)
-    read_patt_num = models.IntegerField(help_text='Read Pattern Number', default=0)
-    read_patt = models.CharField(max_length=MAX_LEN_READPATTERN, help_text="Read Pattern", default='', null=True, blank=True)
-    grating = models.CharField(max_length=MAX_LEN_GRATING, help_text="Grating", default='', null=True, blank=True)
-    subarray = models.CharField(max_length=MAX_LEN_SUBARRAY, help_text="Subarray", default='', null=True, blank=True)
-    pupil = models.CharField(max_length=MAX_LEN_PUPIL, help_text="Pupil", default='', null=True, blank=True)
-    exp_type = models.CharField(max_length=MAX_LEN_TYPE, help_text="Exposure Type", default='', null=True, blank=True)
+    filter = models.CharField(max_length=MAX_LEN_FILTER, help_text="Instrument name", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    aperture = models.CharField(max_length=MAX_LEN_APERTURE, help_text="Aperture", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    detector = models.CharField(max_length=MAX_LEN_DETECTOR, help_text="Detector", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    read_patt_num = models.IntegerField(help_text='Read Pattern Number', default=1)
+    read_patt = models.CharField(max_length=MAX_LEN_READPATTERN, help_text="Read Pattern", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    grating = models.CharField(max_length=MAX_LEN_GRATING, help_text="Grating", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    subarray = models.CharField(max_length=MAX_LEN_SUBARRAY, help_text="Subarray", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    pupil = models.CharField(max_length=MAX_LEN_PUPIL, help_text="Pupil", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    exp_type = models.CharField(max_length=MAX_LEN_TYPE, help_text="Exposure Type", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
     expstart = models.FloatField(help_text='Exposure Start Time', default=0.0)

     # Metadata
diff --git a/jwql/website/apps/jwql/monitor_models/edb.py b/jwql/website/apps/jwql/monitor_models/edb.py
index 01a617af6..8f5c12825 100644
--- a/jwql/website/apps/jwql/monitor_models/edb.py
+++ b/jwql/website/apps/jwql/monitor_models/edb.py
@@ -32,7 +32,7 @@
 from jwql.utils.constants import MAX_LEN_DEPENDENCY_VALUE, MAX_LEN_MNEMONIC
diff --git a/jwql/website/apps/jwql/monitor_models/edb.py b/jwql/website/apps/jwql/monitor_models/edb.py
index 01a617af6..8f5c12825 100644
--- a/jwql/website/apps/jwql/monitor_models/edb.py
+++ b/jwql/website/apps/jwql/monitor_models/edb.py
@@ -32,7 +32,7 @@
 from jwql.utils.constants import MAX_LEN_DEPENDENCY_VALUE, MAX_LEN_MNEMONIC
 
 
-class FGSEdbBlocksStats(models.Model):
+class FGSEdbBlockMeansStats(models.Model):
     mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True)
     latest_query = models.DateTimeField(blank=True, null=True)
     times = ArrayField(models.DateTimeField())
@@ -41,6 +41,7 @@ class FGSEdbBlocksStats(models.Model):
     median = ArrayField(models.FloatField())
     max = ArrayField(models.FloatField())
     min = ArrayField(models.FloatField())
+    delme = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True)
     entry_date = models.DateTimeField(blank=True, null=True)
 
     class Meta:
@@ -114,7 +115,7 @@ class Meta:
         unique_together = (('id', 'entry_date'),)
 
 
-class MIRIEdbBlocksStats(models.Model):
+class MIRIEdbBlockMeansStats(models.Model):
     mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True)
     latest_query = models.DateTimeField(blank=True, null=True)
     times = ArrayField(models.DateTimeField())
@@ -196,7 +197,7 @@ class Meta:
         unique_together = (('id', 'entry_date'),)
 
 
-class NIRCamEdbBlocksStats(models.Model):
+class NIRCamEdbBlockMeansStats(models.Model):
     mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True)
     latest_query = models.DateTimeField(blank=True, null=True)
     times = ArrayField(models.DateTimeField())
@@ -278,7 +279,7 @@ class Meta:
         unique_together = (('id', 'entry_date'),)
 
 
-class NIRISSEdbBlocksStats(models.Model):
+class NIRISSEdbBlockMeansStats(models.Model):
     mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True)
     latest_query = models.DateTimeField(blank=True, null=True)
     times = ArrayField(models.DateTimeField())
@@ -360,7 +361,7 @@ class Meta:
         unique_together = (('id', 'entry_date'),)
 
 
-class NIRSpecEdbBlocksStats(models.Model):
+class NIRSpecEdbBlockMeansStats(models.Model):
     mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True)
     latest_query = models.DateTimeField(blank=True, null=True)
     times = ArrayField(models.DateTimeField())
diff --git a/jwql/website/apps/jwql/monitor_pages/__init__.py b/jwql/website/apps/jwql/monitor_pages/__init__.py
index ed184d7ff..1d7b7ee18 100644
--- a/jwql/website/apps/jwql/monitor_pages/__init__.py
+++ b/jwql/website/apps/jwql/monitor_pages/__init__.py
@@ -1 +1,13 @@
+import os
+
 from .monitor_cosmic_rays_bokeh import CosmicRayMonitor
+
+
+from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS
+
+if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS:
+    # Need to set up django apps before we can access the models
+    import django  # noqa: E402 (module level import not at top of file)
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings")
+    django.setup()
+    from jwql.website.apps.jwql.monitor_models.edb import MIRIEdbBlockMeansStats, NIRCamEdbBlockMeansStats
diff --git a/jwql/website/apps/jwql/templates/view_exposure.html b/jwql/website/apps/jwql/templates/view_exposure.html
index 7b9d71856..2a9e5cf2f 100644
--- a/jwql/website/apps/jwql/templates/view_exposure.html
+++ b/jwql/website/apps/jwql/templates/view_exposure.html
@@ -18,7 +18,7 @@
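The renamed `*EdbBlockMeansStats` models are reached through the same guarded-import pattern that `monitor_pages/__init__.py` adopts above, so documentation builds and CI never touch the database. A sketch of a consumer querying the renamed NIRCam table (the query shape is assumed, not taken from this PR):

```python
# Sketch only: reuse the ON_GITHUB_ACTIONS / ON_READTHEDOCS guard from this
# changeset before importing ORM models.
import os

from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS

if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS:
    import django

    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings")
    django.setup()

    # Formerly NIRCamEdbBlocksStats; renamed in this changeset.
    from jwql.website.apps.jwql.monitor_models.edb import NIRCamEdbBlockMeansStats

    latest = NIRCamEdbBlockMeansStats.objects.order_by("-entry_date").first()
    if latest is not None:
        print(latest.mnemonic, latest.latest_query)
```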