From 9e0716a2845ae996f595300bf2238ab66bcf47e8 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Fri, 15 Mar 2024 15:04:17 -0400 Subject: [PATCH 01/28] get every_change data flow working --- jwql/edb/engineering_database.py | 9 +- .../common_monitors/edb_telemetry_monitor.py | 307 +++++++++++++++--- 2 files changed, 272 insertions(+), 44 deletions(-) diff --git a/jwql/edb/engineering_database.py b/jwql/edb/engineering_database.py index b4812d81b..4f2c2d61c 100644 --- a/jwql/edb/engineering_database.py +++ b/jwql/edb/engineering_database.py @@ -1265,8 +1265,13 @@ def timed_stats(self, sigma=3): good = ((date_arr >= min_date) & (date_arr < max_date)) if self.meta['TlmMnemonics'][0]['AllPoints'] != 0: avg, med, dev = sigma_clipped_stats(self.data["euvalues"][good], sigma=sigma) - maxval = np.max(self.data["euvalues"][good]) - minval = np.min(self.data["euvalues"][good]) + # if self.data is empty, or good is empty, then calculating the max and + # min values will not work. + try: + maxval = np.max(self.data["euvalues"][good]) + minval = np.min(self.data["euvalues"][good]) + except ValueError: + pass else: avg, med, dev, maxval, minval = change_only_stats(self.data["dates"][good], self.data["euvalues"][good], sigma=sigma) if np.isfinite(avg): diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index f07e48875..114b15fe4 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -381,7 +381,7 @@ from bokeh.embed import components, json_item from bokeh.layouts import gridplot from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d -from bokeh.models.layouts import Tabs +from bokeh.models.layouts import TabPanel, Tabs from bokeh.plotting import figure, output_file, save, show from bokeh.palettes import Turbo256 from jwql.database import database_interface @@ -556,13 +556,15 @@ def add_new_every_change_db_entry(self, mnem, mnem_dict, dependency_name, query_ times = ensure_list(times) values = ensure_list(values) + # medians and stdevs will be single-element lists, so provide the + # 0th element to the database entry db_entry = {'mnemonic': mnem, 'dependency_mnemonic': dependency_name, 'dependency_value': key, 'mnemonic_value': values, 'time': times, - 'median': medians, - 'stdev': stdevs, + 'median': medians[0], + 'stdev': stdevs[0], 'latest_query': query_time, 'entry_date': datetime.datetime.now() } @@ -703,7 +705,7 @@ def execute(self, mnem_to_query=None, plot_start=None, plot_end=None): # as defined by the json files. This is the default operation. 
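# NOTE (illustrative, not from the patch): execute() appears to run in one of
# two modes; when called with mnem_to_query it presumably re-processes only the
# named mnemonics, while the default path below walks each instrument's json
# file of mnemonics to monitor.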
# Loop over instruments - for instrument_name in JWST_INSTRUMENT_NAMES: + for instrument_name in ['miri']: # JWST_INSTRUMENT_NAMES: ########UNCOMMETN BEFORE MERGING###### monitor_dir = os.path.dirname(os.path.abspath(__file__)) # File of mnemonics to monitor @@ -1129,8 +1131,21 @@ def get_history_every_change(self, mnemonic, start_date, end_date): if row.dependency_value in hist: if len(hist[row.dependency_value]) > 0: times, values, medians, devs = hist[row.dependency_value] - medians = [medians] - devs = [devs] + + """ + if row.dependency_value == 'F1000W': + print('BEFORE NEXT ENTRY, RETRIEVED DATA:') + for e in times: + print(e) + print('') + for e in medians: + print(e) + print('') + """ + + + + else: times = [] values = [] @@ -1140,15 +1155,50 @@ def get_history_every_change(self, mnemonic, start_date, end_date): # Keep only data that fall at least partially within the plot range if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): - times.extend(row.time) - values.extend(row.mnemonic_value) - medians.append(row.median) - devs.append(row.stdev) + times.append(row.time) + values.append(row.mnemonic_value) + medians.append([row.median]) + devs.append([row.stdev]) hist[row.dependency_value] = (times, values, medians, devs) + + """ + if row.dependency_value == 'F1000W': + print('AFTER NEXT ENTRY:') + for e in times: + print(e) + print('') + for e in medians: + print(e) + print('') + for e in hist[row.dependency_value][0]: + print(e) + print('') + for e in hist[row.dependency_value][2]: + print(e) + print('') + """ + + + + + + + else: if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): - hist[row.dependency_value] = (row.time, row.mnemonic_value, row.median, row.stdev) + hist[row.dependency_value] = ([row.time], [row.mnemonic_value], [[row.median]], [[row.stdev]]) + if row.dependency_value == 'F1000W': + print('INITIAL ENTRY:') + for e in hist[row.dependency_value][0]: + print(e) + print('') + for e in hist[row.dependency_value][2]: + print(e) + print('') + + + return hist @@ -1431,7 +1481,12 @@ def multiday_mnemonic_query(self, mnemonic_dict, starting_time_list, ending_time # Combine the mean values and median time data from multiple days into a single EdbMnemonic # instance. multiday_table["dates"] = multiday_median_times - multiday_table["euvalues"] = multiday_median_vals + + if telemetry_type != 'all': + multiday_table["euvalues"] = multiday_median_vals + else: + multiday_table["euvalues"] = multiday_mean_vals + all_data = ed.EdbMnemonic(identifier, starting_time_list[0], ending_time_list[-1], multiday_table, meta, info) all_data.stdev = multiday_stdev_vals @@ -1485,13 +1540,25 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): if plot_end is None: plot_end = self._today + + + + # SPEED UP TESTING. REMOVE BEFORE MERGING + plot_start = self._today - datetime.timedelta(days=3.) + plot_end = self._today + + + + + + # Only used as fall-back plot range for cases where there is no data self._plot_start = plot_start self._plot_end = plot_end # At the top level, we loop over the different types of telemetry. These types # largely control if/how the data will be averaged. 
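# NOTE (illustrative sketch, assumed shape; not part of the patch): the
# top-level keys of mnemonic_dict group mnemonics by averaging strategy, and
# the loop below dispatches each group to the matching averaging/plotting path.
mnemonic_dict = {
    "daily_means": [],     # one sigma-clipped mean per day
    "block_means": [],     # one mean per dependency-defined block
    "every_change": [],    # stats for each value of a dependency mnemonic
    "time_interval": [],   # stats over fixed-length time bins
    "all": [],             # raw telemetry points, no averaging
}
# Each list would hold the mnemonic definition dicts read from the instrument's
# json file (entries with "name" and "dependency" fields, as used elsewhere here).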
- for telemetry_kind in mnemonic_dict: + for telemetry_kind in ['every_change']: # mnemonic_dict: ##############UNCOMMENT BEFORE MERGING telem_type = telemetry_kind logging.info(f'Working on telemetry_type: {telem_type}') @@ -1570,6 +1637,14 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): create_new_history_entry = False starttime = None + + + + # SPEED UP TESTING - REMOVE BEFORE MERGING + starttime = plot_start + + + else: # In the case where telemetry data have no averaging done, we do not store the data # in the JWQL database, in order to save space. So in this case, we will retrieve @@ -1651,8 +1726,9 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): # Add new data to JWQLDB. # If no new data were retrieved from the EDB, then there is no need to add an entry to the JWQLDB if create_new_history_entry: - self.add_new_every_change_db_entry(new_data.mnemonic_identifier, every_change_data, mnemonic['dependency'][0]["name"], - query_start_times[-1]) + #self.add_new_every_change_db_entry(new_data.mnemonic_identifier, every_change_data, mnemonic['dependency'][0]["name"], + # query_start_times[-1]) + pass # UNCOMMENT ABOVE BEFORE MERGING else: logging.info("No new data retrieved from EDB, so no new entry added to JWQLDB") @@ -1760,7 +1836,7 @@ def tabbed_figure(self, ncols=2): grid = gridplot(plot_list, ncols=ncols, merge_tools=False) # Create one panel for each plot category - panel_list.append(Panel(child=grid, title=key)) + panel_list.append(TabPanel(child=grid, title=key)) # Assign the panels to Tabs tabbed = Tabs(tabs=panel_list) @@ -1778,6 +1854,15 @@ def add_every_change_history(dict1, dict2): """Combine two dictionaries that contain every change data. For keys that are present in both dictionaries, remove any duplicate entries based on date. + For the every change data at the moment (MIRI), the key values + are filter names, and the values are data corresponding to those + filters. The median and stdev values for each filter come from + MIRI_POS_RATIO_VALUES in constants.py. So for a given filter, it + is safe (and in fact necessary for plotting purposes) to have only + a single value for the median, and the same for stdev. So in combining + the dictionaries, we combine dates and data values, but keep only a + single value for median and stdev. 
+ Parameters ---------- dict1 : dict @@ -1793,19 +1878,96 @@ def add_every_change_history(dict1, dict2): """ combined = defaultdict(list) + """ + Looks good + print('Before combining:') + print(dict1['F1000W'][0]) + print(dict1['F1000W'][2]) + print('') + for e in dict1['F1000W'][0]: + print(e) + for e in dict1['F1000W'][2]: + print(e) + """ + + + + + + + + for key, value in dict1.items(): + all_dates = [] + all_values = [] + all_medians = [] + all_devs = [] if key in dict2: - if np.min(value[0]) < np.min(dict2[key][0]): - all_dates = np.append(value[0], dict2[key][0]) - all_data = np.append(value[1], dict2[key][1]) - all_medians = np.append(value[2], dict2[key][2]) - all_devs = np.append(value[3], dict2[key][3]) + if key == 'F1000W': + print(type(value)) # tuple + print(type(value[0])) # list (of lists) + print(value[0]) #- list of lists + print('') + for v0, v2 in zip(value[0], value[2]): + print(type(v0), v0) + print(type(v2), v2) + print('') + print('') + print(type(dict2[key][0]), dict2[key][0]) + #print(dict1[key]) #- tuple(array of times, array of data, list of medians, list of stdevs) + + + #print(type(value[0])) + #print(type(np.array(value[0]))) + + + #print(np.min(np.array(value[0]))) + #print(np.min(dict2[key][0])) + + min_time_dict1 = min(min(m) for m in value[0]) + if min_time_dict1 < np.min(dict2[key][0]): + #all_dates = np.append(value[0], dict2[key][0]) + #all_data = np.append(value[1], dict2[key][1]) + + all_dates = value[0] + all_dates.append(list(dict2[key][0])) + + all_values = value[1] + all_values.append(list(dict2[key][1])) + + all_medians = value[2] + all_medians.append(list(dict2[key][2])) + + all_devs = value[3] + all_devs.append(list(dict2[key][3])) + + #all_medians = np.append(value[2], dict2[key][2]) + #all_devs = np.append(value[3], dict2[key][3]) else: - all_dates = np.append(dict2[key][0], value[0]) - all_data = np.append(dict2[key][1], value[1]) - all_medians = np.append(dict2[key][2], value[2]) - all_devs = np.append(dict2[key][3], value[3]) + # Seems unlikely we'll ever want to be here. This would be + # for a case where a given set of values has an earliest date + # that is earlier than anything in the database. + #all_dates = np.append(dict2[key][0], value[0]) + #all_data = np.append(dict2[key][1], value[1]) + #all_medians = np.append(dict2[key][2], value[2]) + #all_devs = np.append(dict2[key][3], value[3]) + all_dates = [list(dict2[key][0])] + all_dates.extend(value[0]) + + all_values = [list(dict2[key][1])] + all_values.extend(value[1]) + + all_medians = [list(dict2[key][2])] + all_medians.extend(value[2]) + + all_devs = [list(dict2[key][3])] + all_devs.extend(value[3]) + + # Remove any duplicates + #unique_dates, unique_idx = np.unique(all_dates, return_index=True) + #all_dates = all_dates[unique_idx] + #all_data = all_data[unique_idx] # Not sure how to treat duplicates here. If we remove duplicates, then # the mean values may not be valid any more. For example, if there is a @@ -1813,19 +1975,48 @@ def add_every_change_history(dict1, dict2): # those 4 hours of entries, but then what would we do with the mean values # that cover those times. Let's instead warn the user if there are duplicate # entries, but don't take any action - unique_dates = np.unique(all_dates, return_index=False) - if len(unique_dates) != len(all_dates): - logging.info(("WARNING - There are duplicate entries in the every-change history " - "and the new entry. 
Keeping and plotting all values, but be sure the " - "data look ok.")) - updated_value = (all_dates, all_data, all_medians, all_devs) + #unique_dates = np.unique(all_dates, return_index=False) + #if len(unique_dates) != len(all_dates): + # n_duplicates = len(unique_dates) != len(all_dates) + # logging.info((f"WARNING - There are {n_duplicates} duplicate entries in the " + # f"every-change history (total length {value[0]}) and the new entry " + # f"(total length {dict2[key][0]}). Keeping and plotting all values, " + # "but be sure the data look ok.")) + updated_value = (all_dates, all_values, all_medians, all_devs) combined[key] = updated_value else: combined[key] = value + + if key == 'F1000W': + print('before dict2 only keys:') + for e in combined[key][0]: + print(e) + print('') + print('') + for e in combined[key][2]: + print(e) + print('') + print('') + + + + + + logging.info(f'In add_every_change_history: key: {key}, len data: {len(all_dates)}, median: {all_medians}, dev: {all_devs}') # Add entries for keys that are in dict2 but not dict1 for key, value in dict2.items(): if key not in dict1: combined[key] = value + + logging.info(f'dict2 only add_every_change_history: key: {key}, len data: {len(value[0])}, median: {dict2[key][2]}, dev: {dict2[key][3]}') + + + #print('after dict2 only keys:') + #for e in combined['F1000W'][0]: + # print(e) + #print('') + #for e in combined['F1000W'][2]: + # print(e) return combined @@ -1968,7 +2159,7 @@ def organize_every_change(mnemonic): # Normalize by the expected value medianval, stdevval = MIRI_POS_RATIO_VALUES[mnemonic.mnemonic_identifier.split('_')[2]][val] - all_data[val] = (val_times, val_data, medianval, stdevval) + all_data[val] = (val_times, val_data, [medianval], [stdevval]) return all_data @@ -2086,15 +2277,47 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True for (key, value), color in zip(data.items(), colors): if len(value) > 0: val_times, val_data, normval, stdevval = value - val_data = np.array(val_data) - dependency_val = np.repeat(key, len(val_times)) + + + print('in plotting code') + print('val_times is:') + print(val_times) + print('\n') + print('normval is:') + print(normval) + print('') + + # At this point, val_times and val_data will be a list of numpy arrays + # normval and stdevval will be lists. 
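# NOTE (illustrative shapes, assumed; not from the patch): with two JWQLDB
# entries for one filter key, the tuple unpacked above would look like
#     val_times = [array([t0, t1]), array([t2, t3])]  # one sub-array per entry
#     val_data  = [array([1.01, 0.99]), array([1.02, 0.98])]
#     normval   = [[1.0], [1.0]]    # single expected median per entry
#     stdevval  = [[0.02], [0.02]]  # single expected stdev per entry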
First, iterate through the lists + # and normalize the data values in each element by the corresponding + # normval (expected value) + all_val_data = [] + all_val_times = [] + for time_ele, data_ele, norm_ele in zip(val_times, val_data, normval): + if type(data_ele[0]) not in [np.str_, str]: + + + print(type(data_ele), data_ele) + print(type(norm_ele), norm_ele) + + data_ele_arr = np.array(data_ele) / norm_ele[0] + #data_ele /= norm_ele[0] + all_val_data.extend(list(data_ele)) + all_val_times.extend(time_ele) + logging.info(f'key: {key}, len_data: {len(data_ele)}, firstentry: {data_ele[0]}, stats: {norm_ele}') + + all_val_data = np.array(all_val_data) + all_val_times = np.array(all_val_times) + dependency_val = np.repeat(key, len(all_val_times)) + #val_data = np.array(val_data) + #dependency_val = np.repeat(key, len(val_times)) # Normalize by normval (the expected value) so that all data will fit on one plot easily - if type(val_data[0]) not in [np.str_, str]: - logging.info(f'key: {key}, len_data: {len(val_data)}, firstentry: {val_data[0]}, stats: {normval}, {stdevval}') - val_data /= normval + #if type(val_data[0]) not in [np.str_, str]: + # logging.info(f'key: {key}, len_data: {len(val_data)}, firstentry: {val_data[0]}, stats: {normval}, {stdevval}') + # val_data /= normval - source = ColumnDataSource(data={'x': val_times, 'y': val_data, 'dep': dependency_val}) + source = ColumnDataSource(data={'x': all_val_times, 'y': all_val_data, 'dep': dependency_val}) ldata = fig.line(x='x', y='y', line_width=1, line_color=Turbo256[color], source=source, legend_label=key) cdata = fig.circle(x='x', y='y', fill_color=Turbo256[color], size=8, source=source, legend_label=key) @@ -2106,10 +2329,10 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True hover_tool.formatters = {'@x': 'datetime'} fig.tools.append(hover_tool) - if np.min(val_times) < min_time: - min_time = np.min(val_times) - if np.max(val_times) > max_time: - max_time = np.max(val_times) + if np.min(all_val_times) < min_time: + min_time = np.min(all_val_times) + if np.max(all_val_times) > max_time: + max_time = np.max(all_val_times) # If the input dictionary is empty, then create an empty plot with reasonable # x range From ab9de4c8d6193bd9643e8642320ef39f51049aee Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Fri, 22 Mar 2024 22:56:30 -0400 Subject: [PATCH 02/28] Monitor working. Couple plots need tweaking --- .../common_monitors/edb_telemetry_monitor.py | 106 +++++++++++++++--- 1 file changed, 88 insertions(+), 18 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index 114b15fe4..edc576428 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -517,6 +517,20 @@ def add_new_block_db_entry(self, mnem, query_time): medians = ensure_list(mnem.median) maxs = ensure_list(mnem.max) mins = ensure_list(mnem.min) + + # Make sure the max and min values are floats rather than ints + if len(maxs) > 0: + if (isinstance(maxs[0], int) | isinstance(maxs[0], np.integer)): + maxs = [float(v) for v in maxs] + else: + print('len of maxs is zero! 
{mnem.mnemonic_identifier}, {maxs}, {mins}, {medians}') + if len(mins) > 0: + if (isinstance(mins[0], int) | isinstance(mins[0], np.integer)): + mins = [float(v) for v in mins] + if len(medians) > 0: + if (isinstance(medians[0], int) | isinstance(medians[0], np.integer)): + medians = [float(v) for v in medians] + db_entry = {'mnemonic': mnem.mnemonic_identifier, 'latest_query': query_time, 'times': times, @@ -705,7 +719,7 @@ def execute(self, mnem_to_query=None, plot_start=None, plot_end=None): # as defined by the json files. This is the default operation. # Loop over instruments - for instrument_name in ['miri']: # JWST_INSTRUMENT_NAMES: ########UNCOMMETN BEFORE MERGING###### + for instrument_name in JWST_INSTRUMENT_NAMES: monitor_dir = os.path.dirname(os.path.abspath(__file__)) # File of mnemonics to monitor @@ -1558,7 +1572,7 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): # At the top level, we loop over the different types of telemetry. These types # largely control if/how the data will be averaged. - for telemetry_kind in ['every_change']: # mnemonic_dict: ##############UNCOMMETN BEFORE MERGING + for telemetry_kind in mnemonic_dict: # ['every_change']'] telem_type = telemetry_kind logging.info(f'Working on telemetry_type: {telem_type}') @@ -1584,6 +1598,16 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): # Work on one mnemonic at a time for mnemonic in mnemonic_dict[telemetry_kind]: logging.info(f'Working on {mnemonic["name"]}') + + # It seems that some mnemonics that were previously in the EDB are no longer + # present. Check for the existence of the mnemonic before proceeding. If the + # mnemonic is not present in the EDB, make a note in the log and move on to + # the next one. + present_in_edb = ed.get_mnemonic_info(mnemonic["name"]) + if not present_in_edb: + logging.info(f'WARNING: {mnemonic["name"]} is not present in the EDB. Skipping') + continue # Move on to the next mnemonic + create_new_history_entry = True # Only two types of plots are currently supported. 
Plotting the data in the EdbMnemonic @@ -1891,7 +1915,8 @@ def add_every_change_history(dict1, dict2): """ - + print('dict1 keys: ', dict1.keys()) + print('dict2 keys: ', dict2.keys()) @@ -1902,19 +1927,28 @@ def add_every_change_history(dict1, dict2): all_values = [] all_medians = [] all_devs = [] + + + print(key) + print(type(value)) # tuple + print(type(value[0])) # list (of lists) + print(value[0]) #- list of lists + print('') + for v0, v2 in zip(value[0], value[2]): + print(type(v0), v0) + print(type(v2), v2) + print('') + print('') + #stop + #print(type(dict2[key][0]), dict2[key][0]) + + + + if key in dict2: - if key == 'F1000W': - print(type(value)) # tuple - print(type(value[0])) # list (of lists) - print(value[0]) #- list of lists - print('') - for v0, v2 in zip(value[0], value[2]): - print(type(v0), v0) - print(type(v2), v2) - print('') - print('') - print(type(dict2[key][0]), dict2[key][0]) + + #if key == 'OPAQUE': #print(dict1[key]) #- tuple(array of times, array of data, list of medians, list of stdevs) @@ -1985,9 +2019,19 @@ def add_every_change_history(dict1, dict2): updated_value = (all_dates, all_values, all_medians, all_devs) combined[key] = updated_value else: + if key == 'OPAQUE': + print(key) + print(value[0]) + print(value[1]) + print(value[2]) + print(value[3]) + stop + print(key) + print(value) + #stop combined[key] = value - if key == 'F1000W': + if key == 'OPAQUE': print('before dict2 only keys:') for e in combined[key][0]: print(e) @@ -1997,7 +2041,7 @@ def add_every_change_history(dict1, dict2): print(e) print('') print('') - + #stop @@ -2006,7 +2050,17 @@ def add_every_change_history(dict1, dict2): # Add entries for keys that are in dict2 but not dict1 for key, value in dict2.items(): if key not in dict1: - combined[key] = value + #combined[key] = value + dates =[] + vals = [] + meds = [] + devs = [] + dates.append(list(value[0])) + vals.append(list(value[1])) + meds.append(list(value[2])) + devs.append(list(value[3])) + combined[key] = (dates, vals, meds, devs) + logging.info(f'dict2 only add_every_change_history: key: {key}, len data: {len(value[0])}, median: {dict2[key][2]}, dev: {dict2[key][3]}') @@ -2278,6 +2332,8 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True if len(value) > 0: val_times, val_data, normval, stdevval = value + print(key) + print(value) print('in plotting code') print('val_times is:') @@ -2293,7 +2349,21 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True # normval (expected value) all_val_data = [] all_val_times = [] + + print('val_data:') + print(val_data) + print('') + for time_ele, data_ele, norm_ele in zip(val_times, val_data, normval): + + + print(data_ele) + print('') + print(time_ele) + print('\n\n') + + + if type(data_ele[0]) not in [np.str_, str]: @@ -2302,7 +2372,7 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True data_ele_arr = np.array(data_ele) / norm_ele[0] #data_ele /= norm_ele[0] - all_val_data.extend(list(data_ele)) + all_val_data.extend(list(data_ele_arr)) all_val_times.extend(time_ele) logging.info(f'key: {key}, len_data: {len(data_ele)}, firstentry: {data_ele[0]}, stats: {norm_ele}') From 7451774fc540d75aa8fd40cd78955a39a9410c25 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Mon, 25 Mar 2024 13:55:08 -0400 Subject: [PATCH 03/28] Move tabs to left side of page, since nav arrows are not in bokeh 3 --- .../common_monitors/edb_telemetry_monitor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index edc576428..a000f2277 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -1863,7 +1863,7 @@ def tabbed_figure(self, ncols=2): panel_list.append(TabPanel(child=grid, title=key)) # Assign the panels to Tabs - tabbed = Tabs(tabs=panel_list) + tabbed = Tabs(tabs=panel_list, tabs_location='left') # Save the tabbed plot to a json file item_text = json.dumps(json_item(tabbed, "tabbed_edb_plot")) From 5fc4b54930567fc1b7a7a0840266dc7f497f0e2e Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 3 Apr 2024 21:02:17 -0400 Subject: [PATCH 04/28] Some clean up --- .../common_monitors/edb_telemetry_monitor.py | 86 ++++++------------- 1 file changed, 24 insertions(+), 62 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index a000f2277..021d9b316 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -374,6 +374,7 @@ from requests.exceptions import HTTPError import urllib +from astropy.modeling import models from astropy.stats import sigma_clipped_stats from astropy.table import Table from astropy.time import Time, TimeDelta @@ -385,12 +386,6 @@ from bokeh.plotting import figure, output_file, save, show from bokeh.palettes import Turbo256 from jwql.database import database_interface -from jwql.database.database_interface import NIRCamEDBDailyStats, NIRCamEDBBlockStats, \ - NIRCamEDBTimeIntervalStats, NIRCamEDBEveryChangeStats, NIRISSEDBDailyStats, NIRISSEDBBlockStats, \ - NIRISSEDBTimeIntervalStats, NIRISSEDBEveryChangeStats, MIRIEDBDailyStats, MIRIEDBBlockStats, \ - MIRIEDBTimeIntervalStats, MIRIEDBEveryChangeStats, FGSEDBDailyStats, FGSEDBBlockStats, \ - FGSEDBTimeIntervalStats, FGSEDBEveryChangeStats, NIRSpecEDBDailyStats, NIRSpecEDBBlockStats, \ - NIRSpecEDBTimeIntervalStats, NIRSpecEDBEveryChangeStats, session, engine from jwql.edb import engineering_database as ed from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import condition from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import utils @@ -398,9 +393,20 @@ from jwql.utils import monitor_utils from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.constants import EDB_DEFAULT_PLOT_RANGE, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, MIRI_POS_RATIO_VALUES +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.permissions import set_permissions from jwql.utils.utils import ensure_dir_exists, get_config +if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + # Need to set up django apps before we can access the models + import django # noqa: E402 (module level import not at top of file) + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + django.setup() + + # Import * is okay here because this module specifically only contains database models + # for this monitor + from jwql.website.apps.jwql.monitor_models.edb import * # noqa: E402 (module level import not at top of file) + ALLOWED_COMBINATION_TYPES = ['all+daily_means', 'all+block_means', 'all+every_change', 'all+time_interval'] @@ -539,10 +545,10 @@ def add_new_block_db_entry(self, mnem, query_time): 'median': 
medians, 'max': maxs, 'min': mins, - 'entry_date': datetime.datetime.now() + 'entry_date': datetime.datetime.now(datetime.timezone.utc) } - with engine.begin() as connection: - connection.execute(self.history_table.__table__.insert(), db_entry) + entry = self.history_table(**db_entry) + entry.save() def add_new_every_change_db_entry(self, mnem, mnem_dict, dependency_name, query_time): """Add new entries to the database table for "every change" @@ -580,11 +586,10 @@ def add_new_every_change_db_entry(self, mnem, mnem_dict, dependency_name, query_ 'median': medians[0], 'stdev': stdevs[0], 'latest_query': query_time, - 'entry_date': datetime.datetime.now() + 'entry_date': datetime.datetime.now(datetime.timezone.utc) } - with engine.begin() as connection: - connection.execute( - self.history_table.__table__.insert(), db_entry) + entry = self.history_table(**db_entry) + entry.save() def calc_timed_stats(self, mnem_data, bintime, sigma=3): """Not currently used. @@ -1070,10 +1075,10 @@ def get_history(self, mnemonic, start_date, end_date, info={}, meta={}): hist : jwql.edb.engineering_database.EdbMnemonic Retrieved data """ - data = session.query(self.history_table) \ - .filter(self.history_table.mnemonic == mnemonic, - self.history_table.latest_query > start_date, - self.history_table.latest_query < end_date) + filters = {"mnemonic__iexact": mnemonic, + "latest_query__range": (start_date, end_date) + } + data = self.history_table.objects.filter(**filters).order_by("latest_query") all_dates = [] all_values = [] @@ -1312,6 +1317,7 @@ def identify_tables(self, inst, tel_type): tel_type = tel_type.title().replace('_', '') self.history_table_name = f'{mixed_case_name}EDB{tel_type}Stats' self.history_table = getattr(database_interface, f'{mixed_case_name}EDB{tel_type}Stats') + self.history_table = def most_recent_search(self, telem_name): """Query the database and return the information @@ -1545,7 +1551,7 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): # Container to hold and organize all plots self.figures = {} self.instrument = instrument - self._today = datetime.datetime.now() + self._today = datetime.datetime.now(datetime.timezone.utc) # Set the limits for the telemetry plots if necessary if plot_start is None: @@ -2043,14 +2049,10 @@ def add_every_change_history(dict1, dict2): print('') #stop - - - logging.info(f'In add_every_change_history: key: {key}, len data: {len(all_dates)}, median: {all_medians}, dev: {all_devs}') # Add entries for keys that are in dict2 but not dict1 for key, value in dict2.items(): if key not in dict1: - #combined[key] = value dates =[] vals = [] meds = [] @@ -2061,16 +2063,7 @@ def add_every_change_history(dict1, dict2): devs.append(list(value[3])) combined[key] = (dates, vals, meds, devs) - logging.info(f'dict2 only add_every_change_history: key: {key}, len data: {len(value[0])}, median: {dict2[key][2]}, dev: {dict2[key][3]}') - - - #print('after dict2 only keys:') - #for e in combined['F1000W'][0]: - # print(e) - #print('') - #for e in combined['F1000W'][2]: - # print(e) return combined @@ -2332,46 +2325,15 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True if len(value) > 0: val_times, val_data, normval, stdevval = value - print(key) - print(value) - - print('in plotting code') - print('val_times is:') - print(val_times) - print('\n') - print('normval is:') - print(normval) - print('') - # At this point, val_times and val_data will be a list of numpy arrays # normval and stdevval will be lists. 
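# NOTE (illustrative, not from the patch): the loop below flattens the
# per-entry sub-lists into single arrays and normalizes by the expected
# median, e.g.
#     val_times = [[t0, t1], [t2]], val_data = [[1.01, 0.99], [1.02]],
#     normval = [[1.0], [1.0]]
#       -> all_val_times = [t0, t1, t2], all_val_data = [1.01, 0.99, 1.02]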
First, iterate through the lists # and normalize the data values in each element by the corresponding # normval (expected value) all_val_data = [] all_val_times = [] - - print('val_data:') - print(val_data) - print('') - for time_ele, data_ele, norm_ele in zip(val_times, val_data, normval): - - - print(data_ele) - print('') - print(time_ele) - print('\n\n') - - - if type(data_ele[0]) not in [np.str_, str]: - - - print(type(data_ele), data_ele) - print(type(norm_ele), norm_ele) - data_ele_arr = np.array(data_ele) / norm_ele[0] - #data_ele /= norm_ele[0] all_val_data.extend(list(data_ele_arr)) all_val_times.extend(time_ele) logging.info(f'key: {key}, len_data: {len(data_ele)}, firstentry: {data_ele[0]}, stats: {norm_ele}') From 2a1c3890c90f7b2fada005c54bac14e2be7a8f29 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 3 Apr 2024 21:27:44 -0400 Subject: [PATCH 05/28] Switch database queries to use django models --- .../common_monitors/edb_telemetry_monitor.py | 58 ++++--------------- 1 file changed, 10 insertions(+), 48 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index 021d9b316..c28b6eba2 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -1137,10 +1137,10 @@ def get_history_every_change(self, mnemonic, start_date, end_date): and mean value of the primary mnemonic corresponding to the times that they dependency mnemonic has the value of the key. """ - data = session.query(self.history_table) \ - .filter(self.history_table.mnemonic == mnemonic, - self.history_table.latest_query > start_date, - self.history_table.latest_query < end_date) + filters = {"mnemonic__iexact": mnemonic, + "latest_query__range": (start_date, end_date) + } + data = self.history_table.objects.filter(**filters).order_by("latest_query") # Set up the dictionary to contain the data hist = {} @@ -1150,21 +1150,6 @@ def get_history_every_change(self, mnemonic, start_date, end_date): if row.dependency_value in hist: if len(hist[row.dependency_value]) > 0: times, values, medians, devs = hist[row.dependency_value] - - """ - if row.dependency_value == 'F1000W': - print('BEFORE NEXT ENTRY, RETRIEVED DATA:') - for e in times: - print(e) - print('') - for e in medians: - print(e) - print('') - """ - - - - else: times = [] values = [] @@ -1179,30 +1164,6 @@ def get_history_every_change(self, mnemonic, start_date, end_date): medians.append([row.median]) devs.append([row.stdev]) hist[row.dependency_value] = (times, values, medians, devs) - - """ - if row.dependency_value == 'F1000W': - print('AFTER NEXT ENTRY:') - for e in times: - print(e) - print('') - for e in medians: - print(e) - print('') - for e in hist[row.dependency_value][0]: - print(e) - print('') - for e in hist[row.dependency_value][2]: - print(e) - print('') - """ - - - - - - - else: if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): @@ -1315,9 +1276,8 @@ def identify_tables(self, inst, tel_type): if '_means' in tel_type: tel_type = tel_type.strip('_means') tel_type = tel_type.title().replace('_', '') - self.history_table_name = f'{mixed_case_name}EDB{tel_type}Stats' - self.history_table = getattr(database_interface, f'{mixed_case_name}EDB{tel_type}Stats') - self.history_table = + self.history_table_name = 
f'{mixed_case_name}Edb{tel_type}Stats' + self.history_table = eval(self.history_table_name) def most_recent_search(self, telem_name): """Query the database and return the information @@ -1334,14 +1294,16 @@ def most_recent_search(self, telem_name): query_result : datetime.datetime Date of the ending range of the previous query """ - query = session.query(self.history_table).filter(self.history_table.mnemonic == telem_name).order_by(self.history_table.latest_query).all() + filters = {"mnemonic__iexact": telem_name} + query = self.history_table.objects.filter(**filters).order_by("latest_query") if len(query) == 0: base_time = '2022-11-15 00:00:0.0' query_result = datetime.datetime.strptime(base_time, '%Y-%m-%d %H:%M:%S.%f') logging.info(f'\tNo query history for {telem_name}. Returning default "previous query" date of {base_time}.') else: - query_result = query[-1].latest_query + # Negative indexing not allowed in QuerySet + query_result = query[len(query) - 1].latest_query logging.info(f'For {telem_name}, the previous query time is {query_result}') return query_result From 3900a1a96ec89bc51b136b726ff8d6a227ce57be Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Thu, 4 Apr 2024 16:52:41 -0400 Subject: [PATCH 06/28] Datetime timezones, and modify db model name for blockmeans --- jwql/edb/engineering_database.py | 10 ++++- .../common_monitors/edb_telemetry_monitor.py | 45 +++++++++++++------ jwql/utils/utils.py | 20 +++++++++ jwql/website/apps/jwql/bokeh_containers.py | 2 +- jwql/website/apps/jwql/monitor_models/edb.py | 20 ++++----- 5 files changed, 71 insertions(+), 26 deletions(-) diff --git a/jwql/edb/engineering_database.py b/jwql/edb/engineering_database.py index 4f2c2d61c..d58b32123 100644 --- a/jwql/edb/engineering_database.py +++ b/jwql/edb/engineering_database.py @@ -45,7 +45,7 @@ """ import calendar from collections import OrderedDict -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from numbers import Number import os import warnings @@ -1417,9 +1417,15 @@ def change_only_bounding_points(date_list, value_list, starttime, endtime): if isinstance(starttime, Time): starttime = starttime.datetime + if starttime.tzinfo == None or starttime.tzinfo.utcoffset(starttime) == None: + starttime = starttime.replace(tzinfo=timezone.utc) + if isinstance(endtime, Time): endtime = endtime.datetime + if endtime.tzinfo == None or endtime.tzinfo.utcoffset(endtime) == None: + endtime = endtime.replace(tzinfo=timezone.utc) + valid_idx = np.where((date_list_arr <= endtime) & (date_list_arr >= starttime))[0] before_startime = np.where(date_list_arr < starttime)[0] before_endtime = np.where(date_list_arr < endtime)[0] @@ -1606,7 +1612,7 @@ def get_mnemonic(mnemonic_identifier, start_time, end_time): data = service.get_values(mnemonic_identifier, start_time, end_time, include_obstime=True, include_bracket_values=bracket) - dates = [datetime.strptime(row.obstime.iso, "%Y-%m-%d %H:%M:%S.%f") for row in data] + dates = [datetime.strptime(row.obstime.iso, "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=timezone.utc) for row in data] values = [row.value for row in data] if bracket: diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index c28b6eba2..2557d1935 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -385,7 +385,6 @@ from bokeh.models.layouts import TabPanel, Tabs from 
bokeh.plotting import figure, output_file, save, show from bokeh.palettes import Turbo256 -from jwql.database import database_interface from jwql.edb import engineering_database as ed from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import condition from jwql.instrument_monitors.common_monitors.edb_telemetry_monitor_utils import utils @@ -395,7 +394,7 @@ from jwql.utils.constants import EDB_DEFAULT_PLOT_RANGE, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, MIRI_POS_RATIO_VALUES from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.permissions import set_permissions -from jwql.utils.utils import ensure_dir_exists, get_config +from jwql.utils.utils import add_timezone_to_datetime, ensure_dir_exists, get_config if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: # Need to set up django apps before we can access the models @@ -516,10 +515,9 @@ def add_new_block_db_entry(self, mnem, query_time): logging.info(f"Adding new entry for {mnem.mnemonic_identifier} to history table.") times = mnem.data["dates"].data data = mnem.data["euvalues"].data - stdevs = mnem.stdev times = ensure_list(times) data = ensure_list(data) - stdevs = ensure_list(stdevs) + stdevs = ensure_list(mnem.stdev) medians = ensure_list(mnem.median) maxs = ensure_list(mnem.max) mins = ensure_list(mnem.min) @@ -537,6 +535,15 @@ def add_new_block_db_entry(self, mnem, query_time): if (isinstance(medians[0], int) | isinstance(medians[0], np.integer)): medians = [float(v) for v in medians] + + print('In add_new_block_db_entry:') + for ll, name in zip([times, data, stdevs, medians, maxs, mins],['times', 'data', 'stdevs', 'medians', 'maxs', 'mins']): + if isinstance(ll, np.ndarray): + print(f'{name} is a numpy array.') + print(ll) + + + db_entry = {'mnemonic': mnem.mnemonic_identifier, 'latest_query': query_time, 'times': times, @@ -1090,8 +1097,13 @@ def get_history(self, mnemonic, start_date, end_date, info={}, meta={}): # outside of the plot range. 
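# NOTE: comparing an offset-naive datetime to an offset-aware one raises
# TypeError in Python, which is why values read back from the database are
# coerced to UTC before the plot-range comparison below.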
Return only the points inside the desired # plot range for row in data: - good = np.where((np.array(row.times) > self._plot_start) & (np.array(row.times) < self._plot_end))[0] - times = list(np.array(row.times)[good]) + # Make sure the data from the database has timezone info + time_vals = row.times + if time_vals[0].tzinfo == None or tie_vals[0].tzinfo.utcoffset(time_vals[0]) == None: + time_vals = [val.replace(tzinfo=datetime.timezone.utc) for val in time_vals] + + good = np.where((np.array(time_vals) > self._plot_start) & (np.array(time_vals) < self._plot_end))[0] + times = list(np.array(time_vals)[good]) data = list(np.array(row.data)[good]) medians = list(np.array(row.median)[good]) maxs = list(np.array(row.max)[good]) @@ -1273,8 +1285,8 @@ def identify_tables(self, inst, tel_type): Examples include "every_change", "daily", "all", etc """ mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst] - if '_means' in tel_type: - tel_type = tel_type.strip('_means') + #if '_means' in tel_type: + # tel_type = tel_type.strip('_means') tel_type = tel_type.title().replace('_', '') self.history_table_name = f'{mixed_case_name}Edb{tel_type}Stats' self.history_table = eval(self.history_table_name) @@ -1299,11 +1311,13 @@ def most_recent_search(self, telem_name): if len(query) == 0: base_time = '2022-11-15 00:00:0.0' - query_result = datetime.datetime.strptime(base_time, '%Y-%m-%d %H:%M:%S.%f') + query_result = datetime.datetime.strptime(base_time, '%Y-%m-%d %H:%M:%S.%f').replace(tzinfo=datetime.timezone.utc) logging.info(f'\tNo query history for {telem_name}. Returning default "previous query" date of {base_time}.') else: # Negative indexing not allowed in QuerySet query_result = query[len(query) - 1].latest_query + if query_result.tzinfo == None or query_result.tzinfo.utcoffset(query_result) == None: + query_result = query_result.replace(tzinfo=datetime.timezone.utc) logging.info(f'For {telem_name}, the previous query time is {query_result}') return query_result @@ -1527,7 +1541,7 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): # SPEED UP TESTING. REMOVE BEFORE MERGING plot_start = self._today - datetime.timedelta(days=3.) - plot_end = self._today + plot_end = self._today #- datetime.timedelta(days=56.) @@ -1609,7 +1623,7 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): # For daily_means mnemonics, we force the search to always start at noon, and # have a 1 day cadence if telem_type == 'daily_means': - most_recent_search = datetime.datetime.combine(most_recent_search.date(), datetime.time(hour=12)) + most_recent_search = datetime.datetime.combine(most_recent_search.date(), datetime.time(hour=12)).replace(tzinfo=datetime.timezone.utc) logging.info(f'Most recent search is {most_recent_search}.') logging.info(f'Query cadence is {self.query_cadence}') @@ -2116,8 +2130,13 @@ def ensure_list(var): var : list var, translated into a list if necessary """ - if not isinstance(var, list) and not isinstance(var, np.ndarray): - return [var] + if not isinstance(var, list): + if not isinstance(var, np.ndarray): + # Here we assume var is a single float, int, str, etc. 
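# e.g. (illustration) ensure_list(3.5) -> [3.5], while the ndarray branch
# below uses .tolist(), which also converts numpy scalars (np.int64,
# np.float64) to native Python types that serialize cleanly to the database.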
+ return [var] + else: + # Here we convert a numpy array to a list + return var.tolist() else: return var diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index e0794cc1b..120a34e11 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -27,6 +27,7 @@ - JWST TR JWST-STScI-004800, SM-12 """ +import datetime import getpass import glob import itertools @@ -153,6 +154,25 @@ def _validate_config(config_file_dict): ) +def add_timezone_to_datetime(dt): + """Check to see if timezone information is present in the given + datetime.datetime object. If not, set it to UTC. + + Parameters + ---------- + dt : datetime.datetime + Datetime object + + Returns + ------- + dt : datetime.datetime + Datetime object with UTC timezone info added + """ + if dt.tzinfo == None or dt.tzinfo.utcoffset(dt) == None: + dt = dt.replace(tzinfo=datetime.timezone.utc) + return dt + + def create_png_from_fits(filename, outdir): """Create and save a png file of the provided file. The file will be saved with the same filename as the input file, but diff --git a/jwql/website/apps/jwql/bokeh_containers.py b/jwql/website/apps/jwql/bokeh_containers.py index f4569747a..8210fed3f 100644 --- a/jwql/website/apps/jwql/bokeh_containers.py +++ b/jwql/website/apps/jwql/bokeh_containers.py @@ -230,7 +230,7 @@ def edb_monitor_tabs(instrument): """ html_file_list = file_list[instrument] print('read in html files') - + #CLEARLY THIS IS NOT BEING USED def generic_telemetry_plot(times, values, name, nominal_value=None, yellow_limits=None, red_limits=None, save=True): diff --git a/jwql/website/apps/jwql/monitor_models/edb.py b/jwql/website/apps/jwql/monitor_models/edb.py index 01a617af6..26a8220ce 100644 --- a/jwql/website/apps/jwql/monitor_models/edb.py +++ b/jwql/website/apps/jwql/monitor_models/edb.py @@ -32,7 +32,7 @@ from jwql.utils.constants import MAX_LEN_DEPENDENCY_VALUE, MAX_LEN_MNEMONIC -class FGSEdbBlocksStats(models.Model): +class FGSEdbBlockMeansStats(models.Model): mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True) latest_query = models.DateTimeField(blank=True, null=True) times = ArrayField(models.DateTimeField()) @@ -45,7 +45,7 @@ class FGSEdbBlocksStats(models.Model): class Meta: managed = True - db_table = 'fgs_edb_blocks_stats' + db_table = 'fgs_edb_block_means_stats' unique_together = (('id', 'entry_date'),) @@ -114,7 +114,7 @@ class Meta: unique_together = (('id', 'entry_date'),) -class MIRIEdbBlocksStats(models.Model): +class MIRIEdbBlockMeansStats(models.Model): mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True) latest_query = models.DateTimeField(blank=True, null=True) times = ArrayField(models.DateTimeField()) @@ -127,7 +127,7 @@ class MIRIEdbBlocksStats(models.Model): class Meta: managed = True - db_table = 'miri_edb_blocks_stats' + db_table = 'miri_edb_block_means_stats' unique_together = (('id', 'entry_date'),) @@ -196,7 +196,7 @@ class Meta: unique_together = (('id', 'entry_date'),) -class NIRCamEdbBlocksStats(models.Model): +class NIRCamEdbBlockMeansStats(models.Model): mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True) latest_query = models.DateTimeField(blank=True, null=True) times = ArrayField(models.DateTimeField()) @@ -209,7 +209,7 @@ class NIRCamEdbBlocksStats(models.Model): class Meta: managed = True - db_table = 'nircam_edb_blocks_stats' + db_table = 'nircam_edb_block_means_stats' unique_together = (('id', 'entry_date'),) @@ -278,7 +278,7 @@ class Meta: unique_together = (('id', 'entry_date'),) -class 
NIRISSEdbBlocksStats(models.Model): +class NIRISSEdbBlockMeansStats(models.Model): mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True) latest_query = models.DateTimeField(blank=True, null=True) times = ArrayField(models.DateTimeField()) @@ -291,7 +291,7 @@ class NIRISSEdbBlocksStats(models.Model): class Meta: managed = True - db_table = 'niriss_edb_blocks_stats' + db_table = 'niriss_edb_block_means_stats' unique_together = (('id', 'entry_date'),) @@ -360,7 +360,7 @@ class Meta: unique_together = (('id', 'entry_date'),) -class NIRSpecEdbBlocksStats(models.Model): +class NIRSpecEdbBlockMeansStats(models.Model): mnemonic = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True) latest_query = models.DateTimeField(blank=True, null=True) times = ArrayField(models.DateTimeField()) @@ -373,7 +373,7 @@ class NIRSpecEdbBlocksStats(models.Model): class Meta: managed = True - db_table = 'nirspec_edb_blocks_stats' + db_table = 'nirspec_edb_block_means_stats' unique_together = (('id', 'entry_date'),) From 8dc469c8cbbfe9da48bbafdbe64f3d8591ccd91e Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Mon, 18 Mar 2024 16:43:14 -0400 Subject: [PATCH 07/28] fix New Group button --- jwql/website/apps/jwql/templates/view_exposure.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jwql/website/apps/jwql/templates/view_exposure.html b/jwql/website/apps/jwql/templates/view_exposure.html index 7b9d71856..2a9e5cf2f 100644 --- a/jwql/website/apps/jwql/templates/view_exposure.html +++ b/jwql/website/apps/jwql/templates/view_exposure.html @@ -18,7 +18,7 @@

Exposure {{ group_root }}

Observation:
Visit:
- +
Visit Status: {{ basic_info.visit_status }}
@@ -264,7 +264,7 @@
Submit Anomaly for Group
{% elif 'uncal' in suffixes %} {% elif suffixes|length == 1 %} - + {% else %} Unable to show image for: {{suffixes}} {% endif %} From 783af45f56ac11d9d81e8830919f68aeb3456009 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Fri, 15 Mar 2024 15:47:09 -0400 Subject: [PATCH 08/28] interactive preview fix --- jwql/utils/interactive_preview_image.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/jwql/utils/interactive_preview_image.py b/jwql/utils/interactive_preview_image.py index 1ffd2ab7b..c8a090a28 100644 --- a/jwql/utils/interactive_preview_image.py +++ b/jwql/utils/interactive_preview_image.py @@ -23,6 +23,7 @@ from astropy.io import fits from astropy.visualization import ZScaleInterval, MinMaxInterval, PercentileInterval from astropy.wcs import WCS +from bokeh import events from bokeh.embed import components from bokeh.layouts import gridplot, layout from bokeh.models import ( @@ -554,7 +555,7 @@ def add_interactive_controls(self, images, color_bars): # JS callbacks for client side controls # set alternate image visibility when scale selection changes - scale_group.js_on_click(CustomJS(args={'i1': images[0], 'c1': color_bars[0], + scale_group.js_on_change('labels', CustomJS(args={'i1': images[0], 'c1': color_bars[0], 'i2': images[1], 'c2': color_bars[1]}, code=""" if (i1.visible == true) { @@ -594,10 +595,10 @@ def add_interactive_controls(self, images, color_bars): limit_high.js_link('value', color_bars[i].color_mapper, 'high') # reset boxes to preset range on button click - reset.js_on_click(limit_reset) + reset.js_on_event(events.ButtonClick, limit_reset) # also reset when swapping limit style - scale_group.js_on_click(limit_reset) + scale_group.js_on_change('labels', limit_reset) # return widgets spacer = Spacer(height=20) From eb042bfd3a04011af108f5c8ee78858c1ee023af Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Sat, 16 Mar 2024 21:39:28 -0400 Subject: [PATCH 09/28] All buttons are now working --- jwql/utils/interactive_preview_image.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/jwql/utils/interactive_preview_image.py b/jwql/utils/interactive_preview_image.py index c8a090a28..1845e0d91 100644 --- a/jwql/utils/interactive_preview_image.py +++ b/jwql/utils/interactive_preview_image.py @@ -23,7 +23,6 @@ from astropy.io import fits from astropy.visualization import ZScaleInterval, MinMaxInterval, PercentileInterval from astropy.wcs import WCS -from bokeh import events from bokeh.embed import components from bokeh.layouts import gridplot, layout from bokeh.models import ( @@ -555,7 +554,7 @@ def add_interactive_controls(self, images, color_bars): # JS callbacks for client side controls # set alternate image visibility when scale selection changes - scale_group.js_on_change('labels', CustomJS(args={'i1': images[0], 'c1': color_bars[0], + scale_group.js_on_change('active', CustomJS(args={'i1': images[0], 'c1': color_bars[0], 'i2': images[1], 'c2': color_bars[1]}, code=""" if (i1.visible == true) { @@ -595,10 +594,10 @@ def add_interactive_controls(self, images, color_bars): limit_high.js_link('value', color_bars[i].color_mapper, 'high') # reset boxes to preset range on button click - reset.js_on_event(events.ButtonClick, limit_reset) + reset.js_on_event('button_click', limit_reset) # also reset when swapping limit style - scale_group.js_on_change('labels', limit_reset) + scale_group.js_on_change('active', limit_reset) # return widgets spacer = Spacer(height=20) From 7d2c6133c2cc27fd282ecc49c8d47f95b62757c5 Mon Sep 17 00:00:00 
2001 From: Mees Fix Date: Tue, 19 Mar 2024 10:54:35 -0400 Subject: [PATCH 10/28] v1.2.5 changelog --- CHANGES.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 9d4cdc1a5..01be6e18f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,5 +1,21 @@ ## What's Changed +1.2.5 (2024-03-19) +================== + +Web Application +~~~~~~~~~~~~~~~ +- Fix Bokeh `file_html` Call by @mfixstsci +- Update Bad Pix Exclude Line by @mfixstsci +- Interactive preview image - updates for Bokeh 3 by @bhilbert4 + +Project & API Documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- Allow creation of pngs from 3D and 4D arrays by @bhilbert4 +- Add max length to charfield by @BradleySappington +- Header fix by @BradleySappington + + 1.2.4 (2024-03-11) ================== From 9ad0f9f24c14354d140c849475c61612de6010bd Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Wed, 20 Mar 2024 09:02:53 -0400 Subject: [PATCH 11/28] remove test code --- jwql/website/apps/jwql/data_containers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py index 5c162d4bf..1300a6b6a 100644 --- a/jwql/website/apps/jwql/data_containers.py +++ b/jwql/website/apps/jwql/data_containers.py @@ -475,7 +475,6 @@ def get_additional_exposure_info(root_file_infos, image_info): additional_info['TITLE'] = header.get('TITLE', 'N/A') additional_info['PI_NAME'] = header.get('PI_NAME', 'N/A') additional_info['TARGNAME'] = header.get('TARGPROP', 'N/A') - additional_info['BRADTEST'] = header.get('BRADTEST', 'N/A') # For the exposure level (i.e. multiple files) present the target # RA and Dec. For the image level, give RA_REF, DEC_REF, since those From d0c097c6fcee816682e3aabc68c8729c6e96ef3a Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Wed, 20 Mar 2024 10:36:53 -0400 Subject: [PATCH 12/28] update defaults for empty charfields --- jwql/tests/test_data_containers.py | 52 +++++++-------- jwql/utils/constants.py | 3 + .../0020_alter_proposal_category_and_more.py | 63 +++++++++++++++++++ jwql/website/apps/jwql/models.py | 21 ++++--- 4 files changed, 103 insertions(+), 36 deletions(-) create mode 100644 jwql/website/apps/jwql/migrations/0020_alter_proposal_category_and_more.py diff --git a/jwql/tests/test_data_containers.py b/jwql/tests/test_data_containers.py index 7c4f68401..195aa4093 100644 --- a/jwql/tests/test_data_containers.py +++ b/jwql/tests/test_data_containers.py @@ -31,7 +31,7 @@ import pandas as pd import pytest -from jwql.utils.constants import ON_GITHUB_ACTIONS +from jwql.utils.constants import ON_GITHUB_ACTIONS, DEFAULT_MODEL_CHARFIELD os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") @@ -45,7 +45,7 @@ from jwql.utils.utils import get_config # noqa: E402 (module level import not at top of file) from jwql.website.apps.jwql.models import RootFileInfo - + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') def test_build_table(): tab = data_containers.build_table('filesystem_general') @@ -607,42 +607,42 @@ def test_mast_query_by_rootname(): instrument = 'NIRCam' rootname1 = 'jw02767002001_02103_00005_nrcb4' dict_stuff = data_containers.mast_query_by_rootname(instrument, rootname1) - defaults = dict(filter=dict_stuff.get('filter', ''), - detector=dict_stuff.get('detector', ''), - exp_type=dict_stuff.get('exp_type', ''), - read_pat=dict_stuff.get('readpatt', ''), - grating=dict_stuff.get('grating', ''), + defaults = dict(filter=dict_stuff.get('filter', 
DEFAULT_MODEL_CHARFIELD), + detector=dict_stuff.get('detector', DEFAULT_MODEL_CHARFIELD), + exp_type=dict_stuff.get('exp_type', DEFAULT_MODEL_CHARFIELD), + read_pat=dict_stuff.get('readpatt', DEFAULT_MODEL_CHARFIELD), + grating=dict_stuff.get('grating', DEFAULT_MODEL_CHARFIELD), patt_num=dict_stuff.get('patt_num', 0), - aperture=dict_stuff.get('apername', ''), - subarray=dict_stuff.get('subarray', ''), - pupil=dict_stuff.get('pupil', '')) + aperture=dict_stuff.get('apername', DEFAULT_MODEL_CHARFIELD), + subarray=dict_stuff.get('subarray', DEFAULT_MODEL_CHARFIELD), + pupil=dict_stuff.get('pupil', DEFAULT_MODEL_CHARFIELD)) assert isinstance(defaults, dict) rootname2 = 'jw02084001001_04103_00001-seg003_nrca3' dict_stuff = data_containers.mast_query_by_rootname(instrument, rootname2) - defaults = dict(filter=dict_stuff.get('filter', ''), - detector=dict_stuff.get('detector', ''), - exp_type=dict_stuff.get('exp_type', ''), - read_pat=dict_stuff.get('readpatt', ''), - grating=dict_stuff.get('grating', ''), + defaults = dict(filter=dict_stuff.get('filter', DEFAULT_MODEL_CHARFIELD), + detector=dict_stuff.get('detector', DEFAULT_MODEL_CHARFIELD), + exp_type=dict_stuff.get('exp_type', DEFAULT_MODEL_CHARFIELD), + read_pat=dict_stuff.get('readpatt', DEFAULT_MODEL_CHARFIELD), + grating=dict_stuff.get('grating', DEFAULT_MODEL_CHARFIELD), patt_num=dict_stuff.get('patt_num', 0), - aperture=dict_stuff.get('apername', ''), - subarray=dict_stuff.get('subarray', ''), - pupil=dict_stuff.get('pupil', '')) + aperture=dict_stuff.get('apername', DEFAULT_MODEL_CHARFIELD), + subarray=dict_stuff.get('subarray', DEFAULT_MODEL_CHARFIELD), + pupil=dict_stuff.get('pupil', DEFAULT_MODEL_CHARFIELD)) assert isinstance(defaults, dict) instrument2 = 'FGS' rootname3 = 'jw01029003001_06201_00001_guider2' dict_stuff = data_containers.mast_query_by_rootname(instrument2, rootname3) - defaults = dict(filter=dict_stuff.get('filter', ''), - detector=dict_stuff.get('detector', ''), - exp_type=dict_stuff.get('exp_type', ''), - read_pat=dict_stuff.get('readpatt', ''), - grating=dict_stuff.get('grating', ''), + defaults = dict(filter=dict_stuff.get('filter', DEFAULT_MODEL_CHARFIELD), + detector=dict_stuff.get('detector', DEFAULT_MODEL_CHARFIELD), + exp_type=dict_stuff.get('exp_type', DEFAULT_MODEL_CHARFIELD), + read_pat=dict_stuff.get('readpatt', DEFAULT_MODEL_CHARFIELD), + grating=dict_stuff.get('grating', DEFAULT_MODEL_CHARFIELD), patt_num=dict_stuff.get('patt_num', 0), - aperture=dict_stuff.get('apername', ''), - subarray=dict_stuff.get('subarray', ''), - pupil=dict_stuff.get('pupil', '')) + aperture=dict_stuff.get('apername', DEFAULT_MODEL_CHARFIELD), + subarray=dict_stuff.get('subarray', DEFAULT_MODEL_CHARFIELD), + pupil=dict_stuff.get('pupil', DEFAULT_MODEL_CHARFIELD)) assert isinstance(defaults, dict) diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index 92e2774d5..6ea90e172 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -364,6 +364,9 @@ "wfscmb", ] +#Default Model Values +DEFAULT_MODEL_CHARFIELD = "empty" + # Filename Component Lengths FILE_AC_CAR_ID_LEN = 4 FILE_AC_O_ID_LEN = 3 diff --git a/jwql/website/apps/jwql/migrations/0020_alter_proposal_category_and_more.py b/jwql/website/apps/jwql/migrations/0020_alter_proposal_category_and_more.py new file mode 100644 index 000000000..afdc7686c --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0020_alter_proposal_category_and_more.py @@ -0,0 +1,63 @@ +# Generated by Django 4.1.7 on 2024-03-20 14:34 + +from django.db import migrations, models + + 
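# NOTE (editorial, not generated by Django): the default='empty' values in the
# AlterField operations below mirror DEFAULT_MODEL_CHARFIELD = "empty" added to
# jwql/utils/constants.py in this patch; the model defaults and migration state
# must stay in sync, or a later makemigrations run will emit a fresh migration.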
+class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0019_alter_fgsreadnoisequeryhistory_aperture_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='proposal', + name='category', + field=models.CharField(default='empty', help_text='Category Type', max_length=10), + ), + migrations.AlterField( + model_name='proposal', + name='thumbnail_path', + field=models.CharField(default='empty', help_text='Path to the proposal thumbnail', max_length=1000), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='aperture', + field=models.CharField(blank=True, default='empty', help_text='Aperture', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='detector', + field=models.CharField(blank=True, default='empty', help_text='Detector', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='exp_type', + field=models.CharField(blank=True, default='empty', help_text='Exposure Type', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='filter', + field=models.CharField(blank=True, default='empty', help_text='Instrument name', max_length=7, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='grating', + field=models.CharField(blank=True, default='empty', help_text='Grating', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='pupil', + field=models.CharField(blank=True, default='empty', help_text='Pupil', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='read_patt', + field=models.CharField(blank=True, default='empty', help_text='Read Pattern', max_length=40, null=True), + ), + migrations.AlterField( + model_name='rootfileinfo', + name='subarray', + field=models.CharField(blank=True, default='empty', help_text='Subarray', max_length=40, null=True), + ), + ] diff --git a/jwql/website/apps/jwql/models.py b/jwql/website/apps/jwql/models.py index 5a51b7428..74eb008c6 100644 --- a/jwql/website/apps/jwql/models.py +++ b/jwql/website/apps/jwql/models.py @@ -31,6 +31,7 @@ from django.db import models from jwql.utils.constants import ( + DEFAULT_MODEL_CHARFIELD, MAX_LEN_APERTURE, MAX_LEN_DETECTOR, MAX_LEN_FILTER, @@ -74,9 +75,9 @@ class Proposal(models.Model): """A class defining the model used to hold information about a given proposal""" # Fields prop_id = models.CharField(max_length=5, help_text="5-digit proposal ID string") - thumbnail_path = models.CharField(max_length=MAX_LEN_PATH, help_text='Path to the proposal thumbnail', default='') + thumbnail_path = models.CharField(max_length=MAX_LEN_PATH, help_text='Path to the proposal thumbnail', default=DEFAULT_MODEL_CHARFIELD) archive = models.ForeignKey(Archive, blank=False, null=False, on_delete=models.CASCADE) - category = models.CharField(max_length=10, help_text="Category Type", default='') + category = models.CharField(max_length=10, help_text="Category Type", default=DEFAULT_MODEL_CHARFIELD) # Metadata class Meta: @@ -119,15 +120,15 @@ class RootFileInfo(models.Model): proposal = models.CharField(max_length=MAX_LEN_PROPOSAL, help_text="5-digit proposal ID string") root_name = models.TextField(primary_key=True, max_length=300) viewed = models.BooleanField(default=False) - filter = models.CharField(max_length=MAX_LEN_FILTER, help_text="Instrument name", default='', null=True, blank=True) - aperture = models.CharField(max_length=MAX_LEN_APERTURE, help_text="Aperture", default='', 
null=True, blank=True)
-    detector = models.CharField(max_length=MAX_LEN_DETECTOR, help_text="Detector", default='', null=True, blank=True)
+    filter = models.CharField(max_length=MAX_LEN_FILTER, help_text="Instrument name", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    aperture = models.CharField(max_length=MAX_LEN_APERTURE, help_text="Aperture", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    detector = models.CharField(max_length=MAX_LEN_DETECTOR, help_text="Detector", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
     read_patt_num = models.IntegerField(help_text='Read Pattern Number', default=0)
-    read_patt = models.CharField(max_length=MAX_LEN_READPATTERN, help_text="Read Pattern", default='', null=True, blank=True)
-    grating = models.CharField(max_length=MAX_LEN_GRATING, help_text="Grating", default='', null=True, blank=True)
-    subarray = models.CharField(max_length=MAX_LEN_SUBARRAY, help_text="Subarray", default='', null=True, blank=True)
-    pupil = models.CharField(max_length=MAX_LEN_PUPIL, help_text="Pupil", default='', null=True, blank=True)
-    exp_type = models.CharField(max_length=MAX_LEN_TYPE, help_text="Exposure Type", default='', null=True, blank=True)
+    read_patt = models.CharField(max_length=MAX_LEN_READPATTERN, help_text="Read Pattern", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    grating = models.CharField(max_length=MAX_LEN_GRATING, help_text="Grating", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    subarray = models.CharField(max_length=MAX_LEN_SUBARRAY, help_text="Subarray", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    pupil = models.CharField(max_length=MAX_LEN_PUPIL, help_text="Pupil", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
+    exp_type = models.CharField(max_length=MAX_LEN_TYPE, help_text="Exposure Type", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True)
     expstart = models.FloatField(help_text='Exposure Start Time', default=0.0)

     # Metadata

From 089d076ace9ef77c03a72703f28ba80e7f0f9328 Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Wed, 20 Mar 2024 10:37:19 -0400
Subject: [PATCH 13/28] check for empties on each run of archive_database_update

---
 .../apps/jwql/archive_database_update.py | 50 ++++++++++++-------
 1 file changed, 33 insertions(+), 17 deletions(-)

diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py
index 071df24a8..46dba0b39 100755
--- a/jwql/website/apps/jwql/archive_database_update.py
+++ b/jwql/website/apps/jwql/archive_database_update.py
@@ -26,7 +26,7 @@
     Use the '--fill_empty' argument to provide a model and field. Updates ALL fields for any model with empty/null/0 specified field
     $ python archive_database_update.py --fill_empty rootfileinfo expstart
     WARNING: Not all fields will be populated by all model objects. This will result in updates that may not be necessary.
-             While this will not disturb the data, it has the potential to increase run time.
+             While this will not disturb the data, it has the potential to increase run time.
              Select the field that is most pertinent to the models you need updated to minimize run time

     Use the 'update' argument to update every rootfileinfo data model with the most complete information from MAST
@@ -49,6 +49,7 @@

 from django.apps import apps
 from jwql.utils.protect_module import lock_module
+from jwql.utils.constants import DEFAULT_MODEL_CHARFIELD

 # These lines are needed in order to use the Django models in a standalone
 # script (as opposed to code run as a result of a webpage request). If these
@@ -159,6 +160,14 @@ def get_updates(update_database):
         create_archived_proposals_context(inst)


+@log_info
+@log_fail
+def cleanup_past_runs():
+    logging.debug("Starting cleanup_past_runs")
+    rootfileinfo_field_set = ["filter", "detector", "exp_type", "readpatt", "grating", "patt_num", "apername", "subarray", "pupil", "expstart"]
+    # Consume iterator created in map with list in order to make it run
+    list(map(lambda x: fill_empty_model("rootfileinfo", x), rootfileinfo_field_set))
+    logging.debug("Finished cleanup_past_runs")

 def get_all_possible_filenames_for_proposal(instrument, proposal_num):
     """Wrapper around a MAST query for filenames from a given instrument/proposal
@@ -332,15 +341,15 @@ def update_database_table(update, instrument, prop, obs, thumbnail, obsfiles, ty

         # Updating defaults only on update or creation to prevent call to mast_query_by_rootname on every file name.
         defaults_dict = mast_query_by_rootname(instrument, file)
-        defaults = dict(filter=defaults_dict.get('filter', ''),
-                        detector=defaults_dict.get('detector', ''),
-                        exp_type=defaults_dict.get('exp_type', ''),
-                        read_patt=defaults_dict.get('readpatt', ''),
-                        grating=defaults_dict.get('grating', ''),
+        defaults = dict(filter=defaults_dict.get('filter', DEFAULT_MODEL_CHARFIELD),
+                        detector=defaults_dict.get('detector', DEFAULT_MODEL_CHARFIELD),
+                        exp_type=defaults_dict.get('exp_type', DEFAULT_MODEL_CHARFIELD),
+                        read_patt=defaults_dict.get('readpatt', DEFAULT_MODEL_CHARFIELD),
+                        grating=defaults_dict.get('grating', DEFAULT_MODEL_CHARFIELD),
                         read_patt_num=defaults_dict.get('patt_num', 0),
-                        aperture=defaults_dict.get('apername', ''),
-                        subarray=defaults_dict.get('subarray', ''),
-                        pupil=defaults_dict.get('pupil', ''),
+                        aperture=defaults_dict.get('apername', DEFAULT_MODEL_CHARFIELD),
+                        subarray=defaults_dict.get('subarray', DEFAULT_MODEL_CHARFIELD),
+                        pupil=defaults_dict.get('pupil', DEFAULT_MODEL_CHARFIELD),
                         expstart=defaults_dict.get('expstart', 0.0))

         for key, value in defaults.items():
@@ -387,6 +396,12 @@ def fill_empty_model(model_name, model_field):
         except ValueError:
             pass

+    # filter(field__exact=DEFAULT_MODEL_CHARFIELD)
+    try:
+        empty_models = model.objects.filter(**{model_field_empty: DEFAULT_MODEL_CHARFIELD})
+    except ValueError:
+        pass
+
     # filter(field=0)
     try:
         zero_models = model.objects.filter(**{model_field: 0})
@@ -458,15 +473,15 @@ def fill_empty_rootfileinfo(rootfileinfo_set):

     for rootfileinfo_mod in rootfileinfo_set:
         defaults_dict = mast_query_by_rootname(rootfileinfo_mod.instrument, rootfileinfo_mod.root_name)
-        defaults = dict(filter=defaults_dict.get('filter', ''),
-                        detector=defaults_dict.get('detector', ''),
-                        exp_type=defaults_dict.get('exp_type', ''),
-                        read_patt=defaults_dict.get('readpatt', ''),
-                        grating=defaults_dict.get('grating', ''),
+        defaults = dict(filter=defaults_dict.get('filter', DEFAULT_MODEL_CHARFIELD),
+                        detector=defaults_dict.get('detector', DEFAULT_MODEL_CHARFIELD),
+                        exp_type=defaults_dict.get('exp_type', DEFAULT_MODEL_CHARFIELD),
+                        read_patt=defaults_dict.get('readpatt', DEFAULT_MODEL_CHARFIELD),
+                        grating=defaults_dict.get('grating', DEFAULT_MODEL_CHARFIELD),
                         read_patt_num=defaults_dict.get('patt_num', 0),
-                        aperture=defaults_dict.get('apername', ''),
-                        subarray=defaults_dict.get('subarray', ''),
-                        pupil=defaults_dict.get('pupil', ''),
+                        aperture=defaults_dict.get('apername', DEFAULT_MODEL_CHARFIELD),
+                        subarray=defaults_dict.get('subarray', DEFAULT_MODEL_CHARFIELD),
+                        pupil=defaults_dict.get('pupil', DEFAULT_MODEL_CHARFIELD),
                         expstart=defaults_dict.get('expstart', 0.0))

         for key, value in defaults.items():
@@ -496,6 +511,7 @@ def protected_code(update_database, fill_empty_list):
         fill_empty_model(fill_empty_list[0], fill_empty_list[1])
     else:
         get_updates(update_database)
+        cleanup_past_runs()


 if __name__ == '__main__':

From d5bad34c72a0ea5a8bd5c31a6bad7e131ca19ed2 Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Thu, 21 Mar 2024 08:29:02 -0400
Subject: [PATCH 14/28] update field spelling

---
 jwql/website/apps/jwql/archive_database_update.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py
index 46dba0b39..9f74a8012 100755
--- a/jwql/website/apps/jwql/archive_database_update.py
+++ b/jwql/website/apps/jwql/archive_database_update.py
@@ -163,11 +163,11 @@ def get_updates(update_database):
 @log_info
 @log_fail
 def cleanup_past_runs():
-    logging.debug("Starting cleanup_past_runs")
-    rootfileinfo_field_set = ["filter", "detector", "exp_type", "readpatt", "grating", "patt_num", "apername", "subarray", "pupil", "expstart"]
+    logging.info("Starting cleanup_past_runs")
+    rootfileinfo_field_set = ["filter", "detector", "exp_type", "read_patt", "grating", "patt_num", "aperture", "subarray", "pupil", "expstart"]
     # Consume iterator created in map with list in order to make it run
     list(map(lambda x: fill_empty_model("rootfileinfo", x), rootfileinfo_field_set))
-    logging.debug("Finished cleanup_past_runs")
+    logging.info("Finished cleanup_past_runs")

 def get_all_possible_filenames_for_proposal(instrument, proposal_num):
     """Wrapper around a MAST query for filenames from a given instrument/proposal

From 669720c665cbd57e7f908f1bcd5b0ab6757d1524 Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Fri, 22 Mar 2024 10:39:22 -0400
Subject: [PATCH 15/28] update patt_num to read_patt_num

---
 jwql/website/apps/jwql/archive_database_update.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py
index 9f74a8012..f0584ee03 100755
--- a/jwql/website/apps/jwql/archive_database_update.py
+++ b/jwql/website/apps/jwql/archive_database_update.py
@@ -164,7 +164,7 @@ def get_updates(update_database):
 @log_fail
 def cleanup_past_runs():
     logging.info("Starting cleanup_past_runs")
-    rootfileinfo_field_set = ["filter", "detector", "exp_type", "read_patt", "grating", "patt_num", "aperture", "subarray", "pupil", "expstart"]
+    rootfileinfo_field_set = ["filter", "detector", "exp_type", "read_patt", "grating", "read_patt_num", "aperture", "subarray", "pupil", "expstart"]
     # Consume iterator created in map with list in order to make it run
     list(map(lambda x: fill_empty_model("rootfileinfo", x), rootfileinfo_field_set))
     logging.info("Finished cleanup_past_runs")

From 84acddc4e1731306d1d751c1b66278ff3d07f4cc Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Fri, 22 Mar 2024 12:49:27 -0400
Subject: [PATCH 16/28] some fields are okay for default

---
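Taken together, patches 13-15 make archive_database_update re-check, on every run, each RootFileInfo field whose stored value is null, the 'empty' placeholder, or zero, and re-fetch those records from MAST. Stripped of the Django ORM, the dispatch reduces to the sketch below; refresh_field is a hypothetical stand-in for fill_empty_model("rootfileinfo", field), which in the real code unions the field__isnull / field__exact / field=0 querysets before re-querying MAST.

    # Final field list as of PATCH 15; refresh_field stubs out fill_empty_model
    # so the dispatch pattern can run standalone.
    rootfileinfo_field_set = ["filter", "detector", "exp_type", "read_patt",
                              "grating", "read_patt_num", "aperture",
                              "subarray", "pupil", "expstart"]

    def refresh_field(model_name, field):
        print(f"would refresh {model_name} rows with missing {field}")

    # list() consumes the lazy map() so the calls actually execute, exactly as
    # cleanup_past_runs() does.
    list(map(lambda f: refresh_field("rootfileinfo", f), rootfileinfo_field_set))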
.../apps/jwql/archive_database_update.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py index f0584ee03..5c8ef0997 100755 --- a/jwql/website/apps/jwql/archive_database_update.py +++ b/jwql/website/apps/jwql/archive_database_update.py @@ -346,7 +346,7 @@ def update_database_table(update, instrument, prop, obs, thumbnail, obsfiles, ty exp_type=defaults_dict.get('exp_type', DEFAULT_MODEL_CHARFIELD), read_patt=defaults_dict.get('readpatt', DEFAULT_MODEL_CHARFIELD), grating=defaults_dict.get('grating', DEFAULT_MODEL_CHARFIELD), - read_patt_num=defaults_dict.get('patt_num', 0), + read_patt_num=defaults_dict.get('patt_num', 1), aperture=defaults_dict.get('apername', DEFAULT_MODEL_CHARFIELD), subarray=defaults_dict.get('subarray', DEFAULT_MODEL_CHARFIELD), pupil=defaults_dict.get('pupil', DEFAULT_MODEL_CHARFIELD), @@ -378,10 +378,14 @@ def fill_empty_model(model_name, model_field): ''' + is_proposal = (model_name == "proposal") + is_rootfileinfo = (model_name == "rootfileinfo") + rootfile_info_fields_default_ok = ["filter", "grating", "pupil"] + model_field_null = model_field + "__isnull" model_field_empty = model_field + "__exact" - model = apps.get_model('jwql', model_name) + model = apps.get_model("jwql", model_name) null_models = empty_models = zero_models = model.objects.none() # filter(field__isnull=True) @@ -398,7 +402,8 @@ def fill_empty_model(model_name, model_field): # filter(field__exact=DEFAULT_MODEL_CHARFIELD) try: - empty_models = model.objects.filter(**{model_field_empty: DEFAULT_MODEL_CHARFIELD}) + if is_proposal or model_field not in rootfile_info_fields_default_ok: + empty_models = model.objects.filter(**{model_field_empty: DEFAULT_MODEL_CHARFIELD}) except ValueError: pass @@ -411,9 +416,9 @@ def fill_empty_model(model_name, model_field): model_set = null_models | empty_models | zero_models if model_set.exists(): logging.info(f'{model_set.count()} models to be updated') - if model_name == 'proposal': + if is_proposal: fill_empty_proposals(model_set) - elif model_name == 'rootfileinfo': + elif is_rootfileinfo: fill_empty_rootfileinfo(model_set) else: logging.warning(f'Filling {model_name} model is not currently implemented') @@ -478,7 +483,7 @@ def fill_empty_rootfileinfo(rootfileinfo_set): exp_type=defaults_dict.get('exp_type', DEFAULT_MODEL_CHARFIELD), read_patt=defaults_dict.get('readpatt', DEFAULT_MODEL_CHARFIELD), grating=defaults_dict.get('grating', DEFAULT_MODEL_CHARFIELD), - read_patt_num=defaults_dict.get('patt_num', 0), + read_patt_num=defaults_dict.get('patt_num', 1), aperture=defaults_dict.get('apername', DEFAULT_MODEL_CHARFIELD), subarray=defaults_dict.get('subarray', DEFAULT_MODEL_CHARFIELD), pupil=defaults_dict.get('pupil', DEFAULT_MODEL_CHARFIELD), From 98af5cf2f1c9136f4e4e47d06a8c5ec8db1506cc Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Fri, 22 Mar 2024 14:49:34 -0400 Subject: [PATCH 17/28] update default read_patt --- jwql/website/apps/jwql/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jwql/website/apps/jwql/models.py b/jwql/website/apps/jwql/models.py index 74eb008c6..cd770cb18 100644 --- a/jwql/website/apps/jwql/models.py +++ b/jwql/website/apps/jwql/models.py @@ -123,7 +123,7 @@ class RootFileInfo(models.Model): filter = models.CharField(max_length=MAX_LEN_FILTER, help_text="Instrument name", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True) aperture = 
models.CharField(max_length=MAX_LEN_APERTURE, help_text="Aperture", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True) detector = models.CharField(max_length=MAX_LEN_DETECTOR, help_text="Detector", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True) - read_patt_num = models.IntegerField(help_text='Read Pattern Number', default=0) + read_patt_num = models.IntegerField(help_text='Read Pattern Number', default=1) read_patt = models.CharField(max_length=MAX_LEN_READPATTERN, help_text="Read Pattern", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True) grating = models.CharField(max_length=MAX_LEN_GRATING, help_text="Grating", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True) subarray = models.CharField(max_length=MAX_LEN_SUBARRAY, help_text="Subarray", default=DEFAULT_MODEL_CHARFIELD, null=True, blank=True) From c97c4e82b916d0ab6408579a3fb471adfa78c89c Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Fri, 22 Mar 2024 16:01:06 -0400 Subject: [PATCH 18/28] Final switch to default if None is found --- jwql/website/apps/jwql/archive_database_update.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py index 5c8ef0997..5479d6632 100755 --- a/jwql/website/apps/jwql/archive_database_update.py +++ b/jwql/website/apps/jwql/archive_database_update.py @@ -490,6 +490,9 @@ def fill_empty_rootfileinfo(rootfileinfo_set): expstart=defaults_dict.get('expstart', 0.0)) for key, value in defaults.items(): + # Final check to verify no None exists + if value is None: + value = DEFAULT_MODEL_CHARFIELD setattr(rootfileinfo_mod, key, value) try: rootfileinfo_mod.save() From dcf664111ae275e19b8f61e061ff9b8e5f22f244 Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Fri, 22 Mar 2024 16:15:09 -0400 Subject: [PATCH 19/28] Pep8Speaks cleanup --- jwql/tests/test_data_containers.py | 2 +- jwql/utils/constants.py | 2 +- jwql/website/apps/jwql/archive_database_update.py | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/jwql/tests/test_data_containers.py b/jwql/tests/test_data_containers.py index 195aa4093..4b1f1c4ba 100644 --- a/jwql/tests/test_data_containers.py +++ b/jwql/tests/test_data_containers.py @@ -199,7 +199,6 @@ def test_get_all_proposals(): (['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'], {'bad'})), (False, ['rate', 'uncal', 'bad', 'o006_crfints', 'o001_crf'], ['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'])]) - def test_get_available_suffixes(untracked, input_suffixes, expected): result = data_containers.get_available_suffixes( input_suffixes, return_untracked=untracked) @@ -339,6 +338,7 @@ def test_get_anomaly_form_post_group(mocker): assert update_mock.call_count == 2 """ + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') def test_get_dashboard_components(): request = MockPostRequest() diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index 6ea90e172..5114542d6 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -364,7 +364,7 @@ "wfscmb", ] -#Default Model Values +# Default Model Values DEFAULT_MODEL_CHARFIELD = "empty" # Filename Component Lengths diff --git a/jwql/website/apps/jwql/archive_database_update.py b/jwql/website/apps/jwql/archive_database_update.py index 5479d6632..bb7ffe481 100755 --- a/jwql/website/apps/jwql/archive_database_update.py +++ b/jwql/website/apps/jwql/archive_database_update.py @@ -160,6 +160,7 @@ def get_updates(update_database): 
create_archived_proposals_context(inst) + @log_info @log_fail def cleanup_past_runs(): @@ -169,6 +170,7 @@ def cleanup_past_runs(): list(map(lambda x: fill_empty_model("rootfileinfo", x), rootfileinfo_field_set)) logging.info("Finished cleanup_past_runs") + def get_all_possible_filenames_for_proposal(instrument, proposal_num): """Wrapper around a MAST query for filenames from a given instrument/proposal From 3f7fa255f89e3f8460dd2fb7d231e7df8e96dffe Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Fri, 29 Mar 2024 13:18:53 -0400 Subject: [PATCH 20/28] do initial fetch --- jwql/pull_jwql_branch.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh index 5544124d7..ed0dceeaa 100644 --- a/jwql/pull_jwql_branch.sh +++ b/jwql/pull_jwql_branch.sh @@ -62,6 +62,7 @@ echo "Reset: $reset"; echo "Notify: $notify $recipient"; # 1. Pull updated code from GitHub deployment branch (keep second checkout in case its already defined for some weird reason) +git fetch origin git checkout -b $branch_name --track origin/$branch_name git checkout $branch_name git fetch origin $branch_name From 171e828515e8d019da9d2b713be391d087a7ae5e Mon Sep 17 00:00:00 2001 From: Ben Sunnquist Date: Tue, 27 Feb 2024 10:52:01 -0500 Subject: [PATCH 21/28] minor change to plotting for data that was processed through the claw monitor before the model measurements were implemented --- jwql/instrument_monitors/nircam_monitors/claw_monitor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py index b73d916c0..76d7f7145 100644 --- a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py +++ b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py @@ -192,6 +192,7 @@ def make_background_plots(self, plot_type='bkg'): df = df[df['stddev'] != 0] # older data has no accurate stddev measures plot_data = df['stddev'].values if plot_type == 'model': + df = df[np.isfinite(df['total_bkg'])] # the claw monitor did not track model measurements at first plot_data = df['median'].values / df['total_bkg'].values plot_expstarts = df['expstart_mjd'].values From 9b957ea546cb44bce5862e849df0546ad11efacb Mon Sep 17 00:00:00 2001 From: Ben Sunnquist Date: Fri, 8 Mar 2024 15:54:33 -0500 Subject: [PATCH 22/28] updated pivot wavelengths with new filters --- .../nircam_monitors/claw_monitor.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py index 76d7f7145..3ee7c4acb 100644 --- a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py +++ b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py @@ -424,11 +424,13 @@ def run(self): mast_table = self.query_mast() logging.info('{} files found between {} and {}.'.format(len(mast_table), self.query_start_mjd, self.query_end_mjd)) - # Define pivot wavelengths - self.filter_wave = {'F070W': 0.704, 'F090W': 0.902, 'F115W': 1.154, 'F150W': 1.501, 'F150W2': 1.659, - 'F200W': 1.989, 'F212N': 2.121, 'F250M': 2.503, 'F277W': 2.762, 'F300M': 2.989, - 'F322W2': 3.232, 'F356W': 3.568, 'F410M': 4.082, 'F430M': 4.281, 'F444W': 4.408, - 'F480M': 4.874} + # Define pivot wavelengths - last downloaded March 8 2024 from: + # https://jwst-docs.stsci.edu/jwst-near-infrared-camera/nircam-instrumentation/nircam-filters + self.filter_wave = {'F070W': 0.704, 'F090W': 0.901, 'F115W': 1.154, 'F140M': 1.404, 'F150W': 
1.501, 'F162M': 1.626, 'F164N': 1.644, + 'F150W2': 1.671, 'F182M': 1.845, 'F187N': 1.874, 'F200W': 1.99, 'F210M': 2.093, 'F212N': 2.12, 'F250M': 2.503, + 'F277W': 2.786, 'F300M': 2.996, 'F322W2': 3.247, 'F323N': 3.237, 'F335M': 3.365, 'F356W': 3.563, 'F360M': 3.621, + 'F405N': 4.055, 'F410M': 4.092, 'F430M': 4.28, 'F444W': 4.421, 'F460M': 4.624, 'F466N': 4.654, 'F470N': 4.707, + 'F480M': 4.834} # Create observation-level median stacks for each filter/pupil combo, in pixel-space combos = np.array(['{}_{}_{}_{}'.format(str(row['program']), row['observtn'], row['filter'], row['pupil']).lower() for row in mast_table]) From 745990a3706a92c2ca7a1b1d68bbfe8446b3d1a4 Mon Sep 17 00:00:00 2001 From: Ben Sunnquist Date: Wed, 13 Mar 2024 11:05:25 -0400 Subject: [PATCH 23/28] fixed background prediction call for narrow and medium bands to use correct pivot wavelength --- jwql/instrument_monitors/nircam_monitors/claw_monitor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py index 3ee7c4acb..fa31cb66d 100644 --- a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py +++ b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py @@ -301,7 +301,11 @@ def process(self): # Get predicted background level using JWST background tool ra, dec = hdu[1].header['RA_V1'], hdu[1].header['DEC_V1'] - wv = self.filter_wave[self.fltr.upper()] + if ('N' in self.pupil.upper()) | ('M' in self.pupil.upper()): + fltr_wv = self.pupil.upper() + else: + fltr_wv = self.fltr.upper() + wv = self.filter_wave[fltr_wv] date = hdu[0].header['DATE-BEG'] doy = int(Time(date).yday.split(':')[1]) try: From b7555ac4f2d351249f089155c2121006bb8221e6 Mon Sep 17 00:00:00 2001 From: Ben Sunnquist Date: Tue, 26 Mar 2024 11:10:35 -0400 Subject: [PATCH 24/28] minor update to resolve time zone django warnings for entry dates --- jwql/instrument_monitors/nircam_monitors/claw_monitor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) mode change 100644 => 100755 jwql/instrument_monitors/nircam_monitors/claw_monitor.py diff --git a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py old mode 100644 new mode 100755 index fa31cb66d..9978ec182 --- a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py +++ b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py @@ -337,7 +337,7 @@ def process(self): 'skyflat_filename': os.path.basename(self.outfile), 'doy': float(doy), 'total_bkg': float(total_bkg), - 'entry_date': datetime.datetime.now() + 'entry_date': datetime.datetime.now(datetime.timezone.utc) } entry = self.stats_table(**claw_db_entry) entry.save() @@ -476,7 +476,7 @@ def run(self): 'start_time_mjd': self.query_start_mjd, 'end_time_mjd': self.query_end_mjd, 'run_monitor': monitor_run, - 'entry_date': datetime.datetime.now()} + 'entry_date': datetime.datetime.now(datetime.timezone.utc)} entry = self.query_table(**new_entry) entry.save() From 49cf1c598ac7539f655c817d5fe09c9aad517fe8 Mon Sep 17 00:00:00 2001 From: Bradley Sappington Date: Fri, 5 Apr 2024 14:16:51 -0400 Subject: [PATCH 25/28] add default to read_patt_num --- .../0021_alter_rootfileinfo_read_patt_num.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 jwql/website/apps/jwql/migrations/0021_alter_rootfileinfo_read_patt_num.py diff --git a/jwql/website/apps/jwql/migrations/0021_alter_rootfileinfo_read_patt_num.py 
b/jwql/website/apps/jwql/migrations/0021_alter_rootfileinfo_read_patt_num.py new file mode 100644 index 000000000..96f6e6d58 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0021_alter_rootfileinfo_read_patt_num.py @@ -0,0 +1,18 @@ +# Generated by Django 4.1.7 on 2024-04-05 18:08 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0020_alter_proposal_category_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='rootfileinfo', + name='read_patt_num', + field=models.IntegerField(default=1, help_text='Read Pattern Number'), + ), + ] From 25939a55bbb338bdd42e4f40dd54f0837876f32e Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 10 Apr 2024 16:37:10 -0400 Subject: [PATCH 26/28] import models by name in __init__ --- .../common_monitors/edb_telemetry_monitor.py | 2 +- jwql/website/apps/jwql/monitor_models/edb.py | 11 ++++++----- jwql/website/apps/jwql/monitor_pages/__init__.py | 12 ++++++++++++ 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index 2557d1935..07cd9744d 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -404,7 +404,7 @@ # Import * is okay here because this module specifically only contains database models # for this monitor - from jwql.website.apps.jwql.monitor_models.edb import * # noqa: E402 (module level import not at top of file) + #from jwql.website.apps.jwql.monitor_models.edb import * # noqa: E402 (module level import not at top of file) ALLOWED_COMBINATION_TYPES = ['all+daily_means', 'all+block_means', 'all+every_change', 'all+time_interval'] diff --git a/jwql/website/apps/jwql/monitor_models/edb.py b/jwql/website/apps/jwql/monitor_models/edb.py index 26a8220ce..8f5c12825 100644 --- a/jwql/website/apps/jwql/monitor_models/edb.py +++ b/jwql/website/apps/jwql/monitor_models/edb.py @@ -41,11 +41,12 @@ class FGSEdbBlockMeansStats(models.Model): median = ArrayField(models.FloatField()) max = ArrayField(models.FloatField()) min = ArrayField(models.FloatField()) + delme = models.CharField(max_length=MAX_LEN_MNEMONIC, blank=True, null=True) entry_date = models.DateTimeField(blank=True, null=True) class Meta: managed = True - db_table = 'fgs_edb_block_means_stats' + db_table = 'fgs_edb_blocks_stats' unique_together = (('id', 'entry_date'),) @@ -127,7 +128,7 @@ class MIRIEdbBlockMeansStats(models.Model): class Meta: managed = True - db_table = 'miri_edb_block_means_stats' + db_table = 'miri_edb_blocks_stats' unique_together = (('id', 'entry_date'),) @@ -209,7 +210,7 @@ class NIRCamEdbBlockMeansStats(models.Model): class Meta: managed = True - db_table = 'nircam_edb_block_means_stats' + db_table = 'nircam_edb_blocks_stats' unique_together = (('id', 'entry_date'),) @@ -291,7 +292,7 @@ class NIRISSEdbBlockMeansStats(models.Model): class Meta: managed = True - db_table = 'niriss_edb_block_means_stats' + db_table = 'niriss_edb_blocks_stats' unique_together = (('id', 'entry_date'),) @@ -373,7 +374,7 @@ class NIRSpecEdbBlockMeansStats(models.Model): class Meta: managed = True - db_table = 'nirspec_edb_block_means_stats' + db_table = 'nirspec_edb_blocks_stats' unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_pages/__init__.py b/jwql/website/apps/jwql/monitor_pages/__init__.py index ed184d7ff..1d7b7ee18 100644 
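For context on the __init__.py hunk below: it follows the standard recipe for using Django models from code that runs outside a web request, where the ordering is what matters. A minimal sketch of the same recipe, with the settings module path taken from this repo:

    import os

    import django

    # Settings must be configured and the app registry populated before any
    # model import; importing a models module first raises AppRegistryNotReady.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings")
    django.setup()

    # Only after django.setup() is it safe to import the monitor models.
    from jwql.website.apps.jwql.monitor_models.edb import MIRIEdbBlockMeansStats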
--- a/jwql/website/apps/jwql/monitor_pages/__init__.py +++ b/jwql/website/apps/jwql/monitor_pages/__init__.py @@ -1 +1,13 @@ +import os + from .monitor_cosmic_rays_bokeh import CosmicRayMonitor + + +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS + +if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + # Need to set up django apps before we can access the models + import django # noqa: E402 (module level import not at top of file) + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + django.setup() + from jwql.website.apps.jwql.monitor_models.edb import MIRIEdbBlockMeansStats, NIRCamEdbBlockMeansStats From f6928d562c4102fbf894330d466bee84cbbd0e2c Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Thu, 11 Apr 2024 16:28:40 -0400 Subject: [PATCH 27/28] get tests working --- jwql/tests/test_edb.py | 8 ++++---- jwql/tests/test_edb_telemetry_monitor.py | 26 ++++++++++++++---------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/jwql/tests/test_edb.py b/jwql/tests/test_edb.py index bdbb4b179..167b99345 100644 --- a/jwql/tests/test_edb.py +++ b/jwql/tests/test_edb.py @@ -17,7 +17,7 @@ pytest -s test_edb.py """ -from datetime import datetime +from datetime import datetime, timezone import os from astropy.table import Table @@ -65,10 +65,10 @@ def test_change_only_bounding_points(): """Make sure we correctly add starting and ending time entries to a set of change-only data """ - dates = [datetime(2022, 3, 2, 12, i) for i in range(10)] + dates = [datetime(2022, 3, 2, 12, i, tzinfo=timezone.utc) for i in range(10)] values = np.arange(10) - starting_time = datetime(2022, 3, 2, 12, 3, 3) - ending_time = datetime(2022, 3, 2, 12, 8, 4) + starting_time = datetime(2022, 3, 2, 12, 3, 3, tzinfo=timezone.utc) + ending_time = datetime(2022, 3, 2, 12, 8, 4, tzinfo=timezone.utc) new_dates, new_values = ed.change_only_bounding_points(dates, values, starting_time, ending_time) diff --git a/jwql/tests/test_edb_telemetry_monitor.py b/jwql/tests/test_edb_telemetry_monitor.py index 8ddd21f6e..ab5230521 100644 --- a/jwql/tests/test_edb_telemetry_monitor.py +++ b/jwql/tests/test_edb_telemetry_monitor.py @@ -43,6 +43,10 @@ def test_add_every_change_history(): """Test that every_change data is correctly combined with an existing set of every_change data """ + +this test needs work.... 
+ + dates1 = np.array([datetime.datetime(2022, 3, 4, 1, 5, i) for i in range(10)]) data1 = np.array([0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2]) means1 = 0.15 @@ -51,14 +55,14 @@ def test_add_every_change_history(): data2 = np.array([0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4]) means2 = 0.35 devs2 = 0.07 - ec1 = {'0.15': (dates1, data1, means1, devs1), - '0.35': (dates2, data2, means2, devs2) + ec1 = {'0.15': ([dates1], [data1], [means1], [devs1]), + '0.35': ([dates2], [data2], [means2], [devs2]) } - ec2 = {'0.15': (dates1, data1, means1, devs1)} + ec2 = {'0.15': ([dates2], [data2], [means2], [devs2])} combine1 = etm.add_every_change_history(ec1, ec2) expected1 = defaultdict(list) - expected1['0.15'] = (np.append(dates1, dates1), np.append(data1, data1), np.append(means1, means1), np.append(devs1, devs1)) - expected1['0.35'] = (dates2, data2, means2, devs2) + expected1['0.15'] = ([np.append(dates1, dates1)], [np.append(data1, data1)], np.append(means1, means1), np.append(devs1, devs1)) + expected1['0.35'] = ([dates2], [data2], [means2], [devs2]) for key in combine1: print('compare ', key) @@ -66,7 +70,7 @@ def test_add_every_change_history(): assert np.all(cele == expected1[key][i]) dates3 = np.array([dates2[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)]) - ec3 = {'0.55': (dates3, data2 + 0.2, means2 + 0.2, devs2)} + ec3 = {'0.55': ([dates3], [data2 + 0.2], [means2 + 0.2], [devs2])} combine2 = etm.add_every_change_history(ec1, ec3) expected2 = defaultdict(list) expected2['0.15'] = (dates1, data1, means1, devs1) @@ -299,11 +303,11 @@ def test_organize_every_change(): f770mean, _, _ = sigma_clipped_stats(f770_vals, sigma=3) f1000mean, _, _ = sigma_clipped_stats(f1000_vals, sigma=3) f1500mean, _, _ = sigma_clipped_stats(f1500_vals, sigma=3) - expected = {'F2550W': (np.array(dates[f2550_idx]), f2550_vals, MIRI_POS_RATIO_VALUES['FW']['F2550W'][0]), - 'F560W': (np.array(dates[f560_idx]), f560_vals, MIRI_POS_RATIO_VALUES['FW']['F560W'][0]), - 'F770W': (np.array(dates[f770_idx]), f770_vals, MIRI_POS_RATIO_VALUES['FW']['F770W'][0]), - 'F1000W': (np.array(dates[f1000_idx]), f1000_vals, MIRI_POS_RATIO_VALUES['FW']['F1000W'][0]), - 'F1500W': (np.array(dates[f1500_idx]), f1500_vals, MIRI_POS_RATIO_VALUES['FW']['F1500W'][0])} + expected = {'F2550W': (np.array(dates[f2550_idx]), f2550_vals, [MIRI_POS_RATIO_VALUES['FW']['F2550W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F2550W'][1]]), + 'F560W': (np.array(dates[f560_idx]), f560_vals, [MIRI_POS_RATIO_VALUES['FW']['F560W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F560W'][1]]), + 'F770W': (np.array(dates[f770_idx]), f770_vals, [MIRI_POS_RATIO_VALUES['FW']['F770W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F770W'][1]]), + 'F1000W': (np.array(dates[f1000_idx]), f1000_vals, [MIRI_POS_RATIO_VALUES['FW']['F1000W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F1000W'][1]]), + 'F1500W': (np.array(dates[f1500_idx]), f1500_vals, [MIRI_POS_RATIO_VALUES['FW']['F1500W'][0]], [MIRI_POS_RATIO_VALUES['FW']['F1500W'][1]])} for key, val in expected.items(): assert np.all(val[0] == data[key][0]) From 07b660df5cd25fce3e95feef5a583bb1070efb72 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Fri, 12 Apr 2024 15:25:19 -0400 Subject: [PATCH 28/28] Fix failing test --- .../common_monitors/edb_telemetry_monitor.py | 83 ++++++++++++++----- jwql/tests/test_edb_telemetry_monitor.py | 68 +++++++-------- 2 files changed, 97 insertions(+), 54 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py 
b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
index 07cd9744d..e23778b8e 100755
--- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
@@ -404,8 +404,8 @@

     # Import * is okay here because this module specifically only contains database models
     # for this monitor
-    #from jwql.website.apps.jwql.monitor_models.edb import *  # noqa: E402 (module level import not at top of file)
-
+    from jwql.website.apps.jwql.monitor_models.edb import *  # noqa: E402 (module level import not at top of file)
+    #from jwql.website.apps.jwql.monitor_models.edb import MIRIEdbDailyMeansStats

 ALLOWED_COMBINATION_TYPES = ['all+daily_means', 'all+block_means', 'all+every_change', 'all+time_interval']
@@ -731,7 +731,7 @@
         # as defined by the json files. This is the default operation.

         # Loop over instruments
-        for instrument_name in JWST_INSTRUMENT_NAMES:
+        for instrument_name in ['miri']:  #JWST_INSTRUMENT_NAMES:
             monitor_dir = os.path.dirname(os.path.abspath(__file__))

             # File of mnemonics to monitor
@@ -1145,9 +1145,10 @@ def get_history_every_change(self, mnemonic, start_date, end_date):
        -------
        hist : dict
            Retrieved data. Keys are the value of the dependency mnemonic,
-            and each value is a 3-tuple. The tuple contains the times, values,
-            and mean value of the primary mnemonic corresponding to the times
-            that they dependency mnemonic has the value of the key.
+            and each value is a 4-tuple. The tuple contains the times, values,
+            mean value, and standard deviation of the primary mnemonic corresponding
+            to the times when the dependency mnemonic has the value of the key.
+            Values are lists of lists.
        """
        filters = {"mnemonic__iexact": mnemonic,
                   "latest_query__range": (start_date, end_date)
                   }
@@ -1285,10 +1286,15 @@ def identify_tables(self, inst, tel_type):
             Examples include "every_change", "daily", "all", etc
         """
         mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst]
-        #if '_means' in tel_type:
-        #    tel_type = tel_type.strip('_means')
+        if '_means' in tel_type:  # this is used for dailymeans and blockmeans. need to update both table names
+            tel_type = tel_type.strip('_means')  # in order to get rid of these lines
         tel_type = tel_type.title().replace('_', '')
         self.history_table_name = f'{mixed_case_name}Edb{tel_type}Stats'
+
+        # temporary fix
+        if 'BlockStats' in self.history_table_name:
+            self.history_table_name = self.history_table_name.replace('BlockStats', 'BlocksStats')
+
         self.history_table = eval(self.history_table_name)
@@ -1554,7 +1560,7 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None):

         # At the top level, we loop over the different types of telemetry. These types
         # largely control if/how the data will be averaged.
-        for telemetry_kind in mnemonic_dict:  # ['every_change']']
+        for telemetry_kind in ['every_change']:  # mnemonic_dict:
             telem_type = telemetry_kind
             logging.info(f'Working on telemetry_type: {telem_type}')
@@ -1727,8 +1733,21 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None):

                         # Before we can add the every-change data to the database, organize it to make it
                         # easier to access.
Note that every_change_data is now a dict rather than an EDBMnemonic instance + + print('new_data:') + print(new_data.data) + print(new_data.every_change_values) + print(new_data.blocks) + + every_change_data = organize_every_change(new_data) + print('\n\nevery_change_data:') + print(every_change_data) + print('\n\nhistorical data:') + print(historical_data) + stop + # Add new data to JWQLDB. # If no new data were retrieved from the EDB, then there is no need to add an entry to the JWQLDB if create_new_history_entry: @@ -1746,6 +1765,12 @@ def run(self, instrument, mnemonic_dict, plot_start=None, plot_end=None): # Note that the line below will change mnemonic_info into a dictionary mnemonic_info = add_every_change_history(historical_data, every_change_data) + + print('mnemonic_info:') + print(mnemonic_info) + + + logging.info(f'Combined new data plus historical data. Number of data points per key:') for key in mnemonic_info: logging.info(f'Key: {key}, Num of Points: {len(mnemonic_info[key][0])}') @@ -1872,10 +1897,15 @@ def add_every_change_history(dict1, dict2): Parameters ---------- dict1 : dict - First dictionary to combine + First dictionary to combine. The intention is that dict1 will contain + the historical data for the mnemonic, retrieved from the database. This + means the values will be tuples of nested lists. Also allow for this to be an + empty dictionary, if there are no historical data. dict2 : dict - Second dictionary to combine + Second dictionary to combine. The intention is that dict2 will contain + the new mnemonic data from the latest EDB query. This means the values + will be tuples of lists. Returns ------- @@ -1943,19 +1973,23 @@ def add_every_change_history(dict1, dict2): min_time_dict1 = min(min(m) for m in value[0]) if min_time_dict1 < np.min(dict2[key][0]): + # Here, the minimum date in the history (dict1) is earlier + # than the minimum date in the new EDB query data (dict2). + # This is where we expect to be most of the time. + #all_dates = np.append(value[0], dict2[key][0]) #all_data = np.append(value[1], dict2[key][1]) - all_dates = value[0] + all_dates = deepcopy(value[0]) all_dates.append(list(dict2[key][0])) - all_values = value[1] + all_values = deepcopy(value[1]) all_values.append(list(dict2[key][1])) - all_medians = value[2] + all_medians = deepcopy(value[2]) all_medians.append(list(dict2[key][2])) - all_devs = value[3] + all_devs = deepcopy(value[3]) all_devs.append(list(dict2[key][3])) #all_medians = np.append(value[2], dict2[key][2]) @@ -1964,6 +1998,7 @@ def add_every_change_history(dict1, dict2): # Seems unlikely we'll ever want to be here. This would be # for a case where a given set of values has an earliest date # that is earlier than anything in the database. 
+ #all_dates = np.append(dict2[key][0], value[0]) #all_data = np.append(dict2[key][1], value[1]) #all_medians = np.append(dict2[key][2], value[2]) @@ -2001,6 +2036,8 @@ def add_every_change_history(dict1, dict2): updated_value = (all_dates, all_values, all_medians, all_devs) combined[key] = updated_value else: + # In this case, a given key is present in the historical data from the database, + # but not in the data from the new EDB query if key == 'OPAQUE': print(key) print(value[0]) @@ -2011,7 +2048,9 @@ def add_every_change_history(dict1, dict2): print(key) print(value) #stop - combined[key] = value + + # value here is already a nested list, so we can transfer that directly to the new dictionary + combined[key] = deepcopy(value) if key == 'OPAQUE': print('before dict2 only keys:') @@ -2026,7 +2065,8 @@ def add_every_change_history(dict1, dict2): #stop logging.info(f'In add_every_change_history: key: {key}, len data: {len(all_dates)}, median: {all_medians}, dev: {all_devs}') - # Add entries for keys that are in dict2 but not dict1 + + # Add entries for keys that are in dict2 (recent query) but not dict1 (historical data) for key, value in dict2.items(): if key not in dict1: dates =[] @@ -2162,8 +2202,9 @@ def organize_every_change(mnemonic): all_data : dict Dictionary of organized results. Keys are the dependency values, and values are tuples. The first element of each tuple is a list - of dates, the second element is a list of data values, and the third - is a the sigma-clipped mean value of the data. + of dates, the second element is a list of data values, the third is + a single element list of the sigma-clipped mean value of the data, + and the fourth is a single element list of the stdev of the data. """ all_data = {} @@ -2298,8 +2339,8 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True # Find the min and max values in the x-range. These may be used for plotting # the nominal_value line later. Initialize here, and then dial them in based # on the data. - min_time = datetime.datetime.today() - max_time = datetime.datetime(2021, 12, 25) + min_time = datetime.datetime.now(datetime.timezone.utc) + max_time = datetime.datetime(2021, 12, 25, tzinfo=datetime.timezone.utc) logging.info('In plot_every_change_data:') for (key, value), color in zip(data.items(), colors): diff --git a/jwql/tests/test_edb_telemetry_monitor.py b/jwql/tests/test_edb_telemetry_monitor.py index ab5230521..b2f17cf31 100644 --- a/jwql/tests/test_edb_telemetry_monitor.py +++ b/jwql/tests/test_edb_telemetry_monitor.py @@ -43,44 +43,46 @@ def test_add_every_change_history(): """Test that every_change data is correctly combined with an existing set of every_change data """ + dates1 = [datetime.datetime(2022, 3, 4, 1, 5, i) for i in range(10)] + data1 = [0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2] + means1 = [0.15] + devs1 = [0.07] + dates2 = [dates1[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)] + data2 = [0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4] + means2 = [0.35] + devs2 = [0.07] + #ec1 = {'0.15': (dates1, data1, means1, devs1), + # '0.35': (dates2, data2, means2, devs2) + # } + history = {'0.15': ([dates1], [data1], [means1], [devs1]), + '0.35': ([dates2], [data2], [means2], [devs2]) + } + ec2 = {'0.15': (dates2, data2, means2, devs2)} + combine1 = etm.add_every_change_history(history, ec2) -this test needs work.... 
- - - dates1 = np.array([datetime.datetime(2022, 3, 4, 1, 5, i) for i in range(10)]) - data1 = np.array([0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2]) - means1 = 0.15 - devs1 = 0.07 - dates2 = np.array([dates1[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)]) - data2 = np.array([0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4]) - means2 = 0.35 - devs2 = 0.07 - ec1 = {'0.15': ([dates1], [data1], [means1], [devs1]), - '0.35': ([dates2], [data2], [means2], [devs2]) - } - ec2 = {'0.15': ([dates2], [data2], [means2], [devs2])} - combine1 = etm.add_every_change_history(ec1, ec2) expected1 = defaultdict(list) - expected1['0.15'] = ([np.append(dates1, dates1)], [np.append(data1, data1)], np.append(means1, means1), np.append(devs1, devs1)) + expected_dates = [dates1] + expected_dates.append(dates2) + expected_data = [data1] + expected_data.append(data2) + expected_means = [means1] + expected_means.append(means2) + expected_devs = [devs1] + expected_devs.append(devs2) + expected1['0.15'] = (expected_dates, expected_data, expected_means, expected_devs) expected1['0.35'] = ([dates2], [data2], [means2], [devs2]) + assert combine1 == expected1 - for key in combine1: - print('compare ', key) - for i, cele in enumerate(combine1[key]): - assert np.all(cele == expected1[key][i]) - - dates3 = np.array([dates2[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)]) - ec3 = {'0.55': ([dates3], [data2 + 0.2], [means2 + 0.2], [devs2])} - combine2 = etm.add_every_change_history(ec1, ec3) + dates3 = [dates2[-1] + datetime.timedelta(seconds=1 * i) for i in range(1, 11)] + data3 = [e + 0.2 for e in data2] + means3 = [0.55] + ec3 = {'0.55': (dates3, data3, means3, devs2)} + combine2 = etm.add_every_change_history(history, ec3) expected2 = defaultdict(list) - expected2['0.15'] = (dates1, data1, means1, devs1) - expected2['0.35'] = (dates2, data2, means2, devs2) - expected2['0.55'] = (dates3, data2 + 0.2, means2 + 0.2, devs2) - - for key in combine2: - print('compare ', key) - for i, cele in enumerate(combine2[key]): - assert np.all(cele == expected2[key][i]) + expected2['0.15'] = ([dates1], [data1], [means1], [devs1]) + expected2['0.35'] = ([dates2], [data2], [means2], [devs2]) + expected2['0.55'] = ([dates3], [data3], [means3], [devs2]) + assert combine2 == expected2 def test_change_only_add_points():
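The net effect of the last two patches is a nested-list layout for every-change history: each dependency value maps to a 4-tuple (dates, values, medians, devs) in which every element holds one inner list per EDB query, and the medians/devs for each query are single-element lists. A self-contained sketch of folding a new query into the stored history, with integers standing in for datetimes; the real add_every_change_history also handles date ordering and keys present on only one side:

    from copy import deepcopy

    # One prior query already in the stored history.
    history = {'0.15': ([[1, 2, 3]], [[0.1, 0.2, 0.1]], [[0.15]], [[0.07]])}

    # A fresh EDB query for the same dependency value: flat lists, not nested.
    new_query = {'0.15': ([4, 5, 6], [0.2, 0.1, 0.2], [0.15], [0.07])}

    combined = deepcopy(history)
    for key, new_tuple in new_query.items():
        if key in combined:
            # Append one inner list per element, keeping one list per query.
            for stored, new in zip(combined[key], new_tuple):
                stored.append(list(new))
        else:
            combined[key] = tuple([list(item)] for item in new_tuple)

    print(combined['0.15'][2])  # [[0.15], [0.15]] -> one median entry per query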