diff --git a/.pep8speaks.yml b/.pep8speaks.yml index d6a129ad9..85ea0eacd 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -17,6 +17,7 @@ scanner: pycodestyle: max-line-length: 100 # Default is 79 in PEP8 ignore: # Errors and warnings to ignore + - E501 # Line too long - E505 only_mention_files_with_errors: True # If False, a separate status comment for each file is made. diff --git a/.pyup.yml b/.pyup.yml index 783352eaf..2ecac0f44 100644 --- a/.pyup.yml +++ b/.pyup.yml @@ -18,7 +18,7 @@ branch: develop # update schedule # default: empty # allowed: "every day", "every week", .. -schedule: "every week" +schedule: "every month" # search for requirement files # default: True diff --git a/CHANGES.rst b/CHANGES.rst index 507a7ae40..b078109fc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,32 @@ +0.22.0 (2019-08-26) =================== + +New Features +------------ + +Project & API Documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Added slides from July 2019 TIPS presentation to ``presentations/`` directory + + +``jwql`` Repository +~~~~~~~~~~~~~~~~~~~ + +- Updated dark monitor to support all five JWST instruments, instead of only NIRCam +- Changed the ``jwql-3.5`` and ``jwql-3.6`` conda environments to be simpler and to work on Linux distributions +- Added library code for creating instrument monitoring ``bokeh`` plots with new ``bokeh`` templating software + + +Bug Fixes +--------- + +Project & API Documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Fixed various bugs that were causing the ``sphinx`` API documentation to crash on ReadTheDocs + + 0.21.0 (2019-07-23) =================== diff --git a/Jenkinsfile b/Jenkinsfile index 44d1185ff..6b86ed368 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -27,7 +27,7 @@ if (utils.scm_checkout()) return // Establish OS and Python version variables for the matrix matrix_os = ["linux-stable"] // (Note that Jenkins can only be run with Linux, not MacOSX/Windows) -matrix_python = ["3.5", "3.6"] +matrix_python = ["3.6"] // Set up the matrix of builds matrix = [] diff --git a/README.md b/README.md index 94f46dd14..711806759 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ Next, activate the `base` or `root` environment (depending on your version of `c source activate base/root ``` -Lastly, create the `jwql` environment with either Python 3.5 or 3.6, via the `environment_python_3_5.yml` or `environment_python_3_6.yml` file, respectively. We recommend installing with the 3.6 version: +Lastly, create the `jwql` environment via the `environment_python_3_6.yml` file: ``` conda env create -f environment_python_3_6.yml --name jwql-3.6 @@ -94,7 +94,7 @@ Next, you need to install the `jwql` package under development mode. This can b python setup.py develop ``` -or +or ``` pip install -e .
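Taken together, the updated README installation steps amount to the following shell session — a minimal sketch assuming a local clone of the repository (the `git clone` URL is inferred from the package's GitHub references in this patch) and a working `conda` installation:

```
# Obtain the source and enter the repository (assumed clone URL)
git clone https://github.com/spacetelescope/jwql.git
cd jwql

# Activate the base environment ("root" in older conda versions)
source activate base

# Create and activate the jwql environment from the Python 3.6 file
conda env create -f environment_python_3_6.yml --name jwql-3.6
source activate jwql-3.6

# Install the jwql package in development (editable) mode
pip install -e .
```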
diff --git a/environment_python_3_5.yml b/environment_python_3_5.yml deleted file mode 100644 index ac9bbb1a3..000000000 --- a/environment_python_3_5.yml +++ /dev/null @@ -1,37 +0,0 @@ -channels: -- defaults -- http://ssb.stsci.edu/astroconda-dev -dependencies: -- astroquery=0.3.9 -- bokeh=1.3.2 -- django=2.2.1 -- flake8=3.7.7 -- inflection=0.3.1 -- ipython=7.7.0 -- jinja2=2.10 -- jsonschema=2.6.0 -- jwst=0.13.0 -- matplotlib=3.1.0 -- numpy=1.16.4 -- numpydoc=0.9.0 -- pandas=0.24.2 -- pip=19.1.1 -- postgresql=9.6.6 -- psycopg2=2.7.5 -- python=3.6.4 -- pytest=5.0.1 -- pytest-cov=2.7.1 -- scipy=1.3.0 -- setuptools=41.0.1 -- sphinx=2.1.0 -- sqlalchemy=1.3.5 -- stsci_rtd_theme=0.0.2 -- twine=1.13.0 -- pip: - - asdf==2.3.3 - - astropy==3.2.1 - - authlib==0.11 - - codecov==2.0.15 - - jwedb>=0.0.3 - - pysiaf==0.3.1 - - pysqlite3==0.2.2 \ No newline at end of file diff --git a/environment_python_3_6.yml b/environment_python_3_6.yml index 1d60c7f2e..76f04216a 100644 --- a/environment_python_3_6.yml +++ b/environment_python_3_6.yml @@ -1,37 +1,38 @@ channels: - defaults -- http://ssb.stsci.edu/astroconda-dev +- http://ssb.stsci.edu/astroconda dependencies: -- astroquery=0.3.9 -- bokeh=1.3.4 -- django=2.2.1 -- flake8=3.7.7 +- astroquery=0.3.10 +- bokeh>=1.0,<1.4 +- django=2.2.5 +- flake8=3.7.9 - inflection=0.3.1 -- ipython=7.7.0 -- jinja2=2.10 -- jsonschema=2.6.0 -- jwst=0.13.0 +- ipython=7.13.0 +- jinja2=2.10.3 +- jsonschema=3.2.0 - matplotlib=3.1.0 +- nodejs=10.13.0 - numpy=1.16.4 -- numpydoc=0.9.0 -- pandas=0.24.2 +- numpydoc=0.9.2 +- pandas=1.0.3 - pip=19.1.1 - postgresql=9.6.6 - psycopg2=2.7.5 +- pysiaf==0.7.1 - python=3.6.4 -- pytest=5.0.1 -- pytest-cov=2.7.1 +- pytest=5.4.2 +- pytest-cov=2.8.1 - scipy=1.3.0 - setuptools=41.0.1 -- sphinx=2.1.0 -- sqlalchemy=1.3.5 -- stsci_rtd_theme=0.0.2 -- twine=1.13.0 +- sphinx=3.0.3 +- sqlalchemy=1.3.17 +- twine=2.0.0 - pip: - - asdf==2.3.3 - - astropy==3.2.1 - - authlib==0.11 - - codecov==2.0.15 + - asdf==2.6.0 + - astropy==4.0.1 + - authlib==0.14.3 + - codecov==2.1.3 - jwedb>=0.0.3 - - pysiaf==0.4.0 - pysqlite3==0.2.2 + - stsci_rtd_theme==0.0.2 + - git+https://github.com/spacetelescope/jwst@0.15.0 diff --git a/jwql/bokeh_templating/example/main.py b/jwql/bokeh_templating/example/main.py index 94ea80340..3aa0ac856 100644 --- a/jwql/bokeh_templating/example/main.py +++ b/jwql/bokeh_templating/example/main.py @@ -15,7 +15,6 @@ """ import os - import numpy as np from jwql.bokeh_templating import BokehTemplate diff --git a/jwql/bokeh_templating/factory.py b/jwql/bokeh_templating/factory.py index 5f3013e70..11e7d91bb 100644 --- a/jwql/bokeh_templating/factory.py +++ b/jwql/bokeh_templating/factory.py @@ -219,7 +219,6 @@ def figure_constructor(tool, loader, node): key = element.pop('kind') shape = {'line': ('Line', figure.line), 'circle': ('Circle', figure.circle), - #'step': ('Step', figure.step), #not supported in 0.12.5 'diamond': ('Diamond', figure.diamond), 'triangle': ('Triangle', figure.triangle), 'square': ('Square', figure.square), diff --git a/jwql/bokeh_templating/keyword_map.py b/jwql/bokeh_templating/keyword_map.py index 97d1dc9f5..502d3ef93 100644 --- a/jwql/bokeh_templating/keyword_map.py +++ b/jwql/bokeh_templating/keyword_map.py @@ -47,6 +47,7 @@ def _parse_module(module): test = lambda nm, mem: (not nm.startswith("_")) and (module.__name__ in mem.__module__) seqs = {nm: mem for nm, mem in getmembers(module, isfunction) if test(nm, mem)} maps = {nm: mem for nm, mem in getmembers(module, isclass) if test(nm, mem)} + # these need to be mappings if 
'gridplot' in seqs: maps['gridplot'] = seqs.pop('gridplot') diff --git a/jwql/bokeh_templating/template.py b/jwql/bokeh_templating/template.py index a4bea064d..073d67782 100644 --- a/jwql/bokeh_templating/template.py +++ b/jwql/bokeh_templating/template.py @@ -75,7 +75,6 @@ class BokehTemplate(object): _sequence_factory = factory.sequence_factory _figure_constructor = factory.figure_constructor _document_constructor = factory.document_constructor - _embed = False document = None format_string = "" @@ -113,7 +112,7 @@ def _include_formatting(self): if not self.format_string: return - self.formats = yaml.load(self.format_string) + self.formats = yaml.load(self.format_string, Loader=yaml.Loader) def _parse_interface(self): """Parse the YAML interface file using the registered @@ -138,7 +137,6 @@ def _parse_interface(self): # entire string with yaml. We don't need to assign the result to a # variable, since the constructors store everything in self.refs # (and self.document, for the document). - try: yaml.load_all(interface) except yaml.YAMLError as exc: diff --git a/jwql/database/database_interface.py b/jwql/database/database_interface.py index e376ff2db..f2d5b9c13 100755 --- a/jwql/database/database_interface.py +++ b/jwql/database/database_interface.py @@ -79,7 +79,7 @@ from sqlalchemy.orm.query import Query from sqlalchemy.types import ARRAY -from jwql.utils.constants import ANOMALIES, FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT, FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES from jwql.utils.utils import get_config ON_JENKINS = '/home/jenkins' in os.path.expanduser('~') @@ -218,7 +218,7 @@ class Monitor(base): monitor_name = Column(String(), nullable=False) start_time = Column(DateTime, nullable=False) end_time = Column(DateTime, nullable=True) - status = Column(Enum('SUCESS', 'FAILURE', name='monitor_status'), nullable=True) + status = Column(Enum('SUCCESS', 'FAILURE', name='monitor_status'), nullable=True) affected_tables = Column(ARRAY(String, dimensions=1), nullable=True) log_file = Column(String(), nullable=False) @@ -241,8 +241,14 @@ class : obj data_dict = {} data_dict['__tablename__'] = class_name.lower() + instrument = data_dict['__tablename__'].split('_')[0] + instrument_anomalies = [] + for anomaly in ANOMALIES_PER_INSTRUMENT: + if instrument in ANOMALIES_PER_INSTRUMENT[anomaly]: + instrument_anomalies.append(anomaly) + # Define anomaly table column names - data_dict['columns'] = ANOMALIES + data_dict['columns'] = instrument_anomalies data_dict['names'] = [name.replace('_', ' ') for name in data_dict['columns']] # Create a table with the appropriate Columns @@ -381,7 +387,11 @@ class : obj # Create tables from ORM factory -Anomaly = anomaly_orm_factory('anomaly') +NIRCamAnomaly = anomaly_orm_factory('nircam_anomaly') +NIRISSAnomaly = anomaly_orm_factory('niriss_anomaly') +NIRSpecAnomaly = anomaly_orm_factory('nirspec_anomaly') +MIRIAnomaly = anomaly_orm_factory('miri_anomaly') +FGSAnomaly = anomaly_orm_factory('fgs_anomaly') NIRCamDarkQueryHistory = monitor_orm_factory('nircam_dark_query_history') NIRCamDarkPixelStats = monitor_orm_factory('nircam_dark_pixel_stats') NIRCamDarkDarkCurrent = monitor_orm_factory('nircam_dark_dark_current') @@ -397,6 +407,8 @@ class : obj FGSDarkQueryHistory = monitor_orm_factory('fgs_dark_query_history') FGSDarkPixelStats = monitor_orm_factory('fgs_dark_pixel_stats') FGSDarkDarkCurrent = monitor_orm_factory('fgs_dark_dark_current') +NIRCamBiasQueryHistory = 
monitor_orm_factory('nircam_bias_query_history') +NIRCamBiasStats = monitor_orm_factory('nircam_bias_stats') if __name__ == '__main__': diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_bias_query_history.txt b/jwql/database/monitor_table_definitions/nircam/nircam_bias_query_history.txt new file mode 100644 index 000000000..c6deea152 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_bias_query_history.txt @@ -0,0 +1,8 @@ +INSTRUMENT, string +APERTURE, string +START_TIME_MJD, float +END_TIME_MJD, float +ENTRIES_FOUND, integer +FILES_FOUND, integer +RUN_MONITOR, bool +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_bias_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_bias_stats.txt new file mode 100644 index 000000000..610713581 --- /dev/null +++ b/jwql/database/monitor_table_definitions/nircam/nircam_bias_stats.txt @@ -0,0 +1,19 @@ +APERTURE, string +UNCAL_FILENAME, string +CAL_FILENAME, string +CAL_IMAGE, string +EXPSTART, string +MEAN, float +MEDIAN, float +STDDEV, float +COLLAPSED_ROWS, float_array_1d +COLLAPSED_COLUMNS, float_array_1d +AMP1_EVEN_MED, float +AMP1_ODD_MED, float +AMP2_EVEN_MED, float +AMP2_ODD_MED, float +AMP3_EVEN_MED, float +AMP3_ODD_MED, float +AMP4_EVEN_MED, float +AMP4_ODD_MED, float +ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/instrument_monitors/common_monitors/bias_monitor.py b/jwql/instrument_monitors/common_monitors/bias_monitor.py new file mode 100755 index 000000000..c5c59cecf --- /dev/null +++ b/jwql/instrument_monitors/common_monitors/bias_monitor.py @@ -0,0 +1,537 @@ +#! /usr/bin/env python + +"""This module contains code for the bias monitor, which monitors +the bias levels in dark exposures as well as the performance of +the pipeline superbias subtraction over time. + +For each instrument, the 0th group of full-frame dark exposures is +saved to a fits file. The median signal levels in these images are +recorded in the ``BiasStats`` database table for the +odd/even columns of each amp. + +Next, these images are run through the jwst pipeline up through the +reference pixel correction step. These calibrated images are saved +to a fits file as well as a png file for visual inspection of the +quality of the pipeline calibration. The median-collapsed row and +column values, as well as the sigma-clipped mean and standard +deviation of these images, are recorded in the +``BiasStats`` database table.
+ +Author +------ + - Ben Sunnquist + +Use +--- + This module can be used from the command line as such: + + :: + + python bias_monitor.py +""" + +import datetime +import logging +import os + +from astropy.io import fits +from astropy.stats import sigma_clipped_stats +from astropy.time import Time +from astropy.visualization import ZScaleInterval +from jwst.dq_init import DQInitStep +from jwst.group_scale import GroupScaleStep +from jwst.refpix import RefPixStep +from jwst.saturation import SaturationStep +from jwst.superbias import SuperBiasStep +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable +import numpy as np +from pysiaf import Siaf +from sqlalchemy import func +from sqlalchemy.sql.expression import and_ + +from jwql.database.database_interface import session +from jwql.database.database_interface import NIRCamBiasQueryHistory, NIRCamBiasStats +from jwql.instrument_monitors import pipeline_tools +from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks +from jwql.utils import instrument_properties +from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.logging_functions import log_info, log_fail +from jwql.utils.permissions import set_permissions +from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, initialize_instrument_monitor, update_monitor_table + +class Bias(): + """Class for executing the bias monitor. + + This class will search for new full-frame dark current files in + the file system for each instrument and will run the monitor on + these files. The monitor will extract the 0th group from the new + dark files and output the contents into a new file located in + a working directory. It will then perform statistical measurements + on these files before and after pipeline calibration in order to + monitor the bias levels over time as well as ensure the pipeline + superbias is sufficiently calibrating new data. Results are all + saved to database tables. + + Attributes + ---------- + output_dir : str + Path into which outputs will be placed + + data_dir : str + Path into which new dark files will be copied to be worked on + + query_start : float + MJD start date to use for querying MAST + + query_end : float + MJD end date to use for querying MAST + + instrument : str + Name of instrument used to collect the dark current data + + aperture : str + Name of the aperture used for the dark current (e.g. + ``NRCA1_FULL``) + """ + + def __init__(self): + """Initialize an instance of the ``Bias`` class.""" + + def collapse_image(self, image): + """Median-collapse the rows and columns of an image. + + Parameters + ---------- + image : numpy.ndarray + 2D array on which to calculate statistics + + Returns + ------- + collapsed_rows : numpy.ndarray + 1D array of the collapsed row values + + collapsed_columns : numpy.ndarray + 1D array of the collapsed column values + """ + + collapsed_rows = np.nanmedian(image, axis=1) + collapsed_columns = np.nanmedian(image, axis=0) + + return collapsed_rows, collapsed_columns + + def extract_zeroth_group(self, filename): + """Extracts the 0th group of a fits image and outputs it into + a new fits file. + + Parameters + ---------- + filename : str + The fits file from which the 0th group will be extracted. 
+ + Returns + ------- + output_filename : str + The full path to the output file + """ + + output_filename = os.path.join(self.data_dir, os.path.basename(filename).replace('.fits', '_0thgroup.fits')) + + # Write a new fits file containing the primary and science + # headers from the input file, as well as the 0th group + # data of the first integration + if not os.path.isfile(output_filename): + hdu = fits.open(filename) + new_hdu = fits.HDUList([hdu['PRIMARY'], hdu['SCI']]) + new_hdu['SCI'].data = hdu['SCI'].data[0:1, 0:1, :, :] + new_hdu.writeto(output_filename) + hdu.close() + new_hdu.close() + set_permissions(output_filename) + logging.info('\t{} created'.format(output_filename)) + else: + logging.info('\t{} already exists'.format(output_filename)) + + return output_filename + + def file_exists_in_database(self, filename): + """Checks if an entry for filename exists in the bias stats + database. + + Parameters + ---------- + filename : str + The full path to the uncal filename + + Returns + ------- + file_exists : bool + ``True`` if filename exists in the bias stats database + """ + + query = session.query(self.stats_table) + results = query.filter(self.stats_table.uncal_filename == filename).all() + + if len(results) != 0: + file_exists = True + else: + file_exists = False + + return file_exists + + def get_amp_medians(self, image, amps): + """Calculates the median in the input image for each amplifier + and for odd and even columns separately. + + Parameters + ---------- + image : numpy.ndarray + 2D array on which to calculate statistics + + amps : dict + Dictionary containing amp boundary coordinates (output from + ``amplifier_info`` function) + ``amps[key] = [(xmin, xmax, xstep), (ymin, ymax, ystep)]`` + + Returns + ------- + amp_medians : dict + Median values for each amp. Keys are amp numbers as + strings with even/odd designation (e.g. ``'1_even'``) + """ + + amp_medians = {} + + for key in amps: + x_start, x_end, x_step = amps[key][0] + y_start, y_end, y_step = amps[key][1] + + # Find median value of both even and odd columns for this amp + amp_med_even = np.nanmedian(image[y_start: y_end, x_start: x_end][:, 1::2]) + amp_medians['amp{}_even_med'.format(key)] = amp_med_even + amp_med_odd = np.nanmedian(image[y_start: y_end, x_start: x_end][:, ::2]) + amp_medians['amp{}_odd_med'.format(key)] = amp_med_odd + + return amp_medians + + def identify_tables(self): + """Determine which database tables to use for a run of the bias + monitor. + """ + + mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument] + self.query_table = eval('{}BiasQueryHistory'.format(mixed_case_name)) + self.stats_table = eval('{}BiasStats'.format(mixed_case_name)) + + def image_to_png(self, image, outname): + """Outputs an image array into a png file.
+ + Parameters + ---------- + image : numpy.ndarray + 2D image array + + outname : str + The name given to the output png file + + Returns + ------- + output_filename : str + The full path to the output png file + """ + + output_filename = os.path.join(self.data_dir, '{}.png'.format(outname)) + + if not os.path.isfile(output_filename): + # Get image scale limits + z = ZScaleInterval() + vmin, vmax = z.get_limits(image) + + # Plot the image + plt.figure(figsize=(12,12)) + ax = plt.gca() + im = ax.imshow(image, cmap='gray', origin='lower', vmin=vmin, vmax=vmax) + ax.set_title('{}'.format(outname)) + + # Make the colorbar + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.4) + cbar = plt.colorbar(im, cax=cax) + cbar.set_label('Signal [DN]') + + plt.savefig(output_filename, bbox_inches='tight', dpi=200) + set_permissions(output_filename) + logging.info('\t{} created'.format(output_filename)) + else: + logging.info('\t{} already exists'.format(output_filename)) + + return output_filename + + def most_recent_search(self): + """Query the query history database and return the information + on the most recent query for the given ``aperture`` where + the bias monitor was executed. + + Returns + ------- + query_result : float + Date (in MJD) of the ending range of the previous MAST query + where the bias monitor was run. + """ + + sub_query = session.query( + self.query_table.aperture, + func.max(self.query_table.end_time_mjd).label('maxdate') + ).group_by(self.query_table.aperture).subquery('t2') + + # Note that "self.query_table.run_monitor == True" below is + # intentional. Switching == to "is" results in an error in the query. + query = session.query(self.query_table).join( + sub_query, + and_( + self.query_table.aperture == self.aperture, + self.query_table.end_time_mjd == sub_query.c.maxdate, + self.query_table.run_monitor == True + ) + ).all() + + query_count = len(query) + if query_count == 0: + query_result = 57357.0 # a.k.a. Dec 1, 2015 == CV3 + logging.info(('\tNo query history for {}. Beginning search date will be set to {}.'.format(self.aperture, query_result))) + elif query_count > 1: + raise ValueError('More than one "most recent" query?') + else: + query_result = query[0].end_time_mjd + + return query_result + + def process(self, file_list): + """The main method for processing darks. See module docstrings + for further details. + + Parameters + ---------- + file_list : list + List of filenames (including full paths) to the dark current + files + """ + + for filename in file_list: + logging.info('\tWorking on file: {}'.format(filename)) + + # Skip processing if an entry for this file already exists in + # the bias stats database.
+ file_exists = self.file_exists_in_database(filename) + if file_exists: + logging.info('\t{} already exists in the bias database table.'.format(filename)) + continue + + # Get the exposure start time of this file + expstart = '{}T{}'.format(fits.getheader(filename, 0)['DATE-OBS'], fits.getheader(filename, 0)['TIME-OBS']) + + # Determine if the file needs group_scale in pipeline run + read_pattern = fits.getheader(filename, 0)['READPATT'] + if read_pattern not in pipeline_tools.GROUPSCALE_READOUT_PATTERNS: + group_scale = False + else: + group_scale = True + + # Run the file through the pipeline up through the refpix step + logging.info('\tRunning pipeline on {}'.format(filename)) + processed_file = self.run_early_pipeline(filename, odd_even_rows=False, odd_even_columns=True, use_side_ref_pixels=True, group_scale=group_scale) + logging.info('\tPipeline complete. Output: {}'.format(processed_file)) + + # Find amplifier boundaries so per-amp statistics can be calculated + _, amp_bounds = instrument_properties.amplifier_info(processed_file, omit_reference_pixels=True) + logging.info('\tAmplifier boundaries: {}'.format(amp_bounds)) + + # Get the uncalibrated 0th group data for this file + uncal_data = fits.getdata(filename, 'SCI')[0, 0, :, :].astype(float) + + # Calculate the uncal median values of each amplifier for odd/even columns + amp_medians = self.get_amp_medians(uncal_data, amp_bounds) + logging.info('\tCalculated uncalibrated image stats: {}'.format(amp_medians)) + + # Calculate image statistics and the collapsed row/column values + # in the calibrated image + cal_data = fits.getdata(processed_file, 'SCI')[0, 0, :, :] + dq = fits.getdata(processed_file, 'PIXELDQ') + mean, median, stddev = sigma_clipped_stats(cal_data[dq==0], sigma=3.0, maxiters=5) + logging.info('\tCalculated calibrated image stats: {:.3f} +/- {:.3f}'.format(mean, stddev)) + collapsed_rows, collapsed_columns = self.collapse_image(cal_data) + logging.info('\tCalculated collapsed row/column values of calibrated image.') + + # Save a png of the calibrated image for visual inspection + logging.info('\tCreating png of calibrated image') + output_png = self.image_to_png(cal_data, outname=os.path.basename(processed_file).replace('.fits','')) + + # Construct new entry for this file for the bias database table. + # Can't insert values with numpy.float32 datatypes into database + # so need to change the datatypes of these values. + bias_db_entry = {'aperture': self.aperture, + 'uncal_filename': filename, + 'cal_filename': processed_file, + 'cal_image': output_png, + 'expstart': expstart, + 'mean': float(mean), + 'median': float(median), + 'stddev': float(stddev), + 'collapsed_rows': collapsed_rows.astype(float), + 'collapsed_columns': collapsed_columns.astype(float), + 'entry_date': datetime.datetime.now() + } + for key in amp_medians.keys(): + bias_db_entry[key] = float(amp_medians[key]) + + # Add this new entry to the bias database table + self.stats_table.__table__.insert().execute(bias_db_entry) + logging.info('\tNew entry added to bias database table: {}'.format(bias_db_entry)) + + @log_fail + @log_info + def run(self): + """The main method. 
See module docstrings for further details.""" + + logging.info('Begin logging for bias_monitor') + + # Get the output directory and setup a directory to store the data + self.output_dir = os.path.join(get_config()['outputs'], 'bias_monitor') + ensure_dir_exists(os.path.join(self.output_dir, 'data')) + + # Use the current time as the end time for MAST query + self.query_end = Time.now().mjd + + # Loop over all instruments + for instrument in ['nircam']: + self.instrument = instrument + + # Identify which database tables to use + self.identify_tables() + + # Get a list of all possible full-frame apertures for this instrument + siaf = Siaf(self.instrument) + possible_apertures = [aperture for aperture in siaf.apertures if siaf[aperture].AperType=='FULLSCA'] + + for aperture in possible_apertures: + + logging.info('Working on aperture {} in {}'.format(aperture, instrument)) + self.aperture = aperture + + # Locate the record of the most recent MAST search; use this time + # (plus a 30 day buffer to catch any missing files from the previous + # run) as the start time in the new MAST search. + most_recent_search = self.most_recent_search() + self.query_start = most_recent_search - 30 + + # Query MAST for new dark files for this instrument/aperture + logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) + new_entries = mast_query_darks(instrument, aperture, self.query_start, self.query_end) + logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries))) + + # Set up a directory to store the data for this aperture + self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower())) + if len(new_entries) > 0: + ensure_dir_exists(self.data_dir) + + # Save the 0th group image from each new file in the output directory; + # some don't exist in JWQL filesystem. + new_files = [] + for file_entry in new_entries: + try: + filename = filesystem_path(file_entry['filename']) + uncal_filename = filename.replace('_dark', '_uncal') + if not os.path.isfile(uncal_filename): + logging.info('\t{} does not exist in JWQL filesystem, even though {} does'.format(uncal_filename, filename)) + else: + new_file = self.extract_zeroth_group(uncal_filename) + new_files.append(new_file) + except FileNotFoundError: + logging.info('\t{} does not exist in JWQL filesystem'.format(file_entry['filename'])) + + # Run the bias monitor on any new files + if len(new_files) > 0: + self.process(new_files) + monitor_run = True + else: + logging.info('\tBias monitor skipped. {} new dark files for {}, {}.'.format(len(new_files), instrument, aperture)) + monitor_run = False + + # Update the query history + new_entry = {'instrument': instrument, + 'aperture': aperture, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'entries_found': len(new_entries), + 'files_found': len(new_files), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + self.query_table.__table__.insert().execute(new_entry) + logging.info('\tUpdated the query history table') + + logging.info('Bias Monitor completed successfully.') + + def run_early_pipeline(self, filename, odd_even_rows=False, odd_even_columns=True, + use_side_ref_pixels=True, group_scale=False): + """Runs the early steps of the jwst pipeline (dq_init, saturation, + superbias, refpix) on uncalibrated files and outputs the result.
+ + Parameters + ---------- + filename : str + File on which to run the pipeline steps + + odd_even_rows : bool + Option to treat odd and even rows separately during refpix step + + odd_even_columns : bools + Option to treat odd and even columns separately during refpix step + + use_side_ref_pixels : bool + Option to perform the side refpix correction during refpix step + + group_scale : bool + Option to rescale pixel values to correct for instances where + on-board frame averaging did not result in the proper values + + Returns + ------- + output_filename : str + The full path to the calibrated file + """ + + output_filename = filename.replace('_uncal', '').replace('.fits', '_superbias_refpix.fits') + + if not os.path.isfile(output_filename): + # Run the group_scale and dq_init steps on the input file + if group_scale: + model = GroupScaleStep.call(filename) + model = DQInitStep.call(model) + else: + model = DQInitStep.call(filename) + + # Run the saturation and superbias steps + model = SaturationStep.call(model) + model = SuperBiasStep.call(model) + + # Run the refpix step and save the output + model = RefPixStep.call(model, odd_even_rows=odd_even_rows, odd_even_columns=odd_even_columns, use_side_ref_pixels=use_side_ref_pixels) + model.save(output_filename) + set_permissions(output_filename) + else: + logging.info('\t{} already exists'.format(output_filename)) + + return output_filename + + +if __name__ == '__main__': + + module = os.path.basename(__file__).strip('.py') + start_time, log_file = initialize_instrument_monitor(module) + + monitor = Bias() + monitor.run() + + update_monitor_table(module, start_time, log_file) diff --git a/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py index c2532edf1..54fa286b2 100755 --- a/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py +++ b/jwql/instrument_monitors/miri_monitors/data_trending/dt_cron_job.py @@ -19,9 +19,9 @@ ---------- ''' -import .utils.mnemonics as mn -import .utils.sql_interface as sql -from .utils.process_data import whole_day_routine, wheelpos_routine +import utils.mnemonics as mn +import utils.sql_interface as sql +from utils.process_data import whole_day_routine, wheelpos_routine from jwql.utils.engineering_database import query_single_mnemonic import pandas as pd diff --git a/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py b/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py index 01fe1ab74..f4932db5a 100644 --- a/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py +++ b/jwql/instrument_monitors/nirspec_monitors/data_trending/dt_cron_job.py @@ -24,9 +24,9 @@ Notes ----- ''' -import .utils.mnemonics as mn -import .utils.sql_interface as sql -from .utils.process_data import whole_day_routine, wheelpos_routine +import utils.mnemonics as mn +import utils.sql_interface as sql +from utils.process_data import whole_day_routine, wheelpos_routine from jwql.utils.engineering_database import query_single_mnemonic import pandas as pd diff --git a/jwql/jwql_monitors/monitor_filesystem.py b/jwql/jwql_monitors/monitor_filesystem.py old mode 100644 new mode 100755 index 6548b8633..e394c5f70 --- a/jwql/jwql_monitors/monitor_filesystem.py +++ b/jwql/jwql_monitors/monitor_filesystem.py @@ -9,64 +9,42 @@ ------- - Misty Cracraft + - Sara Ogaz + - Matthew Bourque Use --- - This module can be executed from the command line: + This module is intended to be executed from the command 
line: :: python monitor_filesystem.py - Alternatively, it can be called from scripts with the following - import statements: - - :: - - from monitor_filesystem import filesystem_monitor - from monitor_filesystem import plot_system_stats - - - Required arguments (in a ``config.json`` file): - ``filepath`` - The path to the input file needs to be in a - ``config.json`` file in the ``utils`` directory - ``outputs`` - The path to the output files needs to be in a - ``config.json`` file in the ``utils`` directory. - - Required arguments for plotting: - ``inputfile`` - The name of the file to save all of the system - statistics to - ``filebytype`` - The name of the file to save stats on fits type - files to - + The user must have a ``config.json`` file in the ``utils`` + directory with the following keys: + - ``filesystem`` - The path to the filesystem + - ``outputs`` - The path to where the output plots will be + written Dependencies ------------ The user must have a configuration file named ``config.json`` placed in the ``utils`` directory. - -Notes ------ - - The ``monitor_filesystem`` function queries the filesystem, - calculates the statistics and saves the output file(s) in the - directory specified in the ``config.json`` file. - - The ``plot_system_stats`` function reads in the two specified files - of statistics and plots the figures to an html output page as well - as saving them to an output html file. """ from collections import defaultdict import datetime +import itertools import logging import os import subprocess -import json -from astropy.utils.misc import JsonCustomEncoder +from bokeh.embed import components +from bokeh.layouts import gridplot +from bokeh.palettes import Category20_20 as palette +from bokeh.plotting import figure, output_file, save from jwql.database.database_interface import engine from jwql.database.database_interface import session @@ -75,6 +53,7 @@ from jwql.database.database_interface import CentralStore from jwql.utils.logging_functions import configure_logging, log_info, log_fail from jwql.utils.permissions import set_permissions +from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import filename_parser from jwql.utils.utils import get_config @@ -277,36 +256,18 @@ def initialize_results_dicts(): @log_fail @log_info def monitor_filesystem(): - """Tabulates the inventory of the JWST filesystem, saving - statistics to files, and generates plots. + """ + Tabulates the inventory of the JWST filesystem, saving statistics + to database tables, and generating plots.
""" - # Begin logging logging.info('Beginning filesystem monitoring.') # Initialize dictionaries for database input general_results_dict, instrument_results_dict, central_storage_dict = initialize_results_dicts() - # set up dictionaries for output - results_dict = defaultdict(int) - size_dict = defaultdict(float) - # Walk through all directories recursively and count files - logging.info('Searching filesystem...') - for dirpath, dirs, files in os.walk(filesystem): - results_dict['file_count'] += len(files) # find number of all files - for filename in files: - file_path = os.path.join(dirpath, filename) - if filename.endswith(".fits"): # find total number of fits files - results_dict['fits_files'] += 1 - size_dict['size_fits'] += os.path.getsize(file_path) - suffix = filename_parser(filename)['suffix'] - results_dict[suffix] += 1 - size_dict[suffix] += os.path.getsize(file_path) - detector = filename_parser(filename)['detector'] - instrument = detector[0:3] # first three characters of detector specify instrument - results_dict[instrument] += 1 - size_dict[instrument] += os.path.getsize(file_path) - logging.info('{} files found in filesystem'.format(results_dict['fits_files'])) + # Walk through filesystem recursively to gather statistics + general_results_dict, instrument_results_dict = gather_statistics(general_results_dict, instrument_results_dict) # Get df style stats on file system general_results_dict = get_global_filesystem_stats(general_results_dict) @@ -366,7 +327,7 @@ def plot_by_filetype(plot_type, instrument): # Query for counts results = session.query(FilesystemInstrument.date, getattr(FilesystemInstrument, plot_type))\ - .filter(FilesystemInstrument.filetype == filetype) + .filter(FilesystemInstrument.filetype == filetype) if instrument == 'all': results = results.all() @@ -490,7 +451,7 @@ def plot_filesystem_stats(): plot_list.append(plot_by_filetype('size', instrument)) # Create a layout with a grid pattern - grid_chunks = [plot_list[i:i + 2] for i in range(0, len(plot_list), 2)] + grid_chunks = [plot_list[i:i+2] for i in range(0, len(plot_list), 2)] grid = gridplot(grid_chunks) # Save all of the plots in one file @@ -598,4 +559,4 @@ def update_database(general_results_dict, instrument_results_dict, central_stora module = os.path.basename(__file__).strip('.py') configure_logging(module) - monitor_filesystem() + monitor_filesystem() \ No newline at end of file diff --git a/jwql/jwql_monitors/monitor_mast.py b/jwql/jwql_monitors/monitor_mast.py old mode 100644 new mode 100755 index 5883bdf76..825b9e1c5 --- a/jwql/jwql_monitors/monitor_mast.py +++ b/jwql/jwql_monitors/monitor_mast.py @@ -1,3 +1,5 @@ +#! /usr/bin/env python + """This module is home to a suite of MAST queries that gather bulk properties of available JWST data for JWQL. 
@@ -20,11 +22,15 @@ import os from astroquery.mast import Mast +from bokeh.embed import components +from bokeh.io import save, output_file import pandas as pd from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_DATAPRODUCTS from jwql.utils.logging_functions import configure_logging, log_info, log_fail +from jwql.utils.permissions import set_permissions from jwql.utils.utils import get_config +from jwql.utils.plotting import bar_chart def instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS, @@ -140,7 +146,7 @@ def instrument_keywords(instrument, caom=False): def jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, dataproducts=['image', 'spectrum', 'cube'], - caom=False): + caom=False, plot=False): """Gather a full inventory of all JWST data in each instrument service by instrument/dtype @@ -152,6 +158,8 @@ def jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, The types of dataproducts to count caom: bool Query CAOM service + plot: bool + Return a bar chart of the data Returns ------- @@ -183,6 +191,48 @@ def jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, all_cols = ['instrument'] + dataproducts + ['total'] table = pd.DataFrame(inventory, columns=all_cols) + # Plot it + if plot: + # Determine plot location and names + output_dir = get_config()['outputs'] + + if caom: + output_filename = 'database_monitor_caom' + else: + output_filename = 'database_monitor_jwst' + + # Make the plot + plt = bar_chart(table, 'instrument', dataproducts, + title="JWST Inventory") + + # Save the plot as full html + html_filename = output_filename + '.html' + outfile = os.path.join(output_dir, 'monitor_mast', html_filename) + output_file(outfile) + save(plt) + set_permissions(outfile) + + logging.info('Saved Bokeh plots as HTML file: {}'.format(html_filename)) + + # Save the plot as components + plt.sizing_mode = 'stretch_both' + script, div = components(plt) + + div_outfile = os.path.join(output_dir, 'monitor_mast', output_filename + "_component.html") + with open(div_outfile, 'w') as f: + f.write(div) + f.close() + set_permissions(div_outfile) + + script_outfile = os.path.join(output_dir, 'monitor_mast', output_filename + "_component.js") + with open(script_outfile, 'w') as f: + f.write(script) + f.close() + set_permissions(script_outfile) + + logging.info('Saved Bokeh components files: {}_component.html and {}_component.js'.format( + output_filename, output_filename)) + # Melt the table table = pd.melt(table, id_vars=['instrument'], value_vars=dataproducts, @@ -202,20 +252,14 @@ def monitor_mast(): outputs_dir = os.path.join(get_config()['outputs'], 'monitor_mast') # Perform inventory of the JWST service - jwst_df, kw = jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, - dataproducts=['image', 'spectrum', 'cube'], - caom=False) - - with open(os.path.join(outputs_dir, 'database_monitor_jwst.json')) as f: - f.write(jwst_df.to_json(orient='records')) + jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, + dataproducts=['image', 'spectrum', 'cube'], + caom=False, plot=True) # Perform inventory of the CAOM service - caom_df, kw = jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, - dataproducts=['image', 'spectrum', 'cube'], - caom=True) - - with open(os.path.join(outputs_dir, 'database_monitor_caom.json')) as f: - f.write(caom_df.to_json(orient='records')) + jwst_inventory(instruments=JWST_INSTRUMENT_NAMES, + dataproducts=['image', 'spectrum', 'cube'], + caom=True, plot=True) if __name__ == '__main__': diff --git a/jwql/tests/test_database_interface.py b/jwql/tests/test_database_interface.py index 
898af27fb..e35e18608 100755 --- a/jwql/tests/test_database_interface.py +++ b/jwql/tests/test_database_interface.py @@ -25,7 +25,7 @@ import string from jwql.database import database_interface as di -from jwql.utils.constants import ANOMALIES +from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT from jwql.utils.utils import get_config # Determine if tests are being run on jenkins @@ -61,14 +61,14 @@ def test_anomaly_orm_factory(): creates an ORM and contains the appropriate columns""" test_table_name = 'test_anomaly_table' - TestAnomalyTable = di.anomaly_orm_factory('test_anomaly_table') + TestAnomalyTable = di.anomaly_orm_factory(test_table_name) table_attributes = TestAnomalyTable.__dict__.keys() assert str(TestAnomalyTable) == "<class 'jwql.database.database_interface.{}'>"\ .format(test_table_name) - for anomaly in ANOMALIES: - assert anomaly in table_attributes + for item in ['id', 'rootname', 'flag_date', 'user']: + assert item in table_attributes @pytest.mark.skipif(ON_JENKINS, reason='Requires access to development database server.') @@ -79,15 +79,15 @@ def test_anomaly_records(): random_rootname = ''.join(random.SystemRandom().choice(string.ascii_lowercase + \ string.ascii_uppercase + \ string.digits) for _ in range(10)) - di.session.add(di.Anomaly(rootname=random_rootname, + di.session.add(di.FGSAnomaly(rootname=random_rootname, flag_date=datetime.datetime.today(), user='test', ghost=True)) di.session.commit() # Test the ghosts column - ghosts = di.session.query(di.Anomaly)\ - .filter(di.Anomaly.rootname == random_rootname)\ - .filter(di.Anomaly.ghost == "True") + ghosts = di.session.query(di.FGSAnomaly)\ + .filter(di.FGSAnomaly.rootname == random_rootname)\ + .filter(di.FGSAnomaly.ghost == "True") assert ghosts.data_frame.iloc[0]['ghost'] == True diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index b8f0f209c..4d81fba54 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -47,16 +47,43 @@ '3': [(2, 1032, 4), (0, 1024, 1)], '4': [(3, 1032, 4), (0, 1024, 1)]}} - -# Defines the possible anomalies to flag through the web app -ANOMALIES = ['snowball', 'cosmic_ray_shower', 'crosstalk', 'data_transfer_error', 'diffraction_spike', - 'excessive_saturation', 'ghost', 'guidestar_failure', 'persistence', 'satellite_trail', 'other'] +ANOMALIES_PER_INSTRUMENT = { + # anomalies affecting all instruments: + 'cosmic_ray_shower': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], + 'diffraction_spike': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], + 'excessive_saturation': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], + 'guidestar_failure': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], + 'persistence': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec'], + # anomalies affecting multiple instruments: + 'crosstalk': ['fgs', 'nircam', 'niriss', 'nirspec'], + 'data_transfer_error': ['fgs', 'nircam', 'niriss', 'nirspec'], + 'ghost': ['fgs', 'nircam', 'niriss', 'nirspec'], + 'snowball': ['fgs', 'nircam', 'niriss', 'nirspec'], + # instrument-specific anomalies: + 'column_pull_up': ['miri'], + 'dominant_msa_leakage': ['nirspec'], + 'dragons_breath': ['nircam'], + 'glow': ['miri'], + 'internal_reflection': ['miri'], + 'optical_short': ['nirspec'], # Only for MOS observations + 'row_pull_down': ['miri'], + # additional anomalies: + 'other': ['fgs', 'miri', 'nircam', 'niriss', 'nirspec']} # Defines the possible anomalies (with rendered name) to flag through the web app -ANOMALY_CHOICES = [(anomaly, inflection.titleize(anomaly)) for anomaly in ANOMALIES] +ANOMALY_CHOICES = [(anomaly, inflection.titleize(anomaly)) for 
anomaly in ANOMALIES_PER_INSTRUMENT] FOUR_AMP_SUBARRAYS = ['WFSS128R', 'WFSS64R', 'WFSS128C', 'WFSS64C'] +# Names of full-frame apertures for all instruments +FULL_FRAME_APERTURES = {'NIRCAM': ['NRCA1_FULL', 'NRCA2_FULL', 'NRCA3_FULL', 'NRCA4_FULL', + 'NRCA5_FULL', 'NRCB1_FULL', 'NRCB2_FULL', 'NRCB3_FULL', + 'NRCB4_FULL', 'NRCB5_FULL'], + 'NIRISS': ['NIS_CEN'], + 'NIRSPEC': ['NRS1_FULL', 'NRS2_FULL'], + 'MIRI': ['MIRIM_FULL'] + } + # Possible suffix types for nominal files GENERIC_SUFFIX_TYPES = ['uncal', 'cal', 'rateints', 'rate', 'trapsfilled', 'i2d', 'x1dints', 'x1d', 's2d', 's3d', 'dark', 'crfints', @@ -112,7 +139,7 @@ 'nircam': [('Bias Monitor', '#'), ('Readnoise Monitor', '#'), ('Gain Level Monitor', '#'), - ('Mean Dark Current Rate Monitor', '#'), + ('Mean Dark Current Rate Monitor', '/nircam/dark_monitor'), ('Photometric Stability Monitor', '#')], 'niriss': [('Bad Pixel Monitor', '#'), ('Readnoise Monitor', '#'), diff --git a/jwql/utils/instrument_properties.py b/jwql/utils/instrument_properties.py index 470724081..083bb6dae 100644 --- a/jwql/utils/instrument_properties.py +++ b/jwql/utils/instrument_properties.py @@ -124,7 +124,10 @@ def amplifier_info(filename, omit_reference_pixels=True): try: data_quality = hdu['DQ'].data except KeyError: - raise KeyError('DQ extension not found.') + try: + data_quality = hdu['PIXELDQ'].data + except KeyError: + raise KeyError('DQ extension not found.') # Reference pixels should be flagged in the DQ array with the # REFERENCE_PIXEL flag. Find the science pixels by looping for diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py index 1f38bb851..b929c83c0 100644 --- a/jwql/utils/logging_functions.py +++ b/jwql/utils/logging_functions.py @@ -10,8 +10,9 @@ ------- - Catherine Martlin - - Alex Viana (WFC3 QL Version) + - Alex Viana (wfc3ql Version) - Matthew Bourque + - Jason Neal Use --- @@ -59,6 +60,7 @@ def my_main_function(): import os import pwd import socket +import subprocess import sys import time import traceback @@ -236,7 +238,10 @@ def wrapped(*args, **kwargs): except (ImportError, AttributeError) as err: logging.warning(err) - logging.info('') + environment = subprocess.check_output(['conda', 'env', 'export'], universal_newlines=True) + logging.info('Environment:') + for line in environment.split('\n'): + logging.info(line) # Call the function and time it t1_cpu = time.clock() diff --git a/jwql/website/apps/jwql/bokeh_containers.py b/jwql/website/apps/jwql/bokeh_containers.py index dfcb3ffe1..b46c7a230 100644 --- a/jwql/website/apps/jwql/bokeh_containers.py +++ b/jwql/website/apps/jwql/bokeh_containers.py @@ -1,8 +1,8 @@ -"""Various functions to generate Bokeh objects to be used by the ``views`` of -the ``jwql`` app. +"""Various functions to generate Bokeh objects to be used by the +``views`` of the ``jwql`` app. -This module contains several functions that instantiate BokehTemplate objects -to be rendered in ``views.py`` for use by the ``jwql`` app. +This module contains several functions that instantiate +``BokehTemplate`` objects to be rendered in ``views.py``. 
Authors ------- @@ -16,17 +16,18 @@ used by ``views.py``, e.g.: :: - from .data_containers import get_mast_monitor + from .bokeh_containers import dark_monitor_tabs """ -import glob import os -from astropy.io import fits -import numpy as np +from bokeh.embed import components +from bokeh.layouts import layout +from bokeh.models.widgets import Tabs, Panel -from jwql.preview_image.preview_image import PreviewImage -from jwql.utils.utils import get_config, filename_parser, MONITORS +from . import monitor_pages +from jwql.utils.constants import FULL_FRAME_APERTURES +from jwql.utils.utils import get_config __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem') @@ -34,375 +35,111 @@ REPO_DIR = os.path.split(PACKAGE_DIR)[0] -def get_acknowledgements(): - """Returns a list of individuals who are acknowledged on the - ``about`` page. - - The list is generated by reading in the contents of the ``jwql`` - ``README`` file. In this way, the website will automatically - update with updates to the ``README`` file. - - Returns - ------- - acknowledgements : list - A list of individuals to be acknowledged. - """ - - # Locate README file - readme_file = os.path.join(REPO_DIR, 'README.md') - - # Get contents of the README file - with open(readme_file, 'r') as f: - data = f.readlines() - - # Find where the acknowledgements start - for i, line in enumerate(data): - if 'Acknowledgments' in line: - index = i - - # Parse out the list of individuals - acknowledgements = data[index + 1:] - acknowledgements = [item.strip().replace('- ', '').split(' [@')[0].strip() for item in acknowledgements] - - return acknowledgements - - -def get_dashboard_components(): - """Build and return a dictionary containing components needed for - the dashboard. - - Returns - ------- - dashboard_components : dict - A dictionary containing components needed for the dashboard. - """ - - output_dir = get_config()['outputs'] - name_dict = {'': '', - 'monitor_mast': 'Database Monitor', - 'database_monitor_jwst': 'JWST', - 'database_monitor_caom': 'JWST (CAOM)', - 'monitor_filesystem': 'Filesystem Monitor', - 'filecount_type': 'Total File Counts by Type', - 'size_type': 'Total File Sizes by Type', - 'filecount': 'Total File Counts', - 'system_stats': 'System Statistics'} - - dashboard_components = {} - for dir_name, subdir_list, file_list in os.walk(output_dir): - monitor_name = os.path.basename(dir_name) - dashboard_components[name_dict[monitor_name]] = {} - for fname in file_list: - if 'component' in fname: - full_fname = '{}/{}'.format(monitor_name, fname) - plot_name = fname.split('_component')[0] - - # Get the div - html_file = full_fname.split('.')[0] + '.html' - with open(os.path.join(output_dir, html_file)) as f: - div = f.read() - - # Get the script - js_file = full_fname.split('.')[0] + '.js' - with open(os.path.join(output_dir, js_file)) as f: - script = f.read() - dashboard_components[name_dict[monitor_name]][name_dict[plot_name]] = [div, script] - - return dashboard_components - - -def get_filenames_by_instrument(instrument): - """Returns a list of paths to files that match the given - ``instrument``. +def dark_monitor_tabs(instrument): + """Creates the various tabs of the dark monitor results page. Parameters ---------- instrument : str - The instrument of interest (e.g. `FGS`). - - Returns - ------- - filepaths : list - A list of full paths to the files that match the given - instrument. 
- """ - - # Query files from MAST database - # filepaths, filenames = DatabaseConnection('MAST', instrument=instrument).\ - # get_files_for_instrument(instrument) - - # Find all of the matching files in filesytem - # (TEMPORARY WHILE THE MAST STUFF IS BEING WORKED OUT) - instrument_match = {'FGS': 'guider', - 'MIRI': 'mir', - 'NIRCam': 'nrc', - 'NIRISS': 'nis', - 'NIRSpec': 'nrs'} - search_filepath = os.path.join(FILESYSTEM_DIR, '*', '*.fits') - filepaths = [f for f in glob.glob(search_filepath) if instrument_match[instrument] in f] - - return filepaths - - -def get_header_info(file): - """Return the header information for a given ``file``. - - Parameters - ---------- - file : str - The name of the file of interest. - - Returns - ------- - header : str - The primary FITS header for the given ``file``. - """ - - dirname = file[:7] - fits_filepath = os.path.join(FILESYSTEM_DIR, dirname, file) - header = fits.getheader(fits_filepath, ext=0).tostring(sep='\n') - - return header - - -def get_image_info(file_root, rewrite): - """Build and return a dictionary containing information for a given - ``file_root``. - - Parameters - ---------- - file_root : str - The rootname of the file of interest. - rewrite : bool - ``True`` if the corresponding JPEG needs to be rewritten, - ``False`` if not. - - Returns - ------- - image_info : dict - A dictionary containing various information for the given - ``file_root``. - """ - - # Initialize dictionary to store information - image_info = {} - image_info['all_jpegs'] = [] - image_info['suffixes'] = [] - image_info['num_ints'] = {} - - preview_dir = os.path.join(get_config()['jwql_dir'], 'preview_images') - - # Find all of the matching files - dirname = file_root[:7] - search_filepath = os.path.join(FILESYSTEM_DIR, dirname, file_root + '*.fits') - image_info['all_files'] = glob.glob(search_filepath) - - for file in image_info['all_files']: - - # Get suffix information - suffix = os.path.basename(file).split('_')[4].split('.')[0] - image_info['suffixes'].append(suffix) - - # Determine JPEG file location - jpg_dir = os.path.join(preview_dir, dirname) - jpg_filename = os.path.basename(os.path.splitext(file)[0] + '_integ0.jpg') - jpg_filepath = os.path.join(jpg_dir, jpg_filename) - - # Check that a jpg does not already exist. If it does (and rewrite=False), - # just call the existing jpg file - if os.path.exists(jpg_filepath) and not rewrite: - pass - - # If it doesn't, make it using the preview_image module - else: - if not os.path.exists(jpg_dir): - os.makedirs(jpg_dir) - im = PreviewImage(file, 'SCI') - im.output_directory = jpg_dir - im.make_image() - - # Record how many integrations there are per filetype - search_jpgs = os.path.join(preview_dir, dirname, file_root + '_{}_integ*.jpg'.format(suffix)) - num_jpgs = len(glob.glob(search_jpgs)) - image_info['num_ints'][suffix] = num_jpgs - - image_info['all_jpegs'].append(jpg_filepath) - - return image_info - - -def get_proposal_info(filepaths): - """Builds and returns a dictionary containing various information - about the proposal(s) that correspond to the given ``filepaths``. - - The information returned contains such things as the number of - proposals, the paths to the corresponding thumbnails, and the total - number of files. - - Parameters - ---------- - filepaths : list - A list of full paths to files of interest. - - Returns - ------- - proposal_info : dict - A dictionary containing various information about the - proposal(s) and files corresponding to the given ``filepaths``. 
- """ - - proposals = list(set([f.split('/')[-1][2:7] for f in filepaths])) - thumbnail_dir = os.path.join(get_config()['jwql_dir'], 'thumbnails') - thumbnail_paths = [] - num_files = [] - for proposal in proposals: - thumbnail_search_filepath = os.path.join(thumbnail_dir, 'jw{}'.format(proposal), 'jw{}*rate*.thumb'.format(proposal)) - thumbnail = glob.glob(thumbnail_search_filepath) - if len(thumbnail) > 0: - thumbnail = thumbnail[0] - thumbnail = '/'.join(thumbnail.split('/')[-2:]) - thumbnail_paths.append(thumbnail) - - fits_search_filepath = os.path.join(FILESYSTEM_DIR, 'jw{}'.format(proposal), 'jw{}*.fits'.format(proposal)) - num_files.append(len(glob.glob(fits_search_filepath))) - - # Put the various information into a dictionary of results - proposal_info = {} - proposal_info['num_proposals'] = len(proposals) - proposal_info['proposals'] = proposals - proposal_info['thumbnail_paths'] = thumbnail_paths - proposal_info['num_files'] = num_files - - return proposal_info - - -def split_files(file_list, page_type): - """JUST FOR USE DURING DEVELOPMENT WITH FILESYSTEM - - Splits the files in the filesystem into "unlooked" and "archived", - with the "unlooked" images being the most recent 10% of files. - """ - exp_times = [] - for file in file_list: - hdr = fits.getheader(file, ext=0) - exp_start = hdr['EXPSTART'] - exp_times.append(exp_start) - - exp_times_sorted = sorted(exp_times) - i_cutoff = int(len(exp_times) * .1) - t_cutoff = exp_times_sorted[i_cutoff] - - mask_unlooked = np.array([t < t_cutoff for t in exp_times]) - - if page_type == 'unlooked': - print('ONLY RETURNING {} "UNLOOKED" FILES OF {} ORIGINAL FILES'.format(len([m for m in mask_unlooked if m]), len(file_list))) - return [f for i, f in enumerate(file_list) if mask_unlooked[i]] - elif page_type == 'archive': - print('ONLY RETURNING {} "ARCHIVED" FILES OF {} ORIGINAL FILES'.format(len([m for m in mask_unlooked if not m]), len(file_list))) - return [f for i, f in enumerate(file_list) if not mask_unlooked[i]] - - -def thumbnails(inst, proposal=None): - """Generate a page showing thumbnail images corresponding to - activities, from a given ``proposal`` - - Parameters - ---------- - inst : str - Name of JWST instrument - proposal : str (optional) - Number of APT proposal to filter + The JWST instrument of interest (e.g. ``nircam``). Returns ------- - dict_to_render : dict - Dictionary of parameters for the thumbnails + div : str + The HTML div to render dark monitor plots + script : str + The JS script to render dark monitor plots """ - filepaths = get_filenames_by_instrument(inst) - - # JUST FOR DEVELOPMENT - # Split files into "archived" and "unlooked" - if proposal is not None: - page_type = 'archive' - else: - page_type = 'unlooked' - filepaths = split_files(filepaths, page_type) - - # Determine file ID (everything except suffix) - # e.g. jw00327001001_02101_00002_nrca1 - full_ids = set(['_'.join(f.split('/')[-1].split('_')[:-1]) for f in filepaths]) - - # If the proposal is specified (i.e. 
-    # an archive page), only collect data for given proposal
-    if proposal is not None:
-        full_ids = [f for f in full_ids if f[2:7] == proposal]
-
-    # Group files by ID
-    file_data = []
-    detectors = []
-    proposals = []
-    for i, file_id in enumerate(full_ids):
-        suffixes = []
-        count = 0
-        for file in filepaths:
-            if '_'.join(file.split('/')[-1].split('_')[:-1]) == file_id:
-                count += 1
-
-                # Parse filename
-                try:
-                    file_dict = filename_parser(file)
-                except ValueError:
-                    # Temporary workaround for noncompliant files in filesystem
-                    file_dict = {'activity': file_id[17:19],
-                                 'detector': file_id[26:],
-                                 'exposure_id': file_id[20:25],
-                                 'observation': file_id[7:10],
-                                 'parallel_seq_id': file_id[16],
-                                 'program_id': file_id[2:7],
-                                 'suffix': file.split('/')[-1].split('.')[0].split('_')[-1],
-                                 'visit': file_id[10:13],
-                                 'visit_group': file_id[14:16]}
-
-                # Determine suffix
-                suffix = file_dict['suffix']
-                suffixes.append(suffix)
-
-                hdr = fits.getheader(file, ext=0)
-                exp_start = hdr['EXPSTART']
-
-        suffixes = list(set(suffixes))
-
-        # Add parameters to sort by
-        if file_dict['detector'] not in detectors and \
-           not file_dict['detector'].startswith('f'):
-            detectors.append(file_dict['detector'])
-        if file_dict['program_id'] not in proposals:
-            proposals.append(file_dict['program_id'])
-
-        file_dict['exp_start'] = exp_start
-        file_dict['suffixes'] = suffixes
-        file_dict['file_count'] = count
-        file_dict['file_root'] = file_id
-
-        file_data.append(file_dict)
-    file_indices = np.arange(len(file_data))
-
-    # Extract information for sorting with dropdown menus
-    # (Don't include the proposal as a sorting parameter if the
-    # proposal has already been specified)
-    if proposal is not None:
-        dropdown_menus = {'detector': detectors}
-    else:
-        dropdown_menus = {'detector': detectors,
-                          'proposal': proposals}
-
-    dict_to_render = {'inst': inst,
-                      'all_filenames': [os.path.basename(f) for f in filepaths],
-                      'tools': MONITORS,
-                      'thumbnail_zipped_list': zip(file_indices, file_data),
-                      'dropdown_menus': dropdown_menus,
-                      'n_fileids': len(file_data),
-                      'prop': proposal}
-
-    return dict_to_render
+    full_apertures = FULL_FRAME_APERTURES[instrument.upper()]
+
+    templates_all_apertures = {}
+    for aperture in full_apertures:
+
+        # Start with default values for instrument and aperture because
+        # BokehTemplate's __init__ method does not allow input arguments
+        monitor_template = monitor_pages.DarkMonitor()
+
+        # Set instrument and aperture using DarkMonitor's setter
+        monitor_template.aperture_info = (instrument, aperture)
+        templates_all_apertures[aperture] = monitor_template
+
+    # Histogram tab
+    histograms_all_apertures = []
+    for aperture_name, template in templates_all_apertures.items():
+        histogram = template.refs["dark_full_histogram_figure"]
+        histogram.sizing_mode = "scale_width"  # Make sure the sizing is adjustable
+        histograms_all_apertures.append(histogram)
+
+    if instrument == 'NIRCam':
+        a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = histograms_all_apertures
+        histogram_layout = layout(
+            [a2, a4, b3, b1],
+            [a1, a3, b4, b2],
+            [a5, b5]
+        )
+
+    elif instrument in ['NIRISS', 'MIRI']:
+        single_aperture = histograms_all_apertures[0]
+        histogram_layout = layout(
+            [single_aperture]
+        )
+
+    elif instrument == 'NIRSpec':
+        d1, d2 = histograms_all_apertures
+        histogram_layout = layout(
+            [d1, d2]
+        )
+
+    histogram_layout.sizing_mode = "scale_width"  # Make sure the sizing is adjustable
+    histogram_tab = Panel(child=histogram_layout, title="Histogram")
+
+    # Current v. time tab
+    lines_all_apertures = []
+    for aperture_name, template in templates_all_apertures.items():
+        line = template.refs["dark_current_time_figure"]
+        line.title.align = "center"
+        line.title.text_font_size = "20px"
+        line.sizing_mode = "scale_width"  # Make sure the sizing is adjustable
+        lines_all_apertures.append(line)
+
+    if instrument == 'NIRCam':
+        a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = lines_all_apertures
+        line_layout = layout(
+            [a2, a4, b3, b1],
+            [a1, a3, b4, b2],
+            [a5, b5]
+        )
+
+    elif instrument in ['NIRISS', 'MIRI']:
+        single_aperture = lines_all_apertures[0]
+        line_layout = layout(
+            [single_aperture]
+        )
+
+    elif instrument == 'NIRSpec':
+        d1, d2 = lines_all_apertures
+        line_layout = layout(
+            [d1, d2]
+        )
+
+    line_layout.sizing_mode = "scale_width"  # Make sure the sizing is adjustable
+    line_tab = Panel(child=line_layout, title="Trending")
+
+    # Mean dark image tab
+
+    # The three lines below work for displaying a single image
+    image = templates_all_apertures['NRCA3_FULL'].refs["mean_dark_image_figure"]
+    image.sizing_mode = "scale_width"  # Make sure the sizing is adjustable
+    image_layout = layout(image)
+    image.height = 250  # Not working
+    image_layout.sizing_mode = "scale_width"
+    image_tab = Panel(child=image_layout, title="Mean Dark Image")
+
+    # Build tabs
+    tabs = Tabs(tabs=[histogram_tab, line_tab, image_tab])
+
+    # Return tab HTML and JavaScript to web app
+    script, div = components(tabs)
+
+    return div, script
diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py
index 45adecc47..a1d74bf3b 100644
--- a/jwql/website/apps/jwql/data_containers.py
+++ b/jwql/website/apps/jwql/data_containers.py
@@ -28,9 +28,12 @@
 import tempfile
 
 from astropy.io import fits
+from astropy.table import Table
 from astropy.time import Time
 from django.conf import settings
 import numpy as np
+from operator import itemgetter
+
 
 # astroquery.mast import that depends on value of auth_mast
 # this import has to be made before any other import of astroquery.mast
@@ -45,6 +48,7 @@
 from jwedb.edb_interface import mnemonic_inventory
 
 from jwql.database import database_interface as di
+from jwql.database.database_interface import load_connection
 from jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info
 from jwql.instrument_monitors.miri_monitors.data_trending import dashboard as miri_dash
 from jwql.instrument_monitors.nirspec_monitors.data_trending import dashboard as nirspec_dash
@@ -147,7 +151,7 @@ def get_all_proposals():
     return proposals
 
 
-def get_current_flagged_anomalies(rootname):
+def get_current_flagged_anomalies(rootname, instrument):
     """Return a list of currently flagged anomalies for the given
     ``rootname``
 
@@ -164,7 +168,13 @@
         (e.g. ``['snowball', 'crosstalk']``)
     """
 
-    query = di.session.query(di.Anomaly).filter(di.Anomaly.rootname == rootname).order_by(di.Anomaly.flag_date.desc()).limit(1)
+    table_dict = {}
+    for instrument_name in JWST_INSTRUMENT_NAMES_MIXEDCASE:
+        table_dict[instrument_name.lower()] = getattr(di, '{}Anomaly'.format(JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument_name]))
+
+    table = table_dict[instrument.lower()]
+    query = di.session.query(table).filter(table.rootname == rootname).order_by(table.flag_date.desc()).limit(1)
+
     all_records = query.data_frame
     if not all_records.empty:
         current_anomalies = [col for col, val in np.sum(all_records, axis=0).items() if val]
@@ -494,7 +504,7 @@ def get_filenames_by_rootname(rootname):
 
 
 def get_header_info(filename):
-    """Return the header information for a given ``file``.
+    """Return the header information for a given ``filename``.
 
     Parameters
     ----------
@@ -508,11 +518,47 @@
         The primary FITS header for the given ``file``.
     """
 
-    dirname = filename[:7]
-    fits_filepath = os.path.join(FILESYSTEM_DIR, dirname, filename)
-    header = fits.getheader(fits_filepath, ext=0).tostring(sep='\n')
+    # Initialize dictionary to store header information
+    header_info = {}
+
+    # Open the file
+    fits_filepath = os.path.join(FILESYSTEM_DIR, filename[:7], '{}.fits'.format(filename))
+    hdulist = fits.open(fits_filepath)
 
-    return header
+    # Extract header information from file
+    for ext in range(0, len(hdulist)):
+
+        # Initialize dictionary to store header information for particular extension
+        header_info[ext] = {}
+
+        # Get header
+        header = fits.getheader(fits_filepath, ext=ext)
+
+        # Determine the extension name
+        if ext == 0:
+            header_info[ext]['EXTNAME'] = 'PRIMARY'
+        else:
+            header_info[ext]['EXTNAME'] = header['EXTNAME']
+
+        # Get list of keywords and values
+        exclude_list = ['', 'COMMENT']
+        header_info[ext]['keywords'] = [item for item in list(header.keys()) if item not in exclude_list]
+        header_info[ext]['values'] = []
+        for key in header_info[ext]['keywords']:
+            header_info[ext]['values'].append(hdulist[ext].header[key])
+
+    # Close the file
+    hdulist.close()
+
+    # Build tables
+    for ext in header_info:
+        table = Table([header_info[ext]['keywords'], header_info[ext]['values']], names=('Key', 'Value'))
+        temp_path_for_html = os.path.join(tempfile.mkdtemp(), '{}_table.html'.format(header_info[ext]['EXTNAME']))
+        with open(temp_path_for_html, 'w') as f:
+            table.write(f, format='jsviewer', jskwargs={'display_length': 20})
+        header_info[ext]['table'] = open(temp_path_for_html, 'r').read()
+
+    return header_info
 
 
 def get_image_info(file_root, rewrite):
@@ -747,6 +793,48 @@ def get_proposal_info(filepaths):
     return proposal_info
 
 
+def get_jwqldb_table_view_components(request):
+    """Render the view for the JWQLDB table viewer.
+
+    Parameters
+    ----------
+    request : HttpRequest object
+        Incoming request from the webpage
+
+    Returns
+    -------
+    None
+    """
+
+    if request.method == 'POST':
+        # Make dictionary of tablename : class object
+        # This matches what the user selects in the drop down to the python obj.
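+        # A condensed sketch of the same lookup: every SQLAlchemy declarative
+        # model exposes its table name as ``__tablename__``, so the loop below
+        # is equivalent to the following comprehension (``foo`` being a
+        # hypothetical table name, not one of jwql's):
+        #
+        #     tables = {tbl.__tablename__: tbl
+        #               for tbl in vars(di).values()
+        #               if hasattr(tbl, '__tablename__')}
+        #     result = session.query(tables['foo']).all()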
+        tables_of_interest = {}
+        for item in di.__dict__.keys():
+            table = getattr(di, item)
+            if hasattr(table, '__tablename__'):
+                tables_of_interest[table.__tablename__] = table
+
+        session, base, engine, meta = load_connection(get_config()['connection_string'])
+        tablename_from_dropdown = request.POST['db_table_select']
+        table_object = tables_of_interest[tablename_from_dropdown]  # Select table object
+
+        result = session.query(table_object)
+
+        result_dict = [row.__dict__ for row in result.all()]  # Turn query result into list of dicts
+        column_names = table_object.__table__.columns.keys()
+
+        # Build list of column data based on column name.
+        data = []
+        for column in column_names:
+            column_data = list(map(itemgetter(column), result_dict))
+            data.append(column_data)
+
+        # Build table.
+        table_to_display = Table(data, names=column_names)
+        table_to_display.show_in_browser(jsviewer=True, max_lines=-1)  # Negative max_lines shows all lines available.
+
+
 def get_thumbnails_by_instrument(inst):
     """Return a list of thumbnails available in the filesystem for the
     given instrument.
diff --git a/jwql/website/apps/jwql/models.py b/jwql/website/apps/jwql/models.py
index 2e4b2976f..90454b267 100644
--- a/jwql/website/apps/jwql/models.py
+++ b/jwql/website/apps/jwql/models.py
@@ -40,6 +40,7 @@
     ('NIRISS', 'NIRISS'),
     ('NIRSpec', 'NIRSpec'))
 
+
 class BaseModel(models.Model):
     """A base model that other classes will inherit. Created to
     avoid an obscure error about a missing ``app_label``.
@@ -64,9 +65,9 @@ class ImageData(BaseModel):
         Date and time when datum was added to the database.
     """
 
-    inst = models.CharField('instrument', max_length=6, choices=INSTRUMENT_LIST, default=None)
+    inst = models.CharField('instrument', max_length=7, choices=INSTRUMENT_LIST, default=None)
     pub_date = models.DateTimeField('date published')
-    filepath = models.FilePathField(path='/user/lchambers/jwql/')  #upload_to=str(inst))
+    filepath = models.FilePathField(path='/user/lchambers/jwql/')
 
     def filename(self):
         return os.path.basename(self.filepath)
diff --git a/jwql/website/apps/jwql/monitor_pages/__init__.py b/jwql/website/apps/jwql/monitor_pages/__init__.py
index be7161b60..29f975f8a 100644
--- a/jwql/website/apps/jwql/monitor_pages/__init__.py
+++ b/jwql/website/apps/jwql/monitor_pages/__init__.py
@@ -1,3 +1,3 @@
-from .monitor_ta_bokeh import MonitorTA
-from .monitor_mast_bokeh import MastMonitor
+from .monitor_dark_bokeh import DarkMonitor
 from .monitor_filesystem_bokeh import MonitorFilesystem
+from .monitor_mast_bokeh import MastMonitor
diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py
new file mode 100755
index 000000000..1199a840e
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py
@@ -0,0 +1,193 @@
+"""This module contains code for the dark current monitor Bokeh plots.
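+
+The mean dark image plot is driven by swapping new columns into a Bokeh
+``ColumnDataSource``. A minimal standalone sketch of that pattern (plain
+``bokeh``, independent of the templating layer, with a made-up array) is:
+
+::
+
+    import numpy as np
+    from bokeh.models import ColumnDataSource
+    from bokeh.plotting import figure
+
+    data = np.zeros((2048, 2048))
+    source = ColumnDataSource(data=dict(image=[data], dw=[2048], dh=[2048]))
+    fig = figure(x_range=(0, 2048), y_range=(0, 2048))
+    fig.image(image='image', x=0, y=0, dw='dw', dh='dh', source=source,
+              palette='Viridis256')
+    source.data['image'] = [np.random.normal(size=(2048, 2048))]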
+
+Authors
+-------
+
+    - Bryan Hilbert
+    - Gray Kanarek
+    - Lauren Chambers
+
+Use
+---
+
+    This module can be used from the command line as such:
+
+    ::
+
+        from jwql.website.apps.jwql import monitor_pages
+        monitor_template = monitor_pages.DarkMonitor()
+        monitor_template.aperture_info = ('NIRCam', 'NRCA3_FULL')
+        script, div = monitor_template.embed("dark_current_time_figure")
+"""
+
+import os
+
+from astropy.io import fits
+from astropy.time import Time
+from bokeh.models.tickers import LogTicker
+import numpy as np
+
+from jwql.database.database_interface import session
+from jwql.database.database_interface import NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent
+from jwql.database.database_interface import NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent
+from jwql.database.database_interface import MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent
+from jwql.database.database_interface import NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent
+from jwql.database.database_interface import FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.utils import get_config
+from jwql.bokeh_templating import BokehTemplate
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class DarkMonitor(BokehTemplate):
+
+    # Combine instrument and aperture into a single property because we
+    # do not want to invoke the setter unless both are updated
+    @property
+    def aperture_info(self):
+        return (self._instrument, self._aperture)
+
+    @aperture_info.setter
+    def aperture_info(self, info):
+        self._instrument, self._aperture = info
+        self.pre_init()
+        self.post_init()
+
+    def _dark_mean_image(self):
+        """Update bokeh objects with mean dark image data."""
+
+        # Open the mean dark current file and get the data
+        mean_dark_image_file = self.pixel_table[-1].mean_dark_image_file
+        mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images')
+        mean_dark_image_path = os.path.join(mean_slope_dir, mean_dark_image_file)
+        with fits.open(mean_dark_image_path) as hdulist:
+            data = hdulist[1].data
+
+        # Update the plot with the data and boundaries
+        y_size, x_size = np.shape(data)
+        self.refs["mean_dark_source"].data['image'] = [data]
+        self.refs["stamp_xr"].end = x_size
+        self.refs["stamp_yr"].end = y_size
+        self.refs["mean_dark_source"].data['dw'] = [x_size]
+        self.refs["mean_dark_source"].data['dh'] = [y_size]
+
+        # Set the image color scale
+        self.refs["log_mapper"].high = 0
+        self.refs["log_mapper"].low = -.2
+
+        # This should add ticks to the colorbar, but it doesn't
+        self.refs["mean_dark_cbar"].ticker = LogTicker()
+
+        # Add a title
+        self.refs['mean_dark_image_figure'].title.text = self._aperture
+        self.refs['mean_dark_image_figure'].title.align = "center"
+        self.refs['mean_dark_image_figure'].title.text_font_size = "20px"
+
+    def pre_init(self):
+        # Start with default values for instrument and aperture because
+        # BokehTemplate's __init__ method does not allow input arguments
+        try:
+            dummy_instrument = self._instrument
+            dummy_aperture = self._aperture
+        except AttributeError:
+            self._instrument = 'NIRCam'
+            self._aperture = 'NRCA1_FULL'
+
+        self._embed = True
+
+        # Fix aperture/detector name discrepancy
+        if self._aperture in ['NRCA5_FULL', 'NRCB5_FULL']:
+            self.detector = '{}LONG'.format(self._aperture[0:4])
+        else:
+            self.detector = self._aperture.split('_')[0]
+
+        # App design
+        self.format_string = None
+        self.interface_file = os.path.join(SCRIPT_DIR, "yaml", "dark_monitor_interface.yaml")
+
+        # Load data tables
+        self.load_data()
+
+        # Data for mean dark versus time plot
+        datetime_stamps = [row.obs_mid_time for row in self.dark_table]
+        times = Time(datetime_stamps, format='datetime', scale='utc')  # Convert to MJD
+        self.timestamps = times.mjd
+        self.dark_current = [row.mean for row in self.dark_table]
+
+        # Data for dark current histogram plot (full detector)
+        # Just show the last histogram, which is the one most recently
+        # added to the database
+        last_hist_index = -1
+        self.last_timestamp = datetime_stamps[last_hist_index].isoformat()
+        self.full_dark_bin_center = np.array([row.hist_dark_values for
+                                              row in self.dark_table])[last_hist_index]
+        self.full_dark_amplitude = [row.hist_amplitudes for
+                                    row in self.dark_table][last_hist_index]
+        self.full_dark_bottom = np.zeros(len(self.full_dark_amplitude))
+        deltas = self.full_dark_bin_center[1:] - self.full_dark_bin_center[0: -1]
+        self.full_dark_bin_width = np.append(deltas[0], deltas)
+
+    def post_init(self):
+
+        self._update_dark_v_time()
+        self._update_hist()
+        self._dark_mean_image()
+
+    def identify_tables(self):
+        """Determine which dark current database tables are associated with
+        a given instrument"""
+
+        mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()]
+        self.query_table = eval('{}DarkQueryHistory'.format(mixed_case_name))
+        self.pixel_table = eval('{}DarkPixelStats'.format(mixed_case_name))
+        self.stats_table = eval('{}DarkDarkCurrent'.format(mixed_case_name))
+
+    def load_data(self):
+        """Query the database tables to get data"""
+
+        # Determine which database tables are needed based on instrument
+        self.identify_tables()
+
+        # Query database for all data in the instrument's dark stats table with a matching aperture
+        self.dark_table = session.query(self.stats_table) \
+            .filter(self.stats_table.aperture == self._aperture) \
+            .all()
+
+        self.pixel_table = session.query(self.pixel_table) \
+            .filter(self.pixel_table.detector == self.detector) \
+            .all()
+
+    def _update_dark_v_time(self):
+
+        # Define y range of dark current v. time plot
+        buffer_size = 0.05 * (max(self.dark_current) - min(self.dark_current))
+        self.refs['dark_current_yrange'].start = min(self.dark_current) - buffer_size
+        self.refs['dark_current_yrange'].end = max(self.dark_current) + buffer_size
+
+        # Define x range of dark current v. time plot
+        horizontal_half_buffer = (max(self.timestamps) - min(self.timestamps)) * 0.05
+        if horizontal_half_buffer == 0:
+            horizontal_half_buffer = 1.  # day
+        self.refs['dark_current_xrange'].start = min(self.timestamps) - horizontal_half_buffer
+        self.refs['dark_current_xrange'].end = max(self.timestamps) + horizontal_half_buffer
+
+        # Add a title
+        self.refs['dark_current_time_figure'].title.text = self._aperture
+        self.refs['dark_current_time_figure'].title.align = "center"
+        self.refs['dark_current_time_figure'].title.text_font_size = "20px"
+
+    def _update_hist(self):
+
+        # Define y range of dark current histogram
+        buffer_size = 0.05 * (max(self.full_dark_amplitude) - min(self.full_dark_bottom))
+        self.refs['dark_histogram_yrange'].start = min(self.full_dark_bottom)
+        self.refs['dark_histogram_yrange'].end = max(self.full_dark_amplitude) + buffer_size
+
+        # Define x range of dark current histogram
+        self.refs['dark_histogram_xrange'].start = min(self.full_dark_bin_center)
+        self.refs['dark_histogram_xrange'].end = max(self.full_dark_bin_center)
+
+        # Add a title
+        self.refs['dark_full_histogram_figure'].title.text = self._aperture
+        self.refs['dark_full_histogram_figure'].title.align = "center"
+        self.refs['dark_full_histogram_figure'].title.text_font_size = "20px"
diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml b/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml
new file mode 100755
index 000000000..cdadceb33
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yaml
@@ -0,0 +1,109 @@
+# YAML file defining bokeh figures for the dark monitor
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Dark Current v. Time Figure
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+- !ColumnDataSource: &dark_current_source
+    ref: "dark_current_source"
+    data:
+        time: !self.timestamps
+        dark_current: !self.dark_current
+
+- !Range1d: &dark_current_xrange
+    ref: "dark_current_xrange"
+    #start: 0
+    #end: 1
+    #bounds: 'auto' #!!python/tuple [0, 1]
+
+- !Range1d: &dark_current_yrange
+    ref: "dark_current_yrange"
+    #start: 0
+    #end: 1
+    #bounds: !!python/tuple [-1, 1]
+
+- !Figure: &dark_current_time_figure
+    ref: "dark_current_time_figure"
+    x_axis_label: "Time (MJD)"
+    y_axis_label: "Dark current (e-)"
+    x_range: *dark_current_xrange
+    y_range: *dark_current_yrange
+    elements:
+        - {'kind': 'circle', 'x': 'time', 'y': 'dark_current', line_width: 5, 'source': *dark_current_source}
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Dark Histogram Figure
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+- !ColumnDataSource: &dark_full_hist_source
+    ref: "dark_full_hist_source"
+    data:
+        full_dark_bin_center: !self.full_dark_bin_center
+        full_dark_amplitude: !self.full_dark_amplitude
+        full_dark_bottom: !self.full_dark_bottom
+        full_dark_bin_width: !self.full_dark_bin_width
+
+- !Range1d: &dark_histogram_xrange
+    ref: "dark_histogram_xrange"
+    #start: 0
+    #end: 1
+    #bounds: 'auto' #!!python/tuple [0, 1]
+
+- !Range1d: &dark_histogram_yrange
+    ref: "dark_histogram_yrange"
+    #start: 0
+    #end: 1
+    #bounds: !!python/tuple [0, 1]
+
+- !Figure: &dark_full_histogram_figure
+    ref: "dark_full_histogram_figure"
+    x_axis_label: "Dark Current (DN/sec)"
+    y_axis_label: "Number of Pixels"
+    x_range: *dark_histogram_xrange
+    y_range: *dark_histogram_yrange
+    elements:
+        - {'kind': 'vbar', 'x': 'full_dark_bin_center', 'y': 'full_dark_bin_width', 'top': 'full_dark_amplitude', 'bottom': 'full_dark_bottom', 'source': *dark_full_hist_source}
+#        - {'kind': 'text', 'x': 0, 'y': 20000, 'id': 1001}
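+#
+# Note on the templating convention used in this file (a sketch, inferred
+# from the code above): tags of the form !self.<attribute> are resolved by
+# BokehTemplate against attributes of the DarkMonitor instance (e.g.
+# !self.timestamps and !self.dark_current are the arrays set in pre_init()),
+# and each "ref" string becomes a key of self.refs so the figures, ranges,
+# and color mappers can be updated after the data are loaded.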
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Mean Dark Image Figure
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+- !ColumnDataSource: &mean_dark_source
+    ref: "mean_dark_source"
+    data:
+        dh: [1]
+        dw: [1]
+        image: [[[1,0], [0, 1]]]
+- !Range1d: &stamp_xr
+    ref: "stamp_xr"
+    #start: 0
+    #end: 1
+    #bounds: !!python/tuple [0, 1]
+- !Range1d: &stamp_yr
+    ref: "stamp_yr"
+    #start: 0
+    #end: 1
+    #bounds: !!python/tuple [0, 1]
+- !LogColorMapper: &log_mapper
+    ref: "log_mapper"
+    palette: "Viridis256"
+    low: 0.
+    high: 1.
+- !ColorBar: &mean_dark_cbar
+    ref: "mean_dark_cbar"
+    color_mapper: *log_mapper
+    location: !!python/tuple [0, 0]
+- !Figure: &mean_dark_image_figure
+    ref: "mean_dark_image_figure"
+    x_axis_label: "Col = SIAF det Y"
+    y_axis_label: "Row = SIAF det X"
+    x_range: *stamp_xr
+    y_range: *stamp_yr
+    tools: ""
+    height: 250  # Not working
+    width: 250  # Not working
+    elements:
+        - {"kind": "image", "image": "image", "x": 0, "y": 0, "dh": 'dh', "dw": 'dw', "source": *mean_dark_source, "color_mapper": *log_mapper}
+        - {"kind": "layout", "obj": *mean_dark_cbar, "place": "right"}
+
+#- !Document:
+#    - !column:
+#        - *dark_current_time_figure
+#        - *dark_full_histogram_figure
diff --git a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yml b/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yml
deleted file mode 100644
index a60f0f643..000000000
--- a/jwql/website/apps/jwql/monitor_pages/yaml/dark_monitor_interface.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-- !ColumnDataSource: &dark_current_source
-    ref: "dark_current_source"
-    data:
-        time: !self.timestamps
-        dark_current: !self.dark_current
-- !Range1d: &dark_current_xrange
-    ref: "dark_current_xrange"
-    start: 0
-    end: 1
-    bounds: !!python/tuple [0, 1]
-- !Range1d: &dark_current_yrange
-    ref: "dark_current_yrange"
-    start: 0
-    end: 1
-    bounds: !!python/tuple [0, 1]
-- !Figure: &dark_current_time_figure
-    ref: "dark_current_time_figure"
-    x_axis_label: "Time (s)"
-    y_axis_label: "Dark current (e-)"
-    x_range: *dark_current_xrange
-    y_range: *dark_current_yrange
-    elements:
-        - {'kind': 'line', 'x': 'time', 'y': 'dark_current', 'source': *dark_current_source}
-- !Figure: &hot_pixel_locations_figure
-    ref: "hot_pixel_locations_figure"
-- !Document:
-    - !column:
-        - *dark_current_time_figure
-        - *hot_pixel_locations_figure
\ No newline at end of file
diff --git a/jwql/website/apps/jwql/monitor_views.py b/jwql/website/apps/jwql/monitor_views.py
new file mode 100644
index 000000000..1668072d0
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_views.py
@@ -0,0 +1,74 @@
+"""Defines the views for the ``jwql`` web app instrument monitors.
+
+Authors
+-------
+
+    - Lauren Chambers
+
+Use
+---
+
+    This module is called in ``urls.py`` as such:
+    ::
+
+        from django.urls import path
+        from . import monitor_views
+        urlpatterns = [path('web/path/to/view/', monitor_views.view_name,
+                       name='view_name')]
+
+References
+----------
+    For more information please see:
+        ``https://docs.djangoproject.com/en/2.0/topics/http/views/``
+
+Dependencies
+------------
+    The user must have a configuration file named ``config.json``
+    placed in the ``jwql/utils/`` directory.
+"""
+
+import os
+
+from django.shortcuts import render
+
+from . import bokeh_containers
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.utils import get_config
+
+FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')
+
+
+def dark_monitor(request, inst):
+    """Generate the dark monitor page for a given instrument
+
+    Parameters
+    ----------
+    request : HttpRequest object
+        Incoming request from the webpage
+    inst : str
+        Name of JWST instrument
+
+    Returns
+    -------
+    HttpResponse object
+        Outgoing response sent to the webpage
+    """
+
+    # Ensure the instrument is correctly capitalized
+    inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]
+
+    # Deal with the fact that only the NIRCam database is populated
+    if inst == 'NIRCam':
+        tabs_components = bokeh_containers.dark_monitor_tabs(inst)
+    else:
+        tabs_components = None
+
+    template = "dark_monitor.html"
+
+    context = {
+        'inst': inst,
+        'tabs_components': tabs_components,
+    }
+
+    # Return an HTTP response with the template and dictionary of variables
+    return render(request, template, context)
diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css
index 162bcbc8e..398363600 100644
--- a/jwql/website/apps/jwql/static/css/jwql.css
+++ b/jwql/website/apps/jwql/static/css/jwql.css
@@ -409,6 +409,37 @@ li:hover .nav-link, .navbar-brand:hover {
     margin-bottom: 1rem;
 }
 
+.slider{
+    -webkit-appearance: none;
+    width: 250px;
+    height: 15px;
+    background: #BEC4D4;
+    outline: none;
+}
+
+/* slider style for Chrome/Safari/Opera/Edge */
+.slider::-webkit-slider-thumb {
+    -webkit-appearance: none;
+    appearance: none;
+    width: 15px;
+    height: 30px;
+    background: #C85108;
+    cursor: pointer;
+}
+
+/* slider style for Firefox */
+.slider::-moz-range-thumb {
+    width: 15px;
+    height: 30px;
+    background: #C85108;
+    cursor: pointer;
+}
+
+/* remove slider outline for Firefox */
+.slider::-moz-focus-outer {
+    border: 0;
+}
+
 .row .row {
     margin-top: 1rem;
     margin-bottom: 0;
diff --git a/jwql/website/apps/jwql/static/js/jwql.js b/jwql/website/apps/jwql/static/js/jwql.js
index a15f03a7b..af47d3469 100644
--- a/jwql/website/apps/jwql/static/js/jwql.js
+++ b/jwql/website/apps/jwql/static/js/jwql.js
@@ -24,9 +24,9 @@ function change_filetype(type, file_root, num_ints, inst) {
     var num_ints = JSON.parse(num_ints);
 
     // Propogate the text fields showing the filename and APT parameters
-    var fits_filename = file_root + '_' + type + '.fits'
+    var fits_filename = file_root + '_' + type
     document.getElementById("jpg_filename").innerHTML = file_root + '_' + type + '_integ0.jpg';
-    document.getElementById("fits_filename").innerHTML = fits_filename;
+    document.getElementById("fits_filename").innerHTML = fits_filename + '.fits';
     document.getElementById("proposal").innerHTML = file_root.slice(2,7);
     document.getElementById("obs_id").innerHTML = file_root.slice(7,10);
    document.getElementById("visit_id").innerHTML = file_root.slice(10,13);
@@ -38,6 +38,11 @@ function change_filetype(type, file_root, num_ints, inst) {
     img.src = jpg_filepath;
     img.alt = jpg_filepath;
 
+    // Reset the slider values
+    document.getElementById("slider_range").value = 1
+    document.getElementById("slider_range").max = num_ints[type]
+    document.getElementById("slider_val").innerHTML = 1
+
     // Update the number of integrations
     var int_counter = document.getElementById("int_count");
     int_counter.innerHTML = 'Displaying integration 1/' + num_ints[type];
@@ -50,55 +55,64 @@ function change_filetype(type, file_root, num_ints, inst) {
     }
 
     // Update the image download and header links
document.getElementById("download_fits").href = '/static/filesystem/' + file_root.slice(0,7) + '/' + fits_filename; + document.getElementById("download_fits").href = '/static/filesystem/' + file_root.slice(0,7) + '/' + fits_filename + '.fits'; document.getElementById("download_jpg").href = jpg_filepath; - document.getElementById("view_header").href = '/' + inst + '/' + fits_filename + '/hdr/'; + document.getElementById("view_header").href = '/' + inst + '/' + fits_filename + '/header/'; // Disable the "left" button, since this will be showing integ0 document.getElementById("int_before").disabled = true; }; + /** * Change the integration number of the displayed image - * @param {String} direction - The direction to switch to, either "left" (decrease) or "right" (increase). * @param {String} file_root - The rootname of the file * @param {Dict} num_ints - A dictionary whose keys are suffix types and whose * values are the number of integrations for that suffix + * @param {String} method - How the integration change was initialized, either "button" or "slider" + * @param {String} direction - The direction to switch to, either "left" (decrease) or "right" (increase). + * Only relevant if method is "button". */ -function change_int(direction, file_root, num_ints) { +function change_int(file_root, num_ints, method, direction = 'right') { // Figure out the current image and integration var suffix = document.getElementById("jpg_filename").innerHTML.split('_'); var integration = Number(suffix[suffix.length - 1][5]); var suffix = suffix[suffix.length - 2]; var program = file_root.slice(0,7); - + + // Find the total number of integrations for the current image var num_ints = num_ints.replace(/'/g, '"'); var num_ints = JSON.parse(num_ints)[suffix]; + // Get the desired integration value + switch (method) { + case "button": + if ((integration == num_ints - 1 && direction == 'right')|| + (integration == 0 && direction == 'left')) { + return; + } else if (direction == 'right') { + new_integration = integration + 1 + } else if (direction == 'left') { + new_integration = integration - 1 + } + break; + case "slider": + new_integration = document.getElementById("slider_range").value - 1; + break; + } - if ((integration == num_ints - 1 && direction == 'right')|| - (integration == 0 && direction == 'left')) { - return; - } else if (direction == 'right') { - // Update integration number - var new_integration = integration + 1 - - // Don't let them go further if they're at the last integration - if (new_integration == num_ints - 1) { - document.getElementById("int_after").disabled = true; - } - document.getElementById("int_before").disabled = false; - } else if (direction == 'left') { - // Update integration number - var new_integration = integration - 1 - - // Don't let them go further if they're at the first integration - if (new_integration == 0) { - document.getElementById("int_before").disabled = true; - } + // Update which button are disabled based on the new integration + if (new_integration == 0) { + document.getElementById("int_after").disabled = false; + document.getElementById("int_before").disabled = true; + } else if (new_integration < num_ints - 1) { document.getElementById("int_after").disabled = false; + document.getElementById("int_before").disabled = false; + } else if (new_integration == num_ints - 1) { + document.getElementById("int_after").disabled = true; + document.getElementById("int_before").disabled = false; } // Update the JPG filename @@ -118,8 +132,13 @@ function change_int(direction, 
file_root, num_ints) { // Update the jpg download link document.getElementById("download_jpg").href = jpg_filepath; + + // Update the slider values + document.getElementById("slider_range").value = new_integration + 1 + document.getElementById("slider_val").innerHTML = new_integration + 1 }; + /** * Determine what filetype to use for a thumbnail * @param {String} thumbnail_dir - The path to the thumbnail directory @@ -383,6 +402,29 @@ function update_filter_options(data) { $("#thumbnail-filter")[0].innerHTML = content; }; +/** + * Change the header extension displayed + * @param {String} extension - The extension of the header selected + * @param {String} num_extensions - The total number of extensions + */ +function update_header_display(extension, num_extensions) { + + // Hide all headers + for (var i = 0; i < num_extensions; i++) { + var header_name = document.getElementById("header-display-name-extension" + i); + var header_table = document.getElementById("header-table-extension" + i); + header_name.style.display = 'none'; + header_table.style.display = 'none'; + }; + + // Display the header selected + var header_name_to_show = document.getElementById("header-display-name-extension" + extension); + var header_table_to_show = document.getElementById("header-table-extension" + extension); + header_name_to_show.style.display = 'inline'; + header_table_to_show.style.display = 'inline'; + +}; + /** * Updates the img_show_count component * @param {Integer} count - The count to display diff --git a/jwql/website/apps/jwql/templates/base.html b/jwql/website/apps/jwql/templates/base.html index 1b7eefb06..592a59b72 100644 --- a/jwql/website/apps/jwql/templates/base.html +++ b/jwql/website/apps/jwql/templates/base.html @@ -131,6 +131,9 @@ {% endfor %} +