Photon counting step function #260

Open
wants to merge 11 commits into base: main
2 changes: 1 addition & 1 deletion corgidrp/combine.py
@@ -91,7 +91,7 @@ def combine_subexposures(input_dataset, num_frames_per_group=None, collapse="mea
pri_hdr = input_dataset[num_frames_per_group*i].pri_hdr.copy()
ext_hdr = input_dataset[num_frames_per_group*i].ext_hdr.copy()
err_hdr = input_dataset[num_frames_per_group*i].err_hdr.copy()
dq_hdr = input_dataset[num_frames_per_group*i].err_hdr.copy()
dq_hdr = input_dataset[num_frames_per_group*i].dq_hdr.copy()
hdulist = input_dataset[num_frames_per_group*i].hdu_list.copy()
new_image = data.Image(data_collapse, pri_hdr=pri_hdr, ext_hdr=ext_hdr, err=err_collapse, dq=dq_collapse, err_hdr=err_hdr,
dq_hdr=dq_hdr, input_hdulist=hdulist)
14 changes: 7 additions & 7 deletions corgidrp/darks.py
@@ -105,14 +105,14 @@ def mean_combine(image_list, bpmap_list, err=False):
sum_im += masked
map_im += (im_m.mask == False).astype(int)

if err: # sqrt of sum of sigma**2 terms
sum_im = np.sqrt(sum_im)

# Divide sum_im by map_im only where map_im is not equal to 0 (i.e.,
# not masked).
# Where map_im is equal to 0, set combined_im to zero
comb_image = np.divide(sum_im, map_im, out=np.zeros_like(sum_im),
where=map_im != 0)

if err: # (sqrt of sum of sigma**2 terms)/sqrt(n)
comb_image = np.sqrt(comb_image)

# Mask any value that was never mapped (aka masked in every frame)
comb_bpmap = (map_im == 0).astype(int)
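
As an aside on the reordered err branch above, here is a minimal standalone sketch (assuming the inputs are per-frame sigma**2 maps and bad-pixel masks, an illustrative simplification rather than the exact corgidrp calling convention) of summing sigma**2 over unmasked frames, dividing by the per-pixel count, and only then taking the square root:

import numpy as np

sigma2_list = [np.full((3, 3), 1.0), np.full((3, 3), 4.0)]   # per-frame sigma**2 maps
bpmap_list = [np.zeros((3, 3), int), np.zeros((3, 3), int)]  # per-frame bad-pixel maps
bpmap_list[1][0, 0] = 1                                      # one pixel masked in frame 2

sum_im = np.zeros((3, 3))
map_im = np.zeros((3, 3), int)
for s2, bp in zip(sigma2_list, bpmap_list):
    good = (bp == 0)
    sum_im += np.where(good, s2, 0.0)   # accumulate sigma**2 of unmasked pixels
    map_im += good.astype(int)          # count contributing frames per pixel

# divide first, then take the square root: sqrt(sum(sigma**2)/n) per pixel
comb_err = np.sqrt(np.divide(sum_im, map_im, out=np.zeros_like(sum_im),
                             where=map_im != 0))
comb_bpmap = (map_im == 0).astype(int)  # pixels that were masked in every frame
print(comb_err[1, 1], comb_err[0, 0])   # ~1.58 where both frames contribute, 1.0 where one is masked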
@@ -329,7 +329,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
output Dark's dq after assigning these pixels a flag value of 256.
They should have large err values.
The pixels that are masked for EVERY frame in all sub-stacks
but 4 (or less) are assigned a flag value of
but 3 (or less) are assigned a flag value of
1, which falls under the category of "Bad pixel - unspecified reason".
These pixels would have no reliability for dark subtraction.

@@ -415,7 +415,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
output Dark's dq after assigning these pixels a flag value of 256.
They should have large err values.
The pixels that are masked for EVERY frame in all sub-stacks
but 4 (or less) are assigned a flag value of
but 3 (or less) are assigned a flag value of
1, which falls under the category of "Bad pixel - unspecified reason".
These pixels would have no reliability for dark subtraction.
FPN_std_map : array-like (full frame)
@@ -522,7 +522,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
# flag value of 256; unreliable pixels, large err
output_dq = (unreliable_pix_map >= len(datasets)-3).astype(int)*256
# flag value of 1 for those that are masked all the way through for all
# but 4 (or less) stacks; this overwrites the flag value of 256 that was assigned to
# but 3 (or less) stacks; this overwrites the flag value of 256 that was assigned to
# these pixels in previous line
unfittable_ind = np.where(unfittable_pix_map >= len(datasets)-3)
output_dq[unfittable_ind] = 1
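
A toy illustration (hypothetical 2x2 maps and 10 sub-stacks, not corgidrp data) of the flagging logic above: unreliable pixels get DQ flag 256, and pixels that are fully masked in all but 3 (or fewer) sub-stacks are then overwritten with flag 1:

import numpy as np

n_substacks = 10                            # stands in for len(datasets)
unreliable_pix_map = np.array([[9, 2],
                               [7, 10]])    # sub-stacks in which each pixel is unreliable
unfittable_pix_map = np.array([[9, 0],
                               [0, 10]])    # sub-stacks in which each pixel is masked in every frame

output_dq = (unreliable_pix_map >= n_substacks - 3).astype(int) * 256
unfittable_ind = np.where(unfittable_pix_map >= n_substacks - 3)
output_dq[unfittable_ind] = 1               # flag 1 overrides the 256 assigned above

print(output_dq)
# [[  1   0]
#  [256   1]]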
@@ -577,7 +577,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
# input data error comes from .err arrays; could use this for error bars
# in input data for weighted least squares, but we'll just simply get the
# std error and add it in quadrature to least squares fit standard dev
stacks_err = np.sqrt(np.sum(mean_err_stack**2, axis=0))/len(mean_err_stack)
stacks_err = np.sqrt(np.sum(mean_err_stack**2, axis=0)/len(mean_err_stack))

# matrix to be used for least squares and covariance matrix
X = np.array([np.ones([len(EMgain_arr)]), EMgain_arr, EMgain_arr*exptime_arr]).T
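
For context on the design matrix built above, here is a small self-contained sketch of the per-pixel least-squares solve; the assumed model mean = FPN + g*CIC + g*t*D (fixed-pattern offset, gain-multiplied clock-induced charge, gain- and exposure-time-multiplied dark current) is inferred from the column structure [1, g, g*t] and the surrounding docstring, so treat it as illustrative:

import numpy as np

EMgain_arr = np.array([1., 10., 50., 1., 10., 50.])     # hypothetical sub-stack EM gains
exptime_arr = np.array([1., 1., 1., 10., 10., 10.])     # hypothetical exposure times (s)

# same column structure as the X above: [1, g, g*t]
X = np.array([np.ones(len(EMgain_arr)), EMgain_arr, EMgain_arr * exptime_arr]).T

F_true, C_true, D_true = 100., 0.01, 8.33e-4            # made-up truth values for one pixel
y = F_true + EMgain_arr * C_true + EMgain_arr * exptime_arr * D_true

params, residuals, rank, sv = np.linalg.lstsq(X, y, rcond=None)
print(params)   # recovers roughly [100, 0.01, 8.33e-4]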
2 changes: 2 additions & 0 deletions corgidrp/data.py
@@ -104,6 +104,8 @@ def update_after_processing_step(self, history_entry, new_all_data=None, new_all
if new_all_err.shape[-2:] != self.all_err.shape[-2:] or new_all_err.shape[0] != self.all_err.shape[0]:
raise ValueError("The shape of new_all_err is {0}, whereas we are expecting {1}".format(new_all_err.shape, self.all_err.shape))
self.all_err = new_all_err
for i in range(len(self.frames)):
self.frames[i].err = self.all_err[i]
if new_all_dq is not None:
if new_all_dq.shape != self.all_dq.shape:
raise ValueError("The shape of new_all_dq is {0}, whereas we are expecting {1}".format(new_all_dq.shape, self.all_dq.shape))
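
A minimal sketch (hypothetical MiniDataset/MiniFrame classes, not corgidrp.data) of why the new loop above is needed: replacing the dataset-level all_err array does not automatically update the per-frame err attributes, so they must be re-pointed at the new array:

import numpy as np

class MiniFrame:
    def __init__(self, err):
        self.err = err

class MiniDataset:
    def __init__(self, all_err):
        self.all_err = all_err
        self.frames = [MiniFrame(all_err[i]) for i in range(len(all_err))]

    def update_err(self, new_all_err):
        self.all_err = new_all_err
        # without this loop, frames[i].err would still point at the old array
        for i in range(len(self.frames)):
            self.frames[i].err = self.all_err[i]

ds = MiniDataset(np.zeros((2, 1, 4, 4)))
ds.update_err(np.ones((2, 1, 4, 4)))
assert np.all(ds.frames[0].err == 1)   # per-frame err now matches the new all_err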
11 changes: 7 additions & 4 deletions corgidrp/l2a_to_l2b.py
@@ -248,16 +248,19 @@ def convert_to_electrons(input_dataset, k_gain):
# you should make a copy of the dataset to start
kgain_dataset = input_dataset.copy()
kgain_cube = kgain_dataset.all_data
kgain_error = kgain_dataset.all_err

kgain = k_gain.value #extract from caldb
kgainerr = k_gain.error[0]
# x = c*kgain, where c (counts beforehand) and kgain both have error, so do propagation of error due to the product of 2 independent sources
#[:,0:1,:,:] to get same dimensions as input_dataset.all_err
kgain_error = (np.sqrt((kgain*kgain_dataset.all_err)**2 + (kgain_cube*kgainerr)**2))[:,0:1,:,:]
kgain_cube *= kgain
kgain_error *= kgain


history_msg = "data converted to detected EM electrons by kgain {0}".format(str(kgain))

# update the output dataset with this converted data and update the history
kgain_dataset.update_after_processing_step(history_msg, new_all_data=kgain_cube, new_all_err=kgain_error, header_entries = {"BUNIT":"detected EM electrons", "KGAIN":kgain})
kgain_dataset.update_after_processing_step(history_msg, new_all_data=kgain_cube, new_all_err=kgain_error, header_entries = {"BUNIT":"detected EM electrons", "KGAIN":kgain,
"KGAIN_ER": k_gain.error[0], "RN":k_gain.ext_hdr['RN'], "RN_ERR":k_gain.ext_hdr["RN_ERR"]})
return kgain_dataset

def em_gain_division(input_dataset):
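
The new kgain_error line in convert_to_electrons above applies standard propagation of error for a product of two independent quantities; a standalone numerical sketch (made-up numbers, not caldb values):

import numpy as np

c = np.array([100.0, 200.0])          # counts before conversion (DN)
sigma_c = np.array([10.0, 14.0])      # error on the counts
kgain, sigma_kgain = 7.0, 0.05        # k gain (e-/DN) and its error

# x = c * kgain  =>  sigma_x = sqrt((kgain*sigma_c)**2 + (c*sigma_kgain)**2)
x = c * kgain
sigma_x = np.sqrt((kgain * sigma_c)**2 + (c * sigma_kgain)**2)
print(x, sigma_x)                     # [ 700. 1400.]  [~70.2  ~98.5]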
91 changes: 90 additions & 1 deletion corgidrp/mocks.py
@@ -1,6 +1,7 @@
import os
from pathlib import Path
import numpy as np
import warnings
import scipy.ndimage
import pandas as pd
import astropy.io.fits as fits
@@ -166,7 +167,7 @@ def create_synthesized_master_dark_calib(detector_areas):
# image area, including "shielded" rows and cols:
imrows, imcols, imr0c0 = imaging_area_geom('SCI', detector_areas)
prerows, precols, prer0c0 = unpack_geom('SCI', 'prescan', detector_areas)

np.random.seed(4567)
Contributor: generally, seeding should be done by the function that calls this function, not this function itself (by doing it here, it will always produce the same result and can't be overridden).
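
For example, the pattern the reviewer describes could look like the sketch below (the rng argument is hypothetical and not part of the current corgidrp API); a caller such as a unit test seeds at the call site:

import numpy as np

def create_synthesized_master_dark_calib_sketch(detector_areas, rng=None):
    if rng is None:
        rng = np.random.default_rng()       # unseeded unless the caller asks otherwise
    noise = rng.standard_normal((10, 10))   # stand-in for the simulated frame
    return noise

# a test that needs reproducibility seeds at the call site instead:
frame = create_synthesized_master_dark_calib_sketch(None, rng=np.random.default_rng(4567))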

frame_list = []
for i in range(len(EMgain_arr)):
for l in range(N): #number of frames to produce
@@ -1857,3 +1858,91 @@ def add_defect(sch_imgs, prob, ori, temp):
hdul.writeto(str(filename)[:-4]+'_'+str(mult_counter)+'.fits', overwrite = True)
else:
hdul.writeto(filename, overwrite = True)

def create_photon_countable_frames(Nbrights=30, Ndarks=40, EMgain=5000, kgain=7, exptime=0.1, cosmic_rate=0, full_frame=True):
'''This creates a mock Dataset of illuminated and dark frames with large EM gain and short exposure time.
Used for unit tests for photon counting.

Args:
Nbrights (int): number of illuminated frames to simulate
Ndarks (int): number of dark frames to simulate
EMgain (float): EM gain
kgain (float): k gain (e-/DN)
exptime (float): exposure time (in s)
cosmic_rate (float): simulated cosmic ray incidence, hits/cm^2/s
full_frame (bool): If True, simulated frames are SCI full frames. If False, 50x50 images are simulated. Defaults to True.

Returns:
dataset (corgidrp.data.Dataset): Dataset containing both the illuminated and dark frames
ill_mean (float): mean electron count value simulated in the illuminated frames
dark_mean (float): mean electron count value simulated in the dark frames
'''
np.random.seed(1234)
Contributor: same comment with seeding as above.

pix_row = 1024 #number of rows and number of columns
fluxmap = np.ones((pix_row,pix_row)) #photon flux map, photons/s

emccd = EMCCDDetect(
em_gain=EMgain,
full_well_image=60000., # e-
full_well_serial=100000., # e-
dark_current=8.33e-4, # e-/pix/s
cic=0.01, # e-/pix/frame
read_noise=100., # e-/pix/frame
bias=2000, # e-
qe=0.9, # quantum efficiency, e-/photon
cr_rate=cosmic_rate, # cosmic rays incidence, hits/cm^2/s
pixel_pitch=13e-6, # m
eperdn=kgain,
nbits=64, # number of ADU bits
numel_gain_register=604 #number of gain register elements
)

thresh = emccd.em_gain/10 # threshold

if np.average(exptime*fluxmap) > 0.1:
warnings.warn('average # of photons/pixel is > 0.1. Decrease frame '
'time to get lower average # of photons/pixel.')

if emccd.read_noise <=0:
warnings.warn('read noise should be greater than 0 for effective '
'photon counting')
if thresh < 4*emccd.read_noise:
warnings.warn('thresh should be at least 4 or 5 times read_noise for '
'accurate photon counting')

avg_ph_flux = np.mean(fluxmap)
# theoretical electron flux for brights
ill_mean = avg_ph_flux*emccd.qe*exptime + emccd.dark_current*exptime + emccd.cic
# theoretical electron flux for darks
dark_mean = emccd.dark_current*exptime + emccd.cic

frame_e_list = []
frame_e_dark_list = []
prihdr, exthdr = create_default_headers()
for i in range(Nbrights):
# Simulate bright
if full_frame:
frame_dn = emccd.sim_full_frame(fluxmap, exptime)
else:
frame_dn = emccd.sim_sub_frame(fluxmap[:50,:50], exptime)
frame = data.Image(frame_dn, pri_hdr=prihdr, ext_hdr=exthdr)
frame.ext_hdr['CMDGAIN'] = EMgain
frame.ext_hdr['EXPTIME'] = exptime
frame.pri_hdr["VISTYPE"] = "TDEMO"
frame_e_list.append(frame)

for i in range(Ndarks):
# Simulate dark
if full_frame:
frame_dn_dark = emccd.sim_full_frame(np.zeros_like(fluxmap), exptime)
else:
frame_dn_dark = emccd.sim_sub_frame(np.zeros_like(fluxmap[:50,:50]), exptime)
frame_dark = data.Image(frame_dn_dark, pri_hdr=prihdr.copy(), ext_hdr=exthdr.copy())
frame_dark.ext_hdr['CMDGAIN'] = EMgain
frame_dark.ext_hdr['EXPTIME'] = exptime
frame_dark.pri_hdr["VISTYPE"] = "DARK"
frame_e_dark_list.append(frame_dark)

dataset = data.Dataset(frame_e_list+frame_e_dark_list)

return dataset, ill_mean, dark_mean
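
A hedged usage sketch of the new mock generator (assumes the emccd_detect dependency used by EMCCDDetect is installed); the returned means are the theoretical e-/pix values, handy as references in photon-counting unit tests:

from corgidrp.mocks import create_photon_countable_frames

dataset, ill_mean, dark_mean = create_photon_countable_frames(
    Nbrights=5, Ndarks=5, EMgain=5000, kgain=7, exptime=0.1,
    cosmic_rate=0, full_frame=False)      # 50x50 sub-frames keep the run fast

print(len(dataset.frames))                # 10 frames: 5 illuminated + 5 dark
print(ill_mean, dark_mean)                # expected mean e-/pix for brights and darks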