Commit

JP-3014: This Sets Invalid Slope Data to NaN in the Rate and Rateints products. (#131)

* Changing invalid computations of slopes to NaNs.

Updating the CI tests to reflect using NaNs for invalid slope calculations.

Updating variable names to be clearer.

Updating the change log.

Removing debugging statements and updating comments.

Updating integration-level checking for invalid slope data and setting integration values to NaN where invalid.

Updating the ramp fitting calculations description, detailing that invalid data will result in a NaN value in the rateints and/or rate products.

Updating the description according to feedback during code review.

* Update CHANGES.rst

Co-authored-by: Howard Bushouse <[email protected]>
kmacdonald-stsci and hbushouse authored Dec 13, 2022
1 parent 9ec86a7 commit 4bebdbf
Showing 5 changed files with 45 additions and 19 deletions.
8 changes: 7 additions & 1 deletion CHANGES.rst
@@ -8,8 +8,14 @@ General
- use ``tox`` environments [#130]

- Added support for Quantities in models required for the RomanCAL pipeline. [#124]


ramp_fitting
~~~~~~~~~~~~

- Set values in the rate and rateints products to NaN when no usable data is
  available to compute slopes. [#131]


1.2.2 (2022-12-01)
==================

7 changes: 7 additions & 0 deletions docs/stcal/ramp_fitting/description.rst
@@ -135,6 +135,13 @@ is the following: the type of noise (when appropriate) will appear as the supers
and the form of the data will appear as the subscript: ‘s’, ‘i’, ‘o’ for segment,
integration, or overall (for the entire dataset), respectively.

It is possible for an integration or pixel to have invalid data, in which case
no usable slope data are available. If a pixel has an invalid integration, the
value for that integration of that pixel will be set to NaN in the rateints
product. Further, if all integrations for a given pixel are invalid, the pixel
value in the rate product will be set to NaN. An example of invalid data is a
fully saturated integration for a pixel.

Optimal Weighting Algorithm
---------------------------
The slope of each segment is calculated using the least-squares method with optimal
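A minimal NumPy sketch of the behavior described in the new description.rst paragraph above; the flag bit values and the tiny arrays are hypothetical stand-ins, not stcal's actual implementation:

```python
import numpy as np

# Hypothetical bit values standing in for the DO_NOT_USE / SATURATED DQ flags.
DO_NOT_USE, SATURATED = 1, 2
INVALID = DO_NOT_USE | SATURATED

# Two integrations of a single pixel: the first is fully saturated (invalid),
# the second is good.
slope_int = np.array([[[1.2]], [[3.4]]])                # rateints-like slopes
dq_int = np.array([[[DO_NOT_USE | SATURATED]], [[0]]])  # per-integration DQ

# Integration level: invalid integrations are reported as NaN in rateints.
slope_int[np.bitwise_and(dq_int, INVALID) != 0] = np.nan

# Pixel level: the rate value only becomes NaN if every integration is invalid.
rate = np.nanmean(slope_int, axis=0)   # stand-in for the real slope combination

print(slope_int.ravel())   # [nan 3.4]
print(rate.ravel())        # [3.4]
```

If both integrations were flagged, every rateints value for the pixel would be NaN and the rate value would come out NaN as well, matching the rate-product behavior described above.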
9 changes: 7 additions & 2 deletions src/stcal/ramp_fitting/ols_fit.py
@@ -1280,7 +1280,6 @@ def ramp_fit_overall(
# Adjust DQ flags for NaNs.
wh_nans = np.isnan(slope_int)
dq_int[wh_nans] = np.bitwise_or(dq_int[wh_nans], ramp_data.flags_do_not_use)
slope_int[wh_nans] = 0.
warnings.resetwarnings()

del the_num, the_den, wh_nans
@@ -1347,7 +1346,7 @@ def ramp_fit_overall(

# Output integration-specific results to separate file
integ_info = utils.output_integ(
slope_int, dq_int, effintim, var_p3, var_r3, var_both3)
ramp_data, slope_int, dq_int, effintim, var_p3, var_r3, var_both3)

if opt_res is not None:
del opt_res
@@ -1366,6 +1365,12 @@
# primary output
final_pixeldq = utils.dq_compress_final(dq_int, ramp_data)

# For invalid slope calculations set to NaN. Pixels flagged as SATURATED or
# DO_NOT_USE have invalid data.
invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use
wh_invalid = np.where(np.bitwise_and(final_pixeldq, invalid_data))
c_rates[wh_invalid] = np.nan

if dq_int is not None:
del dq_int

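For reference, the masking pattern in the ols_fit.py hunk above as a standalone sketch, with hypothetical flag bit values in place of the ramp_data.flags_saturated and ramp_data.flags_do_not_use attributes:

```python
import numpy as np

# Hypothetical bit values in place of ramp_data.flags_do_not_use / flags_saturated.
FLAG_DO_NOT_USE, FLAG_SATURATED = 1, 2

# One row of three pixels: good, DO_NOT_USE, and SATURATED | DO_NOT_USE.
final_pixeldq = np.array([[0, FLAG_DO_NOT_USE, FLAG_SATURATED | FLAG_DO_NOT_USE]])
c_rates = np.array([[5.0, 0.0, 0.0]])

# Any pixel whose final DQ carries either flag gets NaN in the rate product.
invalid_data = FLAG_SATURATED | FLAG_DO_NOT_USE
wh_invalid = np.where(np.bitwise_and(final_pixeldq, invalid_data))
c_rates[wh_invalid] = np.nan

print(c_rates)   # [[ 5. nan nan]]
```

The same pattern is applied per integration in utils.output_integ (next file in this diff), using dq_int instead of final_pixeldq.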
11 changes: 9 additions & 2 deletions src/stcal/ramp_fitting/utils.py
@@ -667,7 +667,7 @@ def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first, nframes,
return ped


def output_integ(slope_int, dq_int, effintim, var_p3, var_r3, var_both3):
def output_integ(ramp_data, slope_int, dq_int, effintim, var_p3, var_r3, var_both3):
"""
For the OLS algorithm, construct the output integration-specific results.
Any variance values that are a large fraction of the default value
@@ -676,6 +676,9 @@ def output_integ(slope_int, dq_int, effintim, var_p3, var_r3, var_both3):
Parameters
----------
ramp_data : RampData
Contains flag information.
model : instance of Data Model
DM object for input
@@ -715,6 +718,10 @@ def output_integ(slope_int, dq_int, effintim, var_p3, var_r3, var_both3):
var_both3[var_both3 > 0.4 * LARGE_VARIANCE] = 0.

data = slope_int / effintim
invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use
wh_invalid = np.where(np.bitwise_and(dq_int, invalid_data))
data[wh_invalid] = np.nan

err = np.sqrt(var_both3)
dq = dq_int
var_poisson = var_p3
@@ -1133,7 +1140,7 @@ def fix_sat_ramps(ramp_data, sat_0th_group_int, var_p3, var_both3, slope_int, dq
"""
var_p3[sat_0th_group_int > 0] = LARGE_VARIANCE
var_both3[sat_0th_group_int > 0] = LARGE_VARIANCE
slope_int[sat_0th_group_int > 0] = 0.
slope_int[sat_0th_group_int > 0] = np.nan
dq_int[sat_0th_group_int > 0] = np.bitwise_or(
dq_int[sat_0th_group_int > 0], ramp_data.flags_do_not_use)

29 changes: 15 additions & 14 deletions tests/test_ramp_fitting.py
@@ -445,7 +445,7 @@ def test_2_group_cases():

# Check the outputs
data, dq, var_poisson, var_rnoise, err = slopes
chk_dt = np.array([[551.0735, 0., 0., 0., -293.9943, -845.0678, -845.0677]])
chk_dt = np.array([[551.0735, np.nan, np.nan, np.nan, -293.9943, -845.0678, -845.0677]])
chk_dq = np.array([[GOOD, DNU | SAT, DNU | SAT, DNU, GOOD, GOOD, GOOD]])
chk_vp = np.array([[38.945766, 0., 0., 0., 38.945766, 38.945766, 0.]])
chk_vr = np.array([[0.420046, 0.420046, 0.420046, 0., 0.420046, 0.420046, 0.420046]])
@@ -527,7 +527,7 @@ def test_one_group_ramp_suppressed_one_integration():
# Check slopes information
sdata, sdq, svp, svr, serr = slopes

check = np.array([[0., 0., 1.0000002]])
check = np.array([[np.nan, np.nan, 1.0000002]])
np.testing.assert_allclose(sdata, check, tol)

check = np.array([[DNU | SAT, DNU, GOOD]])
@@ -545,7 +545,7 @@ def test_one_group_ramp_suppressed_one_integration():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[0., 0., 1.0000001]]])
check = np.array([[[np.nan, np.nan, 1.0000001]]])
np.testing.assert_allclose(cdata, check, tol)

check = np.array([[[DNU | SAT, DNU, GOOD]]])
@@ -572,7 +572,7 @@ def test_one_group_ramp_not_suppressed_one_integration():
# Check slopes information
sdata, sdq, svp, svr, serr = slopes

check = np.array([[0., 1., 1.0000002]])
check = np.array([[np.nan, 1., 1.0000002]])
np.testing.assert_allclose(sdata, check, tol)

check = np.array([[DNU | SAT, GOOD, GOOD]])
@@ -590,7 +590,7 @@ def test_one_group_ramp_not_suppressed_one_integration():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[0., 1., 1.0000001]]])
check = np.array([[[np.nan, 1., 1.0000001]]])
np.testing.assert_allclose(cdata, check, tol)

check = np.array([[[DNU | SAT, GOOD, GOOD]]])
@@ -636,7 +636,7 @@ def test_one_group_ramp_suppressed_two_integrations():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[0., 0., 1.0000001]],
check = np.array([[[np.nan, np.nan, 1.0000001]],
[[1.0000001, 1.0000001, 1.0000001]]])
np.testing.assert_allclose(cdata, check, tol)

@@ -687,7 +687,7 @@ def test_one_group_ramp_not_suppressed_two_integrations():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[0., 1., 1.0000001]],
check = np.array([[[np.nan, 1., 1.0000001]],
[[1.0000001, 1.0000001, 1.0000001]]])
np.testing.assert_allclose(cdata, check, tol)

@@ -815,7 +815,10 @@ def test_zeroframe():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[149.0313, 0., 130.40239]],
# The third pixel in integration zero has good data
# because the zeroframe has good data, so the ramp
# is not fully saturated.
check = np.array([[[149.0313, np.nan, 130.40239]],
[[18.62891, 18.62891, 18.62891]]])
np.testing.assert_allclose(cdata, check, tol, tol)

@@ -865,7 +868,6 @@ def test_dq_multi_int_dnu():
Tests to make sure that integration DQ flags get set when all groups
in an integration are set to DO_NOT_USE.
"""
# XXX JP-2669
nints, ngroups, nrows, ncols = 2, 5, 1, 1
rnval, gval = 10., 5.
frame_time, nframes, groupgap = 10.736, 4, 1
@@ -910,7 +912,7 @@ def test_dq_multi_int_dnu():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[0.]],
check = np.array([[[np.nan]],
[[1.8628913]]])
np.testing.assert_allclose(cdata, check, tol, tol)

@@ -995,7 +997,6 @@ def test_new_saturation():
as saturated, then it must also be marked as do not
use.
"""
# XXX JP-2988
ramp, gain, rnoise = get_new_saturation()

save_opt, ncores, bufsize, algo = False, "none", 1024 * 30000, "OLS"
@@ -1007,7 +1008,7 @@ def test_new_saturation():
# Check slopes information
sdata, sdq, svp, svr, serr = slopes

check = np.array([[2.797567 , 2.8022935, 0.]])
check = np.array([[2.797567 , 2.8022935, np.nan]])
np.testing.assert_allclose(sdata, check, tol, tol)

check = np.array([[JUMP, JUMP, DNU | SAT]])
@@ -1025,8 +1026,8 @@ def test_new_saturation():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[2.7949152, 2.8022935, 0.]],
[[2.8020892, 0. , 0.]]])
check = np.array([[[2.7949152, 2.8022935, np.nan]],
[[2.8020892, np.nan, np.nan]]])
np.testing.assert_allclose(cdata, check, tol, tol)

check = np.array([[[GOOD, JUMP, DNU | SAT]],