Skip to content

Commit

Permalink
First attempt at fixing the NaN and DO_NOT_USE bug.
Browse files Browse the repository at this point in the history
Updating the handling of NaNs in the rateints product, as well as the DQ flags associated with those NaNs.

Updating the change log.
  • Loading branch information
kmacdonald-stsci committed Aug 10, 2022
1 parent 5013a69 commit c5aa144
Show file tree
Hide file tree
Showing 5 changed files with 143 additions and 15 deletions.
3 changes: 3 additions & 0 deletions CHANGES.rst
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ ramp_fitting
- Fixed crash due to two group ramps with saturated groups that used
an intermediate array with an incorrect shape. [#109]

- Updating how NaNs and DO_NOT_USE flags are handled in the rateints
product. [#112]

Changes to API
--------------

Expand Down
14 changes: 11 additions & 3 deletions src/stcal/ramp_fitting/ols_fit.py
Original file line number Diff line number Diff line change
Expand Up @@ -535,7 +535,6 @@ def slice_ramp_data(ramp_data, start_row, nrows):
ramp_data_slice = ramp_fit_class.RampData()

# Slice data by row
# XXX change the type of copy
data = ramp_data.data[:, :, start_row:start_row + nrows, :].copy()
err = ramp_data.err[:, :, start_row:start_row + nrows, :].copy()
groupdq = ramp_data.groupdq[:, :, start_row:start_row + nrows, :].copy()
Expand Down Expand Up @@ -1179,7 +1178,9 @@ def ramp_fit_overall(
variances_ans, save_opt, tstart):
"""
Computes the final/overall slope and variance values using the
intermediate computations previously computed.
intermediate computations previously computed. When computing
integration slopes, if NaNs are computed, the corresponding DQ
flag is set to DO_NOT_USE and the value is set to 0, per INS request.
Parameters
Expand Down Expand Up @@ -1273,13 +1274,20 @@ def ramp_fit_overall(
# Suppress, then re-enable harmless arithmetic warnings
warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning)
warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning)

slope_int = the_num / the_den

# Adjust DQ flags for NaNs.
wh_nans = np.where(np.isnan(slope_int))
dq_int[wh_nans] = np.bitwise_or(dq_int[wh_nans], ramp_data.flags_do_not_use)
slope_int[wh_nans] = 0.
warnings.resetwarnings()

del the_num, the_den
del the_num, the_den, wh_nans

# Clean up ramps that are SAT on their initial groups; set ramp parameters
# for variances and slope so they will not contribute

var_p3, var_both3, slope_int, dq_int = utils.fix_sat_ramps(
ramp_data, sat_0th_group_int, var_p3, var_both3, slope_int, dq_int)

Expand Down
9 changes: 8 additions & 1 deletion src/stcal/ramp_fitting/ramp_fit_class.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,10 +169,17 @@ def dbg_print_1grp(self):
def dbg_print_basic_info(self):
# Arrays from the data model
print("-" * 80)
print(" Array Types:")
print(f"Shape : {self.data.shape}")
print(f"data : {self.data}")
print(f"err : {self.err}")
print(f"groupdq : {self.groupdq}")
print(f"pixeldq : {self.pixeldq}")

self.dbg_print_meta()

def dbg_print_pixel_info(self, row, col):
print("-" * 80)
print(f" data :\n{self.data[:, :, row, col]}")
print(f" err :\n{self.err[:, :, row, col]}")
print(f" groupdq :\n{self.groupdq[:, :, row, col]}")
print(f" pixeldq :\n{self.pixeldq[row, col]}")
11 changes: 11 additions & 0 deletions src/stcal/ramp_fitting/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -1363,7 +1363,18 @@ def dq_compress_sect(ramp_data, num_int, gdq_sect, pixeldq_sect):
"""
sat_flag = ramp_data.flags_saturated
jump_flag = ramp_data.flags_jump_det
dnu_flag = ramp_data.flags_do_not_use

ngroups, nrows, ncols = gdq_sect.shape

# If all groups are set to DO_NOT_USE, mark as DO_NOT_USE.
dnu = np.zeros(gdq_sect.shape, dtype=np.uint32)
dnu[np.where(np.bitwise_and(gdq_sect, dnu_flag))] = 1
dnu_sum = dnu.sum(axis=0)
all_dnu = np.where(dnu_sum == ngroups)
pixeldq_sect[all_dnu] = np.bitwise_or(pixeldq_sect[all_dnu], dnu_flag)

# If saturation or a jump occurs, mark the appropriate flag.
sat_loc_r = np.bitwise_and(gdq_sect, sat_flag)
sat_loc_im = np.where(sat_loc_r.sum(axis=0) > 0)
pixeldq_sect[sat_loc_im] = np.bitwise_or(pixeldq_sect[sat_loc_im], sat_flag)
Expand Down
121 changes: 110 additions & 11 deletions tests/test_ramp_fitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,9 @@ def base_neg_med_rates_multi_integrations():


def test_neg_med_rates_multi_integrations_slopes():
"""
Test computing median rates of a ramp with multiple integrations.
"""
slopes, cube, optional, gls_dummy, dims = \
base_neg_med_rates_multi_integrations()

Expand Down Expand Up @@ -292,6 +295,9 @@ def test_utils_dq_compress_final():


def jp_2326_test_setup():
"""
Sets up data for MIRI testing DO_NOT_USE flags at the beginning of ramps.
"""
# Set up ramp data
ramp = np.array([120.133545, 117.85222, 87.38832, 66.90588, 51.392555,
41.65941, 32.15081, 24.25277, 15.955284, 9.500946])
Expand Down Expand Up @@ -365,9 +371,8 @@ def test_miri_ramp_dnu_and_jump_at_ramp_beginning():

def test_2_group_cases():
"""
Tests the special cases of 2 group ramps.
JP-2346
Tests the special cases of 2 group ramps. Create multiple pixel ramps
with two groups to test the various DQ cases.
"""
base_group = [-12328.601, -4289.051]
base_err = [0., 0.]
Expand Down Expand Up @@ -437,7 +442,7 @@ def test_2_group_cases():
# Check the outputs
data, dq, var_poisson, var_rnoise, err = slopes
chk_dt = np.array([[551.0735, 0., 0., 0., -293.9943, -845.0678, -845.0677]])
chk_dq = np.array([[0, dnu | sat, dnu | sat, sat, 0, 0, sat]])
chk_dq = np.array([[0, dnu | sat, dnu | sat, dnu | sat, 0, 0, sat]])
chk_vp = np.array([[38.945766, 0., 0., 0., 38.945766, 38.945766, 0.]])
chk_vr = np.array([[0.420046, 0.420046, 0.420046, 0., 0.420046, 0.420046, 0.420046]])
chk_er = np.array([[6.274218, 0.64811, 0.64811, 0., 6.274218, 6.274218, 0.64811]])
Expand Down Expand Up @@ -508,17 +513,21 @@ def run_one_group_ramp_suppression(nints, suppress):


def test_one_group_ramp_suppressed_one_integration():
"""
Tests one group ramp fitting where suppression turned on.
"""
slopes, cube, dims = run_one_group_ramp_suppression(1, True)
nints, ngroups, nrows, ncols = dims
tol = 1e-5

# Check slopes information
sdata, sdq, svp, svr, serr = slopes
sat, dnu = dqflags["SATURATED"], dqflags["DO_NOT_USE"]

check = np.array([[0., 0., 1.0000002]])
np.testing.assert_allclose(sdata, check, tol)

check = np.array([[3, 2, 0]])
check = np.array([[dnu | sat, dnu | sat, 0]])
np.testing.assert_allclose(sdq, check, tol)

check = np.array([[0., 0., 0.01]])
Expand All @@ -533,10 +542,10 @@ def test_one_group_ramp_suppressed_one_integration():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[0., np.nan, 1.0000001]]])
check = np.array([[[0., 0., 1.0000001]]])
np.testing.assert_allclose(cdata, check, tol)

check = np.array([[[3, 2, 0]]])
check = np.array([[[dnu | sat, dnu | sat, 0]]])
np.testing.assert_allclose(cdq, check, tol)

check = np.array([[[0., 0., 0.01]]])
Expand All @@ -550,6 +559,9 @@ def test_one_group_ramp_suppressed_one_integration():


def test_one_group_ramp_not_suppressed_one_integration():
"""
Tests one group ramp fitting where suppression turned off.
"""
slopes, cube, dims = run_one_group_ramp_suppression(1, False)
nints, ngroups, nrows, ncols = dims
tol = 1e-5
Expand Down Expand Up @@ -592,17 +604,22 @@ def test_one_group_ramp_not_suppressed_one_integration():


def test_one_group_ramp_suppressed_two_integrations():
"""
Test one good group ramp and two integrations with
suppression turned on.
"""
slopes, cube, dims = run_one_group_ramp_suppression(2, True)
nints, ngroups, nrows, ncols = dims
tol = 1e-5

# Check slopes information
sdata, sdq, svp, svr, serr = slopes
sat, dnu = dqflags["SATURATED"], dqflags["DO_NOT_USE"]

check = np.array([[1.0000001, 1.0000001, 1.0000002]])
np.testing.assert_allclose(sdata, check, tol)

check = np.array([[2, 2, 0]])
check = np.array([[sat, sat, 0]])
np.testing.assert_allclose(sdq, check, tol)

check = np.array([[0.005, 0.01, 0.005]])
Expand All @@ -617,11 +634,11 @@ def test_one_group_ramp_suppressed_two_integrations():
# Check slopes information
cdata, cdq, cvp, cvr, cerr = cube

check = np.array([[[0., np.nan, 1.0000001]],
check = np.array([[[0., 0., 1.0000001]],
[[1.0000001, 1.0000001, 1.0000001]]])
np.testing.assert_allclose(cdata, check, tol)

check = np.array([[[3, 2, 0]],
check = np.array([[[dnu | sat, dnu | sat, 0]],
[[0, 0, 0]]])
np.testing.assert_allclose(cdq, check, tol)

Expand All @@ -639,6 +656,10 @@ def test_one_group_ramp_suppressed_two_integrations():


def test_one_group_ramp_not_suppressed_two_integrations():
"""
Test one good group ramp and two integrations with
suppression turned off.
"""
slopes, cube, dims = run_one_group_ramp_suppression(2, False)
nints, ngroups, nrows, ncols = dims
tol = 1e-5
Expand Down Expand Up @@ -816,6 +837,9 @@ def test_zeroframe():


def test_all_sat():
"""
Test all ramps in all integrations saturated.
"""
nints, ngroups, nrows, ncols = 2, 5, 1, 3
rnval, gval = 10., 5.
frame_time, nframes, groupgap = 10.736, 4, 1
Expand All @@ -836,7 +860,82 @@ def test_all_sat():
assert cube is None


def test_dq_multi_int_dnu():
    """
    Tests to make sure that integration DQ flags get set when all groups
    in an integration are set to DO_NOT_USE.
    """
    # XXX JP-2669
    dims = 2, 5, 1, 1          # nints, ngroups, nrows, ncols
    var = 10., 5.              # rnval, gval
    tm = 10.736, 4, 1          # frame_time, nframes, groupgap
    nints, ngroups, nrows, ncols = dims

    ramp, gain, rnoise = create_blank_ramp_data(dims, var, tm)

    # Identical linear ramps in both integrations, but every group of
    # the first integration is flagged DO_NOT_USE.
    counts = np.array([(grp + 1) * 100 for grp in range(ngroups)])
    ramp.data[0, :, 0, 0] = counts
    ramp.data[1, :, 0, 0] = counts
    ramp.groupdq[0, :, 0, 0] = np.array([ramp.flags_do_not_use] * ngroups)

    algo, save_opt, ncores, bufsize = "OLS", False, "none", 1024 * 30000
    slopes, cube, ols_opt, gls_opt = ramp_fit_data(
        ramp, bufsize, save_opt, rnoise, gain, algo,
        "optimal", ncores, dqflags)

    tol = 1.e-5

    # Check slopes information: the final product should be computed
    # solely from the good (second) integration, with no DQ flags set.
    sdata, sdq, svp, svr, serr = slopes

    np.testing.assert_allclose(sdata, np.array([[1.8628913]]), tol, tol)
    np.testing.assert_allclose(sdq, np.array([[0]]), tol, tol)
    np.testing.assert_allclose(svp, np.array([[0.00173518]]), tol, tol)
    np.testing.assert_allclose(svr, np.array([[0.0004338]]), tol, tol)
    np.testing.assert_allclose(serr, np.array([[0.04657228]]), tol, tol)

    # Check cube (rateints) information: the fully flagged integration
    # gets a zeroed slope and a DO_NOT_USE integration DQ flag.
    cdata, cdq, cvp, cvr, cerr = cube

    np.testing.assert_allclose(
        cdata, np.array([[[0.]], [[1.8628913]]]), tol, tol)
    np.testing.assert_allclose(
        cdq, np.array([[[dqflags["DO_NOT_USE"]]], [[0]]]), tol, tol)
    np.testing.assert_allclose(
        cvp, np.array([[[0.]], [[0.00173518]]]), tol, tol)
    np.testing.assert_allclose(
        cvr, np.array([[[2.5000000e+07]], [[4.3379547e-04]]]), tol, tol)
    np.testing.assert_allclose(
        cerr, np.array([[[0.]], [[0.04657228]]]), tol, tol)


def create_blank_ramp_data(dims, var, tm):
"""
Create empty RampData classes, as well as gain and read noise arrays,
based on dimensional, variance, and timing input.
"""
nints, ngroups, nrows, ncols = dims
rnval, gval = var
frame_time, nframes, groupgap = tm
Expand Down Expand Up @@ -909,7 +1008,7 @@ def setup_inputs(dims, var, tm):


def base_print(label, arr):
    """
    Debugging helper: print *label*, then *arr* formatted with ", "
    separators and no line wrapping.
    """
    # max_line_width=np.nan disables wrapping: NaN comparisons are
    # always False, so the formatter never decides a line is too long.
    print(label)
    print(np.array2string(arr, max_line_width=np.nan, separator=", "))

Expand Down

0 comments on commit c5aa144

Please sign in to comment.