diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 6fe8173e..d2c18ff7 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -27,35 +27,27 @@ jobs: strategy: matrix: include: - - name: 'ubuntu-py38-oldestdeps' + - name: 'ubuntu-py311-oldestdeps' os: ubuntu-latest - python: '3.8' + python: '3.11' # Test the oldest supported dependencies on the oldest supported Python - tox_env: 'py38-test-oldestdeps' - - - name: 'macos-py310-astroscrappy11' - # Keep this test until astroscrappy 1.1.0 is the oldest supported - # version. - os: macos-latest - python: '3.10' - tox_env: 'py310-test-astroscrappy11' + tox_env: 'py311-test-oldestdeps' - - name: 'ubuntu-py312-bottleneck' + # Do not include bottleneck in this coverage test. By not including + # it we get a better measure of how we are covered when using the + # array API, which bottleneck short-circuits. + - name: 'ubuntu-py312-coverage' os: ubuntu-latest python: '3.12' - tox_env: 'py312-test-alldeps-bottleneck-cov' - - - name: 'ubuntu-py310' - os: ubuntu-latest - python: '3.10' - tox_env: 'py310-test-alldeps-numpy124' + tox_env: 'py312-test-alldeps-cov' - name: 'ubuntu-py311' os: ubuntu-latest python: '3.11' tox_env: 'py311-test-alldeps-numpy124' - - name: 'ubuntu-py312' + # Move bottleneck test a test without coverage + - name: 'ubuntu-py312-bottleneck' os: ubuntu-latest python: '3.12' tox_env: 'py312-test-alldeps-numpy126' diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 615f4cc5..4f24ba59 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -2,9 +2,6 @@ """This module implements the combiner class.""" -import numpy as np -from numpy import ma - try: import bottleneck as bn except ImportError: @@ -12,6 +9,8 @@ else: HAS_BOTTLENECK = True +import array_api_compat +import array_api_extra as xpx from astropy import log from astropy.nddata import CCDData, StdDevUncertainty from astropy.stats import sigma_clip @@ -22,32 +21,68 @@ __all__ = ["Combiner", "combine"] -def _default_median(): # pragma: no cover +def _default_median(xp=None): # pragma: no cover if HAS_BOTTLENECK: return bn.nanmedian else: - return np.nanmedian + if xp is None: + return None + + # No bottleneck, but we have a namespace. + try: + return xp.nanmedian + except AttributeError as e: + raise RuntimeError( + "No NaN-aware median function available. Please install bottleneck." + ) from e -def _default_average(): # pragma: no cover +def _default_average(xp=None): # pragma: no cover if HAS_BOTTLENECK: return bn.nanmean else: - return np.nanmean + if xp is None: + return None + + # No bottleneck, but we have a namespace. + try: + return xp.nanmean + except AttributeError as e: + raise RuntimeError( + "No NaN-aware mean function available. Please install bottleneck." + ) from e -def _default_sum(): # pragma: no cover +def _default_sum(xp=None): # pragma: no cover if HAS_BOTTLENECK: return bn.nansum else: - return np.nansum + if xp is None: + return None + # No bottleneck, but we have a namespace. + try: + return xp.nansum + except AttributeError as e: + raise RuntimeError( + "No NaN-aware sum function available. Please install bottleneck." + ) from e -def _default_std(): # pragma: no cover + +def _default_std(xp=None): # pragma: no cover if HAS_BOTTLENECK: return bn.nanstd else: - return np.nanstd + if xp is None: + return None + + # No bottleneck, but we have a namespace. + try: + return xp.nanstd + except AttributeError as e: + raise RuntimeError( + "No NaN-aware std function available. 
Please install bottleneck." + ) from e _default_sum_func = _default_sum() @@ -105,9 +140,6 @@ def __init__(self, ccd_iter, dtype=None): "ccd_iter should be a list or a generator of CCDData objects." ) - if dtype is None: - dtype = np.float64 - default_shape = None default_unit = None @@ -132,22 +164,28 @@ def __init__(self, ccd_iter, dtype=None): if not (default_unit == ccd.unit): raise TypeError("CCDData objects don't have the same unit.") + # Set array namespace + xp = array_api_compat.array_namespace(ccd_list[0].data) + if dtype is None: + dtype = xp.float64 self.ccd_list = ccd_list self.unit = default_unit self.weights = None self._dtype = dtype # set up the data array - new_shape = (len(ccd_list),) + default_shape - self.data_arr = ma.masked_all(new_shape, dtype=dtype) + # new_shape = (len(ccd_list),) + default_shape + self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) # populate self.data_arr - for i, ccd in enumerate(ccd_list): - self.data_arr[i] = ccd.data + mask_list = [] + for ccd in ccd_list: if ccd.mask is not None: - self.data_arr.mask[i] = ccd.mask + mask_list.append(ccd.mask) else: - self.data_arr.mask[i] = ma.zeros(default_shape) + mask_list.append(xp.zeros(default_shape)) + + self.data_arr_mask = xp.array(mask_list, dtype=bool) # Must be after self.data_arr is defined because it checks the # length of the data array. @@ -173,20 +211,23 @@ def weights(self): @weights.setter def weights(self, value): if value is not None: - if isinstance(value, np.ndarray): - if value.shape != self.data_arr.data.shape: - if value.ndim != 1: - raise ValueError( - "1D weights expected when shapes of the " - "data and weights differ." - ) - if value.shape[0] != self.data_arr.data.shape[0]: - raise ValueError( - "Length of weights not compatible with specified axis." - ) - self._weights = value - else: - raise TypeError("weights must be a numpy.ndarray.") + try: + _ = array_api_compat.array_namespace(value) + except TypeError as err: + raise TypeError("weights must be an array.") from err + + if value.shape != self.data_arr.shape: + if value.ndim != 1: + raise ValueError( + "1D weights expected when shapes of the " + "data and weights differ." + ) + if value.shape[0] != self.data_arr.shape[0]: + raise ValueError( + "Length of weights not compatible with specified axis." + ) + self._weights = value + else: self._weights = None @@ -207,13 +248,14 @@ def scaling(self): @scaling.setter def scaling(self, value): + xp = array_api_compat.array_namespace(self.data_arr) if value is None: self._scaling = value else: - n_images = self.data_arr.data.shape[0] + n_images = self.data_arr.shape[0] if callable(value): self._scaling = [value(self.data_arr[i]) for i in range(n_images)] - self._scaling = np.array(self._scaling) + self._scaling = xp.array(self._scaling) else: try: len(value) @@ -227,10 +269,10 @@ def scaling(self, value): "scaling must be a function or an array " "the same length as the number of images." ) - self._scaling = np.array(value) + self._scaling = xp.array(value) # reshape so that broadcasting occurs properly - for _ in range(len(self.data_arr.data.shape) - 1): - self._scaling = self.scaling[:, np.newaxis] + for _ in range(len(self.data_arr.shape) - 1): + self._scaling = self.scaling[:, xp.newaxis] # set up IRAF-like minmax clipping def clip_extrema(self, nlow=0, nhigh=0): @@ -275,20 +317,20 @@ def clip_extrema(self, nlow=0, nhigh=0): .. [0] image.imcombine help text. 
http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?imcombine """ - + xp = array_api_compat.array_namespace(self.data_arr) if nlow is None: nlow = 0 if nhigh is None: nhigh = 0 - argsorted = np.argsort(self.data_arr.data, axis=0) - mg = np.mgrid[ + argsorted = xp.argsort(self.data_arr, axis=0) + mg = xp.mgrid[ [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] ] for i in range(-1 * nhigh, nlow): # create a tuple with the indices where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg]) - self.data_arr.mask[where] = True + self.data_arr_mask[where] = True # set up min/max clipping algorithms def minmax_clipping(self, min_clip=None, max_clip=None): @@ -306,10 +348,12 @@ def minmax_clipping(self, min_clip=None, max_clip=None): """ if min_clip is not None: mask = self.data_arr < min_clip - self.data_arr.mask[mask] = True + # Written to avoid in-place modification of array + self.data_arr_mask = self.data_arr_mask | mask if max_clip is not None: mask = self.data_arr > max_clip - self.data_arr.mask[mask] = True + # Written to avoid in-place modification of array + self.data_arr_mask = self.data_arr_mask | mask # set up sigma clipping algorithms @deprecated_renamed_argument( @@ -368,18 +412,21 @@ def sigma_clipping( # Remove in 3.0 _ = kwd.pop("use_astropy", True) - self.data_arr.mask |= sigma_clip( - self.data_arr.data, - sigma_lower=low_thresh, - sigma_upper=high_thresh, - axis=kwd.get("axis", 0), - copy=kwd.get("copy", False), - maxiters=kwd.get("maxiters", 1), - cenfunc=func, - stdfunc=dev_func, - masked=True, - **kwd, - ).mask + self.data_arr_mask = ( + self.data_arr_mask + | sigma_clip( + self.data_arr, + sigma_lower=low_thresh, + sigma_upper=high_thresh, + axis=kwd.get("axis", 0), + copy=kwd.get("copy", False), + maxiters=kwd.get("maxiters", 1), + cenfunc=func, + stdfunc=dev_func, + masked=True, + **kwd, + ).mask + ) def _get_scaled_data(self, scale_arg): if scale_arg is not None: @@ -389,11 +436,14 @@ def _get_scaled_data(self, scale_arg): return self.data_arr def _get_nan_substituted_data(self, data): + xp = array_api_compat.array_namespace(self.data_arr) + # Get the data as an unmasked array with masked values filled as NaN - if self.data_arr.mask.any(): - data = np.ma.filled(data, fill_value=np.nan) + if self.data_arr_mask.any(): + # Use array_api_extra so that we can use at with all array libraries + data = xpx.at(data)[self.data_arr_mask].set(xp.nan) else: - data = data.data + data = data return data def _combination_setup(self, user_func, default_func, scale_to): @@ -401,16 +451,16 @@ def _combination_setup(self, user_func, default_func, scale_to): Handle the common pieces of image combination data/mask setup. """ data = self._get_scaled_data(scale_to) - + xp = array_api_compat.array_namespace(data) # Play it safe for now and only do the nan thing if the user is using # the default combination function. if user_func is None: combo_func = default_func # Subtitute NaN for masked entries data = self._get_nan_substituted_data(data) - masked_values = np.isnan(data).sum(axis=0) + masked_values = xp.isnan(data).sum(axis=0) else: - masked_values = self.data_arr.mask.sum(axis=0) + masked_values = self.data_arr_mask.sum(axis=0) combo_func = user_func return data, masked_values, combo_func @@ -454,9 +504,12 @@ def median_combine( The uncertainty currently calculated using the median absolute deviation does not account for rejected pixels. 
""" + xp = array_api_compat.array_namespace(self.data_arr) + + _default_median_func = _default_median(xp=xp) data, masked_values, median_func = self._combination_setup( - median_func, _default_median(), scale_to + median_func, _default_median_func, scale_to ) medianed = median_func(data, axis=0) @@ -476,17 +529,17 @@ def median_combine( else: uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(self.data_arr) - masked_values) + uncertainty /= xp.sqrt(len(self.data_arr) - masked_values) # Convert uncertainty to plain numpy array (#351) # There is no need to care about potential masks because the # uncertainty was calculated based on the data so potential masked # elements are also masked in the data. No need to keep two identical # masks. - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # create the combined image with a dtype matching the combiner combined_image = CCDData( - np.asarray(medianed, dtype=self.dtype), + xp.asarray(medianed, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -503,9 +556,10 @@ def _weighted_sum(self, data, sum_func): Perform weighted sum, used by both ``sum_combine`` and in some cases by ``average_combine``. """ + xp = array_api_compat.array_namespace(data) if self.weights.shape != data.shape: # Add extra axes to the weights for broadcasting - weights = np.reshape(self.weights, [len(self.weights), 1, 1]) + weights = xp.reshape(self.weights, [len(self.weights), 1, 1]) else: weights = self.weights @@ -556,8 +610,18 @@ def average_combine( combined_image: `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. """ + xp = array_api_compat.array_namespace(self.data_arr) + + _default_average_func = _default_average(xp=xp) + + if sum_func is None: + sum_func = _default_sum(xp=xp) + + if uncertainty_func is None: + uncertainty_func = _default_std(xp=xp) + data, masked_values, scale_func = self._combination_setup( - scale_func, _default_average(), scale_to + scale_func, _default_average_func, scale_to ) # Do NOT modify data after this -- we need it to be intact when we @@ -575,13 +639,13 @@ def average_combine( # set up the deviation uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(data) - masked_values) + uncertainty /= xp.sqrt(len(data) - masked_values) # Convert uncertainty to plain numpy array (#351) - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # create the combined image with a dtype that matches the combiner combined_image = CCDData( - np.asarray(mean, dtype=self.dtype), + xp.asarray(mean, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -629,8 +693,15 @@ def sum_combine( CCDData object based on the combined input of CCDData objects. 
""" + xp = array_api_compat.array_namespace(self.data_arr) + + _default_sum_func = _default_sum(xp=xp) + + if uncertainty_func is None: + uncertainty_func = _default_std(xp=xp) + data, masked_values, sum_func = self._combination_setup( - sum_func, _default_sum(), scale_to + sum_func, _default_sum_func, scale_to ) if self.weights is not None: @@ -644,15 +715,15 @@ def sum_combine( # set up the deviation uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(data) - masked_values) + uncertainty /= xp.sqrt(len(data) - masked_values) # Convert uncertainty to plain numpy array (#351) - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # Multiply uncertainty by square root of the number of images uncertainty *= len(data) - masked_values # create the combined image with a dtype that matches the combiner combined_image = CCDData( - np.asarray(summed, dtype=self.dtype), + xp.asarray(summed, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -692,12 +763,10 @@ def _calculate_step_sizes(x_size, y_size, num_chunks): return xstep, ystep -def _calculate_size_of_image(ccd, combine_uncertainty_function): +def _calculate_size_of_image(ccd): # If uncertainty_func is given for combine this will create an uncertainty # even if the originals did not have one. In that case we need to create # an empty placeholder. - if ccd.uncertainty is None and combine_uncertainty_function is not None: - ccd.uncertainty = StdDevUncertainty(np.zeros(ccd.data.shape)) size_of_an_img = ccd.data.nbytes try: @@ -737,8 +806,8 @@ def combine( sigma_clip=False, sigma_clip_low_thresh=3, sigma_clip_high_thresh=3, - sigma_clip_func=ma.mean, - sigma_clip_dev_func=ma.std, + sigma_clip_func=None, + sigma_clip_dev_func=None, dtype=None, combine_uncertainty_function=None, overwrite_output=False, @@ -852,21 +921,30 @@ def combine( combined_image : `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. """ + # Handle case where the input is an array of file names first if not isinstance(img_list, list): - # If not a list, check whether it is a numpy ndarray or string of - # filenames separated by comma - if isinstance(img_list, np.ndarray): - img_list = img_list.tolist() - elif isinstance(img_list, str) and ("," in img_list): - img_list = img_list.split(",") + try: + _ = array_api_compat.array_namespace(img_list) + except TypeError: + pass else: - try: - # Maybe the input can be made into a list, so try that - img_list = list(img_list) - except TypeError as err: - raise ValueError( - "unrecognised input for list of images to combine." - ) from err + # If it is an array, convert it to a list + img_list = list(img_list) + if ( + not isinstance(img_list, list) + and isinstance(img_list, str) + and ("," in img_list) + ): + # Handle case where the input is a string of file names separated by comma + img_list = img_list.split(",") + else: + try: + # Maybe the input can be made into a list, so try that + img_list = list(img_list) + except TypeError as err: + raise ValueError( + "unrecognised input for list of images to combine." 
+ ) from err # Select Combine function to call in Combiner if method == "average": @@ -885,26 +963,34 @@ def combine( # User has provided fits filenames to read from ccd = CCDData.read(img_list[0], **ccdkwargs) + # Get the array namespace + xp = array_api_compat.array_namespace(ccd.data) + if dtype is None: - dtype = np.float64 + dtype = xp.float64 + + if sigma_clip_func is None: + sigma_clip_func = xp.mean + if sigma_clip_dev_func is None: + sigma_clip_dev_func = xp.std # Convert the master image to the appropriate dtype so when overwriting it # later the data is not downcast and the memory consumption calculation # uses the internally used dtype instead of the original dtype. #391 if ccd.data.dtype != dtype: - ccd.data = ccd.data.astype(dtype) + ccd.data = xp.astype(ccd.data, dtype) # If the template image doesn't have an uncertainty, add one, because the # result always has an uncertainty. if ccd.uncertainty is None: - ccd.uncertainty = StdDevUncertainty(np.zeros_like(ccd.data)) + ccd.uncertainty = StdDevUncertainty(xp.zeros_like(ccd.data)) # If the template doesn't have a mask, add one, because the result may have # a mask if ccd.mask is None: - ccd.mask = np.zeros_like(ccd.data, dtype=bool) + ccd.mask = xp.zeros_like(ccd.data, dtype=bool) - size_of_an_img = _calculate_size_of_image(ccd, combine_uncertainty_function) + size_of_an_img = _calculate_size_of_image(ccd) no_of_img = len(img_list) @@ -951,7 +1037,7 @@ def combine( scalevalues.append(scale(imgccd.data)) - to_set_in_combiner["scaling"] = np.array(scalevalues) + to_set_in_combiner["scaling"] = xp.array(scalevalues) else: to_set_in_combiner["scaling"] = scale @@ -1008,11 +1094,20 @@ def combine( comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds) # add it back into the master image - ccd.data[x:xend, y:yend] = comb_tile.data + # Use array_api_extra so that we can use at with all array libraries + ccd.data = xpx.at(ccd.data)[x:xend, y:yend].set(comb_tile.data) + if ccd.mask is not None: - ccd.mask[x:xend, y:yend] = comb_tile.mask + # Maybe temporary workaround for the mask not being writeable... + ccd.mask = ccd.mask.copy() + # Handle immutable arrays with array_api_extra + ccd.mask = xpx.at(ccd.mask)[x:xend, y:yend].set(comb_tile.mask) + if ccd.uncertainty is not None: - ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array + # Handle immutable arrays with array_api_extra + ccd.uncertainty.array = xpx.at(ccd.uncertainty.array)[ + x:xend, y:yend + ].set(comb_tile.uncertainty.array) # Free up memory to try to stay under user's limit del comb_tile del tile_combiner diff --git a/ccdproc/core.py b/ccdproc/core.py index 26b27a20..daaf7be5 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -7,7 +7,8 @@ import numbers import warnings -import numpy as np +import array_api_compat +import array_api_extra as xpx from astropy import nddata, stats from astropy import units as u from astropy.modeling import fitting @@ -68,6 +69,62 @@ } +def _is_array(arr): + """ + Check whether an object is an array by tring to find a namespace + for it. + + Parameters + ---------- + arr : object + Object to be tested. + + Returns + ------- + is_array : bool + ``True`` if arr is an array, ``False`` otherwise. + """ + try: + array_api_compat.array_namespace(arr) + except TypeError: + return False + return True + + +# Ideally this would eventually be covered by tests. Looks like Sparse +# could be used to test this, since it has no percentile... 
+def _percentile_fallback(array, percentiles): # pragma: no cover + """ + Try calculating percentile using namespace, otherwise fall back to + an implmentation that uses sort. As of the 2023 version of the array API + there is no percentile function in the API but there is a sort function. + + Parameters + ---------- + array : array-like + Array from which to calculate the percentile. + + percentiles : float or list-like + Percentile to calculate. + + Returns + ------- + percentile : float or list-like + Calculated percentile. + """ + xp = array_api_compat.array_namespace(array) + try: + return xp.percentile(array, percentiles) + except AttributeError: + pass + + # Fall back to using sort + sorted_array = xp.sort(array) + + indexes = xp.astype(len(sorted_array) * xp.asarray(percentiles), int) + return sorted_array[indexes] + + @log_to_metadata def ccd_process( ccd, @@ -222,6 +279,9 @@ def ccd_process( # make a copy of the object nccd = ccd.copy() + # Set array namespace + xp = array_api_compat.array_namespace(nccd.data) + # apply the overscan correction if isinstance(oscan, CCDData): nccd = subtract_overscan( @@ -251,12 +311,13 @@ def ccd_process( raise ValueError("gain and readnoise must be specified to create error frame.") # apply the bad pixel mask - if isinstance(bad_pixel_mask, np.ndarray): - nccd.mask = bad_pixel_mask - elif bad_pixel_mask is None: + if bad_pixel_mask is None: + # Handle this simple case first.... pass + elif _is_array(bad_pixel_mask): + nccd.mask = xp.asarray(bad_pixel_mask, dtype=bool) else: - raise TypeError("bad_pixel_mask is not None or numpy.ndarray.") + raise TypeError("bad_pixel_mask is not None or an array.") # apply the gain correction if not (gain is None or isinstance(gain, Quantity)): @@ -347,6 +408,8 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): units as the data in the parameter ``ccd_data``. 
""" + # Get array namespace + xp = array_api_compat.array_namespace(ccd_data.data) if gain is not None and not isinstance(gain, Quantity): raise TypeError("gain must be a astropy.units.Quantity.") @@ -370,14 +433,15 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): # remove values that might be negative or treat as nan data = gain_value * ccd_data.data mask = data < 0 + if disregard_nan: - data[mask] = 0 + data = data * ~mask else: - data[mask] = np.nan + # data[mask] = xp.nan logging.warning("Negative values in array will be replaced with nan") # calculate the deviation - var = (data + readnoise_value**2) ** 0.5 + var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5 # ensure uncertainty and image data have same unit ccd = ccd_data.copy() @@ -480,10 +544,13 @@ def subtract_overscan( if not isinstance(ccd, CCDData): raise TypeError("ccddata is not a CCDData object.") + # Set array namespace + xp = array_api_compat.array_namespace(ccd.data) + if (overscan is not None and fits_section is not None) or ( overscan is None and fits_section is None ): - raise TypeError("specify either overscan or fits_section, but not " "both.") + raise TypeError("specify either overscan or fits_section, but not both.") if (overscan is not None) and (not isinstance(overscan, CCDData)): raise TypeError("overscan is not a CCDData object.") @@ -498,24 +565,24 @@ def subtract_overscan( overscan_axis = 0 if overscan.shape[1] > overscan.shape[0] else 1 if median: - oscan = np.median(overscan.data, axis=overscan_axis) + oscan = xp.median(overscan.data, axis=overscan_axis) else: - oscan = np.mean(overscan.data, axis=overscan_axis) + oscan = xp.mean(overscan.data, axis=overscan_axis) if model is not None: of = fitting.LinearLSQFitter() - yarr = np.arange(len(oscan)) + yarr = xp.arange(len(oscan)) oscan = of(model, yarr, oscan) oscan = oscan(yarr) if overscan_axis == 1: - oscan = np.reshape(oscan, (oscan.size, 1)) + oscan = xp.reshape(oscan, (oscan.size, 1)) else: - oscan = np.reshape(oscan, (1, oscan.size)) + oscan = xp.reshape(oscan, (1, oscan.size)) else: if overscan_axis == 1: - oscan = np.reshape(oscan, oscan.shape + (1,)) + oscan = xp.reshape(oscan, oscan.shape + (1,)) else: - oscan = np.reshape(oscan, (1,) + oscan.shape) + oscan = xp.reshape(oscan, (1,) + oscan.shape) subtracted = ccd.copy() @@ -968,6 +1035,9 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): from astropy.nddata.ccddata import _generate_wcs_and_update_header from reproject import reproject_interp + # Set array namespace + xp = array_api_compat.array_namespace(ccd.data) + if not (ccd.wcs.is_celestial and target_wcs.is_celestial): raise ValueError("one or both WCS is not celestial.") @@ -990,7 +1060,7 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): # The reprojection will contain nan for any pixels for which the source # was outside the original image. Those should be masked also. - output_mask = np.isnan(projected_image_raw) + output_mask = xp.isnan(projected_image_raw) if reprojected_mask is not None: output_mask = output_mask | reprojected_mask @@ -1216,14 +1286,27 @@ def rebin(ccd, newshape): rebin(arr1, (20,20)) """ # check to see that is in a nddata type - if isinstance(ccd, np.ndarray): + try: + xp = array_api_compat.array_namespace(ccd) + except TypeError: + try: + # This will also raise a TypeError if ccd.data isn't an array + # but that is fine. 
+ xp = array_api_compat.array_namespace(ccd.data) + except AttributeError as e: + raise TypeError("ccd is not an ndarray or a CCDData object.") from e + + if isinstance(ccd, xp.ndarray): # check to see that the two arrays are going to be the same length if len(ccd.shape) != len(newshape): raise ValueError("newshape does not have the same dimensions as " "ccd.") - slices = [slice(0, old, old / new) for old, new in zip(ccd.shape, newshape)] - coordinates = np.mgrid[slices] + slices = [ + slice(0, old, old / new) + for old, new in zip(ccd.shape, newshape, strict=True) + ] + coordinates = xp.mgrid[slices] indices = coordinates.astype("i") return ccd[tuple(indices)] @@ -1249,8 +1332,11 @@ def rebin(ccd, newshape): raise TypeError("ccd is not an ndarray or a CCDData object.") -def block_reduce(ccd, block_size, func=np.sum): +def block_reduce(ccd, block_size, func=None): """Thin wrapper around `astropy.nddata.block_reduce`.""" + if func is None: + xp = array_api_compat.array_namespace(ccd.data) + func = xp.sum data = nddata.block_reduce(ccd, block_size, func) if isinstance(ccd, CCDData): # unit and meta "should" be unaffected by the change of shape and can @@ -1261,7 +1347,10 @@ def block_reduce(ccd, block_size, func=np.sum): def block_average(ccd, block_size): """Like `block_reduce` but with predefined ``func=np.mean``.""" - data = nddata.block_reduce(ccd, block_size, np.mean) + + xp = array_api_compat.array_namespace(ccd.data) + + data = nddata.block_reduce(ccd, block_size, xp.mean) # Like in block_reduce: if isinstance(ccd, CCDData): data = CCDData(data, unit=ccd.unit, meta=ccd.meta.copy()) @@ -1573,39 +1662,7 @@ def cosmicray_lacosmic( asy_background_kwargs = dict(inbkg=inbkg, invar=invar) - if isinstance(ccd, np.ndarray): - data = ccd - - crmask, cleanarr = detect_cosmics( - data + data_offset, - inmask=None, - sigclip=sigclip, - sigfrac=sigfrac, - objlim=objlim, - gain=gain.value, - readnoise=readnoise.value, - satlevel=satlevel, - niter=niter, - sepmed=sepmed, - cleantype=cleantype, - fsmode=fsmode, - psfmodel=psfmodel, - psffwhm=psffwhm, - psfsize=psfsize, - psfk=psfk, - psfbeta=psfbeta, - verbose=verbose, - **asy_background_kwargs, - ) - - cleanarr = cleanarr - data_offset - cleanarr = _astroscrappy_gain_apply_helper( - cleanarr, gain.value, gain_apply, old_astroscrappy_interface - ) - - return cleanarr, crmask - - elif isinstance(ccd, CCDData): + if isinstance(ccd, CCDData): # Start with a check for a special case: ccd is in electron, and # gain and readnoise have no units. In that case we issue a warning # instead of raising an error to avoid crashing user's pipelines. 
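For orientation, a minimal sketch (not part of the change set) of the namespace-dispatch pattern these core.py functions now follow: the array library is inferred from the input, and every subsequent operation goes through that namespace.

import array_api_compat
import numpy as np  # jax.numpy, cupy, etc. would work the same way

def nan_fraction(data):
    # Infer the array library from the data, then stay inside it.
    xp = array_api_compat.array_namespace(data)
    return xp.mean(xp.astype(xp.isnan(data), xp.float64))

print(nan_fraction(np.array([1.0, np.nan, 3.0])))  # 0.333...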
@@ -1672,6 +1729,37 @@ def cosmicray_lacosmic( nccd.mask = nccd.mask + crmask return nccd + elif _is_array(ccd): + data = ccd + + crmask, cleanarr = detect_cosmics( + data + data_offset, + inmask=None, + sigclip=sigclip, + sigfrac=sigfrac, + objlim=objlim, + gain=gain.value, + readnoise=readnoise.value, + satlevel=satlevel, + niter=niter, + sepmed=sepmed, + cleantype=cleantype, + fsmode=fsmode, + psfmodel=psfmodel, + psffwhm=psffwhm, + psfsize=psfsize, + psfk=psfk, + psfbeta=psfbeta, + verbose=verbose, + **asy_background_kwargs, + ) + + cleanarr = cleanarr - data_offset + cleanarr = _astroscrappy_gain_apply_helper( + cleanarr, gain.value, gain_apply, old_astroscrappy_interface + ) + + return cleanarr, crmask else: raise TypeError("ccd is not a CCDData or ndarray object.") @@ -1785,22 +1873,26 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): mask of the object will be created if it did not previously exist or be updated with the detected cosmic rays. """ - if isinstance(ccd, np.ndarray): - data = ccd + if _is_array(ccd): + xp = array_api_compat.array_namespace(ccd) + + # Masked data is not part of the array API so remove mask if present. + # Only look at the data array, guessing that if there is a .mask then + # there is also a .data. + if hasattr(ccd, "mask"): + data = ccd.data + + data = xp.asarray(ccd) if error_image is None: - error_image = data.std() - else: - if not isinstance(error_image, (float, np.ndarray)): + error_image = xp.std(data) + elif not isinstance(error_image, float): + if not _is_array(error_image): raise TypeError("error_image is not a float or ndarray.") # create the median image marr = ndimage.median_filter(data, size=(mbox, mbox)) - # Only look at the data array - if isinstance(data, np.ma.MaskedArray): - data = data.data - # Find the residual image rarr = (data - marr) / error_image @@ -1814,9 +1906,13 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # replace bad pixels in the image ndata = data.copy() if rbox > 0: - data = np.ma.masked_array(data, (crarr == 1)) - mdata = ndimage.median_filter(data, rbox) - ndata[crarr == 1] = mdata[crarr == 1] + # Fun fact: scipy.ndimage ignores the mask, so may as well not + # bother with it. + # data = np.ma.masked_array(data, (crarr == 1)) + + # make sure that mdata is the same type as data + mdata = xp.asarray(ndimage.median_filter(data, rbox)) + ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): @@ -1972,13 +2068,16 @@ def ccdmask( # No data attribute or data has no shape attribute. 
raise ValueError('"ratio" should be a "CCDData".') from err + # Get array namespace + xp = array_api_compat.array_namespace(ratio.data) + def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): """Helper function to mask values outside of the specified sigma range.""" return (baseline < -lower_sigma * one_sigma_value) | ( baseline > upper_sigma * one_sigma_value ) - mask = ~np.isfinite(ratio.data) + mask = ~xp.isfinite(ratio.data) medsub = ratio.data - ndimage.median_filter(ratio.data, size=(nlmed, ncmed)) if byblocks: @@ -1991,18 +2090,40 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): c1 = j * ncsig c2 = min((j + 1) * ncsig, ncols) block = medsub[l1:l2, c1:c2] - high = np.percentile(block.ravel(), 69.1) - low = np.percentile(block.ravel(), 30.9) + # The array API has no percentile function, so we use a small + # function that first tries percentile in case a particular + # array package has it but otherwise falls back to a sort. + # This is the case at least as of the 2023.12 API. + high = _percentile_fallback( + xp.reshape(block, (xp.prod(block.shape),)), 69.1 + ) + low = _percentile_fallback( + xp.reshape(block, (xp.prod(block.shape),)), 30.9 + ) block_sigma = (high - low) / 2.0 block_mask = _sigma_mask(block, block_sigma, lsigma, hsigma) - mblock = np.ma.MaskedArray(block, mask=block_mask, copy=False) + # mblock = np.ma.MaskedArray(block, mask=block_mask, copy=False) if findbadcolumns: - csum = np.ma.sum(mblock, axis=0) + # Not clear yet what the right solution to masking is in the array + # API, so we'll use a boolean index to get the elements we want + # and sum them....unfortunately, we'll need to do this in a loop + # as far as I can tell. + csum = [] + all_masked = [] + for k in range(block.shape[1]): + subset = block[:, k] + csum.append(xp.sum(subset[~block_mask[:, k]])) + all_masked.append(xp.all(block_mask[:, k])) + csum = xp.array(csum) csum[csum <= 0] = 0 - csum_sigma = np.ma.MaskedArray(np.sqrt(c2 - c1 - csum)) - colmask = _sigma_mask(csum.filled(1), csum_sigma, lsigma, hsigma) - block_mask[:, :] |= colmask[np.newaxis, :] + csum_sigma = xp.array(xp.sqrt(c2 - c1 - csum)) + # The prior code filled the csum array with the value 1, which + # only affects those cases where all of the input values to + # the csum were masked, so we fill those with 1. + csum[all_masked] = 1 + colmask = _sigma_mask(csum, csum_sigma, lsigma, hsigma) + block_mask[:, :] |= colmask[xp.newaxis, :] mask[l1:l2, c1:c2] = block_mask else: @@ -2020,7 +2141,7 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): if mask[line, col]: for i in range(2, ngood + 2): lend = line + i - if mask[lend, col] and not np.all(mask[line : lend + 1, col]): + if mask[lend, col] and not xp.all(mask[line : lend + 1, col]): mask[line:lend, col] = True return mask diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py index 35a8acda..6a01066b 100644 --- a/ccdproc/image_collection.py +++ b/ccdproc/image_collection.py @@ -8,13 +8,20 @@ from os import listdir, path import astropy.io.fits as fits -import numpy as np +import numpy as np # see numpy comment below import numpy.ma as ma from astropy.table import MaskedColumn, Table from astropy.utils.exceptions import AstropyUserWarning from .ccddata import _recognized_fits_file_extensions, fits_ccddata_reader +# ==> numpy comment <== +# numpy is used internally to keep track of masking in the summary +# table. It is not used for any CCD processing, so there is no need +# to implement the array API here. 
In other words, ImageFileCollection +# is fine to implement its internal tables however it wantrs, regardless +# of what the user is using for their data arrays. + logger = logging.getLogger(__name__) __all__ = ["ImageFileCollection"] @@ -696,7 +703,7 @@ def _find_keywords_by_values(self, **kwd): use_info = self._fits_summary(header_keywords=keywords) matches = np.ones(len(use_info), dtype=bool) - for key, value in zip(keywords, values): + for key, value in zip(keywords, values, strict=True): logger.debug("key %s, value %s", key, value) logger.debug("value in table %s", use_info[key]) value_missing = use_info[key].mask diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index 67a469c0..ebafa320 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -102,7 +102,7 @@ def wrapper(*args, **kwd): # been called as keywords. positional_args = original_args[: len(args)] - all_args = chain(zip(positional_args, args), kwd.items()) + all_args = chain(zip(positional_args, args, strict=True), kwd.items()) all_args = [ f"{name}={_replace_array_with_placeholder(val)}" for name, val in all_args @@ -134,7 +134,7 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, (NDData, np.ndarray)): + elif isinstance(value, NDData | np.ndarray): try: length = len(value) except TypeError: diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index e900e0f8..0c27a94a 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -2,6 +2,8 @@ from shutil import rmtree +# import dask.array as da +import jax.numpy as jnp import numpy as np import pytest from astropy import units as u @@ -59,7 +61,7 @@ def ccd_data( data = rng.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {"my_key": 42, "your_key": "not 42"} - ccd = CCDData(data, unit=u.adu) + ccd = CCDData(jnp.array(data), unit=u.adu) ccd.header = fake_meta return ccd diff --git a/ccdproc/tests/run_for_memory_profile.py b/ccdproc/tests/run_for_memory_profile.py index d9b445a2..50e39532 100644 --- a/ccdproc/tests/run_for_memory_profile.py +++ b/ccdproc/tests/run_for_memory_profile.py @@ -109,7 +109,7 @@ def run_memory_profile( ) ccd = CCDData.read(files[0]) - expected_img_size = _calculate_size_of_image(ccd, None) + expected_img_size = _calculate_size_of_image(ccd) if memory_limit: kwargs["mem_limit"] = memory_limit diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 55a1ccbe..ab4ebc60 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1,8 +1,11 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst +import warnings + +# import array_api_compat import astropy import astropy.units as u -import numpy as np +import jax.numpy as np import pytest import skimage from astropy.io import fits @@ -11,6 +14,9 @@ from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS +from numpy import array as np_array +from numpy import random as np_random +from numpy import testing as np_testing from ccdproc.core import ( Keyword, @@ -36,7 +42,16 @@ except ImportError: HAS_BLOCK_X_FUNCS = False -_NUMPY_COPY_IF_NEEDED = False if np.__version__.startswith("1.") else None +_NUMPY_COPY_IF_NEEDED = None # False if np.__version__.startswith("1.") else None + +RNG = np_random.default_rng + +# import dask.array as da +# import numpy +# data = 
numpy.arange(100_000).reshape(200, 500) +# a = da.from_array(data, chunks=(100, 100)) + +# np = array_api_compat.array_namespace(a) # Test creating deviation @@ -65,12 +80,12 @@ def test_create_deviation(u_image, u_gain, u_readnoise, expect_success): ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise) assert ccd_var.uncertainty.array.shape == (10, 10) assert ccd_var.uncertainty.array.size == 100 - assert ccd_var.uncertainty.array.dtype == np.dtype(float) + assert np.isdtype(ccd_var.uncertainty.array.dtype, "real floating") if gain is not None: expected_var = np.sqrt(2 * ccd_data.data + 5**2) / 2 else: expected_var = np.sqrt(ccd_data.data + 5**2) - np.testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) assert ccd_var.unit == ccd_data.unit # Uncertainty should *not* have any units -- does it? with pytest.raises(AttributeError): @@ -87,7 +102,7 @@ def test_create_deviation_from_negative(): ccd_var = create_deviation( ccd_data, gain=None, readnoise=readnoise, disregard_nan=False ) - np.testing.assert_array_equal( + np_testing.assert_array_equal( ccd_data.data < 0, np.isnan(ccd_var.uncertainty.array) ) @@ -100,9 +115,12 @@ def test_create_deviation_from_negative_2(): ccd_data, gain=None, readnoise=readnoise, disregard_nan=True ) mask = ccd_data.data < 0 - ccd_data.data[mask] = 0 + # Set the variance to zero where the data is negative + # In-place replacement of values does not work in some array + # libraries. + ccd_data.data = ccd_data.data * ~mask expected_var = np.sqrt(ccd_data.data + readnoise.value**2) - np.testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) def test_create_deviation_keywords_must_have_unit(): @@ -148,12 +166,19 @@ def test_subtract_overscan(median, transpose, data_rectangle): science_region = science_region[::-1] overscan_axis = 0 - ccd_data.data[oscan_region] = oscan + # Since some array libraries do not support in-place operations, we + # work on the science and overscan regions separately. + science_data = ccd_data.data[science_region].copy() + overscan_data = 0 * ccd_data.data[oscan_region].copy() + oscan + # Add a fake sky background so the "science" part of the image has a # different average than the "overscan" part. sky = 10.0 - original_mean = ccd_data.data[science_region].mean() - ccd_data.data[science_region] += oscan + sky + original_mean = science_data.mean() + science_data = science_data + oscan + sky + + # Reconstruct the full image + ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) # Test once using the overscan argument to specify the overscan region ccd_data_overscan = subtract_overscan( ccd_data, @@ -164,8 +189,8 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np.testing.assert_almost_equal( - ccd_data_overscan.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_overscan.data[science_region].mean(), sky + original_mean, rtol=1e-6 ) # Is the overscan region zero? assert (ccd_data_overscan.data[oscan_region] == 0).all() @@ -181,14 +206,16 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? 
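The rewrite above sidesteps in-place item assignment, which some array backends (e.g. jax) do not allow; a standalone sketch of the same reassembly idea, with made-up shapes:

import jax.numpy as jnp

data = jnp.zeros((20, 30))
science = data[:, 10:] + 5.0            # add a fake sky to the science columns
overscan = 0.0 * data[:, :10] + 2.0     # constant overscan level
data = jnp.concatenate([overscan, science], axis=1)  # rebuild rather than assign in place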
- np.testing.assert_almost_equal( - ccd_data_fits_section.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_fits_section.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, ) # Is the overscan region zero? assert (ccd_data_fits_section.data[oscan_region] == 0).all() # Do both ways of subtracting overscan give exactly the same result? - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data_overscan[science_region], ccd_data_fits_section[science_region] ) @@ -201,8 +228,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_almost_equal( - ccd_data_overscan_auto.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_overscan_auto.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, ) # Use overscan_axis=None with a FITS section ccd_data_fits_section_overscan_auto = subtract_overscan( @@ -212,9 +241,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_almost_equal( + np_testing.assert_allclose( ccd_data_fits_section_overscan_auto.data[science_region].mean(), sky + original_mean, + rtol=1e-6, ) # Overscan_axis should be 1 for a square overscan region # This test only works for a non-square data region, but the @@ -237,7 +267,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) + np_testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) # A more substantial test of overscan modeling @@ -263,8 +293,12 @@ def test_subtract_overscan_model(transpose): original_mean = ccd_data.data[science_region].mean() - ccd_data.data[oscan_region] = 0.0 # Only want overscan in that region - ccd_data.data = ccd_data.data + scan + science_data = ccd_data.data[science_region].copy() + # Set any existing overscan to zero. Overscan is stored for the entire + # image, so we need to do this before we add the new overscan. + overscan_data = 0 * ccd_data.data[oscan_region].copy() + # Reconstruct the full image + ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) + scan ccd_data = subtract_overscan( ccd_data, @@ -273,7 +307,9 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose( + ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + ) # Set the overscan_axis explicitly to None, and let the routine # figure it out. 
ccd_data = subtract_overscan( @@ -283,7 +319,9 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose( + ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + ) def test_subtract_overscan_fails(): @@ -320,13 +358,13 @@ def test_trim_image_fits_section(mask_data, uncertainty): if mask_data: ccd_data.mask = np.zeros_like(ccd_data) if uncertainty: - err = np.random.default_rng().normal(size=ccd_data.shape) + err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) trimmed = trim_image(ccd_data, fits_section="[20:40,:]") # FITS reverse order, bounds are inclusive and starting index is 1-based assert trimmed.shape == (50, 21) - np.testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) if mask_data: assert trimmed.shape == trimmed.mask.shape if uncertainty: @@ -337,7 +375,7 @@ def test_trim_image_no_section(): ccd_data = ccd_data_func(data_size=50) trimmed = trim_image(ccd_data[:, 19:40]) assert trimmed.shape == (50, 21) - np.testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) def test_trim_with_wcs_alters_wcs(): @@ -367,7 +405,7 @@ def test_subtract_bias(): master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? - np.testing.assert_almost_equal(no_bias.data.mean(), data_avg) + np_testing.assert_allclose(no_bias.data.mean(), data_avg) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header del no_bias.header["key"] @@ -434,7 +472,9 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np.testing.assert_allclose(ccd_data.data - dark_scale * dark_level, dark_sub.data) + np_testing.assert_allclose( + ccd_data.data - dark_scale * dark_level, dark_sub.data, rtol=1e-6 + ) # Headers should have the same content...do they? assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -528,18 +568,22 @@ def test_flat_correct(): ccd_data.header["my_key"] = 42 size = ccd_data.shape[0] # create the flat, with some scatter - data = 2 * np.random.default_rng().normal(loc=1.0, scale=0.05, size=(size, size)) + data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None) # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. 
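A small numeric sketch of the identity stated in the comment above (illustrative values, not taken from the test): flat_correct divides by the flat normalized to its mean, so multiplying the corrected data back by the flat recovers the original data times the flat's mean.

import numpy as np

data = np.array([[4.0, 8.0], [2.0, 6.0]])
flat = np.array([[1.0, 2.0], [2.0, 1.0]])
corrected = data / (flat / flat.mean())  # what flat_correct does, schematically
np.testing.assert_allclose((corrected * flat).mean(), data.mean() * flat.mean())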
- np.testing.assert_almost_equal( - (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() + np_testing.assert_allclose( + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat.data.mean(), + rtol=1e-6, ) - np.testing.assert_allclose( - ccd_data.data / flat_data.data, flat.data / flat.data.mean() + np_testing.assert_allclose( + ccd_data.data / flat_data.data, + flat.data / flat.data.mean(), + rtol=1e-6, ) # Check that metadata is unchanged (since logging is turned off) @@ -552,7 +596,7 @@ def test_flat_correct_min_value(): size = ccd_data.shape[0] # Create the flat - data = 2 * np.random.default_rng().normal(loc=1.0, scale=0.05, size=(size, size)) + data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat_orig_data = flat.data.copy() min_value = 2.1 # Should replace some, but not all, values @@ -563,13 +607,15 @@ def test_flat_correct_min_value(): # Check that the flat was normalized. The asserts below, which look a # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) - np.testing.assert_almost_equal( + np_testing.assert_allclose( (flat_corrected_data.data * flat_with_min.data).mean(), (ccd_data.data * flat_with_min.data.mean()).mean(), + rtol=1e-6, ) - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data.data / flat_corrected_data.data, flat_with_min.data / flat_with_min.data.mean(), + rtol=1e-6, ) # Test that flat is not modified. @@ -586,17 +632,21 @@ def test_flat_correct_norm_value(): # Note that mean value of flat is set below and is different than # the mean of the flat data. flat_mean = 5.0 - data = np.random.default_rng().normal(loc=1.0, scale=0.05, size=ccd_data.shape) + data = RNG().normal(loc=1.0, scale=0.05, size=ccd_data.shape) flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None, norm_value=flat_mean) # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. - np.testing.assert_almost_equal( - (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean + np_testing.assert_allclose( + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat_mean, + rtol=1e-6, + ) + np_testing.assert_allclose( + ccd_data.data / flat_data.data, flat.data / flat_mean, rtol=1e-6 ) - np.testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) def test_flat_correct_norm_value_bad_value(): @@ -605,7 +655,7 @@ def test_flat_correct_norm_value_bad_value(): # it is given a bad norm_value. Bad means <=0. # Create the flat, with some scatter - data = np.random.default_rng().normal(loc=1.0, scale=0.05, size=ccd_data.shape) + data = RNG().normal(loc=1.0, scale=0.05, size=ccd_data.shape) flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) with pytest.raises(ValueError) as e: flat_correct(ccd_data, flat, add_keyword=None, norm_value=-7) @@ -628,7 +678,11 @@ def test_flat_correct_deviation(): # Test the uncertainty on the data after flat correction def test_flat_correct_data_uncertainty(): # Regression test for #345 - dat = CCDData(np.ones([100, 100]), unit="adu", uncertainty=np.ones([100, 100])) + # Temporarily work around the fact that NDUncertainty explicitly checks + # whether the value is a numpy array. 
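A hedged aside on this workaround pattern: astropy pieces that still insist on a real numpy array (NDUncertainty here, astropy.wcs further down in these tests) are handed a converted copy, while the image data itself can stay in the array library under test.

import jax.numpy as jnp
from numpy import array as np_array

device_data = jnp.ones((100, 100))
numpy_copy = np_array(device_data)  # materialize as a plain numpy.ndarray for astropy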
+ dat = CCDData( + np.ones([100, 100]), unit="adu", uncertainty=np_array(np.ones([100, 100])) + ) # Note flat is set to 10, error, if present, is set to one. flat = CCDData(10 * np.ones([100, 100]), unit="adu") res = flat_correct(dat, flat) @@ -641,7 +695,7 @@ def test_gain_correct(): ccd_data = ccd_data_func() init_data = ccd_data.data gain_data = gain_correct(ccd_data, gain=3, add_keyword=None) - np.testing.assert_allclose(gain_data.data, 3 * init_data) + np_testing.assert_allclose(gain_data.data, 3 * init_data) assert ccd_data.meta == gain_data.meta @@ -651,7 +705,7 @@ def test_gain_correct_quantity(): g = Quantity(3, u.electron / u.adu) ccd_data = gain_correct(ccd_data, gain=g) - np.testing.assert_allclose(ccd_data.data, 3 * init_data) + np_testing.assert_allclose(ccd_data.data, 3 * init_data) assert ccd_data.unit == u.electron @@ -691,7 +745,7 @@ def test_transform_image(mask_data, uncertainty): ccd_data.mask = np.zeros_like(ccd_data) ccd_data.mask[10, 10] = 1 if uncertainty: - err = np.random.default_rng().normal(size=ccd_data.shape) + err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) def tran(arr): @@ -699,13 +753,13 @@ def tran(arr): tran = transform_image(ccd_data, tran) - np.testing.assert_allclose(10 * ccd_data.data, tran.data) + np_testing.assert_allclose(10 * ccd_data.data, tran.data) if mask_data: assert tran.shape == tran.mask.shape - np.testing.assert_array_equal(ccd_data.mask, tran.mask) + np_testing.assert_allclose(ccd_data.mask, tran.mask) if uncertainty: assert tran.shape == tran.uncertainty.array.shape - np.testing.assert_allclose( + np_testing.assert_allclose( 10 * ccd_data.uncertainty.array, tran.uncertainty.array ) @@ -749,14 +803,22 @@ def test_block_reduce(): reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_average(): + data = np.array( + [ + [2.0, 1.0, 2.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + [2.0, 1.0, 2.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + ] + ) ccd = CCDData( - np.ones((4, 4)), + data, unit="adu", meta={"testkw": 1}, mask=np.zeros((4, 4), dtype=bool), uncertainty=StdDevUncertainty(np.ones((4, 4))), ) - ccd.data[::2, ::2] = 2 + with pytest.warns(AstropyUserWarning) as w: ccd_avgd = block_average(ccd, (2, 2)) assert len(w) == 1 @@ -814,7 +876,7 @@ def test__overscan_schange(): old_data = ccd_data.copy() new_data = subtract_overscan(ccd_data, overscan=ccd_data[:, 1], overscan_axis=0) assert not np.allclose(old_data.data, new_data.data) - np.testing.assert_allclose(old_data.data, ccd_data.data) + np_testing.assert_allclose(old_data.data, ccd_data.data) def test_create_deviation_does_not_change_input(): @@ -823,7 +885,7 @@ def test_create_deviation_does_not_change_input(): _ = create_deviation( ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron ) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -831,11 +893,8 @@ def test_cosmicray_median_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() error = np.zeros_like(ccd_data) - with np.errstate(invalid="ignore", divide="ignore"): - _ = cosmicray_median( - ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0 - ) - np.testing.assert_allclose(original.data, ccd_data.data) + _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -843,7 +902,7 @@ def 
test_cosmicray_lacosmic_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = cosmicray_lacosmic(ccd_data) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -851,9 +910,10 @@ def test_flat_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) - with np.errstate(invalid="ignore"): + # Ignore the divide by zero warning that is raised when the flat is zero. + with warnings.catch_warnings(action="ignore", category=RuntimeWarning): _ = flat_correct(ccd_data, flat=flat) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -861,7 +921,7 @@ def test_gain_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = gain_correct(ccd_data, gain=1, gain_unit=ccd_data.unit) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -870,7 +930,7 @@ def test_subtract_bias_does_not_change_input(): original = ccd_data.copy() master_frame = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) _ = subtract_bias(ccd_data, master=master_frame) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -878,16 +938,15 @@ def test_trim_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = trim_image(ccd_data, fits_section=None) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit def test_transform_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - with np.errstate(invalid="ignore"): - _ = transform_image(ccd_data, np.sqrt) - np.testing.assert_allclose(original.data, ccd_data) + _ = transform_image(ccd_data, np.sqrt) + np_testing.assert_allclose(original.data, ccd_data) assert original.unit == ccd_data.unit @@ -916,13 +975,15 @@ def test_wcs_project_onto_same_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) ccd_data.wcs = wcs_for_testing(ccd_data.shape) + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) # Make sure new image has correct WCS. assert new_ccd.wcs.wcs.compare(target_wcs.wcs) # Make sure data matches within some reasonable tolerance. 
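A minimal illustration of the comparison pattern used throughout these tests: numpy.testing (aliased np_testing) coerces its inputs through numpy, so it keeps working when the arrays come from another library, and an explicit rtol replaces assert_almost_equal's decimal-places check, presumably to accommodate lower-precision backends (jax defaults to float32 unless x64 mode is enabled).

import jax.numpy as jnp
from numpy import testing as np_testing

result = jnp.asarray([1.0000001, 2.0])
np_testing.assert_allclose(result, [1.0, 2.0], rtol=1e-6)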
- np.testing.assert_allclose(ccd_data.data, new_ccd.data, rtol=1e-5) + np_testing.assert_allclose(ccd_data.data, new_ccd.data, rtol=1e-5) def test_wcs_project_onto_same_wcs_remove_headers(): @@ -932,6 +993,8 @@ def test_wcs_project_onto_same_wcs_remove_headers(): ccd_data.wcs = wcs_for_testing(ccd_data.shape) ccd_data.header = ccd_data.wcs.to_header() + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) for k in ccd_data.wcs.to_header(): @@ -947,8 +1010,10 @@ def test_wcs_project_onto_shifted_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) target_wcs.wcs.crpix += [1, 1] - ccd_data.mask = np.random.default_rng().choice([0, 1], size=ccd_data.shape) + ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape) + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) # Make sure new image has correct WCS. @@ -956,12 +1021,10 @@ def test_wcs_project_onto_shifted_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels should all be shifted. - masked_input = np.ma.array(ccd_data.data, mask=ccd_data.mask) - masked_output = np.ma.array(new_ccd.data, mask=new_ccd.mask) - np.testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) + np_testing.assert_allclose(ccd_data.data[:-1, :-1], new_ccd[1:, 1:], rtol=1e-5) # The masks should all be shifted too. - np.testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) + np_testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) # We should have more values that are masked in the output array # than on input because some on output were not in the footprint @@ -970,7 +1033,7 @@ def test_wcs_project_onto_shifted_wcs(): # In the case of a shift, one row and one column should be nan, and they # will share one common nan where they intersect, so we know how many nan # there should be. - assert np.isnan(new_ccd.data).sum() == np.sum(new_ccd.shape) - 1 + assert np.sum(np.isnan(new_ccd.data)) == np.sum(np.array(new_ccd.shape)) - 1 # Use an odd number of pixels to make a well-defined center pixel @@ -999,6 +1062,8 @@ def test_wcs_project_onto_scale_wcs(): target_shape = 2 * np.array(ccd_data.shape) + 1 target_wcs.wcs.crpix = 2 * target_wcs.wcs.crpix + 1 + 0.5 + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) # Explicitly set the interpolation method so we know what to # expect for the mass. new_ccd = wcs_project( @@ -1017,7 +1082,7 @@ def test_wcs_project_onto_scale_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels have been scaled. - np.testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) + np_testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. 
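A sketch of how the masked-array comparisons above were replaced: numpy masked arrays are not part of the array API, so the data and the mask are checked as two separate plain arrays (illustrative values only, using a roll in place of the WCS shift).

import numpy as np
from numpy import testing as np_testing

data = np.arange(16.0).reshape(4, 4)
mask = np.zeros((4, 4), dtype=bool)
shifted_data = np.roll(data, (1, 1), axis=(0, 1))
shifted_mask = np.roll(mask, (1, 1), axis=(0, 1))

np_testing.assert_allclose(data[:-1, :-1], shifted_data[1:, 1:], rtol=1e-5)
np_testing.assert_array_equal(mask[:-1, :-1], shifted_mask[1:, 1:])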
@@ -1038,7 +1103,7 @@ def test_ccd_process_does_not_change_input():
     ccd_data = ccd_data_func()
     original = ccd_data.copy()
     _ = ccd_process(ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron)
-    np.testing.assert_allclose(original.data, ccd_data.data)
+    np_testing.assert_allclose(original.data, ccd_data.data)
     assert original.unit == ccd_data.unit
@@ -1077,7 +1142,9 @@ def test_ccd_process_parameters_are_appropriate():

 def test_ccd_process():
     # Test the through ccd_process
     ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu)
-    ccd_data.data[:, -10:] = 2
+    # Rewrite to avoid changing the data in place.
+    ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1)
+
     ccd_data.meta["testkw"] = 100
     mask = np.zeros((100, 90))
@@ -1111,9 +1178,9 @@
     # Final results should be (10 - 2) / 2.0 - 2 = 2
     # Error should be (4 + 5)**0.5 / 0.5 = 3.0
-    np.testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data)
-    np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array)
-    np.testing.assert_array_equal(mask, occd.mask)
+    np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data)
+    np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array)
+    np_testing.assert_array_equal(mask, occd.mask)
     assert occd.unit == u.electron
     # Make sure the original keyword is still present. Regression test for #401
     assert occd.meta["testkw"] == 100
@@ -1122,7 +1189,9 @@ def test_ccd_process_gain_corrected():
     # Test the through ccd_process with gain_corrected as False
     ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu)
-    ccd_data.data[:, -10:] = 2
+
+    # Rewrite to avoid changing the data in place.
+    ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1)
     ccd_data.meta["testkw"] = 100

     mask = np.zeros((100, 90))
@@ -1157,9 +1226,9 @@
     # Final results should be (10 - 2) / 2.0 - 2 = 2
     # Error should be (4 + 5)**0.5 / 0.5 = 3.0
-    np.testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data)
-    np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array)
-    np.testing.assert_array_equal(mask, occd.mask)
+    np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data)
+    np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array)
+    np_testing.assert_array_equal(mask, occd.mask)
     assert occd.unit == u.electron
     # Make sure the original keyword is still present. Regression test for #401
     assert occd.meta["testkw"] == 100
diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index b5936f4f..5c2bbb06 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -1,4 +1,6 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
+import array_api_compat
+import array_api_extra as xpx
 import astropy
 import astropy.units as u
 import numpy as np
@@ -83,7 +85,7 @@ def test_combiner_create():
     ccd_list = [ccd_data, ccd_data, ccd_data]
     c = Combiner(ccd_list)
     assert c.data_arr.shape == (3, 100, 100)
-    assert c.data_arr.mask.shape == (3, 100, 100)
+    assert c.data_arr_mask.shape == (3, 100, 100)


 # test if dtype matches the value that is passed
@@ -112,8 +114,8 @@ def test_combiner_mask():
     ccd_list = [ccd, ccd, ccd]
     c = Combiner(ccd_list)
     assert c.data_arr.shape == (3, 10, 10)
-    assert c.data_arr.mask.shape == (3, 10, 10)
-    assert not c.data_arr.mask[0, 5, 5]
+    assert c.data_arr_mask.shape == (3, 10, 10)
+    assert not c.data_arr_mask[0, 5, 5]


 def test_weights():
@@ -185,7 +187,7 @@ def test_combiner_minmax_max():
     c = Combiner(ccd_list)
     c.minmax_clipping(min_clip=None, max_clip=500)
-    assert c.data_arr[2].mask.all()
+    assert c.data_arr_mask[2].all()


 def test_combiner_minmax_min():
@@ -197,7 +199,7 @@
     c = Combiner(ccd_list)
     c.minmax_clipping(min_clip=-500, max_clip=None)
-    assert c.data_arr[1].mask.all()
+    assert c.data_arr_mask[1].all()


 def test_combiner_sigmaclip_high():
@@ -213,7 +215,7 @@
     c = Combiner(ccd_list)
     # using mad for more robust statistics vs. std
     c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad)
-    assert c.data_arr[5].mask.all()
+    assert c.data_arr_mask[5].all()


 def test_combiner_sigmaclip_single_pix():
@@ -234,7 +236,7 @@
     c.data_arr[3, 5, 5] = -5
     c.data_arr[4, 5, 5] = 25
     c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad)
-    assert c.data_arr.mask[4, 5, 5]
+    assert c.data_arr_mask[4, 5, 5]


 def test_combiner_sigmaclip_low():
@@ -250,7 +252,7 @@
     c = Combiner(ccd_list)
     # using mad for more robust statistics vs. std
     c.sigma_clipping(high_thresh=None, low_thresh=3, func=np.ma.median, dev_func=mad)
-    assert c.data_arr[5].mask.all()
+    assert c.data_arr_mask[5].all()


 # test that the median combination works and returns a ccddata object
@@ -296,7 +298,7 @@ def test_combiner_sum_weighted():
     c = Combiner(ccd_list)
     c.weights = np.array([1, 2, 3])
     ccd = c.sum_combine()
-    expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list))
+    expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list, strict=True))
     np.testing.assert_almost_equal(ccd, expected_result)
@@ -334,6 +336,7 @@ def test_combiner_mask_average():
     # are masked?!
     # assert ccd.data[0, 0] == 0
     assert ccd.data[5, 5] == 1
+    # THE LINE BELOW IS CATCHING A REAL ERROR
     assert ccd.mask[0, 0]
     assert not ccd.mask[5, 5]
@@ -428,7 +431,7 @@ def test_combine_average_fitsimages():
     fitsfilename_list = [fitsfile] * 3
     avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu)
     # averaging same fits images should give back same fits image
-    np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data)
+    np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data)


 def test_combine_numpyndarray():
@@ -446,7 +449,7 @@
     fitsfilename_list = np.array([fitsfile] * 3)
     avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu)
     # averaging same fits images should give back same fits image
-    np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data)
+    np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data)


 def test_combiner_result_dtype():
@@ -459,13 +462,13 @@
     # The default dtype of Combiner is float64
     assert res.data.dtype == np.float64
     ref = np.ones((3, 3)) * 1.5
-    np.testing.assert_array_almost_equal(res.data, ref)
+    np.testing.assert_allclose(res.data, ref)

     res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int)
     # The result dtype should be integer:
     assert res.data.dtype == np.int_
     ref = np.ones((3, 3)) * 2
-    np.testing.assert_array_almost_equal(res.data, ref)
+    np.testing.assert_allclose(res.data, ref)


 def test_combiner_image_file_collection_input(tmp_path):
@@ -476,7 +479,7 @@
     ifc = ImageFileCollection(tmp_path)

     comb = Combiner(ifc.ccds())
-    np.testing.assert_array_almost_equal(ccd.data, comb.average_combine().data)
+    np.testing.assert_allclose(ccd.data, comb.average_combine().data)


 def test_combine_image_file_collection_input(tmp_path):
@@ -492,8 +495,13 @@
     comb_ccds = combine(ifc.ccds(), method="average")

-    np.testing.assert_array_almost_equal(ccd.data, comb_files.data)
-    np.testing.assert_array_almost_equal(ccd.data, comb_ccds.data)
+    comb_string = combine(
+        ",".join(ifc.files_filtered(include_path=True)), method="average"
+    )
+
+    np.testing.assert_allclose(ccd.data, comb_files.data)
+    np.testing.assert_allclose(ccd.data, comb_ccds.data)
+    np.testing.assert_allclose(ccd.data, comb_string.data)

     with pytest.raises(FileNotFoundError):
         # This should fail because the test is not running in the
@@ -511,7 +519,7 @@ def test_combine_average_ccddata():
     avgccd = combine(ccd_list, output_file=None, method="average", unit=u.adu)
     # averaging same ccdData should give back same images
-    np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data)
+    np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data)


 # test combiner convenience function reads fits file and
@@ -528,7 +536,7 @@
         fitsfilename_list, output_file=None, method="average", mem_limit=1e6, unit=u.adu
     )
     # averaging same ccdData should give back same images
-    np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data)
+    np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data)


 # test combiner convenience function reads fits file and
@@ -553,7 +561,7 @@
         unit=u.adu,
     )

-    np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data, decimal=4)
+    np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data)


 # test the optional uncertainty function in average_combine
@@ -593,7 +601,7 @@ def test_sum_combine_uncertainty():
     c = Combiner(ccd_list)
     ccd = c.sum_combine(uncertainty_func=np.sum)
     uncert_ref = np.sum(c.data_arr, 0) * np.sqrt(3)
-    np.testing.assert_almost_equal(ccd.uncertainty.array, uncert_ref)
+    np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref)

     # Compare this also to the "combine" call
     ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=np.sum)
@@ -623,7 +631,9 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point):
     if mask_point:
         # Make one pixel really negative so we can clip it and guarantee a resulting
         # pixel is masked.
-        ccd_data.data[0, 0] = -1000
+        # Handle case where array is immutable by using array_api_extra,
+        # which provides at for all array libraries.
+        ccd_data.data = xpx.at(ccd_data.data)[0, 0].set(-1000)

     ccd_list = [ccd_data, ccd_data, ccd_data]
     c = Combiner(ccd_list)
@@ -639,7 +649,7 @@
         ccd_list, method=combine_method_name, minmax_clip=True, minmax_clip_min=-100
     )

-    np.testing.assert_array_almost_equal(
+    np.testing.assert_allclose(
         ccd_comb.uncertainty.array, expected_result.uncertainty.array
     )
@@ -690,7 +700,7 @@ def test_combiner_uncertainty_average():
     ref_uncertainty = np.ones((10, 10)) / 2
     # Correction because we combined two images.
     ref_uncertainty /= np.sqrt(2)
-    np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty)
+    np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty)


 # test resulting uncertainty is corrected for the number of images (with mask)
@@ -710,7 +720,7 @@ def test_combiner_uncertainty_average_mask():
     # Correction because we combined two images.
     ref_uncertainty /= np.sqrt(3)
     ref_uncertainty[5, 5] = np.std([2, 3]) / np.sqrt(2)
-    np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty)
+    np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty)


 # test resulting uncertainty is corrected for the number of images (with mask)
@@ -731,7 +741,7 @@ def test_combiner_uncertainty_median_mask():
     # Correction because we combined two images.
     ref_uncertainty /= np.sqrt(3)  # 0.855980789955
     ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2)  # 0.524179041254
-    np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty)
+    np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty)


 # test resulting uncertainty is corrected for the number of images (with mask)
@@ -750,7 +760,7 @@ def test_combiner_uncertainty_sum_mask():
     ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3])
     ref_uncertainty *= np.sqrt(3)
     ref_uncertainty[5, 5] = np.std([2, 3]) * np.sqrt(2)
-    np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty)
+    np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty)


 def test_combiner_3d():
@@ -762,11 +772,11 @@
     c = Combiner(ccd_list)

     assert c.data_arr.shape == (3, 5, 5, 5)
-    assert c.data_arr.mask.shape == (3, 5, 5, 5)
+    assert c.data_arr_mask.shape == (3, 5, 5, 5)

     ccd = c.average_combine()

     assert ccd.shape == (5, 5, 5)
-    np.testing.assert_array_almost_equal(ccd.data, data1, decimal=4)
+    np.testing.assert_allclose(ccd.data, data1)


 def test_3d_combiner_with_scaling():
@@ -889,9 +899,9 @@ def test_clip_extrema_with_other_rejection():
     ccdlist[1].data[2, 0] = 100.1
     c = Combiner(ccdlist)
     # Reject ccdlist[1].data[1,2] by other means
-    c.data_arr.mask[1, 1, 2] = True
+    c.data_arr_mask[1, 1, 2] = True
     # Reject ccdlist[1].data[1,2] by other means
-    c.data_arr.mask[3, 0, 0] = True
+    c.data_arr_mask[3, 0, 0] = True

     c.clip_extrema(nlow=1, nhigh=1)
     result = c.average_combine()
@@ -934,7 +944,7 @@ def create_gen():
     c = Combiner(create_gen())
     assert c.data_arr.shape == (3, 100, 100)
-    assert c.data_arr.mask.shape == (3, 100, 100)
+    assert c.data_arr_mask.shape == (3, 100, 100)


 @pytest.mark.parametrize(
@@ -967,13 +977,14 @@ def test_combiner_with_scaling_uncertainty(comb_func):
     avg_ccd = getattr(combiner, comb_func)()

     if comb_func != "median_combine":
-        uncertainty_func = _default_std()
+        xp = array_api_compat.array_namespace(ccd_data.data)
+        uncertainty_func = _default_std(xp=xp)
     else:
         uncertainty_func = sigma_func

     expected_unc = uncertainty_func(scaled_ccds, axis=0)
-    np.testing.assert_almost_equal(avg_ccd.uncertainty.array, expected_unc)
+    np.testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10)


 @pytest.mark.parametrize(
@@ -995,17 +1006,35 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func):
     c = Combiner(ccd_list)

     if comb_func == "sum_combine":
+
+        def my_summer(data, mask, axis=None):
+            xp = array_api_compat.array_namespace(data)
+            new_data = []
+            for i in range(data.shape[0]):
+                if mask[i] is not None:
+                    new_data.append(data[i] * ~mask[i])
+                else:
+                    new_data.append(xp.zeros_like(data[i]))
+
+            new_data = xp.array(new_data)

+            def sum_func(_, axis=axis):
+                return xp.sum(new_data, axis=axis)
+
+            # Return the closure so it can be passed as sum_func below.
+            return sum_func
+
         expected_result = 3 * data
-        actual_result = c.sum_combine(sum_func=np.ma.sum)
+        actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask))
     elif comb_func == "average_combine":
         expected_result = data
-        actual_result = c.average_combine(scale_func=np.ma.mean)
+        actual_result = c.average_combine(scale_func=np.mean)
     elif comb_func == "median_combine":
         expected_result = data
-        actual_result = c.median_combine(median_func=np.ma.median)
+        actual_result = c.median_combine(median_func=np.median)

     # Two of the three values are masked, so no matter what the combination
     # method is the result in this pixel should be 2.
     expected_result[5, 5] = 2
+    # THIS IS A REAL TEST FAILURE!!!
     np.testing.assert_almost_equal(expected_result, actual_result)
diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py
index cfccc747..177a7f64 100644
--- a/ccdproc/tests/test_cosmicray.py
+++ b/ccdproc/tests/test_cosmicray.py
@@ -1,9 +1,14 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
-import numpy as np
+import array_api_compat
+import array_api_extra as xpx
 import pytest
 from astropy import units as u
 from astropy.utils.exceptions import AstropyDeprecationWarning
+from numpy import array as np_array
+from numpy.ma import array as np_ma_array
+from numpy.random import default_rng
+from numpy.testing import assert_allclose

 from ccdproc.core import (
     background_deviation_box,
@@ -21,17 +26,18 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS):
     size = data.shape[0]
-    rng = np.random.default_rng(99)
-    crrays = rng.integers(0, size, size=(ncrays, 2))
+    rng = default_rng(99)
+    xp = array_api_compat.array_namespace(data.data)
+    crrays = xp.asarray(rng.integers(0, size, size=(ncrays, 2)))
     # use (threshold + 15) below to make sure cosmic ray is well above the
     # threshold no matter what the random number generator returns
     # add_cosmicrays is highly sensitive to the seed
     # ideally threshold should be set so it is not sensitive to seed, but
     # this is not working right now
-    crflux = 10 * scale * rng.random(NCRAYS) + (threshold + 15) * scale
+    crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale)
     for i in range(ncrays):
         y, x = crrays[i]
-        data.data[y, x] = crflux[i]
+        data.data = xpx.at(data.data)[y, x].set(crflux[i])


 def test_cosmicray_lacosmic():
@@ -48,10 +54,13 @@ def test_cosmicray_lacosmic_ccddata():
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(noise)
     nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9)

     # check the number of cosmic rays detected
@@ -62,8 +71,9 @@ def test_cosmicray_lacosmic_check_data():
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     with pytest.raises(TypeError):
-        noise = DATA_SCALE * np.ones_like(ccd_data.data)
+        noise = DATA_SCALE * xp.ones_like(ccd_data.data)
         cosmicray_lacosmic(10, noise)
@@ -76,10 +86,13 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data):
     # data and returns that gain corrected data. That is not the
     # intent...
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(noise)
     # No units here on purpose.
     gain = 2.0
@@ -93,22 +106,26 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data):
     cr_mask = new_ccd.mask
     # Fill masked locations with 0 since there is no simple relationship
     # between the original value and the corrected value.
-    orig_data = np.ma.array(ccd_data.data, mask=cr_mask).filled(0)
-    new_data = np.ma.array(new_data.data, mask=cr_mask).filled(0)
+    # Masking using numpy is a handy way to check the results here.
+    orig_data = xp.array(np_ma_array(ccd_data.data, mask=cr_mask).filled(0))
+    new_data = xp.array(np_ma_array(new_data.data, mask=cr_mask).filled(0))

     if gain_correct_data:
         gain_for_test = gain
     else:
         gain_for_test = 1.0

-    np.testing.assert_allclose(gain_for_test * orig_data, new_data)
+    assert_allclose(gain_for_test * orig_data, new_data)


 def test_cosmicray_lacosmic_accepts_quantity_gain():
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(noise)
     # The units below are the point of the test
     gain = 2.0 * u.electron / u.adu
@@ -117,10 +134,13 @@ def test_cosmicray_lacosmic_accepts_quantity_readnoise():
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(noise)
     gain = 2.0 * u.electron / u.adu
     # The units below are the point of this test
     readnoise = 6.5 * u.electron
@@ -132,11 +152,14 @@ def test_cosmicray_lacosmic_detects_inconsistent_units():
     # of adu, a readnoise in electrons and a gain in adu / electron.
     # That is not internally inconsistent.
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     ccd_data.unit = "adu"
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(noise)
     readnoise = 6.5 * u.electron

     # The units below are deliberately incorrect.
@@ -149,13 +172,16 @@ def test_cosmicray_lacosmic_warns_on_ccd_in_electrons():
     # Check that an input ccd in electrons raises a warning.
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     # The unit below is important for the test; this unit on
     # input is supposed to raise an error.
     ccd_data.unit = u.electron
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(noise)
     # No units here on purpose.
     gain = 2.0
     # Don't really need to set this (6.5 is the default value) but want to
@@ -176,10 +202,13 @@ def test_cosmicray_lacosmic_invar_inbkg(new_args):
     # that calling with the new keyword arguments to astroscrappy
     # 1.1.0 raises no error.
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(noise)
     with pytest.raises(TypeError):
         cosmicray_lacosmic(ccd_data, sigclip=5.9, **new_args)
@@ -206,7 +235,9 @@ def test_cosmicray_median_ccddata():
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    ccd_data.uncertainty = ccd_data.data * 0.0 + DATA_SCALE
+    # Workaround for the fact that upstream checks for numpy array
+    # specifically.
+    ccd_data.uncertainty = np_array(ccd_data.data * 0.0 + DATA_SCALE)
     nccd = cosmicray_median(ccd_data, thresh=5, mbox=11, error_image=None)

     # check the number of cosmic rays detected
@@ -217,7 +248,7 @@ def test_cosmicray_median_masked():
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    data = np.ma.masked_array(ccd_data.data, (ccd_data.data > -1e6))
+    data = np_ma_array(ccd_data.data, mask=(ccd_data.data > -1e6))
     ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, error_image=DATA_SCALE)

     # check the number of cosmic rays detected
@@ -243,7 +274,7 @@ def test_cosmicray_median_gbox():
     data, crarr = cosmicray_median(
         ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=0, gbox=5
     )
-    data = np.ma.masked_array(data, crarr)
+    data = np_ma_array(data, mask=crarr)
     assert crarr.sum() > NCRAYS
     assert abs(data.std() - scale) < 0.1
@@ -269,28 +300,28 @@ def test_cosmicray_median_background_deviation():

 def test_background_deviation_box():
     scale = 5.3
-    cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
+    cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
     bd = background_deviation_box(cd, 25)
     assert abs(bd.mean() - scale) < 0.10


 def test_background_deviation_box_fail():
     scale = 5.3
-    cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
+    cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
     with pytest.raises(ValueError):
         background_deviation_box(cd, 0.5)


 def test_background_deviation_filter():
     scale = 5.3
-    cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
+    cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
     bd = background_deviation_filter(cd, 25)
     assert abs(bd.mean() - scale) < 0.10


 def test_background_deviation_filter_fail():
     scale = 5.3
-    cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
+    cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)
     with pytest.raises(ValueError):
         background_deviation_filter(cd, 0.5)
@@ -321,10 +352,11 @@ def test_cosmicray_lacosmic_pssl_does_not_fail():
     # to make sure that passing in pssl does not lead to an error
     # since the new interface does not include pssl.
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
+    xp = array_api_compat.array_namespace(ccd_data.data)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
-    noise = DATA_SCALE * np.ones_like(ccd_data.data)
-    ccd_data.uncertainty = noise
+    noise = DATA_SCALE * xp.ones_like(ccd_data.data)
+    ccd_data.uncertainty = np_array(noise)
     with pytest.warns(AstropyDeprecationWarning):
         # The deprecation warning is expected and should be captured
         nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, pssl=0.0001)
diff --git a/ccdproc/tests/test_image_collection.py b/ccdproc/tests/test_image_collection.py
index f165dc92..4b79afc0 100644
--- a/ccdproc/tests/test_image_collection.py
+++ b/ccdproc/tests/test_image_collection.py
@@ -160,7 +160,7 @@ def test_filtered_files_have_proper_path(self, triage_setup):
         plain_biases = list(plain_biases)
         # Same subset, but with full path.
         path_biases = ic.files_filtered(imagetyp="bias", include_path=True)
-        for path_b, plain_b in zip(path_biases, plain_biases):
+        for path_b, plain_b in zip(path_biases, plain_biases, strict=True):
             # If the path munging has been done properly, this will succeed.
             assert os.path.basename(path_b) == plain_b
@@ -207,7 +207,7 @@ def test_generator_full_path(self, triage_setup):
             location=triage_setup.test_dir, keywords=["imagetyp"]
         )

-        for path, file_name in zip(collection._paths(), collection.files):
+        for path, file_name in zip(collection._paths(), collection.files, strict=True):
             assert path == os.path.join(triage_setup.test_dir, file_name)

     def test_hdus(self, triage_setup):
@@ -278,7 +278,7 @@ def test_multiple_extensions(self, triage_setup, extension):
         ccd_kwargs = {"unit": "adu"}
         for data, hdr, hdu, ccd in zip(
-            ic2.data(), ic2.headers(), ic2.hdus(), ic2.ccds(ccd_kwargs)
+            ic2.data(), ic2.headers(), ic2.hdus(), ic2.ccds(ccd_kwargs), strict=True
         ):
             np.testing.assert_allclose(data, ext2.data)
             assert hdr == ext2.header
diff --git a/ccdproc/tests/test_memory_use.py b/ccdproc/tests/test_memory_use.py
index 5c22248f..173e0f96 100644
--- a/ccdproc/tests/test_memory_use.py
+++ b/ccdproc/tests/test_memory_use.py
@@ -15,6 +15,13 @@
 else:
     memory_profile_present = True

+try:
+    import jax  # noqa
+except ImportError:
+    JAX_PRESENT = False
+else:
+    JAX_PRESENT = True
+
 image_size = 2000  # Square image, so 4000 x 4000
 num_files = 10
@@ -30,8 +37,10 @@ def teardown_module():
         fil.unlink()


+@pytest.mark.skipif(JAX_PRESENT, reason="JAX is present, and does not allow os.fork")
 @pytest.mark.skipif(
-    not platform.startswith("linux"), reason="memory tests only work on linux"
+    not platform.startswith("linux"),
+    reason="memory tests only work on linux",
 )
 @pytest.mark.skipif(not memory_profile_present, reason="memory_profiler not installed")
 @pytest.mark.parametrize("combine_method", ["average", "sum", "median"])
diff --git a/docs/conf.py b/docs/conf.py
index cb0fa5fe..68a64539 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -39,10 +39,7 @@
     )
     sys.exit(1)

-if sys.version_info < (3, 11):
-    import tomli as tomllib
-else:
-    import tomllib
+import tomllib

 # Grab minversion from pyproject.toml
 with (Path(__file__).parents[1] / "pyproject.toml").open("rb") as f:
diff --git a/docs/image_combination.rst b/docs/image_combination.rst
index eed5ae1a..460a1570 100644
--- a/docs/image_combination.rst
+++ b/docs/image_combination.rst
@@ -104,11 +104,11 @@
 To clip iteratively, continuing the clipping process until no more pixels are
 rejected, loop in the code calling the clipping method:

     >>> old_n_masked = 0  # dummy value to make loop execute at least once
-    >>> new_n_masked = combiner.data_arr.mask.sum()
+    >>> new_n_masked = combiner.data_arr_mask.sum()
     >>> while (new_n_masked > old_n_masked):
     ...     combiner.sigma_clipping(func=np.ma.median)
     ...     old_n_masked = new_n_masked
-    ...     new_n_masked = combiner.data_arr.mask.sum()
+    ...     new_n_masked = combiner.data_arr_mask.sum()

 Note that the default values for the high and low thresholds for rejection are
 3 standard deviations.
diff --git a/pyproject.toml b/pyproject.toml
index b1f70b73..24e1ff98 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,17 +8,19 @@ dynamic = ["version"]
 description = "Astropy affiliated package"
 readme = "README.rst"
 license = { text = "BSD-3-Clause" }
-requires-python = ">=3.8"
+requires-python = ">=3.11"
 authors = [
     { name = "Steve Crawford", email = "ccdproc@gmail.com" },
     { name = "Matt Craig" },
     { name = "and Michael Seifert" },
 ]
 dependencies = [
-    "astropy>=5.0.1",
+    "array_api_compat",
+    "array_api_extra>=0.7.0",
+    "astropy>=6.0.1",
     "astroscrappy>=1.1.0",
-    "numpy>=1.24",
-    "reproject>=0.7",
+    "numpy>=1.26",
+    "reproject>=0.9.1",
     "scikit-image",
     "scipy",
 ]
@@ -34,6 +36,7 @@ test = [
     "pre-commit",
     "pytest-astropy>=0.10.0",
     "ruff",
+    "jax",
 ]

 [project.urls]
@@ -145,6 +148,7 @@ filterwarnings= [
     "ignore:numpy\\.ufunc size changed:RuntimeWarning",
     "ignore:numpy.ndarray size changed:RuntimeWarning",
     "ignore:`np.bool` is a deprecated alias for the builtin `bool`:DeprecationWarning",
+    "ignore:invalid value encountered in sqrt:RuntimeWarning",
 ]
 markers = [
     "data_size(N): set dimension of square data array for ccd_data fixture",
diff --git a/tox.ini b/tox.ini
index 62ff0d73..0ab4ee81 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,6 +7,7 @@ isolated_build = true

 [testenv]
 setenv =
+    test: JAX_ENABLE_X64 = True
     devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple

 extras =
     test
@@ -32,8 +33,7 @@ description =
 deps =
     cov: coverage

-    numpy124: numpy==1.24.*  # current oldest suppported numpy
-    numpy126: numpy==1.26.*
+    numpy126: numpy==1.26.*  # currently the oldest supported numpy version
     numpy200: numpy==2.0.*
     numpy210: numpy==2.1.*
@@ -47,10 +47,9 @@ deps =

     # Remember to transfer any changes here to setup.cfg also. Only listing
     # packages which are constrained in the setup.cfg
-    oldestdeps: numpy==1.24.*
-    oldestdeps: astropy==5.0.*
-    oldestdeps: reproject==0.7
-    oldestdeps: cython
+    oldestdeps: numpy==1.26.*
+    oldestdeps: astropy==6.0.*
+    oldestdeps: reproject==0.9.1

 commands =
     pip freeze
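[Reviewer aside, not part of the patch: the new `test: JAX_ENABLE_X64 = True` line in tox.ini matters because jax defaults to 32-bit floats; without it, float64 CCD data is silently downcast to float32 and dtype-sensitive checks, such as the float64 default dtype asserted in test_combiner_result_dtype, would fail. A minimal illustration of the behavior, assuming jax is installed; the snippet itself is not taken from the patch.]

    import os

    # Must be set before jax is imported, which is why tox exports it as an
    # environment variable instead of configuring it inside the test suite.
    os.environ["JAX_ENABLE_X64"] = "True"

    import jax.numpy as jnp

    x = jnp.asarray([1.0, 2.0, 3.0])
    print(x.dtype)  # float64 with x64 enabled; float32 otherwise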