From 5d8edf2b16edc63c1a5699312c51cbdeb0ce0f93 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Tue, 16 Jul 2024 15:28:08 +0200 Subject: [PATCH 01/15] add radius min/max in ensamble and validation for its consistency --- clmm/clusterensemble.py | 6 +++++- clmm/gcdata.py | 2 +- clmm/utils/__init__.py | 1 + clmm/utils/validation.py | 36 ++++++++++++++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/clmm/clusterensemble.py b/clmm/clusterensemble.py index 6a518279c..aa8c83238 100644 --- a/clmm/clusterensemble.py +++ b/clmm/clusterensemble.py @@ -7,6 +7,7 @@ from .gcdata import GCData from .dataops import make_stacked_radial_profile +from .utils import DiffArray class ClusterEnsemble: @@ -46,7 +47,7 @@ def __init__(self, unique_id, gc_list=None, **kwargs): else: raise TypeError(f"unique_id incorrect type: {type(unique_id)}") self.unique_id = unique_id - self.data = GCData(meta={"bin_units": None}) + self.data = GCData(meta={"bin_units": None, "radius_min": None, "radius_max": None}) if gc_list is not None: self._add_values(gc_list, **kwargs) self.stacked_data = None @@ -198,6 +199,9 @@ def add_individual_radial_profile( """ cl_bin_units = profile_table.meta.get("bin_units", None) self.data.update_info_ext_valid("bin_units", self.data, cl_bin_units, overwrite=False) + for col in ("radius_min", "radius_max"): + value = DiffArray(profile_table[col]) + self.data.update_info_ext_valid(col, self.data, value, overwrite=False) cl_cosmo = profile_table.meta.get("cosmo", None) self.data.update_info_ext_valid("cosmo", self.data, cl_cosmo, overwrite=False) diff --git a/clmm/gcdata.py b/clmm/gcdata.py index eae2c102c..29f9eb5d5 100644 --- a/clmm/gcdata.py +++ b/clmm/gcdata.py @@ -150,7 +150,7 @@ def update_info_ext_valid(self, key, gcdata, ext_value, overwrite=False): key: str Name of key to compare and update. gcdata: GCData - Table to check if same cosmology. + Table to check if same cosmology and ensemble bins. ext_value: Value to be compared to. overwrite: bool diff --git a/clmm/utils/__init__.py b/clmm/utils/__init__.py index ae10a484f..a51d6d882 100644 --- a/clmm/utils/__init__.py +++ b/clmm/utils/__init__.py @@ -40,6 +40,7 @@ _validate_ra, _validate_dec, _validate_is_deltasigma_sigma_c, + DiffArray, ) from .units import ( diff --git a/clmm/utils/validation.py b/clmm/utils/validation.py index ed020d183..eebfa5868 100644 --- a/clmm/utils/validation.py +++ b/clmm/utils/validation.py @@ -224,3 +224,39 @@ def _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c): raise TypeError("sigma_c (=None) must be provided when is_deltasigma=True") if not is_deltasigma and sigma_c is not None: raise TypeError(f"sigma_c (={sigma_c}) must be None when is_deltasigma=False") + + +class DiffArray: + def __init__(self, array): + self.value = np.array(array) + + def _is_valid_operand(self, other): + return type(other) == type(self) + + def __eq__(self, other): + if other is None: + return False + if not self._is_valid_operand(other): + raise TypeError(f"other (type={type(other)}) must be a DiffArray object") + if self.value.size != other.value.size: + return False + return (self.value == other.value).all() + + def __repr__(self): + out = str(self.value) + if self.value.size < 5: + return out + + return self._get_lim_str(out) + " ... 
" + self._get_lim_str(out[::-1])[::-1] + + def _get_lim_str(self, out): + # get count starting point + for i0, s in enumerate(out): + if all(s != _s for _s in "[]() "): + break + # get str + sep = 0 + for i, s in enumerate(out[i0 + 1 :]): + sep += int(s == " " and out[i + i0] != " ") + if sep == 2: + return out[: i + i0 + 1] From 68035cf7a95721eeaf7bc39720c413998740f3f5 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Tue, 16 Jul 2024 15:30:04 +0200 Subject: [PATCH 02/15] move dataops/__init__ to dataops/ops --- clmm/dataops/__init__.py | 612 +-------------------------------------- clmm/dataops/ops.py | 606 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 613 insertions(+), 605 deletions(-) create mode 100644 clmm/dataops/ops.py diff --git a/clmm/dataops/__init__.py b/clmm/dataops/__init__.py index 33adb2dd9..6a0e150ba 100644 --- a/clmm/dataops/__init__.py +++ b/clmm/dataops/__init__.py @@ -1,606 +1,8 @@ -"""Functions to compute polar/azimuthal averages in radial bins""" -import warnings -import numpy as np -import scipy -from astropy.coordinates import SkyCoord -from astropy import units as u -from ..gcdata import GCData -from ..utils import ( - compute_radial_averages, - make_bins, - convert_units, - arguments_consistency, - validate_argument, - _validate_ra, - _validate_dec, - _validate_is_deltasigma_sigma_c, +"""Data operation for polar/azimuthal averages in radial bins and weights""" +from .ops import ( + compute_tangential_and_cross_components, + compute_background_probability, + compute_galaxy_weights, + make_radial_profile, + make_stacked_radial_profile, ) -from ..redshift import ( - _integ_pzfuncs, - compute_for_good_redshifts, -) -from ..theory import compute_critical_surface_density_eff - - -def compute_tangential_and_cross_components( - ra_lens, - dec_lens, - ra_source, - dec_source, - shear1, - shear2, - geometry="curve", - is_deltasigma=False, - sigma_c=None, - validate_input=True, -): - r"""Computes tangential- and cross- components for shear or ellipticity - - To do so, we need the right ascension and declination of the lens and of all of the sources. - We also need the two shape components of all of the sources. - - These quantities can be handed to `compute_tangential_and_cross_components` in two ways - - 1. Pass in each as parameters:: - - compute_tangential_and_cross_components(ra_lens, dec_lens, ra_source, dec_source, - shape_component1, shape_component2) - - 2. As a method of `GalaxyCluster`:: - - cluster.compute_tangential_and_cross_components() - - The angular separation between the source and the lens, :math:`\theta`, and the azimuthal - position of the source relative to the lens, :math:`\phi`, are computed within the function - and the angular separation is returned. - - In the flat sky approximation, these angles are calculated using (_lens: lens, - _source: source, RA is from right to left) - - .. math:: - \theta^2 = & \left(\delta_s-\delta_l\right)^2+ - \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)\\ - \tan\phi = & \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)} - - The tangential, :math:`g_t`, and cross, :math:`g_x`, ellipticity/shear components are - calculated using the two ellipticity/shear components :math:`g_1` and :math:`g_2` of the - source galaxies, following Eq.7 and Eq.8 in Schrabback et al. (2018), arXiv:1611:03866 which - is consistent with arXiv:0509252 - - .. 
math:: - g_t =& -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right)\\ - g_x =& g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right) - - Finally, if the critical surface density (:math:`\Sigma_\text{crit}`) is provided, an estimate - of the excess surface density :math:`\widehat{\Delta\Sigma}` is obtained from - - .. math:: - \widehat{\Delta\Sigma_{t,x}} = g_{t,x} \times \Sigma_\text{crit}(z_l, z_{\text{src}}) - - If :math:`g_{t,x}` correspond to the shear, the above expression is an accurate. However, if - :math:`g_{t,x}` correspond to ellipticities or reduced shear, this expression only gives an - estimate :math:`\widehat{\Delta\Sigma_{t,x}}`, valid only in the weak lensing regime. - - Parameters - ---------- - ra_lens: float - Right ascension of the lensing cluster in degrees - dec_lens: float - Declination of the lensing cluster in degrees - ra_source: array - Right ascensions of each source galaxy in degrees - dec_source: array - Declinations of each source galaxy in degrees - shear1: array - The measured shear (or reduced shear or ellipticity) of the source galaxies - shear2: array - The measured shear (or reduced shear or ellipticity) of the source galaxies - geometry: str, optional - Sky geometry to compute angular separation. - Options are curve (uses astropy) or flat. - is_deltasigma: bool - If `True`, the tangential and cross components returned are multiplied by Sigma_crit. - It requires `sigma_c` argument. Results in units of :math:`M_\odot\ Mpc^{-2}` - sigma_c : None, array_like - Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`. - Used only when is_deltasigma=True. - validate_input: bool - Validade each input argument - - Returns - ------- - angsep: array_like - Angular separation between lens and each source galaxy in radians - tangential_component: array_like - Tangential shear (or assimilated quantity) for each source galaxy - cross_component: array_like - Cross shear (or assimilated quantity) for each source galaxy - """ - # pylint: disable-msg=too-many-locals - # Note: we make these quantities to be np.array so that a name is not passed from astropy - # columns - if validate_input: - _validate_ra(locals(), "ra_source", True) - _validate_dec(locals(), "dec_source", True) - _validate_ra(locals(), "ra_lens", True) - _validate_dec(locals(), "dec_lens", True) - validate_argument(locals(), "shear1", "float_array") - validate_argument(locals(), "shear2", "float_array") - validate_argument(locals(), "geometry", str) - validate_argument(locals(), "sigma_c", "float_array", none_ok=True) - ra_source_, dec_source_, shear1_, shear2_ = arguments_consistency( - [ra_source, dec_source, shear1, shear2], - names=("Ra", "Dec", "Shear1", "Shear2"), - prefix="Tangential- and Cross- shape components sources", - ) - _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c) - elif np.iterable(ra_source): - ra_source_, dec_source_, shear1_, shear2_ = ( - np.array(col) for col in [ra_source, dec_source, shear1, shear2] - ) - else: - ra_source_, dec_source_, shear1_, shear2_ = ( - ra_source, - dec_source, - shear1, - shear2, - ) - # Compute the lensing angles - if geometry == "flat": - angsep, phi = _compute_lensing_angles_flatsky(ra_lens, dec_lens, ra_source_, dec_source_) - elif geometry == "curve": - angsep, phi = _compute_lensing_angles_astropy(ra_lens, dec_lens, ra_source_, dec_source_) - else: - raise NotImplementedError(f"Sky geometry {geometry} is not currently supported") - # Compute the tangential and cross shears - tangential_comp = 
_compute_tangential_shear(shear1_, shear2_, phi) - cross_comp = _compute_cross_shear(shear1_, shear2_, phi) - - if sigma_c is not None: - _sigma_c_arr = np.array(sigma_c) - tangential_comp *= _sigma_c_arr - cross_comp *= _sigma_c_arr - - return angsep, tangential_comp, cross_comp - - -def compute_background_probability( - z_lens, z_src=None, use_pdz=False, pzpdf=None, pzbins=None, validate_input=True -): - r"""Probability for being a background galaxy, defined by: - - .. math:: - P(z_s > z_l) = \int_{z_l}^{+\infty} dz_s \; p_{\text{photoz}}(z_s), - - when the photometric probability density functions (:math:`p_{\text{photoz}}(z_s)`) are - provided. In the case of true redshifts, it returns 1 if :math:`z_s > z_l` else returns 0. - - - Parameters - ---------- - z_lens: float - Redshift of the lens. - z_src: array, optional - Redshift of the source. Used only if pzpdf=pzbins=None. - - Returns - ------- - p_background : array - Probability for being a background galaxy - """ - if validate_input: - validate_argument(locals(), "z_lens", float, argmin=0, eqmin=True) - validate_argument(locals(), "z_src", "float_array", argmin=0, eqmin=True, none_ok=True) - - if use_pdz is False: - if z_src is None: - raise ValueError("z_src must be provided.") - p_background = np.array(z_src > z_lens, dtype=float) - else: - if pzpdf is None or pzbins is None: - raise ValueError("pzbins must be provided with pzpdf.") - p_background = _integ_pzfuncs(pzpdf, pzbins, z_lens) - - return p_background - - -def compute_galaxy_weights( - use_shape_noise=False, - shape_component1=None, - shape_component2=None, - use_shape_error=False, - shape_component1_err=None, - shape_component2_err=None, - is_deltasigma=False, - sigma_c=None, - validate_input=True, -): - r"""Computes the individual lens-source pair weights - - The weights :math:`w_{ls}` express as : :math:`w_{ls} = w_{ls, \text{geo}} \times w_{ls, - \text{shape}}`, following E. S. Sheldon et al. (2003), arXiv:astro-ph/0312036: - - 1. If computed for shear, the geometrical weights :math:`w_{ls, \text{geo}}` are equal to 1. If - computed for :math:`\Delta \Sigma`, it depends on lens and source redshift information via the - critical surface density. This component can be expressed as: - - .. math:: - w_{ls, \text{geo}} = \Sigma_\text{crit}(z_l, z_{\text{src}})^{-2}\;. - - when only redshift point estimates are provided, or as: - - - .. math:: - w_{ls, \text{geo}} = \Sigma_\text{crit}^\text{eff}(z_l, z_{\text{src}})^{-2} - = \left[\int_{\delta + z_l} dz_s \; p_{\text{photoz}}(z_s) - \Sigma_\text{crit}(z_l, z_s)^{-1}\right]^2 - - when the redshift pdf of each source, :math:`p_{\text{photoz}}(z_s)`, is known. - - 2. The shape weight :math:`w_{ls,{\text{shape}}}` depends on shapenoise and/or shape - measurement errors - - .. math:: - w_{ls, \text{shape}}^{-1} = \sigma_{\text{shapenoise}}^2 + - \sigma_{\text{measurement}}^2 - - - Parameters - ---------- - is_deltasigma: bool - If `False`, weights are computed for shear, else weights are computed for - :math:`\Delta \Sigma`. - sigma_c : None, array_like - Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`. - Used only when is_deltasigma=True. - use_shape_noise: bool - If `True` shape noise is included in the weight computation. It then requires - `shape_componenet{1,2}` to be provided. Default: False. 
- shape_component1: array_like - The measured shear (or reduced shear or ellipticity) of the source galaxies, - used if `use_shapenoise=True` - shape_component2: array_like - The measured shear (or reduced shear or ellipticity) of the source galaxies, - used if `use_shapenoise=True` - use_shape_error: bool - If `True` shape errors are included in the weight computation. It then requires - shape_component{1,2}_err` to be provided. Default: False. - shape_component1_err: array_like - The measurement error on the 1st-component of ellipticity of the source galaxies, - used if `use_shape_error=True` - shape_component2_err: array_like - The measurement error on the 2nd-component of ellipticity of the source galaxies, - used if `use_shape_error=True` - validate_input: bool - Validade each input argument - - Returns - ------- - w_ls: array_like - Individual lens source pair weights - """ - if validate_input: - validate_argument(locals(), "sigma_c", "float_array", none_ok=True) - validate_argument(locals(), "shape_component1", "float_array", none_ok=True) - validate_argument(locals(), "shape_component2", "float_array", none_ok=True) - validate_argument(locals(), "shape_component1_err", "float_array", none_ok=True) - validate_argument(locals(), "shape_component2_err", "float_array", none_ok=True) - validate_argument(locals(), "use_shape_noise", bool) - validate_argument(locals(), "use_shape_error", bool) - arguments_consistency( - [shape_component1, shape_component2], - names=("shape_component1", "shape_component2"), - prefix="Shape components sources", - ) - _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c) - - # computing w_ls_geo - w_ls_geo = 1.0 - if sigma_c is not None: - w_ls_geo /= np.array(sigma_c) ** 2 - - # computing w_ls_shape - err_e2 = 0 - - if use_shape_noise: - if shape_component1 is None or shape_component2 is None: - raise ValueError( - "With the shape noise option, the source shapes " - "`shape_component_{1,2}` must be specified" - ) - err_e2 += np.std(shape_component1) ** 2 + np.std(shape_component2) ** 2 - if use_shape_error: - if shape_component1_err is None or shape_component2_err is None: - raise ValueError( - "With the shape error option, the source shapes errors" - "`shape_component_err{1,2}` must be specified" - ) - err_e2 += shape_component1_err**2 - err_e2 += shape_component2_err**2 - - if hasattr(err_e2, "__len__"): - w_ls_shape = np.ones(len(err_e2)) - w_ls_shape[err_e2 > 0] = 1.0 / err_e2[err_e2 > 0] - else: - w_ls_shape = 1.0 / err_e2 if err_e2 > 0 else 1.0 - - w_ls = w_ls_shape * w_ls_geo - - return w_ls - - -def _compute_lensing_angles_flatsky(ra_lens, dec_lens, ra_source_list, dec_source_list): - r"""Compute the angular separation between the lens and the source and the azimuthal - angle from the lens to the source in radians. - - In the flat sky approximation, these angles are calculated using - - .. math:: - \theta = \sqrt{\left(\delta_s-\delta_l\right)^2+ - \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)} - - \tan\phi = \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)} - - For extended descriptions of parameters, see `compute_shear()` documentation. 
- - Parameters - ---------- - ra_lens: float - Right ascension of the lensing cluster in degrees - dec_lens: float - Declination of the lensing cluster in degrees - ra_source_list: array - Right ascensions of each source galaxy in degrees - dec_source_list: array - Declinations of each source galaxy in degrees - - Returns - ------- - angsep: array - Angular separation between the lens and the source in radians - phi: array - Azimuthal angle from the lens to the source in radians - """ - delta_ra = np.radians(ra_source_list - ra_lens) - # Put angles between -pi and pi - delta_ra -= np.round(delta_ra / (2.0 * np.pi)) * 2.0 * np.pi - deltax = delta_ra * np.cos(np.radians(dec_lens)) - deltay = np.radians(dec_source_list - dec_lens) - # Ensure that abs(delta ra) < pi - # deltax[deltax >= np.pi] = deltax[deltax >= np.pi]-2.*np.pi - # deltax[deltax < -np.pi] = deltax[deltax < -np.pi]+2.*np.pi - angsep = np.sqrt(deltax**2 + deltay**2) - phi = np.arctan2(deltay, -deltax) - # Forcing phi to be zero everytime angsep is zero. This is necessary due to arctan2 features. - if np.iterable(phi): - phi[angsep == 0.0] = 0.0 - else: - phi = 0.0 if angsep == 0.0 else phi - if np.any(angsep > np.pi / 180.0): - warnings.warn("Using the flat-sky approximation with separations >1 deg may be inaccurate") - return angsep, phi - - -def _compute_lensing_angles_astropy(ra_lens, dec_lens, ra_source_list, dec_source_list): - r"""Compute the angular separation between the lens and the source and the azimuthal - angle from the lens to the source in radians. - - Parameters - ---------- - ra_lens: float - Right ascension of the lensing cluster in degrees - dec_lens: float - Declination of the lensing cluster in degrees - ra_source_list: array - Right ascensions of each source galaxy in degrees - dec_source_list: array - Declinations of each source galaxy in degrees - - Returns - ------- - angsep: array - Angular separation between the lens and the source in radians - phi: array - Azimuthal angle from the lens to the source in radians - """ - sk_lens = SkyCoord(ra_lens * u.deg, dec_lens * u.deg, frame="icrs") - sk_src = SkyCoord(ra_source_list * u.deg, dec_source_list * u.deg, frame="icrs") - angsep, phi = sk_lens.separation(sk_src).rad, sk_lens.position_angle(sk_src).rad - # Transformations for phi to have same orientation as _compute_lensing_angles_flatsky - phi += 0.5 * np.pi - if np.iterable(phi): - phi[phi > np.pi] -= 2 * np.pi - phi[angsep == 0] = 0 - else: - phi -= 2 * np.pi if phi > np.pi else 0 - phi = 0 if angsep == 0 else phi - return angsep, phi - - -def _compute_tangential_shear(shear1, shear2, phi): - r"""Compute the tangential shear given the two shears and azimuthal positions for - a single source or list of sources. - - We compute the tangential shear following Eq. 7 of Schrabback et al. 2018, arXiv:1611:03866 - - .. math:: - g_t = -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right) - - For extended descriptions of parameters, see `compute_shear()` documentation. - """ - return -(shear1 * np.cos(2.0 * phi) + shear2 * np.sin(2.0 * phi)) - - -def _compute_cross_shear(shear1, shear2, phi): - r"""Compute the cross shear given the two shears and azimuthal position for a single - source of list of sources. - - We compute the cross shear following Eq. 8 of Schrabback et al. 2018, arXiv:1611:03866 - also checked arxiv 0509252 - - .. math:: - g_x = g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right) - - For extended descriptions of parameters, see `compute_shear()` documentation. 
- """ - return shear1 * np.sin(2.0 * phi) - shear2 * np.cos(2.0 * phi) - - -def make_radial_profile( - components, - angsep, - angsep_units, - bin_units, - bins=10, - components_error=None, - error_model="ste", - include_empty_bins=False, - return_binnumber=False, - cosmo=None, - z_lens=None, - validate_input=True, - weights=None, -): - r"""Compute the angular profile of given components - - We assume that the cluster object contains information on the cross and - tangential shears or ellipticities and angular separation of the source galaxies - - This function can be called in two ways: - - 1. Pass explict arguments:: - - make_radial_profile([component1, component2], distances, 'radians') - - 2. Call it as a method of a GalaxyCluster instance and specify `bin_units`: - - cluster.make_radial_profile('Mpc') - - Parameters - ---------- - components: list of arrays - List of arrays to be binned in the radial profile - angsep: array - Transvesal distances between the sources and the lens - angsep_units : str - Units of the calculated separation of the source galaxies - Allowed Options = ["radians"] - bin_units : str - Units to use for the radial bins of the radial profile - Allowed Options = ["radians", deg", "arcmin", "arcsec", kpc", "Mpc"] - (letter case independent) - bins : array_like, optional - User defined bins to use for the shear profile. If a list is provided, use that as - the bin edges. If a scalar is provided, create that many equally spaced bins between - the minimum and maximum angular separations in bin_units. If nothing is provided, - default to 10 equally spaced bins. - components_error: list of arrays, None - List of errors for input arrays - error_model : str, optional - Statistical error model to use for y uncertainties. (letter case independent) - `ste` - Standard error [=std/sqrt(n) in unweighted computation] (Default). - `std` - Standard deviation. - include_empty_bins: bool, optional - Also include empty bins in the returned table - return_binnumber: bool, optional - Also returns the indices of the bins for each object - cosmo : CLMM.Cosmology - CLMM Cosmology object to convert angular separations to physical distances - z_lens: array, optional - Redshift of the lens - validate_input: bool - Validade each input argument - weights: array-like, optional - Array of individual galaxy weights. If specified, the radial binned profile is - computed using a weighted average - - Returns - ------- - profile : GCData - Output table containing the radius grid points, the profile of the components `p_i`, - errors `p_i_err` and number of sources. The errors are defined as the standard errors in - each bin. - binnumber: 1-D ndarray of ints, optional - Indices of the bins (corresponding to `xbins`) in which each value - of `xvals` belongs. Same length as `yvals`. A binnumber of `i` means the - corresponding value is between (xbins[i-1], xbins[i]). - - Notes - ----- - This is an example of a place where the cosmology-dependence can be sequestered to another - module. 
- """ - # pylint: disable-msg=too-many-locals - if validate_input: - validate_argument(locals(), "angsep", "float_array") - validate_argument(locals(), "angsep_units", str) - validate_argument(locals(), "bin_units", str) - validate_argument(locals(), "include_empty_bins", bool) - validate_argument(locals(), "return_binnumber", bool) - validate_argument(locals(), "z_lens", "float_array", none_ok=True) - comp_dict = {f"components[{i}]": comp for i, comp in enumerate(components)} - arguments_consistency(components, names=comp_dict.keys(), prefix="Input components") - for component in comp_dict: - validate_argument(comp_dict, component, "float_array") - # Check to see if we need to do a unit conversion - if angsep_units is not bin_units: - source_seps = convert_units(angsep, angsep_units, bin_units, redshift=z_lens, cosmo=cosmo) - else: - source_seps = angsep - # Make bins if they are not provided - if not hasattr(bins, "__len__"): - bins = make_bins(np.min(source_seps), np.max(source_seps), bins) - # Create output table - profile_table = GCData( - [bins[:-1], np.zeros(len(bins) - 1), bins[1:]], - names=("radius_min", "radius", "radius_max"), - meta={"bin_units": bin_units}, # Add metadata - ) - # Compute the binned averages and associated errors - for i, component in enumerate(components): - r_avg, comp_avg, comp_err, nsrc, binnumber, wts_sum = compute_radial_averages( - source_seps, - component, - xbins=bins, - yerr=None if components_error is None else components_error[i], - error_model=error_model, - weights=weights, - ) - profile_table[f"p_{i}"] = comp_avg - profile_table[f"p_{i}_err"] = comp_err - profile_table["radius"] = r_avg - profile_table["n_src"] = nsrc - profile_table["weights_sum"] = wts_sum - # return empty bins? - if not include_empty_bins: - profile_table = profile_table[nsrc > 1] - if return_binnumber: - return profile_table, binnumber - return profile_table - - -def make_stacked_radial_profile(angsep, weights, components): - """Compute stacked profile, and mean separation distances. - - Parameters - ---------- - angsep: 2d array - Transvesal distances corresponding to each object with shape `n_obj, n_rad_bins`. - weights: 2d array - Weights corresponding to each objects with shape `n_obj, n_rad_bins`. - components: list of 2d arrays - List of 2d properties of each array to be stacked with shape - `n_components, n_obj, n_rad_bins`. - - Returns - ------- - staked_angsep: array - Mean transversal distance in each radial bin. - stacked_components: list of arrays - List of stacked components. 
- """ - staked_angsep = np.average(angsep, axis=0, weights=None) - stacked_components = [ - np.average(component, axis=0, weights=weights) for component in components - ] - return staked_angsep, stacked_components diff --git a/clmm/dataops/ops.py b/clmm/dataops/ops.py new file mode 100644 index 000000000..33adb2dd9 --- /dev/null +++ b/clmm/dataops/ops.py @@ -0,0 +1,606 @@ +"""Functions to compute polar/azimuthal averages in radial bins""" +import warnings +import numpy as np +import scipy +from astropy.coordinates import SkyCoord +from astropy import units as u +from ..gcdata import GCData +from ..utils import ( + compute_radial_averages, + make_bins, + convert_units, + arguments_consistency, + validate_argument, + _validate_ra, + _validate_dec, + _validate_is_deltasigma_sigma_c, +) +from ..redshift import ( + _integ_pzfuncs, + compute_for_good_redshifts, +) +from ..theory import compute_critical_surface_density_eff + + +def compute_tangential_and_cross_components( + ra_lens, + dec_lens, + ra_source, + dec_source, + shear1, + shear2, + geometry="curve", + is_deltasigma=False, + sigma_c=None, + validate_input=True, +): + r"""Computes tangential- and cross- components for shear or ellipticity + + To do so, we need the right ascension and declination of the lens and of all of the sources. + We also need the two shape components of all of the sources. + + These quantities can be handed to `compute_tangential_and_cross_components` in two ways + + 1. Pass in each as parameters:: + + compute_tangential_and_cross_components(ra_lens, dec_lens, ra_source, dec_source, + shape_component1, shape_component2) + + 2. As a method of `GalaxyCluster`:: + + cluster.compute_tangential_and_cross_components() + + The angular separation between the source and the lens, :math:`\theta`, and the azimuthal + position of the source relative to the lens, :math:`\phi`, are computed within the function + and the angular separation is returned. + + In the flat sky approximation, these angles are calculated using (_lens: lens, + _source: source, RA is from right to left) + + .. math:: + \theta^2 = & \left(\delta_s-\delta_l\right)^2+ + \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)\\ + \tan\phi = & \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)} + + The tangential, :math:`g_t`, and cross, :math:`g_x`, ellipticity/shear components are + calculated using the two ellipticity/shear components :math:`g_1` and :math:`g_2` of the + source galaxies, following Eq.7 and Eq.8 in Schrabback et al. (2018), arXiv:1611:03866 which + is consistent with arXiv:0509252 + + .. math:: + g_t =& -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right)\\ + g_x =& g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right) + + Finally, if the critical surface density (:math:`\Sigma_\text{crit}`) is provided, an estimate + of the excess surface density :math:`\widehat{\Delta\Sigma}` is obtained from + + .. math:: + \widehat{\Delta\Sigma_{t,x}} = g_{t,x} \times \Sigma_\text{crit}(z_l, z_{\text{src}}) + + If :math:`g_{t,x}` correspond to the shear, the above expression is an accurate. However, if + :math:`g_{t,x}` correspond to ellipticities or reduced shear, this expression only gives an + estimate :math:`\widehat{\Delta\Sigma_{t,x}}`, valid only in the weak lensing regime. 
+ + Parameters + ---------- + ra_lens: float + Right ascension of the lensing cluster in degrees + dec_lens: float + Declination of the lensing cluster in degrees + ra_source: array + Right ascensions of each source galaxy in degrees + dec_source: array + Declinations of each source galaxy in degrees + shear1: array + The measured shear (or reduced shear or ellipticity) of the source galaxies + shear2: array + The measured shear (or reduced shear or ellipticity) of the source galaxies + geometry: str, optional + Sky geometry to compute angular separation. + Options are curve (uses astropy) or flat. + is_deltasigma: bool + If `True`, the tangential and cross components returned are multiplied by Sigma_crit. + It requires `sigma_c` argument. Results in units of :math:`M_\odot\ Mpc^{-2}` + sigma_c : None, array_like + Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`. + Used only when is_deltasigma=True. + validate_input: bool + Validade each input argument + + Returns + ------- + angsep: array_like + Angular separation between lens and each source galaxy in radians + tangential_component: array_like + Tangential shear (or assimilated quantity) for each source galaxy + cross_component: array_like + Cross shear (or assimilated quantity) for each source galaxy + """ + # pylint: disable-msg=too-many-locals + # Note: we make these quantities to be np.array so that a name is not passed from astropy + # columns + if validate_input: + _validate_ra(locals(), "ra_source", True) + _validate_dec(locals(), "dec_source", True) + _validate_ra(locals(), "ra_lens", True) + _validate_dec(locals(), "dec_lens", True) + validate_argument(locals(), "shear1", "float_array") + validate_argument(locals(), "shear2", "float_array") + validate_argument(locals(), "geometry", str) + validate_argument(locals(), "sigma_c", "float_array", none_ok=True) + ra_source_, dec_source_, shear1_, shear2_ = arguments_consistency( + [ra_source, dec_source, shear1, shear2], + names=("Ra", "Dec", "Shear1", "Shear2"), + prefix="Tangential- and Cross- shape components sources", + ) + _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c) + elif np.iterable(ra_source): + ra_source_, dec_source_, shear1_, shear2_ = ( + np.array(col) for col in [ra_source, dec_source, shear1, shear2] + ) + else: + ra_source_, dec_source_, shear1_, shear2_ = ( + ra_source, + dec_source, + shear1, + shear2, + ) + # Compute the lensing angles + if geometry == "flat": + angsep, phi = _compute_lensing_angles_flatsky(ra_lens, dec_lens, ra_source_, dec_source_) + elif geometry == "curve": + angsep, phi = _compute_lensing_angles_astropy(ra_lens, dec_lens, ra_source_, dec_source_) + else: + raise NotImplementedError(f"Sky geometry {geometry} is not currently supported") + # Compute the tangential and cross shears + tangential_comp = _compute_tangential_shear(shear1_, shear2_, phi) + cross_comp = _compute_cross_shear(shear1_, shear2_, phi) + + if sigma_c is not None: + _sigma_c_arr = np.array(sigma_c) + tangential_comp *= _sigma_c_arr + cross_comp *= _sigma_c_arr + + return angsep, tangential_comp, cross_comp + + +def compute_background_probability( + z_lens, z_src=None, use_pdz=False, pzpdf=None, pzbins=None, validate_input=True +): + r"""Probability for being a background galaxy, defined by: + + .. math:: + P(z_s > z_l) = \int_{z_l}^{+\infty} dz_s \; p_{\text{photoz}}(z_s), + + when the photometric probability density functions (:math:`p_{\text{photoz}}(z_s)`) are + provided. 
In the case of true redshifts, it returns 1 if :math:`z_s > z_l` else returns 0. + + + Parameters + ---------- + z_lens: float + Redshift of the lens. + z_src: array, optional + Redshift of the source. Used only if pzpdf=pzbins=None. + + Returns + ------- + p_background : array + Probability for being a background galaxy + """ + if validate_input: + validate_argument(locals(), "z_lens", float, argmin=0, eqmin=True) + validate_argument(locals(), "z_src", "float_array", argmin=0, eqmin=True, none_ok=True) + + if use_pdz is False: + if z_src is None: + raise ValueError("z_src must be provided.") + p_background = np.array(z_src > z_lens, dtype=float) + else: + if pzpdf is None or pzbins is None: + raise ValueError("pzbins must be provided with pzpdf.") + p_background = _integ_pzfuncs(pzpdf, pzbins, z_lens) + + return p_background + + +def compute_galaxy_weights( + use_shape_noise=False, + shape_component1=None, + shape_component2=None, + use_shape_error=False, + shape_component1_err=None, + shape_component2_err=None, + is_deltasigma=False, + sigma_c=None, + validate_input=True, +): + r"""Computes the individual lens-source pair weights + + The weights :math:`w_{ls}` express as : :math:`w_{ls} = w_{ls, \text{geo}} \times w_{ls, + \text{shape}}`, following E. S. Sheldon et al. (2003), arXiv:astro-ph/0312036: + + 1. If computed for shear, the geometrical weights :math:`w_{ls, \text{geo}}` are equal to 1. If + computed for :math:`\Delta \Sigma`, it depends on lens and source redshift information via the + critical surface density. This component can be expressed as: + + .. math:: + w_{ls, \text{geo}} = \Sigma_\text{crit}(z_l, z_{\text{src}})^{-2}\;. + + when only redshift point estimates are provided, or as: + + + .. math:: + w_{ls, \text{geo}} = \Sigma_\text{crit}^\text{eff}(z_l, z_{\text{src}})^{-2} + = \left[\int_{\delta + z_l} dz_s \; p_{\text{photoz}}(z_s) + \Sigma_\text{crit}(z_l, z_s)^{-1}\right]^2 + + when the redshift pdf of each source, :math:`p_{\text{photoz}}(z_s)`, is known. + + 2. The shape weight :math:`w_{ls,{\text{shape}}}` depends on shapenoise and/or shape + measurement errors + + .. math:: + w_{ls, \text{shape}}^{-1} = \sigma_{\text{shapenoise}}^2 + + \sigma_{\text{measurement}}^2 + + + Parameters + ---------- + is_deltasigma: bool + If `False`, weights are computed for shear, else weights are computed for + :math:`\Delta \Sigma`. + sigma_c : None, array_like + Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`. + Used only when is_deltasigma=True. + use_shape_noise: bool + If `True` shape noise is included in the weight computation. It then requires + `shape_componenet{1,2}` to be provided. Default: False. + shape_component1: array_like + The measured shear (or reduced shear or ellipticity) of the source galaxies, + used if `use_shapenoise=True` + shape_component2: array_like + The measured shear (or reduced shear or ellipticity) of the source galaxies, + used if `use_shapenoise=True` + use_shape_error: bool + If `True` shape errors are included in the weight computation. It then requires + shape_component{1,2}_err` to be provided. Default: False. 
+ shape_component1_err: array_like + The measurement error on the 1st-component of ellipticity of the source galaxies, + used if `use_shape_error=True` + shape_component2_err: array_like + The measurement error on the 2nd-component of ellipticity of the source galaxies, + used if `use_shape_error=True` + validate_input: bool + Validade each input argument + + Returns + ------- + w_ls: array_like + Individual lens source pair weights + """ + if validate_input: + validate_argument(locals(), "sigma_c", "float_array", none_ok=True) + validate_argument(locals(), "shape_component1", "float_array", none_ok=True) + validate_argument(locals(), "shape_component2", "float_array", none_ok=True) + validate_argument(locals(), "shape_component1_err", "float_array", none_ok=True) + validate_argument(locals(), "shape_component2_err", "float_array", none_ok=True) + validate_argument(locals(), "use_shape_noise", bool) + validate_argument(locals(), "use_shape_error", bool) + arguments_consistency( + [shape_component1, shape_component2], + names=("shape_component1", "shape_component2"), + prefix="Shape components sources", + ) + _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c) + + # computing w_ls_geo + w_ls_geo = 1.0 + if sigma_c is not None: + w_ls_geo /= np.array(sigma_c) ** 2 + + # computing w_ls_shape + err_e2 = 0 + + if use_shape_noise: + if shape_component1 is None or shape_component2 is None: + raise ValueError( + "With the shape noise option, the source shapes " + "`shape_component_{1,2}` must be specified" + ) + err_e2 += np.std(shape_component1) ** 2 + np.std(shape_component2) ** 2 + if use_shape_error: + if shape_component1_err is None or shape_component2_err is None: + raise ValueError( + "With the shape error option, the source shapes errors" + "`shape_component_err{1,2}` must be specified" + ) + err_e2 += shape_component1_err**2 + err_e2 += shape_component2_err**2 + + if hasattr(err_e2, "__len__"): + w_ls_shape = np.ones(len(err_e2)) + w_ls_shape[err_e2 > 0] = 1.0 / err_e2[err_e2 > 0] + else: + w_ls_shape = 1.0 / err_e2 if err_e2 > 0 else 1.0 + + w_ls = w_ls_shape * w_ls_geo + + return w_ls + + +def _compute_lensing_angles_flatsky(ra_lens, dec_lens, ra_source_list, dec_source_list): + r"""Compute the angular separation between the lens and the source and the azimuthal + angle from the lens to the source in radians. + + In the flat sky approximation, these angles are calculated using + + .. math:: + \theta = \sqrt{\left(\delta_s-\delta_l\right)^2+ + \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)} + + \tan\phi = \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)} + + For extended descriptions of parameters, see `compute_shear()` documentation. 
+ + Parameters + ---------- + ra_lens: float + Right ascension of the lensing cluster in degrees + dec_lens: float + Declination of the lensing cluster in degrees + ra_source_list: array + Right ascensions of each source galaxy in degrees + dec_source_list: array + Declinations of each source galaxy in degrees + + Returns + ------- + angsep: array + Angular separation between the lens and the source in radians + phi: array + Azimuthal angle from the lens to the source in radians + """ + delta_ra = np.radians(ra_source_list - ra_lens) + # Put angles between -pi and pi + delta_ra -= np.round(delta_ra / (2.0 * np.pi)) * 2.0 * np.pi + deltax = delta_ra * np.cos(np.radians(dec_lens)) + deltay = np.radians(dec_source_list - dec_lens) + # Ensure that abs(delta ra) < pi + # deltax[deltax >= np.pi] = deltax[deltax >= np.pi]-2.*np.pi + # deltax[deltax < -np.pi] = deltax[deltax < -np.pi]+2.*np.pi + angsep = np.sqrt(deltax**2 + deltay**2) + phi = np.arctan2(deltay, -deltax) + # Forcing phi to be zero everytime angsep is zero. This is necessary due to arctan2 features. + if np.iterable(phi): + phi[angsep == 0.0] = 0.0 + else: + phi = 0.0 if angsep == 0.0 else phi + if np.any(angsep > np.pi / 180.0): + warnings.warn("Using the flat-sky approximation with separations >1 deg may be inaccurate") + return angsep, phi + + +def _compute_lensing_angles_astropy(ra_lens, dec_lens, ra_source_list, dec_source_list): + r"""Compute the angular separation between the lens and the source and the azimuthal + angle from the lens to the source in radians. + + Parameters + ---------- + ra_lens: float + Right ascension of the lensing cluster in degrees + dec_lens: float + Declination of the lensing cluster in degrees + ra_source_list: array + Right ascensions of each source galaxy in degrees + dec_source_list: array + Declinations of each source galaxy in degrees + + Returns + ------- + angsep: array + Angular separation between the lens and the source in radians + phi: array + Azimuthal angle from the lens to the source in radians + """ + sk_lens = SkyCoord(ra_lens * u.deg, dec_lens * u.deg, frame="icrs") + sk_src = SkyCoord(ra_source_list * u.deg, dec_source_list * u.deg, frame="icrs") + angsep, phi = sk_lens.separation(sk_src).rad, sk_lens.position_angle(sk_src).rad + # Transformations for phi to have same orientation as _compute_lensing_angles_flatsky + phi += 0.5 * np.pi + if np.iterable(phi): + phi[phi > np.pi] -= 2 * np.pi + phi[angsep == 0] = 0 + else: + phi -= 2 * np.pi if phi > np.pi else 0 + phi = 0 if angsep == 0 else phi + return angsep, phi + + +def _compute_tangential_shear(shear1, shear2, phi): + r"""Compute the tangential shear given the two shears and azimuthal positions for + a single source or list of sources. + + We compute the tangential shear following Eq. 7 of Schrabback et al. 2018, arXiv:1611:03866 + + .. math:: + g_t = -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right) + + For extended descriptions of parameters, see `compute_shear()` documentation. + """ + return -(shear1 * np.cos(2.0 * phi) + shear2 * np.sin(2.0 * phi)) + + +def _compute_cross_shear(shear1, shear2, phi): + r"""Compute the cross shear given the two shears and azimuthal position for a single + source of list of sources. + + We compute the cross shear following Eq. 8 of Schrabback et al. 2018, arXiv:1611:03866 + also checked arxiv 0509252 + + .. math:: + g_x = g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right) + + For extended descriptions of parameters, see `compute_shear()` documentation. 
+ """ + return shear1 * np.sin(2.0 * phi) - shear2 * np.cos(2.0 * phi) + + +def make_radial_profile( + components, + angsep, + angsep_units, + bin_units, + bins=10, + components_error=None, + error_model="ste", + include_empty_bins=False, + return_binnumber=False, + cosmo=None, + z_lens=None, + validate_input=True, + weights=None, +): + r"""Compute the angular profile of given components + + We assume that the cluster object contains information on the cross and + tangential shears or ellipticities and angular separation of the source galaxies + + This function can be called in two ways: + + 1. Pass explict arguments:: + + make_radial_profile([component1, component2], distances, 'radians') + + 2. Call it as a method of a GalaxyCluster instance and specify `bin_units`: + + cluster.make_radial_profile('Mpc') + + Parameters + ---------- + components: list of arrays + List of arrays to be binned in the radial profile + angsep: array + Transvesal distances between the sources and the lens + angsep_units : str + Units of the calculated separation of the source galaxies + Allowed Options = ["radians"] + bin_units : str + Units to use for the radial bins of the radial profile + Allowed Options = ["radians", deg", "arcmin", "arcsec", kpc", "Mpc"] + (letter case independent) + bins : array_like, optional + User defined bins to use for the shear profile. If a list is provided, use that as + the bin edges. If a scalar is provided, create that many equally spaced bins between + the minimum and maximum angular separations in bin_units. If nothing is provided, + default to 10 equally spaced bins. + components_error: list of arrays, None + List of errors for input arrays + error_model : str, optional + Statistical error model to use for y uncertainties. (letter case independent) + `ste` - Standard error [=std/sqrt(n) in unweighted computation] (Default). + `std` - Standard deviation. + include_empty_bins: bool, optional + Also include empty bins in the returned table + return_binnumber: bool, optional + Also returns the indices of the bins for each object + cosmo : CLMM.Cosmology + CLMM Cosmology object to convert angular separations to physical distances + z_lens: array, optional + Redshift of the lens + validate_input: bool + Validade each input argument + weights: array-like, optional + Array of individual galaxy weights. If specified, the radial binned profile is + computed using a weighted average + + Returns + ------- + profile : GCData + Output table containing the radius grid points, the profile of the components `p_i`, + errors `p_i_err` and number of sources. The errors are defined as the standard errors in + each bin. + binnumber: 1-D ndarray of ints, optional + Indices of the bins (corresponding to `xbins`) in which each value + of `xvals` belongs. Same length as `yvals`. A binnumber of `i` means the + corresponding value is between (xbins[i-1], xbins[i]). + + Notes + ----- + This is an example of a place where the cosmology-dependence can be sequestered to another + module. 
+ """ + # pylint: disable-msg=too-many-locals + if validate_input: + validate_argument(locals(), "angsep", "float_array") + validate_argument(locals(), "angsep_units", str) + validate_argument(locals(), "bin_units", str) + validate_argument(locals(), "include_empty_bins", bool) + validate_argument(locals(), "return_binnumber", bool) + validate_argument(locals(), "z_lens", "float_array", none_ok=True) + comp_dict = {f"components[{i}]": comp for i, comp in enumerate(components)} + arguments_consistency(components, names=comp_dict.keys(), prefix="Input components") + for component in comp_dict: + validate_argument(comp_dict, component, "float_array") + # Check to see if we need to do a unit conversion + if angsep_units is not bin_units: + source_seps = convert_units(angsep, angsep_units, bin_units, redshift=z_lens, cosmo=cosmo) + else: + source_seps = angsep + # Make bins if they are not provided + if not hasattr(bins, "__len__"): + bins = make_bins(np.min(source_seps), np.max(source_seps), bins) + # Create output table + profile_table = GCData( + [bins[:-1], np.zeros(len(bins) - 1), bins[1:]], + names=("radius_min", "radius", "radius_max"), + meta={"bin_units": bin_units}, # Add metadata + ) + # Compute the binned averages and associated errors + for i, component in enumerate(components): + r_avg, comp_avg, comp_err, nsrc, binnumber, wts_sum = compute_radial_averages( + source_seps, + component, + xbins=bins, + yerr=None if components_error is None else components_error[i], + error_model=error_model, + weights=weights, + ) + profile_table[f"p_{i}"] = comp_avg + profile_table[f"p_{i}_err"] = comp_err + profile_table["radius"] = r_avg + profile_table["n_src"] = nsrc + profile_table["weights_sum"] = wts_sum + # return empty bins? + if not include_empty_bins: + profile_table = profile_table[nsrc > 1] + if return_binnumber: + return profile_table, binnumber + return profile_table + + +def make_stacked_radial_profile(angsep, weights, components): + """Compute stacked profile, and mean separation distances. + + Parameters + ---------- + angsep: 2d array + Transvesal distances corresponding to each object with shape `n_obj, n_rad_bins`. + weights: 2d array + Weights corresponding to each objects with shape `n_obj, n_rad_bins`. + components: list of 2d arrays + List of 2d properties of each array to be stacked with shape + `n_components, n_obj, n_rad_bins`. + + Returns + ------- + staked_angsep: array + Mean transversal distance in each radial bin. + stacked_components: list of arrays + List of stacked components. 
+ """ + staked_angsep = np.average(angsep, axis=0, weights=None) + stacked_components = [ + np.average(component, axis=0, weights=weights) for component in components + ] + return staked_angsep, stacked_components From 7d28e41d3d2ba3e222df456636ba2457fbd75a13 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Tue, 16 Jul 2024 15:53:32 +0200 Subject: [PATCH 03/15] fix import --- clmm/dataops/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/clmm/dataops/__init__.py b/clmm/dataops/__init__.py index 6a0e150ba..9028b1fac 100644 --- a/clmm/dataops/__init__.py +++ b/clmm/dataops/__init__.py @@ -3,6 +3,7 @@ compute_tangential_and_cross_components, compute_background_probability, compute_galaxy_weights, + make_bins, make_radial_profile, make_stacked_radial_profile, ) From 2390466c98dd6e8410fb4db53fe95ac8ff0dd0a3 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Tue, 16 Jul 2024 15:54:14 +0200 Subject: [PATCH 04/15] update tests --- tests/test_clusterensemble.py | 4 +++- tests/test_dataops.py | 40 +++++++++++++++++------------------ 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/tests/test_clusterensemble.py b/tests/test_clusterensemble.py index 63c3d3958..4a49db065 100644 --- a/tests/test_clusterensemble.py +++ b/tests/test_clusterensemble.py @@ -129,7 +129,9 @@ def test_covariance(): for i in range(n_catalogs): # generate random catalog e1, e2 = np.random.randn(ngals) * 0.001, np.random.randn(ngals) * 0.001 - et, ex = da._compute_tangential_shear(e1, e2, phi), da._compute_cross_shear(e1, e2, phi) + et, ex = da.ops._compute_tangential_shear(e1, e2, phi), da.ops._compute_cross_shear( + e1, e2, phi + ) z_gal = np.random.random(ngals) * (3 - 1.1) + 1.1 id_gal = np.arange(ngals) theta_gal = np.linspace(0, 1, ngals) * (thetamax - thetamin) + thetamin diff --git a/tests/test_dataops.py b/tests/test_dataops.py index f60ac795b..511bcac10 100644 --- a/tests/test_dataops.py +++ b/tests/test_dataops.py @@ -15,43 +15,43 @@ def test_compute_cross_shear(): """test compute cross shear""" shear1, shear2, phi = 0.15, 0.08, 0.52 expected_cross_shear = 0.08886301350787848 - cross_shear = da._compute_cross_shear(shear1, shear2, phi) + cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi) assert_allclose(cross_shear, expected_cross_shear) shear1 = np.array([0.15, 0.40]) shear2 = np.array([0.08, 0.30]) phi = np.array([0.52, 1.23]) expected_cross_shear = [0.08886301350787848, 0.48498333705834484] - cross_shear = da._compute_cross_shear(shear1, shear2, phi) + cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi) assert_allclose(cross_shear, expected_cross_shear) # Edge case tests - assert_allclose(da._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE) - assert_allclose(da._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE) - assert_allclose(da._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE) - assert_allclose(da._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE) - assert_allclose(da._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE) - assert_allclose(da._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) + assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE) + assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE) + assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE) + assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE) + assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, 
**TOLERANCE) + assert_allclose(da.ops._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) def test_compute_tangential_shear(): """test compute tangential shear""" shear1, shear2, phi = 0.15, 0.08, 0.52 expected_tangential_shear = -0.14492537676438383 - tangential_shear = da._compute_tangential_shear(shear1, shear2, phi) + tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi) assert_allclose(tangential_shear, expected_tangential_shear) shear1 = np.array([0.15, 0.40]) shear2 = np.array([0.08, 0.30]) phi = np.array([0.52, 1.23]) expected_tangential_shear = [-0.14492537676438383, 0.1216189244145496] - tangential_shear = da._compute_tangential_shear(shear1, shear2, phi) + tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi) assert_allclose(tangential_shear, expected_tangential_shear) # test for reasonable values - assert_allclose(da._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE) - assert_allclose(da._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE) - assert_allclose(da._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) + assert_allclose(da.ops._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE) + assert_allclose(da.ops._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE) + assert_allclose(da.ops._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) def test_compute_lensing_angles_flatsky(): @@ -62,7 +62,7 @@ def test_compute_lensing_angles_flatsky(): # Ensure that we throw a warning with >1 deg separation assert_warns( UserWarning, - da._compute_lensing_angles_flatsky, + da.ops._compute_lensing_angles_flatsky, ra_l, dec_l, np.array([151.32, 161.34]), @@ -72,7 +72,7 @@ def test_compute_lensing_angles_flatsky(): # Test outputs for reasonable values ra_l, dec_l = 161.32, 51.49 ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55]) - thetas, phis = da._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s) + thetas, phis = da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s) assert_allclose( thetas, @@ -90,7 +90,7 @@ def test_compute_lensing_angles_flatsky(): # lens and source at the same ra assert_allclose( - da._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([161.32, 161.34]), dec_s), + da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([161.32, 161.34]), dec_s), [ [0.00069813170079771690, 0.00106951489719733675], [-1.57079632679489655800, 1.77544123918164542530], @@ -101,7 +101,7 @@ def test_compute_lensing_angles_flatsky(): # lens and source at the same dec assert_allclose( - da._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, np.array([51.49, 51.55])), + da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, np.array([51.49, 51.55])), [ [0.00032601941539388962, 0.00106951489719733675], [0.00000000000000000000, 1.77544123918164542530], @@ -112,7 +112,7 @@ def test_compute_lensing_angles_flatsky(): # lens and source at the same ra and dec assert_allclose( - da._compute_lensing_angles_flatsky( + da.ops._compute_lensing_angles_flatsky( ra_l, dec_l, np.array([ra_l, 161.34]), np.array([dec_l, 51.55]) ), [ @@ -125,7 +125,7 @@ def test_compute_lensing_angles_flatsky(): # angles over the branch cut between 0 and 360 assert_allclose( - da._compute_lensing_angles_flatsky(0.1, dec_l, np.array([359.9, 359.5]), dec_s), + da.ops._compute_lensing_angles_flatsky(0.1, dec_l, np.array([359.9, 359.5]), dec_s), [ [0.0022828333888309108, 0.006603944760273219], [-0.31079754672938664, 0.15924369771830643], @@ -136,7 +136,7 @@ def 
test_compute_lensing_angles_flatsky(): # angles over the branch cut between 0 and 360 assert_allclose( - da._compute_lensing_angles_flatsky(-180, dec_l, np.array([180.1, 179.7]), dec_s), + da.ops._compute_lensing_angles_flatsky(-180, dec_l, np.array([180.1, 179.7]), dec_s), [[0.0012916551296819666, 0.003424250083245557], [-2.570568636904587, 0.31079754672944354]], TOLERANCE["rtol"], err_msg="Failure when ra_l and ra_s are the same but one is defined negative", From 0eeb7dd2baf01cdab736124cc530eb4c7648f6c1 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Wed, 17 Jul 2024 14:12:55 +0200 Subject: [PATCH 05/15] clean up code --- clmm/utils/validation.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/clmm/utils/validation.py b/clmm/utils/validation.py index eebfa5868..481030a2a 100644 --- a/clmm/utils/validation.py +++ b/clmm/utils/validation.py @@ -227,36 +227,35 @@ def _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c): class DiffArray: + """Array where arr1==arr2 is actually all(arr1==arr)""" + def __init__(self, array): self.value = np.array(array) - def _is_valid_operand(self, other): - return type(other) == type(self) - def __eq__(self, other): - if other is None: + # pylint: disable=unidiomatic-typecheck + if type(other) != type(self): return False - if not self._is_valid_operand(other): - raise TypeError(f"other (type={type(other)}) must be a DiffArray object") if self.value.size != other.value.size: return False return (self.value == other.value).all() def __repr__(self): out = str(self.value) - if self.value.size < 5: - return out - - return self._get_lim_str(out) + " ... " + self._get_lim_str(out[::-1])[::-1] + if self.value.size > 4: + out = self._get_lim_str(out) + " ... " + self._get_lim_str(out[::-1])[::-1] + return out def _get_lim_str(self, out): + # pylint: disable=undefined-loop-variable # get count starting point - for i0, s in enumerate(out): - if all(s != _s for _s in "[]() "): + for init_index, char in enumerate(out): + if all(char != _char for _char in "[]() "): break # get str sep = 0 - for i, s in enumerate(out[i0 + 1 :]): - sep += int(s == " " and out[i + i0] != " ") + for i, char in enumerate(out[init_index + 1 :]): + sep += int(char == " " and out[i + init_index] != " ") if sep == 2: - return out[: i + i0 + 1] + break + return out[: i + init_index + 1] From b097989afd34e1b8358f013e33f0dda17ef76923 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Wed, 17 Jul 2024 14:14:06 +0200 Subject: [PATCH 06/15] updated tests --- tests/test_utils.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_utils.py b/tests/test_utils.py index 0b2db3591..a70524472 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -11,6 +11,7 @@ convert_shapes_to_epsilon, arguments_consistency, validate_argument, + DiffArray, ) from clmm.redshift import distributions as zdist @@ -535,6 +536,21 @@ def test_validate_argument(): ) +def test_diff_array(): + """test validate argument""" + # Validate diffs + assert DiffArray([1, 2]) == DiffArray([1, 2]) + assert DiffArray([1, 2]) == DiffArray(np.array([1, 2])) + assert DiffArray([1, 2]) != DiffArray(np.array([2, 2])) + assert DiffArray([1, 2]) != DiffArray(np.array([1, 2, 3])) + assert DiffArray([1, 2]) != None + # Validate prints + arr = DiffArray([1, 2]) + assert str(arr.value) == arr.__repr__() + arr = DiffArray(range(10)) + assert str(arr.value) != arr.__repr__() + + def test_beta_functions(modeling_data): z_cl = 1.0 z_src = [2.4, 2.1] From 
65913aa41d1da657ad40903a70b401f62839807b Mon Sep 17 00:00:00 2001 From: m-aguena Date: Wed, 17 Jul 2024 14:32:22 +0200 Subject: [PATCH 07/15] update nbs --- examples/demo_mock_ensemble.ipynb | 52 +++++++++++++++------ examples/demo_mock_ensemble_realistic.ipynb | 42 +++++++++++++++-- 2 files changed, 78 insertions(+), 16 deletions(-) diff --git a/examples/demo_mock_ensemble.ipynb b/examples/demo_mock_ensemble.ipynb index 84354b111..2eb741e1a 100644 --- a/examples/demo_mock_ensemble.ipynb +++ b/examples/demo_mock_ensemble.ipynb @@ -307,16 +307,6 @@ " )" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "9de6b261-f1aa-4b43-941b-d415462fec95", - "metadata": {}, - "outputs": [], - "source": [ - "cluster.profile" - ] - }, { "cell_type": "code", "execution_count": null, @@ -336,6 +326,42 @@ " )" ] }, + { + "cell_type": "markdown", + "id": "1e699a03", + "metadata": {}, + "source": [ + "The individual cluster data and profiles are stored at the `.data` table of the `ClusterEnsemble`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "598f9e8a", + "metadata": {}, + "outputs": [], + "source": [ + "clusterensemble.data[:3]" + ] + }, + { + "cell_type": "markdown", + "id": "9429d37f", + "metadata": {}, + "source": [ + "The edges of the radial bins, their units, and the cosmology are stored on the metadata of this table:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d1e5e841", + "metadata": {}, + "outputs": [], + "source": [ + "clusterensemble.data.meta" + ] + }, { "cell_type": "markdown", "id": "99e3fe18", @@ -618,9 +644,9 @@ ], "metadata": { "kernelspec": { - "display_name": "wrk", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "wrk" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -632,7 +658,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.9" + "version": "3.10.8" } }, "nbformat": 4, diff --git a/examples/demo_mock_ensemble_realistic.ipynb b/examples/demo_mock_ensemble_realistic.ipynb index bb0f9147f..422de3f85 100644 --- a/examples/demo_mock_ensemble_realistic.ipynb +++ b/examples/demo_mock_ensemble_realistic.ipynb @@ -903,6 +903,42 @@ " )" ] }, + { + "cell_type": "markdown", + "id": "31c5e0a3", + "metadata": {}, + "source": [ + "The individual cluster data and profiles are stored at the `.data` table of the `ClusterEnsemble`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82600274", + "metadata": {}, + "outputs": [], + "source": [ + "clusterensemble.data[:3]" + ] + }, + { + "cell_type": "markdown", + "id": "f5a30238", + "metadata": {}, + "source": [ + "The edges of the radial bins, their units, and the cosmology are stored on the metadata of this table:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e4293187", + "metadata": {}, + "outputs": [], + "source": [ + "clusterensemble.data.meta" + ] + }, { "cell_type": "markdown", "id": "9091de7c", @@ -1179,9 +1215,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3.11", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "python3.11" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -1193,7 +1229,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.10.8" } }, "nbformat": 4, From a4ab403e51f98c0ef9eb818b1a8a5da26c302d60 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Wed, 17 Jul 2024 15:17:24 +0200 Subject: [PATCH 08/15] 
pass rmin/max to ClusterEnsemble.stacked_data --- clmm/clusterensemble.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/clmm/clusterensemble.py b/clmm/clusterensemble.py index aa8c83238..5e7355605 100644 --- a/clmm/clusterensemble.py +++ b/clmm/clusterensemble.py @@ -252,9 +252,14 @@ def make_stacked_radial_profile(self, tan_component="gt", cross_component="gx", [self.data[tan_component], self.data[cross_component]], ) self.stacked_data = GCData( - [radius, *components], - meta=self.data.meta, - names=("radius", tan_component, cross_component), + [ + self.data.meta["radius_min"].value, + self.data.meta["radius_max"].value, + radius, + *components, + ], + meta={k: v for k, v in self.data.meta.items() if k not in ("radius_min", "radius_max")}, + names=("radius_min", "radius_max", "radius", tan_component, cross_component), ) def compute_sample_covariance(self, tan_component="gt", cross_component="gx"): From 8f51a04faf9f1a0ef25009c7a5877b070e1d7c69 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Wed, 17 Jul 2024 15:18:41 +0200 Subject: [PATCH 09/15] update nbs --- examples/demo_mock_ensemble.ipynb | 18 ++++++++++++++---- examples/demo_mock_ensemble_realistic.ipynb | 18 ++++++++++++++---- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/examples/demo_mock_ensemble.ipynb b/examples/demo_mock_ensemble.ipynb index 2eb741e1a..33f071391 100644 --- a/examples/demo_mock_ensemble.ipynb +++ b/examples/demo_mock_ensemble.ipynb @@ -328,7 +328,7 @@ }, { "cell_type": "markdown", - "id": "1e699a03", + "id": "84bd786f", "metadata": {}, "source": [ "The individual cluster data and profiles are stored at the `.data` table of the `ClusterEnsemble`:" @@ -337,7 +337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "598f9e8a", + "id": "864f3acb", "metadata": {}, "outputs": [], "source": [ @@ -346,7 +346,7 @@ }, { "cell_type": "markdown", - "id": "9429d37f", + "id": "6ea533b0", "metadata": {}, "source": [ "The edges of the radial bins, their units, and the cosmology are stored on the metadata of this table:" @@ -355,7 +355,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d1e5e841", + "id": "39723fb1", "metadata": {}, "outputs": [], "source": [ @@ -381,6 +381,16 @@ "clusterensemble.make_stacked_radial_profile(tan_component=\"DS_t\", cross_component=\"DS_x\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "98b9fa63", + "metadata": {}, + "outputs": [], + "source": [ + "clusterensemble.stacked_data" + ] + }, { "cell_type": "markdown", "id": "907fd664", diff --git a/examples/demo_mock_ensemble_realistic.ipynb b/examples/demo_mock_ensemble_realistic.ipynb index 422de3f85..df029c944 100644 --- a/examples/demo_mock_ensemble_realistic.ipynb +++ b/examples/demo_mock_ensemble_realistic.ipynb @@ -905,7 +905,7 @@ }, { "cell_type": "markdown", - "id": "31c5e0a3", + "id": "b5e17ee0", "metadata": {}, "source": [ "The individual cluster data and profiles are stored at the `.data` table of the `ClusterEnsemble`:" @@ -914,7 +914,7 @@ { "cell_type": "code", "execution_count": null, - "id": "82600274", + "id": "4a970edb", "metadata": {}, "outputs": [], "source": [ @@ -923,7 +923,7 @@ }, { "cell_type": "markdown", - "id": "f5a30238", + "id": "7320e899", "metadata": {}, "source": [ "The edges of the radial bins, their units, and the cosmology are stored on the metadata of this table:" @@ -932,7 +932,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e4293187", + "id": "f9ea8436", "metadata": {}, "outputs": [], "source": [ @@ -958,6 +958,16 
@@ "clusterensemble.make_stacked_radial_profile(tan_component=\"DS_t\", cross_component=\"DS_x\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "e4125570", + "metadata": {}, + "outputs": [], + "source": [ + "clusterensemble.stacked_data" + ] + }, { "cell_type": "markdown", "id": "771c1382", From d99daa28eb60892300e62b75dee125d447fdfbb5 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Wed, 17 Jul 2024 15:24:03 +0200 Subject: [PATCH 10/15] rm unused imports --- clmm/dataops/ops.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/clmm/dataops/ops.py b/clmm/dataops/ops.py index 33adb2dd9..77e9f6737 100644 --- a/clmm/dataops/ops.py +++ b/clmm/dataops/ops.py @@ -1,7 +1,6 @@ """Functions to compute polar/azimuthal averages in radial bins""" import warnings import numpy as np -import scipy from astropy.coordinates import SkyCoord from astropy import units as u from ..gcdata import GCData @@ -15,11 +14,7 @@ _validate_dec, _validate_is_deltasigma_sigma_c, ) -from ..redshift import ( - _integ_pzfuncs, - compute_for_good_redshifts, -) -from ..theory import compute_critical_surface_density_eff +from ..redshift import _integ_pzfuncs def compute_tangential_and_cross_components( From 4dbfdf7fb25155bf911cb0b8cd8526ac06cd3d64 Mon Sep 17 00:00:00 2001 From: Hsin Fan <57552401+hsinfan1996@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:40:27 +0800 Subject: [PATCH 11/15] Fix typo --- clmm/clusterensemble.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clmm/clusterensemble.py b/clmm/clusterensemble.py index 5e7355605..787a1de2e 100644 --- a/clmm/clusterensemble.py +++ b/clmm/clusterensemble.py @@ -28,7 +28,7 @@ class ClusterEnsemble: * "tan_sc" : tangential component computed with sample covariance * "cross_sc" : cross component computed with sample covariance - * "tan_jk" : tangential component computed with bootstrap + * "tan_bs" : tangential component computed with bootstrap * "cross_bs" : cross component computed with bootstrap * "tan_jk" : tangential component computed with jackknife * "cross_jk" : cross component computed with jackknife From 17a527ca263f389f7481e89cb993373902d6d18c Mon Sep 17 00:00:00 2001 From: Hsin Fan <57552401+hsinfan1996@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:51:31 +0800 Subject: [PATCH 12/15] Fix deprecated NumCosmo constructors --- examples/demo_mock_ensemble_realistic.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/demo_mock_ensemble_realistic.ipynb b/examples/demo_mock_ensemble_realistic.ipynb index df029c944..b251ab103 100644 --- a/examples/demo_mock_ensemble_realistic.ipynb +++ b/examples/demo_mock_ensemble_realistic.ipynb @@ -175,7 +175,7 @@ "source": [ "Ncm.cfg_init()\n", "# cosmo_nc = Nc.HICosmoDEXcdm()\n", - "cosmo_nc = Nc.HICosmo.new_from_name(Nc.HICosmo, \"NcHICosmoDECpl{'massnu-length':<1>}\")\n", + "cosmo_nc = Nc.HICosmoDECpl(massnu_length=1)\n", "cosmo_nc.omega_x2omega_k()\n", "cosmo_nc.param_set_by_name(\"w0\", -1.0)\n", "cosmo_nc.param_set_by_name(\"w1\", 0.0)\n", @@ -201,7 +201,7 @@ "\n", "dist = Nc.Distance.new(2.0)\n", "dist.prepare_if_needed(cosmo_nc)\n", - "tf = Nc.TransferFunc.new_from_name(\"NcTransferFuncEH\")\n", + "tf = Nc.TransferFuncEH()\n", "\n", "psml = Nc.PowspecMLTransfer.new(tf)\n", "psml.require_kmin(1.0e-6)\n", @@ -1239,7 +1239,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.11.9" } }, "nbformat": 4, From 7bf169665012428a6aa4132a37e71c0f676e65b7 Mon Sep 17 
00:00:00 2001 From: m-aguena Date: Thu, 18 Jul 2024 16:37:03 +0200 Subject: [PATCH 13/15] rename clmm/dataops/ops.py -> clmm/dataops/data_operations.py --- clmm/dataops/__init__.py | 2 +- clmm/dataops/{ops.py => data_operations.py} | 0 tests/test_clusterensemble.py | 4 +- tests/test_dataops.py | 66 +++++++++++++-------- tests/test_mockdata.py | 8 ++- tests/test_theory.py | 53 ++++++++++------- 6 files changed, 82 insertions(+), 51 deletions(-) rename clmm/dataops/{ops.py => data_operations.py} (100%) diff --git a/clmm/dataops/__init__.py b/clmm/dataops/__init__.py index 9028b1fac..8b345f7e5 100644 --- a/clmm/dataops/__init__.py +++ b/clmm/dataops/__init__.py @@ -1,5 +1,5 @@ """Data operation for polar/azimuthal averages in radial bins and weights""" -from .ops import ( +from .data_operations import ( compute_tangential_and_cross_components, compute_background_probability, compute_galaxy_weights, diff --git a/clmm/dataops/ops.py b/clmm/dataops/data_operations.py similarity index 100% rename from clmm/dataops/ops.py rename to clmm/dataops/data_operations.py diff --git a/tests/test_clusterensemble.py b/tests/test_clusterensemble.py index 4a49db065..e5ae00d62 100644 --- a/tests/test_clusterensemble.py +++ b/tests/test_clusterensemble.py @@ -129,9 +129,9 @@ def test_covariance(): for i in range(n_catalogs): # generate random catalog e1, e2 = np.random.randn(ngals) * 0.001, np.random.randn(ngals) * 0.001 - et, ex = da.ops._compute_tangential_shear(e1, e2, phi), da.ops._compute_cross_shear( + et, ex = da.data_operations._compute_tangential_shear( e1, e2, phi - ) + ), da.data_operations._compute_cross_shear(e1, e2, phi) z_gal = np.random.random(ngals) * (3 - 1.1) + 1.1 id_gal = np.arange(ngals) theta_gal = np.linspace(0, 1, ngals) * (thetamax - thetamin) + thetamin diff --git a/tests/test_dataops.py b/tests/test_dataops.py index 784e0d56c..ad2dc445f 100644 --- a/tests/test_dataops.py +++ b/tests/test_dataops.py @@ -16,43 +16,53 @@ def test_compute_cross_shear(): """test compute cross shear""" shear1, shear2, phi = 0.15, 0.08, 0.52 expected_cross_shear = 0.08886301350787848 - cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi) + cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi) assert_allclose(cross_shear, expected_cross_shear) shear1 = np.array([0.15, 0.40]) shear2 = np.array([0.08, 0.30]) phi = np.array([0.52, 1.23]) expected_cross_shear = [0.08886301350787848, 0.48498333705834484] - cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi) + cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi) assert_allclose(cross_shear, expected_cross_shear) # Edge case tests - assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE) - assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE) - assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE) - assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE) - assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE) - assert_allclose(da.ops._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) + assert_allclose(da.data_operations._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE) + assert_allclose( + da.data_operations._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE + ) + assert_allclose(da.data_operations._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE) + assert_allclose( + da.data_operations._compute_cross_shear(0.0, 100.0, 
np.pi / 2), 100.0, **TOLERANCE + ) + assert_allclose( + da.data_operations._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE + ) + assert_allclose(da.data_operations._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) def test_compute_tangential_shear(): """test compute tangential shear""" shear1, shear2, phi = 0.15, 0.08, 0.52 expected_tangential_shear = -0.14492537676438383 - tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi) + tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi) assert_allclose(tangential_shear, expected_tangential_shear) shear1 = np.array([0.15, 0.40]) shear2 = np.array([0.08, 0.30]) phi = np.array([0.52, 1.23]) expected_tangential_shear = [-0.14492537676438383, 0.1216189244145496] - tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi) + tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi) assert_allclose(tangential_shear, expected_tangential_shear) # test for reasonable values - assert_allclose(da.ops._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE) - assert_allclose(da.ops._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE) - assert_allclose(da.ops._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) + assert_allclose( + da.data_operations._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE + ) + assert_allclose( + da.data_operations._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE + ) + assert_allclose(da.data_operations._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE) def test_compute_lensing_angles_flatsky(): @@ -63,7 +73,7 @@ def test_compute_lensing_angles_flatsky(): # Ensure that we throw a warning with >1 deg separation assert_warns( UserWarning, - da.ops._compute_lensing_angles_flatsky, + da.data_operations._compute_lensing_angles_flatsky, ra_l, dec_l, np.array([151.32, 161.34]), @@ -73,7 +83,7 @@ def test_compute_lensing_angles_flatsky(): # Test outputs for reasonable values ra_l, dec_l = 161.32, 51.49 ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55]) - thetas, phis = da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s) + thetas, phis = da.data_operations._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s) assert_allclose( thetas, @@ -91,7 +101,9 @@ def test_compute_lensing_angles_flatsky(): # lens and source at the same ra assert_allclose( - da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([161.32, 161.34]), dec_s), + da.data_operations._compute_lensing_angles_flatsky( + ra_l, dec_l, np.array([161.32, 161.34]), dec_s + ), [ [0.00069813170079771690, 0.00106951489719733675], [-1.57079632679489655800, 1.77544123918164542530], @@ -102,7 +114,9 @@ def test_compute_lensing_angles_flatsky(): # lens and source at the same dec assert_allclose( - da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, np.array([51.49, 51.55])), + da.data_operations._compute_lensing_angles_flatsky( + ra_l, dec_l, ra_s, np.array([51.49, 51.55]) + ), [ [0.00032601941539388962, 0.00106951489719733675], [0.00000000000000000000, 1.77544123918164542530], @@ -113,7 +127,7 @@ def test_compute_lensing_angles_flatsky(): # lens and source at the same ra and dec assert_allclose( - da.ops._compute_lensing_angles_flatsky( + da.data_operations._compute_lensing_angles_flatsky( ra_l, dec_l, np.array([ra_l, 161.34]), np.array([dec_l, 51.55]) ), [ @@ -126,7 +140,9 @@ def test_compute_lensing_angles_flatsky(): # angles over the branch cut between 
0 and 360 assert_allclose( - da.ops._compute_lensing_angles_flatsky(0.1, dec_l, np.array([359.9, 359.5]), dec_s), + da.data_operations._compute_lensing_angles_flatsky( + 0.1, dec_l, np.array([359.9, 359.5]), dec_s + ), [ [0.0022828333888309108, 0.006603944760273219], [-0.31079754672938664, 0.15924369771830643], @@ -138,15 +154,17 @@ def test_compute_lensing_angles_flatsky(): # coordinate_system conversion ra_l, dec_l = 161.32, 51.49 ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55]) - thetas_pixel, phis_pixel = da.ops._compute_lensing_angles_flatsky( + thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_flatsky( ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean" ) - thetas_sky, phis_sky = da.ops._compute_lensing_angles_flatsky( + thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_flatsky( ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial" ) assert_allclose( - da.ops._compute_lensing_angles_flatsky(-180, dec_l, np.array([180.1, 179.7]), dec_s), + da.data_operations._compute_lensing_angles_flatsky( + -180, dec_l, np.array([180.1, 179.7]), dec_s + ), [[0.0012916551296819666, 0.003424250083245557], [-2.570568636904587, 0.31079754672944354]], TOLERANCE["rtol"], err_msg="Failure when ra_l and ra_s are the same but one is defined negative", @@ -173,10 +191,10 @@ def test_compute_lensing_angles_astropy(): # coordinate_system conversion ra_l, dec_l = 161.32, 51.49 ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55]) - thetas_pixel, phis_pixel = da.ops._compute_lensing_angles_astropy( + thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_astropy( ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean" ) - thetas_sky, phis_sky = da.ops._compute_lensing_angles_astropy( + thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_astropy( ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial" ) diff --git a/tests/test_mockdata.py b/tests/test_mockdata.py index c075b6e79..d525ca8cc 100644 --- a/tests/test_mockdata.py +++ b/tests/test_mockdata.py @@ -264,13 +264,17 @@ def test_shapenoise(): # Verify that the shape noise is Gaussian around 0 (for the very small shear here) sigma = 0.25 - data = mock.generate_galaxy_catalog(10**12.0, 0.3, 4, cosmo, 0.8, ngals=50000, shapenoise=sigma) + data = mock.generate_galaxy_catalog( + 10**12.0, 0.3, 4, cosmo, 0.8, ngals=50000, shapenoise=sigma + ) # Check that there are no galaxies with |e|>1 assert_equal(np.count_nonzero((data["e1"] > 1) | (data["e1"] < -1)), 0) assert_equal(np.count_nonzero((data["e2"] > 1) | (data["e2"] < -1)), 0) # Check that shape noise is Guassian with correct std dev bins = np.arange(-1, 1.1, 0.1) - gauss = 5000 * np.exp(-0.5 * (bins[:-1] + 0.05) ** 2 / sigma**2) / (sigma * np.sqrt(2 * np.pi)) + gauss = ( + 5000 * np.exp(-0.5 * (bins[:-1] + 0.05) ** 2 / sigma**2) / (sigma * np.sqrt(2 * np.pi)) + ) assert_allclose(np.histogram(data["e1"], bins=bins)[0], gauss, atol=50, rtol=0.05) assert_allclose(np.histogram(data["e2"], bins=bins)[0], gauss, atol=50, rtol=0.05) diff --git a/tests/test_theory.py b/tests/test_theory.py index a17ab661d..6a5a7f96d 100644 --- a/tests/test_theory.py +++ b/tests/test_theory.py @@ -7,7 +7,11 @@ from clmm.constants import Constants as clc from clmm.galaxycluster import GalaxyCluster from clmm import GCData -from clmm.utils import compute_beta_s_square_mean_from_distribution, compute_beta_s_mean_from_distribution, compute_beta_s_func +from clmm.utils import ( + compute_beta_s_square_mean_from_distribution, + 
compute_beta_s_mean_from_distribution, + compute_beta_s_func, +) from clmm.redshift.distributions import chang2013, desc_srd TOLERANCE = {"rtol": 1.0e-8} @@ -213,7 +217,7 @@ def test_compute_reduced_shear(modeling_data): assert_allclose( theo.compute_reduced_shear_from_convergence(np.array(shear), np.array(convergence)), np.array(truth), - **TOLERANCE + **TOLERANCE, ) @@ -254,7 +258,7 @@ def helper_profiles(func): assert_allclose( func(r3d, mdelta, cdelta, z_cl, cclcosmo, halo_profile_model="nfw"), defaulttruth, - **TOLERANCE + **TOLERANCE, ) assert_allclose( func(r3d, mdelta, cdelta, z_cl, cclcosmo, massdef="mean"), defaulttruth, **TOLERANCE @@ -263,7 +267,7 @@ def helper_profiles(func): assert_allclose( func(r3d, mdelta, cdelta, z_cl, cclcosmo, halo_profile_model="NFW"), defaulttruth, - **TOLERANCE + **TOLERANCE, ) assert_allclose( func(r3d, mdelta, cdelta, z_cl, cclcosmo, massdef="MEAN"), defaulttruth, **TOLERANCE @@ -375,22 +379,25 @@ def test_profiles(modeling_data, profile_init): # Test use_projected_quad if mod.backend == "ccl" and profile_init == "einasto": - if hasattr(mod.hdpm, 'projected_quad'): + if hasattr(mod.hdpm, "projected_quad"): mod.set_projected_quad(True) assert_allclose( mod.eval_surface_density( cfg["SIGMA_PARAMS"]["r_proj"], cfg["SIGMA_PARAMS"]["z_cl"], verbose=True ), cfg["numcosmo_profiles"]["Sigma"], - reltol*1e-1, + reltol * 1e-1, ) assert_allclose( theo.compute_surface_density( - cosmo=cosmo, **cfg["SIGMA_PARAMS"], alpha_ein=alpha_ein, verbose=True, + cosmo=cosmo, + **cfg["SIGMA_PARAMS"], + alpha_ein=alpha_ein, + verbose=True, use_projected_quad=True, ), cfg["numcosmo_profiles"]["Sigma"], - reltol*1e-1, + reltol * 1e-1, ) delattr(mod.hdpm, "projected_quad") @@ -547,11 +554,13 @@ def test_shear_convergence_unittests(modeling_data, profile_init): cfg_inf = load_validation_config() # compute some values - cfg_inf['GAMMA_PARAMS']['z_src'] = 1000. 
+ cfg_inf["GAMMA_PARAMS"]["z_src"] = 1000.0 beta_s_mean = compute_beta_s_mean_from_distribution( - cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo) + cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo + ) beta_s_square_mean = compute_beta_s_square_mean_from_distribution( - cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo) + cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo + ) gammat_inf = theo.compute_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]) kappa_inf = theo.compute_convergence(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"]) @@ -581,14 +590,14 @@ def test_shear_convergence_unittests(modeling_data, profile_init): theo.compute_reduced_tangential_shear, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - approx="notvalid" + approx="notvalid", ) assert_raises( ValueError, theo.compute_magnification, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - approx="notvalid" + approx="notvalid", ) assert_raises( ValueError, @@ -596,7 +605,7 @@ def test_shear_convergence_unittests(modeling_data, profile_init): cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha, - approx="notvalid" + approx="notvalid", ) # test KeyError from invalid key in integ_kwargs assert_raises( @@ -604,14 +613,14 @@ def test_shear_convergence_unittests(modeling_data, profile_init): theo.compute_reduced_tangential_shear, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - integ_kwargs={"notavalidkey": 0.0} + integ_kwargs={"notavalidkey": 0.0}, ) assert_raises( KeyError, theo.compute_magnification, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - integ_kwargs={"notavalidkey": 0.0} + integ_kwargs={"notavalidkey": 0.0}, ) assert_raises( KeyError, @@ -619,7 +628,7 @@ def test_shear_convergence_unittests(modeling_data, profile_init): cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=alpha, - integ_kwargs={"notavalidkey": 0.0} + integ_kwargs={"notavalidkey": 0.0}, ) # test ValueError from unsupported z_src_info cfg_inf["GAMMA_PARAMS"]["z_src_info"] = "notvalid" @@ -632,14 +641,14 @@ def test_shear_convergence_unittests(modeling_data, profile_init): theo.compute_reduced_tangential_shear, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - approx="order1" + approx="order1", ) assert_raises( ValueError, theo.compute_magnification, cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], - approx="order1" + approx="order1", ) assert_raises( ValueError, @@ -647,7 +656,7 @@ def test_shear_convergence_unittests(modeling_data, profile_init): cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"], alpha=2, - approx="order1" + approx="order1", ) # test z_src_info = 'beta' @@ -1065,7 +1074,7 @@ def test_compute_magnification_bias(modeling_data): assert_allclose( theo.compute_magnification_bias_from_magnification(magnification[0], alpha[0]), truth[0][0], - **TOLERANCE + **TOLERANCE, ) assert_allclose( theo.compute_magnification_bias_from_magnification(magnification, alpha), truth, **TOLERANCE @@ -1075,7 +1084,7 @@ def test_compute_magnification_bias(modeling_data): np.array(magnification), np.array(alpha) ), np.array(truth), - **TOLERANCE + **TOLERANCE, ) From aa3e2a95d1f69d683b0d01d49aff4b6c25436107 Mon Sep 17 00:00:00 2001 From: m-aguena Date: Thu, 18 Jul 2024 17:54:47 +0200 Subject: [PATCH 14/15] revert clmm/dataops/data_operations.py -> clmm/dataops/__init__.py --- clmm/dataops/__init__.py | 631 +++++++++++++++++++++++++++++++- clmm/dataops/data_operations.py | 629 ------------------------------- tests/test_clusterensemble.py | 4 +- tests/test_dataops.py | 66 ++-- 4 files changed, 650 insertions(+), 680 
deletions(-)
 delete mode 100644 clmm/dataops/data_operations.py

diff --git a/clmm/dataops/__init__.py b/clmm/dataops/__init__.py
index 8b345f7e5..eab62c280 100644
--- a/clmm/dataops/__init__.py
+++ b/clmm/dataops/__init__.py
@@ -1,9 +1,628 @@
 """Data operation for polar/azimuthal averages in radial bins and weights"""
-from .data_operations import (
-    compute_tangential_and_cross_components,
-    compute_background_probability,
-    compute_galaxy_weights,
+import warnings
+import numpy as np
+from astropy.coordinates import SkyCoord
+from astropy import units as u
+from ..gcdata import GCData
+from ..utils import (
+    compute_radial_averages,
     make_bins,
-    make_radial_profile,
-    make_stacked_radial_profile,
+    convert_units,
+    arguments_consistency,
+    validate_argument,
+    _validate_ra,
+    _validate_dec,
+    _validate_is_deltasigma_sigma_c,
+    _validate_coordinate_system,
 )
+from ..redshift import _integ_pzfuncs
+
+
+def compute_tangential_and_cross_components(
+    ra_lens,
+    dec_lens,
+    ra_source,
+    dec_source,
+    shear1,
+    shear2,
+    coordinate_system="euclidean",
+    geometry="curve",
+    is_deltasigma=False,
+    sigma_c=None,
+    validate_input=True,
+):
+    r"""Computes tangential- and cross- components for shear or ellipticity
+
+    To do so, we need the right ascension and declination of the lens and of all of the sources.
+    We also need the two shape components of all of the sources.
+
+    These quantities can be handed to `compute_tangential_and_cross_components` in two ways
+
+    1. Pass in each as parameters::
+
+        compute_tangential_and_cross_components(ra_lens, dec_lens, ra_source, dec_source,
+        shape_component1, shape_component2)
+
+    2. As a method of `GalaxyCluster`::
+
+        cluster.compute_tangential_and_cross_components()
+
+    The angular separation between the source and the lens, :math:`\theta`, and the azimuthal
+    position of the source relative to the lens, :math:`\phi`, are computed within the function
+    and the angular separation is returned.
+
+    In the flat sky approximation, these angles are calculated using (_lens: lens,
+    _source: source, RA is from right to left)
+
+    .. math::
+        \theta^2 = & \left(\delta_s-\delta_l\right)^2+
+        \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)\\
+        \tan\phi = & \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)}
+
+    The tangential, :math:`g_t`, and cross, :math:`g_x`, ellipticity/shear components are
+    calculated using the two ellipticity/shear components :math:`g_1` and :math:`g_2` of the
+    source galaxies, following Eq.7 and Eq.8 in Schrabback et al. (2018), arXiv:1611:03866 which
+    is consistent with arXiv:0509252
+
+    .. math::
+        g_t =& -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right)\\
+        g_x =& g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right)
+
+    Finally, if the critical surface density (:math:`\Sigma_\text{crit}`) is provided, an estimate
+    of the excess surface density :math:`\widehat{\Delta\Sigma}` is obtained from
+
+    .. math::
+        \widehat{\Delta\Sigma_{t,x}} = g_{t,x} \times \Sigma_\text{crit}(z_l, z_{\text{src}})
+
+    If :math:`g_{t,x}` correspond to the shear, the above expression is accurate. However, if
+    :math:`g_{t,x}` correspond to ellipticities or reduced shear, this expression only gives an
+    estimate :math:`\widehat{\Delta\Sigma_{t,x}}`, valid only in the weak lensing regime.
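+
+    As a rough, order-of-magnitude illustration (the numbers here are arbitrary, not
+    taken from the code or any dataset): a source with :math:`g_t = 0.02` behind a lens
+    with :math:`\Sigma_\text{crit} = 4\times10^{15}\ M_\odot\ Mpc^{-2}` yields
+    :math:`\widehat{\Delta\Sigma_t} = 0.02 \times 4\times10^{15}
+    = 8\times10^{13}\ M_\odot\ Mpc^{-2}`.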
+
+    Parameters
+    ----------
+    ra_lens: float
+        Right ascension of the lensing cluster in degrees
+    dec_lens: float
+        Declination of the lensing cluster in degrees
+    ra_source: array
+        Right ascensions of each source galaxy in degrees
+    dec_source: array
+        Declinations of each source galaxy in degrees
+    shear1: array
+        The measured shear (or reduced shear or ellipticity) of the source galaxies
+    shear2: array
+        The measured shear (or reduced shear or ellipticity) of the source galaxies
+    coordinate_system: str, optional
+        Coordinate system of the ellipticity components. Must be either 'celestial' or
+        'euclidean'. See https://doi.org/10.48550/arXiv.1407.7676 section 5.1 for more details.
+        Default is 'euclidean'.
+    geometry: str, optional
+        Sky geometry to compute angular separation.
+        Options are curve (uses astropy) or flat.
+    is_deltasigma: bool
+        If `True`, the tangential and cross components returned are multiplied by Sigma_crit.
+        It requires `sigma_c` argument. Results in units of :math:`M_\odot\ Mpc^{-2}`
+    sigma_c : None, array_like
+        Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`.
+        Used only when is_deltasigma=True.
+    validate_input: bool
+        Validate each input argument
+
+    Returns
+    -------
+    angsep: array_like
+        Angular separation between lens and each source galaxy in radians
+    tangential_component: array_like
+        Tangential shear (or assimilated quantity) for each source galaxy
+    cross_component: array_like
+        Cross shear (or assimilated quantity) for each source galaxy
+    """
+    # pylint: disable-msg=too-many-locals
+    # Note: we make these quantities to be np.array so that a name is not passed from astropy
+    # columns
+    if validate_input:
+        _validate_ra(locals(), "ra_source", True)
+        _validate_dec(locals(), "dec_source", True)
+        _validate_ra(locals(), "ra_lens", True)
+        _validate_dec(locals(), "dec_lens", True)
+        validate_argument(locals(), "shear1", "float_array")
+        validate_argument(locals(), "shear2", "float_array")
+        validate_argument(locals(), "geometry", str)
+        validate_argument(locals(), "sigma_c", "float_array", none_ok=True)
+        _validate_coordinate_system(locals(), "coordinate_system", str)
+        ra_source_, dec_source_, shear1_, shear2_ = arguments_consistency(
+            [ra_source, dec_source, shear1, shear2],
+            names=("Ra", "Dec", "Shear1", "Shear2"),
+            prefix="Tangential- and Cross- shape components sources",
+        )
+        _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c)
+    elif np.iterable(ra_source):
+        ra_source_, dec_source_, shear1_, shear2_ = (
+            np.array(col) for col in [ra_source, dec_source, shear1, shear2]
+        )
+    else:
+        ra_source_, dec_source_, shear1_, shear2_ = (
+            ra_source,
+            dec_source,
+            shear1,
+            shear2,
+        )
+    # Compute the lensing angles
+    if geometry == "flat":
+        angsep, phi = _compute_lensing_angles_flatsky(
+            ra_lens, dec_lens, ra_source_, dec_source_, coordinate_system=coordinate_system
+        )
+    elif geometry == "curve":
+        angsep, phi = _compute_lensing_angles_astropy(
+            ra_lens, dec_lens, ra_source_, dec_source_, coordinate_system=coordinate_system
+        )
+    else:
+        raise NotImplementedError(f"Sky geometry {geometry} is not currently supported")
+    # Compute the tangential and cross shears
+    tangential_comp = _compute_tangential_shear(shear1_, shear2_, phi)
+    cross_comp = _compute_cross_shear(shear1_, shear2_, phi)
+
+    if sigma_c is not None:
+        _sigma_c_arr = np.array(sigma_c)
+        tangential_comp *= _sigma_c_arr
+        cross_comp *= _sigma_c_arr
+
+    return angsep, tangential_comp, cross_comp
+
+
+def compute_background_probability(
+    z_lens,
z_src=None, use_pdz=False, pzpdf=None, pzbins=None, validate_input=True
+):
+    r"""Probability for being a background galaxy, defined by:
+
+    .. math::
+        P(z_s > z_l) = \int_{z_l}^{+\infty} dz_s \; p_{\text{photoz}}(z_s),
+
+    when the photometric probability density functions (:math:`p_{\text{photoz}}(z_s)`) are
+    provided. In the case of true redshifts, it returns 1 if :math:`z_s > z_l` else returns 0.
+
+
+    Parameters
+    ----------
+    z_lens: float
+        Redshift of the lens.
+    z_src: array, optional
+        Redshift of the source. Used only if pzpdf=pzbins=None.
+
+    Returns
+    -------
+    p_background : array
+        Probability for being a background galaxy
+    """
+    if validate_input:
+        validate_argument(locals(), "z_lens", float, argmin=0, eqmin=True)
+        validate_argument(locals(), "z_src", "float_array", argmin=0, eqmin=True, none_ok=True)
+
+    if use_pdz is False:
+        if z_src is None:
+            raise ValueError("z_src must be provided.")
+        p_background = np.array(z_src > z_lens, dtype=float)
+    else:
+        if pzpdf is None or pzbins is None:
+            raise ValueError("pzbins must be provided with pzpdf.")
+        p_background = _integ_pzfuncs(pzpdf, pzbins, z_lens)
+
+    return p_background
+
+
+def compute_galaxy_weights(
+    use_shape_noise=False,
+    shape_component1=None,
+    shape_component2=None,
+    use_shape_error=False,
+    shape_component1_err=None,
+    shape_component2_err=None,
+    is_deltasigma=False,
+    sigma_c=None,
+    validate_input=True,
+):
+    r"""Computes the individual lens-source pair weights
+
+    The weights :math:`w_{ls}` are expressed as :math:`w_{ls} = w_{ls, \text{geo}} \times w_{ls,
+    \text{shape}}`, following E. S. Sheldon et al. (2003), arXiv:astro-ph/0312036:
+
+    1. If computed for shear, the geometrical weights :math:`w_{ls, \text{geo}}` are equal to 1. If
+    computed for :math:`\Delta \Sigma`, it depends on lens and source redshift information via the
+    critical surface density. This component can be expressed as:
+
+    .. math::
+        w_{ls, \text{geo}} = \Sigma_\text{crit}(z_l, z_{\text{src}})^{-2}\;.
+
+    when only redshift point estimates are provided, or as:
+
+
+    .. math::
+        w_{ls, \text{geo}} = \Sigma_\text{crit}^\text{eff}(z_l, z_{\text{src}})^{-2}
+        = \left[\int_{\delta + z_l} dz_s \; p_{\text{photoz}}(z_s)
+        \Sigma_\text{crit}(z_l, z_s)^{-1}\right]^2
+
+    when the redshift pdf of each source, :math:`p_{\text{photoz}}(z_s)`, is known.
+
+    2. The shape weight :math:`w_{ls,{\text{shape}}}` depends on shapenoise and/or shape
+    measurement errors
+
+    .. math::
+        w_{ls, \text{shape}}^{-1} = \sigma_{\text{shapenoise}}^2 +
+        \sigma_{\text{measurement}}^2
+
+
+    Parameters
+    ----------
+    is_deltasigma: bool
+        If `False`, weights are computed for shear, else weights are computed for
+        :math:`\Delta \Sigma`.
+    sigma_c : None, array_like
+        Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`.
+        Used only when is_deltasigma=True.
+    use_shape_noise: bool
+        If `True` shape noise is included in the weight computation. It then requires
+        `shape_component{1,2}` to be provided. Default: False.
+    shape_component1: array_like
+        The measured shear (or reduced shear or ellipticity) of the source galaxies,
+        used if `use_shapenoise=True`
+    shape_component2: array_like
+        The measured shear (or reduced shear or ellipticity) of the source galaxies,
+        used if `use_shapenoise=True`
+    use_shape_error: bool
+        If `True` shape errors are included in the weight computation. It then requires
+        `shape_component{1,2}_err` to be provided. Default: False.
+    shape_component1_err: array_like
+        The measurement error on the 1st-component of ellipticity of the source galaxies,
+        used if `use_shape_error=True`
+    shape_component2_err: array_like
+        The measurement error on the 2nd-component of ellipticity of the source galaxies,
+        used if `use_shape_error=True`
+    validate_input: bool
+        Validate each input argument
+
+    Returns
+    -------
+    w_ls: array_like
+        Individual lens source pair weights
+    """
+    if validate_input:
+        validate_argument(locals(), "sigma_c", "float_array", none_ok=True)
+        validate_argument(locals(), "shape_component1", "float_array", none_ok=True)
+        validate_argument(locals(), "shape_component2", "float_array", none_ok=True)
+        validate_argument(locals(), "shape_component1_err", "float_array", none_ok=True)
+        validate_argument(locals(), "shape_component2_err", "float_array", none_ok=True)
+        validate_argument(locals(), "use_shape_noise", bool)
+        validate_argument(locals(), "use_shape_error", bool)
+        arguments_consistency(
+            [shape_component1, shape_component2],
+            names=("shape_component1", "shape_component2"),
+            prefix="Shape components sources",
+        )
+        _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c)
+
+    # computing w_ls_geo
+    w_ls_geo = 1.0
+    if sigma_c is not None:
+        w_ls_geo /= np.array(sigma_c) ** 2
+
+    # computing w_ls_shape
+    err_e2 = 0
+
+    if use_shape_noise:
+        if shape_component1 is None or shape_component2 is None:
+            raise ValueError(
+                "With the shape noise option, the source shapes "
+                "`shape_component_{1,2}` must be specified"
+            )
+        err_e2 += np.std(shape_component1) ** 2 + np.std(shape_component2) ** 2
+    if use_shape_error:
+        if shape_component1_err is None or shape_component2_err is None:
+            raise ValueError(
+                "With the shape error option, the source shapes errors"
+                "`shape_component_err{1,2}` must be specified"
+            )
+        err_e2 += shape_component1_err**2
+        err_e2 += shape_component2_err**2
+
+    if hasattr(err_e2, "__len__"):
+        w_ls_shape = np.ones(len(err_e2))
+        w_ls_shape[err_e2 > 0] = 1.0 / err_e2[err_e2 > 0]
+    else:
+        w_ls_shape = 1.0 / err_e2 if err_e2 > 0 else 1.0
+
+    w_ls = w_ls_shape * w_ls_geo
+
+    return w_ls
+
+
+def _compute_lensing_angles_flatsky(
+    ra_lens, dec_lens, ra_source_list, dec_source_list, coordinate_system="euclidean"
+):
+    r"""Compute the angular separation between the lens and the source and the azimuthal
+    angle from the lens to the source in radians.
+
+    In the flat sky approximation, these angles are calculated using
+
+    .. math::
+        \theta = \sqrt{\left(\delta_s-\delta_l\right)^2+
+        \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)}
+
+        \tan\phi = \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)}
+
+    For extended descriptions of parameters, see `compute_shear()` documentation.
+
+    Parameters
+    ----------
+    ra_lens: float
+        Right ascension of the lensing cluster in degrees
+    dec_lens: float
+        Declination of the lensing cluster in degrees
+    ra_source_list: array
+        Right ascensions of each source galaxy in degrees
+    dec_source_list: array
+        Declinations of each source galaxy in degrees
+    coordinate_system: str, optional
+        Coordinate system of the ellipticity components. Must be either 'celestial' or
+        'euclidean'. See https://doi.org/10.48550/arXiv.1407.7676 section 5.1 for more details.
+        Default is 'euclidean'.
+
+    Returns
+    -------
+    angsep: array
+        Angular separation between the lens and the source in radians
+    phi: array
+        Azimuthal angle from the lens to the source in radians
+    """
+    delta_ra = np.radians(ra_source_list - ra_lens)
+    # Put angles between -pi and pi
+    delta_ra -= np.round(delta_ra / (2.0 * np.pi)) * 2.0 * np.pi
+    deltax = delta_ra * np.cos(np.radians(dec_lens))
+    deltay = np.radians(dec_source_list - dec_lens)
+    # Ensure that abs(delta ra) < pi
+    # deltax[deltax >= np.pi] = deltax[deltax >= np.pi]-2.*np.pi
+    # deltax[deltax < -np.pi] = deltax[deltax < -np.pi]+2.*np.pi
+    angsep = np.sqrt(deltax**2 + deltay**2)
+    phi = np.arctan2(deltay, -deltax)
+    # Forcing phi to be zero every time angsep is zero. This is necessary due to arctan2 features.
+    if np.iterable(phi):
+        phi[angsep == 0.0] = 0.0
+    else:
+        phi = 0.0 if angsep == 0.0 else phi
+    if coordinate_system == "celestial":
+        phi = np.pi - phi
+    if np.any(angsep > np.pi / 180.0):
+        warnings.warn("Using the flat-sky approximation with separations >1 deg may be inaccurate")
+    return angsep, phi
+
+
+def _compute_lensing_angles_astropy(
+    ra_lens, dec_lens, ra_source_list, dec_source_list, coordinate_system="euclidean"
+):
+    r"""Compute the angular separation between the lens and the source and the azimuthal
+    angle from the lens to the source in radians.
+
+    Parameters
+    ----------
+    ra_lens: float
+        Right ascension of the lensing cluster in degrees
+    dec_lens: float
+        Declination of the lensing cluster in degrees
+    ra_source_list: array
+        Right ascensions of each source galaxy in degrees
+    dec_source_list: array
+        Declinations of each source galaxy in degrees
+    coordinate_system: str, optional
+        Coordinate system of the ellipticity components. Must be either 'celestial' or
+        'euclidean'. See https://doi.org/10.48550/arXiv.1407.7676 section 5.1 for more details.
+        Default is 'euclidean'.
+
+    Returns
+    -------
+    angsep: array
+        Angular separation between the lens and the source in radians
+    phi: array
+        Azimuthal angle from the lens to the source in radians
+    """
+    sk_lens = SkyCoord(ra_lens * u.deg, dec_lens * u.deg, frame="icrs")
+    sk_src = SkyCoord(ra_source_list * u.deg, dec_source_list * u.deg, frame="icrs")
+    angsep, phi = sk_lens.separation(sk_src).rad, sk_lens.position_angle(sk_src).rad
+    # Transformations for phi to have same orientation as _compute_lensing_angles_flatsky
+    phi += 0.5 * np.pi
+    if np.iterable(phi):
+        phi[phi > np.pi] -= 2 * np.pi
+        phi[angsep == 0] = 0
+    else:
+        phi -= 2 * np.pi if phi > np.pi else 0
+        phi = 0 if angsep == 0 else phi
+    if coordinate_system == "celestial":
+        phi = np.pi - phi
+    return angsep, phi
+
+
+def _compute_tangential_shear(shear1, shear2, phi):
+    r"""Compute the tangential shear given the two shears and azimuthal positions for
+    a single source or list of sources.
+
+    We compute the tangential shear following Eq. 7 of Schrabback et al. 2018, arXiv:1611:03866
+
+    .. math::
+        g_t = -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right)
+
+    For extended descriptions of parameters, see `compute_shear()` documentation.
+    """
+    return -(shear1 * np.cos(2.0 * phi) + shear2 * np.sin(2.0 * phi))
+
+
+def _compute_cross_shear(shear1, shear2, phi):
+    r"""Compute the cross shear given the two shears and azimuthal position for a single
+    source or list of sources.
+
+    We compute the cross shear following Eq. 8 of Schrabback et al. 2018, arXiv:1611:03866
+    also checked arxiv 0509252
+
+    .. math::
+        g_x = g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right)
+
+    For extended descriptions of parameters, see `compute_shear()` documentation.
+    """
+    return shear1 * np.sin(2.0 * phi) - shear2 * np.cos(2.0 * phi)
+
+
+def make_radial_profile(
+    components,
+    angsep,
+    angsep_units,
+    bin_units,
+    bins=10,
+    components_error=None,
+    error_model="ste",
+    include_empty_bins=False,
+    return_binnumber=False,
+    cosmo=None,
+    z_lens=None,
+    validate_input=True,
+    weights=None,
+):
+    r"""Compute the angular profile of given components
+
+    We assume that the cluster object contains information on the cross and
+    tangential shears or ellipticities and angular separation of the source galaxies
+
+    This function can be called in two ways:
+
+    1. Pass explicit arguments::
+
+        make_radial_profile([component1, component2], distances, 'radians')
+
+    2. Call it as a method of a GalaxyCluster instance and specify `bin_units`::
+
+        cluster.make_radial_profile('Mpc')
+
+    Parameters
+    ----------
+    components: list of arrays
+        List of arrays to be binned in the radial profile
+    angsep: array
+        Transversal distances between the sources and the lens
+    angsep_units : str
+        Units of the calculated separation of the source galaxies
+        Allowed Options = ["radians"]
+    bin_units : str
+        Units to use for the radial bins of the radial profile
+        Allowed Options = ["radians", "deg", "arcmin", "arcsec", "kpc", "Mpc"]
+        (letter case independent)
+    bins : array_like, optional
+        User defined bins to use for the shear profile. If a list is provided, use that as
+        the bin edges. If a scalar is provided, create that many equally spaced bins between
+        the minimum and maximum angular separations in bin_units. If nothing is provided,
+        default to 10 equally spaced bins.
+    components_error: list of arrays, None
+        List of errors for input arrays
+    error_model : str, optional
+        Statistical error model to use for y uncertainties. (letter case independent)
+        `ste` - Standard error [=std/sqrt(n) in unweighted computation] (Default).
+        `std` - Standard deviation.
+    include_empty_bins: bool, optional
+        Also include empty bins in the returned table
+    return_binnumber: bool, optional
+        Also returns the indices of the bins for each object
+    cosmo : CLMM.Cosmology
+        CLMM Cosmology object to convert angular separations to physical distances
+    z_lens: array, optional
+        Redshift of the lens
+    validate_input: bool
+        Validate each input argument
+    weights: array-like, optional
+        Array of individual galaxy weights. If specified, the radial binned profile is
+        computed using a weighted average
+
+    Returns
+    -------
+    profile : GCData
+        Output table containing the radius grid points, the profile of the components `p_i`,
+        errors `p_i_err` and number of sources. The errors are defined as the standard errors in
+        each bin.
+    binnumber: 1-D ndarray of ints, optional
+        Indices of the bins (corresponding to `xbins`) in which each value
+        of `xvals` belongs. Same length as `yvals`. A binnumber of `i` means the
+        corresponding value is between (xbins[i-1], xbins[i]).
+
+    Notes
+    -----
+    This is an example of a place where the cosmology-dependence can be sequestered to another
+    module.
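+
+    Examples
+    --------
+    A minimal, illustrative sketch with synthetic inputs (the values below are
+    arbitrary and only meant to show the calling convention):
+
+    >>> import numpy as np
+    >>> angsep = np.linspace(1.0e-4, 1.0e-2, 100)  # radians
+    >>> gt = np.random.normal(0.05, 0.01, 100)
+    >>> gx = np.random.normal(0.00, 0.01, 100)
+    >>> profile = make_radial_profile([gt, gx], angsep, "radians", "radians", bins=5)
+    >>> profile.colnames  # doctest: +SKIP
+    ['radius_min', 'radius', 'radius_max', 'p_0', 'p_0_err', 'p_1', 'p_1_err', 'n_src', 'weights_sum']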
+    """
+    # pylint: disable-msg=too-many-locals
+    if validate_input:
+        validate_argument(locals(), "angsep", "float_array")
+        validate_argument(locals(), "angsep_units", str)
+        validate_argument(locals(), "bin_units", str)
+        validate_argument(locals(), "include_empty_bins", bool)
+        validate_argument(locals(), "return_binnumber", bool)
+        validate_argument(locals(), "z_lens", "float_array", none_ok=True)
+        comp_dict = {f"components[{i}]": comp for i, comp in enumerate(components)}
+        arguments_consistency(components, names=comp_dict.keys(), prefix="Input components")
+        for component in comp_dict:
+            validate_argument(comp_dict, component, "float_array")
+    # Check to see if we need to do a unit conversion
+    if angsep_units is not bin_units:
+        source_seps = convert_units(angsep, angsep_units, bin_units, redshift=z_lens, cosmo=cosmo)
+    else:
+        source_seps = angsep
+    # Make bins if they are not provided
+    if not hasattr(bins, "__len__"):
+        bins = make_bins(np.min(source_seps), np.max(source_seps), bins)
+    # Create output table
+    profile_table = GCData(
+        [bins[:-1], np.zeros(len(bins) - 1), bins[1:]],
+        names=("radius_min", "radius", "radius_max"),
+        meta={"bin_units": bin_units},  # Add metadata
+    )
+    # Compute the binned averages and associated errors
+    for i, component in enumerate(components):
+        r_avg, comp_avg, comp_err, nsrc, binnumber, wts_sum = compute_radial_averages(
+            source_seps,
+            component,
+            xbins=bins,
+            yerr=None if components_error is None else components_error[i],
+            error_model=error_model,
+            weights=weights,
+        )
+        profile_table[f"p_{i}"] = comp_avg
+        profile_table[f"p_{i}_err"] = comp_err
+    profile_table["radius"] = r_avg
+    profile_table["n_src"] = nsrc
+    profile_table["weights_sum"] = wts_sum
+    # return empty bins?
+    if not include_empty_bins:
+        profile_table = profile_table[nsrc > 1]
+    if return_binnumber:
+        return profile_table, binnumber
+    return profile_table
+
+
+def make_stacked_radial_profile(angsep, weights, components):
+    """Compute stacked profile, and mean separation distances.
+
+    Parameters
+    ----------
+    angsep: 2d array
+        Transversal distances corresponding to each object with shape `n_obj, n_rad_bins`.
+    weights: 2d array
+        Weights corresponding to each object with shape `n_obj, n_rad_bins`.
+    components: list of 2d arrays
+        List of 2d properties of each array to be stacked with shape
+        `n_components, n_obj, n_rad_bins`.
+
+    Returns
+    -------
+    staked_angsep: array
+        Mean transversal distance in each radial bin.
+    stacked_components: list of arrays
+        List of stacked components.
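+
+    Examples
+    --------
+    A small, illustrative sketch stacking one component measured for two objects
+    in three radial bins each (arbitrary numbers):
+
+    >>> import numpy as np
+    >>> angsep = np.array([[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]])
+    >>> weights = np.ones((2, 3))
+    >>> gt = np.array([[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]])
+    >>> make_stacked_radial_profile(angsep, weights, [gt])  # doctest: +SKIP
+    (array([1.05, 2.05, 3.05]), [array([0.2, 0.2, 0.2])])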
+ """ + staked_angsep = np.average(angsep, axis=0, weights=None) + stacked_components = [ + np.average(component, axis=0, weights=weights) for component in components + ] + return staked_angsep, stacked_components diff --git a/clmm/dataops/data_operations.py b/clmm/dataops/data_operations.py deleted file mode 100644 index 942804778..000000000 --- a/clmm/dataops/data_operations.py +++ /dev/null @@ -1,629 +0,0 @@ -"""Functions to compute polar/azimuthal averages in radial bins""" - -import warnings -import numpy as np -from astropy.coordinates import SkyCoord -from astropy import units as u -from ..gcdata import GCData -from ..utils import ( - compute_radial_averages, - make_bins, - convert_units, - arguments_consistency, - validate_argument, - _validate_ra, - _validate_dec, - _validate_is_deltasigma_sigma_c, - _validate_coordinate_system, -) -from ..redshift import _integ_pzfuncs - - -def compute_tangential_and_cross_components( - ra_lens, - dec_lens, - ra_source, - dec_source, - shear1, - shear2, - coordinate_system="euclidean", - geometry="curve", - is_deltasigma=False, - sigma_c=None, - validate_input=True, -): - r"""Computes tangential- and cross- components for shear or ellipticity - - To do so, we need the right ascension and declination of the lens and of all of the sources. - We also need the two shape components of all of the sources. - - These quantities can be handed to `compute_tangential_and_cross_components` in two ways - - 1. Pass in each as parameters:: - - compute_tangential_and_cross_components(ra_lens, dec_lens, ra_source, dec_source, - shape_component1, shape_component2) - - 2. As a method of `GalaxyCluster`:: - - cluster.compute_tangential_and_cross_components() - - The angular separation between the source and the lens, :math:`\theta`, and the azimuthal - position of the source relative to the lens, :math:`\phi`, are computed within the function - and the angular separation is returned. - - In the flat sky approximation, these angles are calculated using (_lens: lens, - _source: source, RA is from right to left) - - .. math:: - \theta^2 = & \left(\delta_s-\delta_l\right)^2+ - \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)\\ - \tan\phi = & \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)} - - The tangential, :math:`g_t`, and cross, :math:`g_x`, ellipticity/shear components are - calculated using the two ellipticity/shear components :math:`g_1` and :math:`g_2` of the - source galaxies, following Eq.7 and Eq.8 in Schrabback et al. (2018), arXiv:1611:03866 which - is consistent with arXiv:0509252 - - .. math:: - g_t =& -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right)\\ - g_x =& g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right) - - Finally, if the critical surface density (:math:`\Sigma_\text{crit}`) is provided, an estimate - of the excess surface density :math:`\widehat{\Delta\Sigma}` is obtained from - - .. math:: - \widehat{\Delta\Sigma_{t,x}} = g_{t,x} \times \Sigma_\text{crit}(z_l, z_{\text{src}}) - - If :math:`g_{t,x}` correspond to the shear, the above expression is an accurate. However, if - :math:`g_{t,x}` correspond to ellipticities or reduced shear, this expression only gives an - estimate :math:`\widehat{\Delta\Sigma_{t,x}}`, valid only in the weak lensing regime. 
- - Parameters - ---------- - ra_lens: float - Right ascension of the lensing cluster in degrees - dec_lens: float - Declination of the lensing cluster in degrees - ra_source: array - Right ascensions of each source galaxy in degrees - dec_source: array - Declinations of each source galaxy in degrees - shear1: array - The measured shear (or reduced shear or ellipticity) of the source galaxies - shear2: array - The measured shear (or reduced shear or ellipticity) of the source galaxies - coordinate_system: str, optional - Coordinate system of the ellipticity components. Must be either 'celestial' or - euclidean'. See https://doi.org/10.48550/arXiv.1407.7676 section 5.1 for more details. - Default is 'euclidean'. - geometry: str, optional - Sky geometry to compute angular separation. - Options are curve (uses astropy) or flat. - is_deltasigma: bool - If `True`, the tangential and cross components returned are multiplied by Sigma_crit. - It requires `sigma_c` argument. Results in units of :math:`M_\odot\ Mpc^{-2}` - sigma_c : None, array_like - Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`. - Used only when is_deltasigma=True. - validate_input: bool - Validade each input argument - - Returns - ------- - angsep: array_like - Angular separation between lens and each source galaxy in radians - tangential_component: array_like - Tangential shear (or assimilated quantity) for each source galaxy - cross_component: array_like - Cross shear (or assimilated quantity) for each source galaxy - """ - # pylint: disable-msg=too-many-locals - # Note: we make these quantities to be np.array so that a name is not passed from astropy - # columns - if validate_input: - _validate_ra(locals(), "ra_source", True) - _validate_dec(locals(), "dec_source", True) - _validate_ra(locals(), "ra_lens", True) - _validate_dec(locals(), "dec_lens", True) - validate_argument(locals(), "shear1", "float_array") - validate_argument(locals(), "shear2", "float_array") - validate_argument(locals(), "geometry", str) - validate_argument(locals(), "sigma_c", "float_array", none_ok=True) - _validate_coordinate_system(locals(), "coordinate_system", str) - ra_source_, dec_source_, shear1_, shear2_ = arguments_consistency( - [ra_source, dec_source, shear1, shear2], - names=("Ra", "Dec", "Shear1", "Shear2"), - prefix="Tangential- and Cross- shape components sources", - ) - _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c) - elif np.iterable(ra_source): - ra_source_, dec_source_, shear1_, shear2_ = ( - np.array(col) for col in [ra_source, dec_source, shear1, shear2] - ) - else: - ra_source_, dec_source_, shear1_, shear2_ = ( - ra_source, - dec_source, - shear1, - shear2, - ) - # Compute the lensing angles - if geometry == "flat": - angsep, phi = _compute_lensing_angles_flatsky( - ra_lens, dec_lens, ra_source_, dec_source_, coordinate_system=coordinate_system - ) - elif geometry == "curve": - angsep, phi = _compute_lensing_angles_astropy( - ra_lens, dec_lens, ra_source_, dec_source_, coordinate_system=coordinate_system - ) - else: - raise NotImplementedError(f"Sky geometry {geometry} is not currently supported") - # Compute the tangential and cross shears - tangential_comp = _compute_tangential_shear(shear1_, shear2_, phi) - cross_comp = _compute_cross_shear(shear1_, shear2_, phi) - - if sigma_c is not None: - _sigma_c_arr = np.array(sigma_c) - tangential_comp *= _sigma_c_arr - cross_comp *= _sigma_c_arr - - return angsep, tangential_comp, cross_comp - - -def compute_background_probability( - z_lens, 
z_src=None, use_pdz=False, pzpdf=None, pzbins=None, validate_input=True -): - r"""Probability for being a background galaxy, defined by: - - .. math:: - P(z_s > z_l) = \int_{z_l}^{+\infty} dz_s \; p_{\text{photoz}}(z_s), - - when the photometric probability density functions (:math:`p_{\text{photoz}}(z_s)`) are - provided. In the case of true redshifts, it returns 1 if :math:`z_s > z_l` else returns 0. - - - Parameters - ---------- - z_lens: float - Redshift of the lens. - z_src: array, optional - Redshift of the source. Used only if pzpdf=pzbins=None. - - Returns - ------- - p_background : array - Probability for being a background galaxy - """ - if validate_input: - validate_argument(locals(), "z_lens", float, argmin=0, eqmin=True) - validate_argument(locals(), "z_src", "float_array", argmin=0, eqmin=True, none_ok=True) - - if use_pdz is False: - if z_src is None: - raise ValueError("z_src must be provided.") - p_background = np.array(z_src > z_lens, dtype=float) - else: - if pzpdf is None or pzbins is None: - raise ValueError("pzbins must be provided with pzpdf.") - p_background = _integ_pzfuncs(pzpdf, pzbins, z_lens) - - return p_background - - -def compute_galaxy_weights( - use_shape_noise=False, - shape_component1=None, - shape_component2=None, - use_shape_error=False, - shape_component1_err=None, - shape_component2_err=None, - is_deltasigma=False, - sigma_c=None, - validate_input=True, -): - r"""Computes the individual lens-source pair weights - - The weights :math:`w_{ls}` express as : :math:`w_{ls} = w_{ls, \text{geo}} \times w_{ls, - \text{shape}}`, following E. S. Sheldon et al. (2003), arXiv:astro-ph/0312036: - - 1. If computed for shear, the geometrical weights :math:`w_{ls, \text{geo}}` are equal to 1. If - computed for :math:`\Delta \Sigma`, it depends on lens and source redshift information via the - critical surface density. This component can be expressed as: - - .. math:: - w_{ls, \text{geo}} = \Sigma_\text{crit}(z_l, z_{\text{src}})^{-2}\;. - - when only redshift point estimates are provided, or as: - - - .. math:: - w_{ls, \text{geo}} = \Sigma_\text{crit}^\text{eff}(z_l, z_{\text{src}})^{-2} - = \left[\int_{\delta + z_l} dz_s \; p_{\text{photoz}}(z_s) - \Sigma_\text{crit}(z_l, z_s)^{-1}\right]^2 - - when the redshift pdf of each source, :math:`p_{\text{photoz}}(z_s)`, is known. - - 2. The shape weight :math:`w_{ls,{\text{shape}}}` depends on shapenoise and/or shape - measurement errors - - .. math:: - w_{ls, \text{shape}}^{-1} = \sigma_{\text{shapenoise}}^2 + - \sigma_{\text{measurement}}^2 - - - Parameters - ---------- - is_deltasigma: bool - If `False`, weights are computed for shear, else weights are computed for - :math:`\Delta \Sigma`. - sigma_c : None, array_like - Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`. - Used only when is_deltasigma=True. - use_shape_noise: bool - If `True` shape noise is included in the weight computation. It then requires - `shape_componenet{1,2}` to be provided. Default: False. - shape_component1: array_like - The measured shear (or reduced shear or ellipticity) of the source galaxies, - used if `use_shapenoise=True` - shape_component2: array_like - The measured shear (or reduced shear or ellipticity) of the source galaxies, - used if `use_shapenoise=True` - use_shape_error: bool - If `True` shape errors are included in the weight computation. It then requires - shape_component{1,2}_err` to be provided. Default: False. 
-
-
-def compute_galaxy_weights(
-    use_shape_noise=False,
-    shape_component1=None,
-    shape_component2=None,
-    use_shape_error=False,
-    shape_component1_err=None,
-    shape_component2_err=None,
-    is_deltasigma=False,
-    sigma_c=None,
-    validate_input=True,
-):
-    r"""Computes the individual lens-source pair weights
-
-    The weights :math:`w_{ls}` are expressed as :math:`w_{ls} = w_{ls, \text{geo}} \times w_{ls,
-    \text{shape}}`, following E. S. Sheldon et al. (2003), arXiv:astro-ph/0312036:
-
-    1. If computed for shear, the geometrical weights :math:`w_{ls, \text{geo}}` are equal to 1.
-    If computed for :math:`\Delta \Sigma`, they depend on lens and source redshift information
-    via the critical surface density. This component can be expressed as:
-
-    .. math::
-        w_{ls, \text{geo}} = \Sigma_\text{crit}(z_l, z_{\text{src}})^{-2}\;,
-
-    when only redshift point estimates are provided, or as:
-
-    .. math::
-        w_{ls, \text{geo}} = \Sigma_\text{crit}^\text{eff}(z_l, z_{\text{src}})^{-2}
-        = \left[\int_{\delta + z_l} dz_s \; p_{\text{photoz}}(z_s)
-        \Sigma_\text{crit}(z_l, z_s)^{-1}\right]^2,
-
-    when the redshift pdf of each source, :math:`p_{\text{photoz}}(z_s)`, is known.
-
-    2. The shape weight :math:`w_{ls,{\text{shape}}}` depends on shape noise and/or shape
-    measurement errors:
-
-    .. math::
-        w_{ls, \text{shape}}^{-1} = \sigma_{\text{shapenoise}}^2 +
-        \sigma_{\text{measurement}}^2
-
-    Parameters
-    ----------
-    is_deltasigma: bool
-        If `False`, weights are computed for shear, else weights are computed for
-        :math:`\Delta \Sigma`.
-    sigma_c : None, array_like
-        Critical (effective) surface density in units of :math:`M_\odot\ Mpc^{-2}`.
-        Used only when is_deltasigma=True.
-    use_shape_noise: bool
-        If `True` shape noise is included in the weight computation. It then requires
-        `shape_component{1,2}` to be provided. Default: False.
-    shape_component1: array_like
-        The measured shear (or reduced shear or ellipticity) of the source galaxies,
-        used if `use_shape_noise=True`
-    shape_component2: array_like
-        The measured shear (or reduced shear or ellipticity) of the source galaxies,
-        used if `use_shape_noise=True`
-    use_shape_error: bool
-        If `True` shape errors are included in the weight computation. It then requires
-        `shape_component{1,2}_err` to be provided. Default: False.
-    shape_component1_err: array_like
-        The measurement error on the 1st-component of ellipticity of the source galaxies,
-        used if `use_shape_error=True`
-    shape_component2_err: array_like
-        The measurement error on the 2nd-component of ellipticity of the source galaxies,
-        used if `use_shape_error=True`
-    validate_input: bool
-        Validate each input argument
-
-    Returns
-    -------
-    w_ls: array_like
-        Individual lens-source pair weights
-    """
-    if validate_input:
-        validate_argument(locals(), "sigma_c", "float_array", none_ok=True)
-        validate_argument(locals(), "shape_component1", "float_array", none_ok=True)
-        validate_argument(locals(), "shape_component2", "float_array", none_ok=True)
-        validate_argument(locals(), "shape_component1_err", "float_array", none_ok=True)
-        validate_argument(locals(), "shape_component2_err", "float_array", none_ok=True)
-        validate_argument(locals(), "use_shape_noise", bool)
-        validate_argument(locals(), "use_shape_error", bool)
-        arguments_consistency(
-            [shape_component1, shape_component2],
-            names=("shape_component1", "shape_component2"),
-            prefix="Shape components sources",
-        )
-        _validate_is_deltasigma_sigma_c(is_deltasigma, sigma_c)
-
-    # computing w_ls_geo
-    w_ls_geo = 1.0
-    if sigma_c is not None:
-        w_ls_geo /= np.array(sigma_c) ** 2
-
-    # computing w_ls_shape
-    err_e2 = 0
-
-    if use_shape_noise:
-        if shape_component1 is None or shape_component2 is None:
-            raise ValueError(
-                "With the shape noise option, the source shapes "
-                "`shape_component{1,2}` must be specified"
-            )
-        err_e2 += np.std(shape_component1) ** 2 + np.std(shape_component2) ** 2
-    if use_shape_error:
-        if shape_component1_err is None or shape_component2_err is None:
-            raise ValueError(
-                "With the shape error option, the source shape errors "
-                "`shape_component{1,2}_err` must be specified"
-            )
-        err_e2 += shape_component1_err**2
-        err_e2 += shape_component2_err**2
-
-    if hasattr(err_e2, "__len__"):
-        w_ls_shape = np.ones(len(err_e2))
-        w_ls_shape[err_e2 > 0] = 1.0 / err_e2[err_e2 > 0]
-    else:
-        w_ls_shape = 1.0 / err_e2 if err_e2 > 0 else 1.0
-
-    w_ls = w_ls_shape * w_ls_geo
-
-    return w_ls
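A sketch of shape-noise-only weights for a shear stack, using synthetic ellipticities (for :math:`\Delta \Sigma` weights one would also pass `is_deltasigma=True` and `sigma_c`):

    import numpy as np
    import clmm.dataops as da

    rng = np.random.default_rng(0)
    e1, e2 = rng.normal(0.0, 0.3, 1000), rng.normal(0.0, 0.3, 1000)

    # a single scalar weight 1 / (std(e1)^2 + std(e2)^2), common to all sources here
    w_ls = da.compute_galaxy_weights(
        use_shape_noise=True, shape_component1=e1, shape_component2=e2
    )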
-
-
-def _compute_lensing_angles_flatsky(
-    ra_lens, dec_lens, ra_source_list, dec_source_list, coordinate_system="euclidean"
-):
-    r"""Compute the angular separation between the lens and the source and the azimuthal
-    angle from the lens to the source in radians.
-
-    In the flat sky approximation, these angles are calculated using
-
-    .. math::
-        \theta = \sqrt{\left(\delta_s-\delta_l\right)^2+
-        \left(\alpha_l-\alpha_s\right)^2\cos^2(\delta_l)}
-
-        \tan\phi = \frac{\delta_s-\delta_l}{\left(\alpha_l-\alpha_s\right)\cos(\delta_l)}
-
-    For extended descriptions of parameters, see the
-    `compute_tangential_and_cross_components()` documentation.
-
-    Parameters
-    ----------
-    ra_lens: float
-        Right ascension of the lensing cluster in degrees
-    dec_lens: float
-        Declination of the lensing cluster in degrees
-    ra_source_list: array
-        Right ascensions of each source galaxy in degrees
-    dec_source_list: array
-        Declinations of each source galaxy in degrees
-    coordinate_system: str, optional
-        Coordinate system of the ellipticity components. Must be either 'celestial' or
-        'euclidean'. See https://doi.org/10.48550/arXiv.1407.7676 section 5.1 for more details.
-        Default is 'euclidean'.
-
-    Returns
-    -------
-    angsep: array
-        Angular separation between the lens and the source in radians
-    phi: array
-        Azimuthal angle from the lens to the source in radians
-    """
-    delta_ra = np.radians(ra_source_list - ra_lens)
-    # Put angles between -pi and pi
-    delta_ra -= np.round(delta_ra / (2.0 * np.pi)) * 2.0 * np.pi
-    deltax = delta_ra * np.cos(np.radians(dec_lens))
-    deltay = np.radians(dec_source_list - dec_lens)
-    # Ensure that abs(delta ra) < pi
-    # deltax[deltax >= np.pi] = deltax[deltax >= np.pi]-2.*np.pi
-    # deltax[deltax < -np.pi] = deltax[deltax < -np.pi]+2.*np.pi
-    angsep = np.sqrt(deltax**2 + deltay**2)
-    phi = np.arctan2(deltay, -deltax)
-    # Forcing phi to be zero every time angsep is zero. This is necessary due to arctan2 features.
-    if np.iterable(phi):
-        phi[angsep == 0.0] = 0.0
-    else:
-        phi = 0.0 if angsep == 0.0 else phi
-    if coordinate_system == "celestial":
-        phi = np.pi - phi
-    if np.any(angsep > np.pi / 180.0):
-        warnings.warn("Using the flat-sky approximation with separations >1 deg may be inaccurate")
-    return angsep, phi
-
-
-def _compute_lensing_angles_astropy(
-    ra_lens, dec_lens, ra_source_list, dec_source_list, coordinate_system="euclidean"
-):
-    r"""Compute the angular separation between the lens and the source and the azimuthal
-    angle from the lens to the source in radians.
-
-    Parameters
-    ----------
-    ra_lens: float
-        Right ascension of the lensing cluster in degrees
-    dec_lens: float
-        Declination of the lensing cluster in degrees
-    ra_source_list: array
-        Right ascensions of each source galaxy in degrees
-    dec_source_list: array
-        Declinations of each source galaxy in degrees
-    coordinate_system: str, optional
-        Coordinate system of the ellipticity components. Must be either 'celestial' or
-        'euclidean'. See https://doi.org/10.48550/arXiv.1407.7676 section 5.1 for more details.
-        Default is 'euclidean'.
-
-    Returns
-    -------
-    angsep: array
-        Angular separation between the lens and the source in radians
-    phi: array
-        Azimuthal angle from the lens to the source in radians
-    """
-    sk_lens = SkyCoord(ra_lens * u.deg, dec_lens * u.deg, frame="icrs")
-    sk_src = SkyCoord(ra_source_list * u.deg, dec_source_list * u.deg, frame="icrs")
-    angsep, phi = sk_lens.separation(sk_src).rad, sk_lens.position_angle(sk_src).rad
-    # Transformations for phi to have same orientation as _compute_lensing_angles_flatsky
-    phi += 0.5 * np.pi
-    if np.iterable(phi):
-        phi[phi > np.pi] -= 2 * np.pi
-        phi[angsep == 0] = 0
-    else:
-        phi -= 2 * np.pi if phi > np.pi else 0
-        phi = 0 if angsep == 0 else phi
-    if coordinate_system == "celestial":
-        phi = np.pi - phi
-    return angsep, phi
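The two geometries can be cross-checked directly; a sketch reusing the synthetic positions from the unit tests, with `da` assumed to expose the private helpers as in `tests/test_dataops.py` below:

    import numpy as np

    ra_l, dec_l = 161.32, 51.49
    ra_s = np.array([161.29, 161.34])
    dec_s = np.array([51.45, 51.55])

    theta_flat, phi_flat = da._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)
    theta_curve, phi_curve = da._compute_lensing_angles_astropy(ra_l, dec_l, ra_s, dec_s)
    # separations are ~1e-3 rad, far below the 1 deg flat-sky warning threshold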
-
-
-def _compute_tangential_shear(shear1, shear2, phi):
-    r"""Compute the tangential shear given the two shears and azimuthal positions for
-    a single source or list of sources.
-
-    We compute the tangential shear following Eq. 7 of Schrabback et al. 2018, arXiv:1611.03866
-
-    .. math::
-        g_t = -\left( g_1\cos\left(2\phi\right)+g_2\sin\left(2\phi\right)\right)
-
-    For extended descriptions of parameters, see the
-    `compute_tangential_and_cross_components()` documentation.
-    """
-    return -(shear1 * np.cos(2.0 * phi) + shear2 * np.sin(2.0 * phi))
-
-
-def _compute_cross_shear(shear1, shear2, phi):
-    r"""Compute the cross shear given the two shears and azimuthal position for a single
-    source or list of sources.
-
-    We compute the cross shear following Eq. 8 of Schrabback et al. 2018, arXiv:1611.03866,
-    also checked against arXiv:0509252
-
-    .. math::
-        g_x = g_1 \sin\left(2\phi\right)-g_2\cos\left(2\phi\right)
-
-    For extended descriptions of parameters, see the
-    `compute_tangential_and_cross_components()` documentation.
-    """
-    return shear1 * np.sin(2.0 * phi) - shear2 * np.cos(2.0 * phi)
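The sign conventions are easiest to read off numerically; this sketch evaluates Eq. 7 and Eq. 8 at the reference values used in `tests/test_dataops.py` below:

    import numpy as np

    shear1, shear2, phi = 0.15, 0.08, 0.52
    g_t = -(shear1 * np.cos(2 * phi) + shear2 * np.sin(2 * phi))  # -0.14492537676438383
    g_x = shear1 * np.sin(2 * phi) - shear2 * np.cos(2 * phi)  # 0.08886301350787848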
-
-
-def make_radial_profile(
-    components,
-    angsep,
-    angsep_units,
-    bin_units,
-    bins=10,
-    components_error=None,
-    error_model="ste",
-    include_empty_bins=False,
-    return_binnumber=False,
-    cosmo=None,
-    z_lens=None,
-    validate_input=True,
-    weights=None,
-):
-    r"""Compute the angular profile of given components
-
-    We assume that the cluster object contains information on the cross and
-    tangential shears or ellipticities and angular separation of the source galaxies
-
-    This function can be called in two ways:
-
-    1. Pass explicit arguments::
-
-        make_radial_profile([component1, component2], distances, 'radians')
-
-    2. Call it as a method of a GalaxyCluster instance and specify `bin_units`::
-
-        cluster.make_radial_profile('Mpc')
-
-    Parameters
-    ----------
-    components: list of arrays
-        List of arrays to be binned in the radial profile
-    angsep: array
-        Transversal distances between the sources and the lens
-    angsep_units : str
-        Units of the calculated separation of the source galaxies
-        Allowed Options = ["radians"]
-    bin_units : str
-        Units to use for the radial bins of the radial profile
-        Allowed Options = ["radians", "deg", "arcmin", "arcsec", "kpc", "Mpc"]
-        (letter case independent)
-    bins : array_like, optional
-        User defined bins to use for the shear profile. If a list is provided, use that as
-        the bin edges. If a scalar is provided, create that many equally spaced bins between
-        the minimum and maximum angular separations in bin_units. If nothing is provided,
-        default to 10 equally spaced bins.
-    components_error: list of arrays, None
-        List of errors for input arrays
-    error_model : str, optional
-        Statistical error model to use for y uncertainties. (letter case independent)
-            `ste` - Standard error [=std/sqrt(n) in unweighted computation] (Default).
-            `std` - Standard deviation.
-    include_empty_bins: bool, optional
-        Also include empty bins in the returned table
-    return_binnumber: bool, optional
-        Also returns the indices of the bins for each object
-    cosmo : CLMM.Cosmology
-        CLMM Cosmology object to convert angular separations to physical distances
-    z_lens: array, optional
-        Redshift of the lens
-    validate_input: bool
-        Validate each input argument
-    weights: array-like, optional
-        Array of individual galaxy weights. If specified, the radial binned profile is
-        computed using a weighted average
-
-    Returns
-    -------
-    profile : GCData
-        Output table containing the radius grid points, the profile of the components `p_i`,
-        errors `p_i_err` and number of sources. The errors are defined as the standard errors in
-        each bin.
-    binnumber: 1-D ndarray of ints, optional
-        Indices of the bins (corresponding to `xbins`) in which each value
-        of `xvals` belongs. Same length as `yvals`. A binnumber of `i` means the
-        corresponding value is between (xbins[i-1], xbins[i]).
-
-    Notes
-    -----
-    This is an example of a place where the cosmology-dependence can be sequestered to another
-    module.
-    """
-    # pylint: disable-msg=too-many-locals
-    if validate_input:
-        validate_argument(locals(), "angsep", "float_array")
-        validate_argument(locals(), "angsep_units", str)
-        validate_argument(locals(), "bin_units", str)
-        validate_argument(locals(), "include_empty_bins", bool)
-        validate_argument(locals(), "return_binnumber", bool)
-        validate_argument(locals(), "z_lens", "float_array", none_ok=True)
-        comp_dict = {f"components[{i}]": comp for i, comp in enumerate(components)}
-        arguments_consistency(components, names=comp_dict.keys(), prefix="Input components")
-        for component in comp_dict:
-            validate_argument(comp_dict, component, "float_array")
-    # Check to see if we need to do a unit conversion
-    if angsep_units != bin_units:
-        source_seps = convert_units(angsep, angsep_units, bin_units, redshift=z_lens, cosmo=cosmo)
-    else:
-        source_seps = angsep
-    # Make bins if they are not provided
-    if not hasattr(bins, "__len__"):
-        bins = make_bins(np.min(source_seps), np.max(source_seps), bins)
-    # Create output table
-    profile_table = GCData(
-        [bins[:-1], np.zeros(len(bins) - 1), bins[1:]],
-        names=("radius_min", "radius", "radius_max"),
-        meta={"bin_units": bin_units},  # Add metadata
-    )
-    # Compute the binned averages and associated errors
-    for i, component in enumerate(components):
-        r_avg, comp_avg, comp_err, nsrc, binnumber, wts_sum = compute_radial_averages(
-            source_seps,
-            component,
-            xbins=bins,
-            yerr=None if components_error is None else components_error[i],
-            error_model=error_model,
-            weights=weights,
-        )
-        profile_table[f"p_{i}"] = comp_avg
-        profile_table[f"p_{i}_err"] = comp_err
-    profile_table["radius"] = r_avg
-    profile_table["n_src"] = nsrc
-    profile_table["weights_sum"] = wts_sum
-    # Drop empty bins unless they were requested
-    if not include_empty_bins:
-        profile_table = profile_table[nsrc > 1]
-    if return_binnumber:
-        return profile_table, binnumber
-    return profile_table
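A sketch of the binning call with synthetic separations and components; the separations are generated directly in the target bin units, so no cosmology or lens redshift is needed:

    import numpy as np
    import clmm.dataops as da

    rng = np.random.default_rng(1)
    theta = rng.uniform(1.0e-4, 1.0e-2, 500)  # separations in radians
    g_t, g_x = rng.normal(0.0, 0.02, (2, 500))

    profile = da.make_radial_profile([g_t, g_x], theta, "radians", "radians", bins=5)
    # GCData with radius_min/radius/radius_max, p_0, p_0_err, p_1, p_1_err, n_src, weights_sum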
-
-
-def make_stacked_radial_profile(angsep, weights, components):
-    """Compute stacked profile, and mean separation distances.
-
-    Parameters
-    ----------
-    angsep: 2d array
-        Transversal distances corresponding to each object with shape `n_obj, n_rad_bins`.
-    weights: 2d array
-        Weights corresponding to each object with shape `n_obj, n_rad_bins`.
-    components: list of 2d arrays
-        List of 2d properties of each array to be stacked with shape
-        `n_components, n_obj, n_rad_bins`.
-
-    Returns
-    -------
-    stacked_angsep: array
-        Mean transversal distance in each radial bin.
-    stacked_components: list of arrays
-        List of stacked components.
-    """
-    stacked_angsep = np.average(angsep, axis=0, weights=None)
-    stacked_components = [
-        np.average(component, axis=0, weights=weights) for component in components
-    ]
-    return stacked_angsep, stacked_components
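To close the review of the moved module, a sketch of stacking per-cluster profiles, with synthetic `n_obj x n_rad_bins` inputs:

    import numpy as np
    import clmm.dataops as da

    rng = np.random.default_rng(2)
    angsep = np.tile(np.linspace(0.5, 5.0, 8), (3, 1))  # 3 clusters, 8 radial bins
    weights = np.ones((3, 8))
    gt_profiles = rng.normal(0.0, 0.1, (3, 8))

    r_stack, (gt_stack,) = da.make_stacked_radial_profile(angsep, weights, [gt_profiles])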
diff --git a/tests/test_clusterensemble.py b/tests/test_clusterensemble.py
index e5ae00d62..63c3d3958 100644
--- a/tests/test_clusterensemble.py
+++ b/tests/test_clusterensemble.py
@@ -129,9 +129,7 @@ def test_covariance():
     for i in range(n_catalogs):
         # generate random catalog
         e1, e2 = np.random.randn(ngals) * 0.001, np.random.randn(ngals) * 0.001
-        et, ex = da.data_operations._compute_tangential_shear(
-            e1, e2, phi
-        ), da.data_operations._compute_cross_shear(e1, e2, phi)
+        et, ex = da._compute_tangential_shear(e1, e2, phi), da._compute_cross_shear(e1, e2, phi)
         z_gal = np.random.random(ngals) * (3 - 1.1) + 1.1
         id_gal = np.arange(ngals)
         theta_gal = np.linspace(0, 1, ngals) * (thetamax - thetamin) + thetamin
diff --git a/tests/test_dataops.py b/tests/test_dataops.py
index ad2dc445f..3e669aad0 100644
--- a/tests/test_dataops.py
+++ b/tests/test_dataops.py
@@ -16,53 +16,43 @@ def test_compute_cross_shear():
     """test compute cross shear"""
     shear1, shear2, phi = 0.15, 0.08, 0.52
     expected_cross_shear = 0.08886301350787848
-    cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi)
+    cross_shear = da._compute_cross_shear(shear1, shear2, phi)
     assert_allclose(cross_shear, expected_cross_shear)
     shear1 = np.array([0.15, 0.40])
     shear2 = np.array([0.08, 0.30])
     phi = np.array([0.52, 1.23])
     expected_cross_shear = [0.08886301350787848, 0.48498333705834484]
-    cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi)
+    cross_shear = da._compute_cross_shear(shear1, shear2, phi)
     assert_allclose(cross_shear, expected_cross_shear)
     # Edge case tests
-    assert_allclose(da.data_operations._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE)
-    assert_allclose(
-        da.data_operations._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE
-    )
-    assert_allclose(da.data_operations._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE)
-    assert_allclose(
-        da.data_operations._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE
-    )
-    assert_allclose(
-        da.data_operations._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE
-    )
-    assert_allclose(da.data_operations._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
+    assert_allclose(da._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE)
+    assert_allclose(da._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE)
+    assert_allclose(da._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE)
+    assert_allclose(da._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE)
+    assert_allclose(da._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE)
+    assert_allclose(da._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)


 def test_compute_tangential_shear():
     """test compute tangential shear"""
     shear1, shear2, phi = 0.15, 0.08, 0.52
     expected_tangential_shear = -0.14492537676438383
-    tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi)
+    tangential_shear = da._compute_tangential_shear(shear1, shear2, phi)
     assert_allclose(tangential_shear, expected_tangential_shear)
     shear1 = np.array([0.15, 0.40])
     shear2 = np.array([0.08, 0.30])
     phi = np.array([0.52, 1.23])
     expected_tangential_shear = [-0.14492537676438383, 0.1216189244145496]
-    tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi)
+    tangential_shear = da._compute_tangential_shear(shear1, shear2, phi)
     assert_allclose(tangential_shear, expected_tangential_shear)

     # test for reasonable values
-    assert_allclose(
-        da.data_operations._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE
-    )
-    assert_allclose(
-        da.data_operations._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE
-    )
-    assert_allclose(da.data_operations._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
+    assert_allclose(da._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE)
+    assert_allclose(da._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE)
+    assert_allclose(da._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)


 def test_compute_lensing_angles_flatsky():
@@ -73,7 +63,7 @@ def test_compute_lensing_angles_flatsky():
     # Ensure that we throw a warning with >1 deg separation
     assert_warns(
         UserWarning,
-        da.data_operations._compute_lensing_angles_flatsky,
+        da._compute_lensing_angles_flatsky,
         ra_l,
         dec_l,
         np.array([151.32, 161.34]),
@@ -83,7 +73,7 @@ def test_compute_lensing_angles_flatsky():
     # Test outputs for reasonable values
     ra_l, dec_l = 161.32, 51.49
     ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
-    thetas, phis = da.data_operations._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)
+    thetas, phis = da._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)

     assert_allclose(
         thetas,
@@ -101,9 +91,7 @@ def test_compute_lensing_angles_flatsky():

     # lens and source at the same ra
     assert_allclose(
-        da.data_operations._compute_lensing_angles_flatsky(
-            ra_l, dec_l, np.array([161.32, 161.34]), dec_s
-        ),
+        da._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([161.32, 161.34]), dec_s),
         [
             [0.00069813170079771690, 0.00106951489719733675],
             [-1.57079632679489655800, 1.77544123918164542530],
         ],
@@ -114,9 +102,7 @@ def test_compute_lensing_angles_flatsky():

     # lens and source at the same dec
     assert_allclose(
-        da.data_operations._compute_lensing_angles_flatsky(
-            ra_l, dec_l, ra_s, np.array([51.49, 51.55])
-        ),
+        da._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, np.array([51.49, 51.55])),
         [
             [0.00032601941539388962, 0.00106951489719733675],
             [0.00000000000000000000, 1.77544123918164542530],
         ],
@@ -127,7 +113,7 @@ def test_compute_lensing_angles_flatsky():

     # lens and source at the same ra and dec
     assert_allclose(
-        da.data_operations._compute_lensing_angles_flatsky(
+        da._compute_lensing_angles_flatsky(
             ra_l, dec_l, np.array([ra_l, 161.34]), np.array([dec_l, 51.55])
         ),
         [
@@ -140,9 +126,7 @@ def test_compute_lensing_angles_flatsky():

     # angles over the branch cut between 0 and 360
     assert_allclose(
-        da.data_operations._compute_lensing_angles_flatsky(
-            0.1, dec_l, np.array([359.9, 359.5]), dec_s
-        ),
+        da._compute_lensing_angles_flatsky(0.1, dec_l, np.array([359.9, 359.5]), dec_s),
         [
             [0.0022828333888309108, 0.006603944760273219],
             [-0.31079754672938664, 0.15924369771830643],
         ],
@@ -154,17 +138,15 @@ def test_compute_lensing_angles_flatsky():

     # coordinate_system conversion
     ra_l, dec_l = 161.32, 51.49
     ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
-    thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_flatsky(
+    thetas_pixel, phis_pixel = da._compute_lensing_angles_flatsky(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean"
     )
-    thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_flatsky(
+    thetas_sky, phis_sky = da._compute_lensing_angles_flatsky(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial"
     )

     assert_allclose(
-        da.data_operations._compute_lensing_angles_flatsky(
-            -180, dec_l, np.array([180.1, 179.7]), dec_s
-        ),
+        da._compute_lensing_angles_flatsky(-180, dec_l, np.array([180.1, 179.7]), dec_s),
         [[0.0012916551296819666, 0.003424250083245557], [-2.570568636904587, 0.31079754672944354]],
         TOLERANCE["rtol"],
         err_msg="Failure when ra_l and ra_s are the same but one is defined negative",
@@ -191,10 +173,10 @@ def test_compute_lensing_angles_astropy():
     # coordinate_system conversion
     ra_l, dec_l = 161.32, 51.49
     ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
-    thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_astropy(
+    thetas_pixel, phis_pixel = da._compute_lensing_angles_astropy(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean"
    )
-    thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_astropy(
+    thetas_sky, phis_sky = da._compute_lensing_angles_astropy(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial"
     )

From 1ca862f63e7a217289f51573f7a61a96dd70e37f Mon Sep 17 00:00:00 2001
From: m-aguena
Date: Mon, 22 Jul 2024 01:03:11 -0700
Subject: [PATCH 15/15] Update version to 1.13.0

---
 clmm/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clmm/__init__.py b/clmm/__init__.py
index 0111be973..d998efc18 100644
--- a/clmm/__init__.py
+++ b/clmm/__init__.py
@@ -26,4 +26,4 @@
 )
 from . import support

-__version__ = "1.12.5"
+__version__ = "1.13.0"