Skip to content

Commit

Permalink
rename clmm/dataops/ops.py -> clmm/dataops/data_operations.py
Browse files Browse the repository at this point in the history
  • Loading branch information
m-aguena committed Jul 18, 2024
1 parent 0c56b9e commit 7bf1696
Show file tree
Hide file tree
Showing 6 changed files with 82 additions and 51 deletions.
2 changes: 1 addition & 1 deletion clmm/dataops/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""Data operation for polar/azimuthal averages in radial bins and weights"""
from .ops import (
from .data_operations import (
compute_tangential_and_cross_components,
compute_background_probability,
compute_galaxy_weights,
Expand Down
File renamed without changes.
4 changes: 2 additions & 2 deletions tests/test_clusterensemble.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,9 @@ def test_covariance():
for i in range(n_catalogs):
# generate random catalog
e1, e2 = np.random.randn(ngals) * 0.001, np.random.randn(ngals) * 0.001
et, ex = da.ops._compute_tangential_shear(e1, e2, phi), da.ops._compute_cross_shear(
et, ex = da.data_operations._compute_tangential_shear(
e1, e2, phi
)
), da.data_operations._compute_cross_shear(e1, e2, phi)
z_gal = np.random.random(ngals) * (3 - 1.1) + 1.1
id_gal = np.arange(ngals)
theta_gal = np.linspace(0, 1, ngals) * (thetamax - thetamin) + thetamin
Expand Down
66 changes: 42 additions & 24 deletions tests/test_dataops.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,43 +16,53 @@ def test_compute_cross_shear():
"""test compute cross shear"""
shear1, shear2, phi = 0.15, 0.08, 0.52
expected_cross_shear = 0.08886301350787848
cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi)
cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi)
assert_allclose(cross_shear, expected_cross_shear)

shear1 = np.array([0.15, 0.40])
shear2 = np.array([0.08, 0.30])
phi = np.array([0.52, 1.23])
expected_cross_shear = [0.08886301350787848, 0.48498333705834484]
cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi)
cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi)
assert_allclose(cross_shear, expected_cross_shear)

# Edge case tests
assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE)
assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE)
assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE)
assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE)
assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE)
assert_allclose(da.ops._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
assert_allclose(da.data_operations._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE)
assert_allclose(
da.data_operations._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE
)
assert_allclose(da.data_operations._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE)
assert_allclose(
da.data_operations._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE
)
assert_allclose(
da.data_operations._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE
)
assert_allclose(da.data_operations._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)


def test_compute_tangential_shear():
"""test compute tangential shear"""
shear1, shear2, phi = 0.15, 0.08, 0.52
expected_tangential_shear = -0.14492537676438383
tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi)
tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi)
assert_allclose(tangential_shear, expected_tangential_shear)

shear1 = np.array([0.15, 0.40])
shear2 = np.array([0.08, 0.30])
phi = np.array([0.52, 1.23])
expected_tangential_shear = [-0.14492537676438383, 0.1216189244145496]
tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi)
tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi)
assert_allclose(tangential_shear, expected_tangential_shear)

# test for reasonable values
assert_allclose(da.ops._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE)
assert_allclose(da.ops._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE)
assert_allclose(da.ops._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
assert_allclose(
da.data_operations._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE
)
assert_allclose(
da.data_operations._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE
)
assert_allclose(da.data_operations._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)


def test_compute_lensing_angles_flatsky():
Expand All @@ -63,7 +73,7 @@ def test_compute_lensing_angles_flatsky():
# Ensure that we throw a warning with >1 deg separation
assert_warns(
UserWarning,
da.ops._compute_lensing_angles_flatsky,
da.data_operations._compute_lensing_angles_flatsky,
ra_l,
dec_l,
np.array([151.32, 161.34]),
Expand All @@ -73,7 +83,7 @@ def test_compute_lensing_angles_flatsky():
# Test outputs for reasonable values
ra_l, dec_l = 161.32, 51.49
ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
thetas, phis = da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)
thetas, phis = da.data_operations._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)

assert_allclose(
thetas,
Expand All @@ -91,7 +101,9 @@ def test_compute_lensing_angles_flatsky():

# lens and source at the same ra
assert_allclose(
da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([161.32, 161.34]), dec_s),
da.data_operations._compute_lensing_angles_flatsky(
ra_l, dec_l, np.array([161.32, 161.34]), dec_s
),
[
[0.00069813170079771690, 0.00106951489719733675],
[-1.57079632679489655800, 1.77544123918164542530],
Expand All @@ -102,7 +114,9 @@ def test_compute_lensing_angles_flatsky():

# lens and source at the same dec
assert_allclose(
da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, np.array([51.49, 51.55])),
da.data_operations._compute_lensing_angles_flatsky(
ra_l, dec_l, ra_s, np.array([51.49, 51.55])
),
[
[0.00032601941539388962, 0.00106951489719733675],
[0.00000000000000000000, 1.77544123918164542530],
Expand All @@ -113,7 +127,7 @@ def test_compute_lensing_angles_flatsky():

# lens and source at the same ra and dec
assert_allclose(
da.ops._compute_lensing_angles_flatsky(
da.data_operations._compute_lensing_angles_flatsky(
ra_l, dec_l, np.array([ra_l, 161.34]), np.array([dec_l, 51.55])
),
[
Expand All @@ -126,7 +140,9 @@ def test_compute_lensing_angles_flatsky():

# angles over the branch cut between 0 and 360
assert_allclose(
da.ops._compute_lensing_angles_flatsky(0.1, dec_l, np.array([359.9, 359.5]), dec_s),
da.data_operations._compute_lensing_angles_flatsky(
0.1, dec_l, np.array([359.9, 359.5]), dec_s
),
[
[0.0022828333888309108, 0.006603944760273219],
[-0.31079754672938664, 0.15924369771830643],
Expand All @@ -138,15 +154,17 @@ def test_compute_lensing_angles_flatsky():
# coordinate_system conversion
ra_l, dec_l = 161.32, 51.49
ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
thetas_pixel, phis_pixel = da.ops._compute_lensing_angles_flatsky(
thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_flatsky(
ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean"
)
thetas_sky, phis_sky = da.ops._compute_lensing_angles_flatsky(
thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_flatsky(
ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial"
)

assert_allclose(
da.ops._compute_lensing_angles_flatsky(-180, dec_l, np.array([180.1, 179.7]), dec_s),
da.data_operations._compute_lensing_angles_flatsky(
-180, dec_l, np.array([180.1, 179.7]), dec_s
),
[[0.0012916551296819666, 0.003424250083245557], [-2.570568636904587, 0.31079754672944354]],
TOLERANCE["rtol"],
err_msg="Failure when ra_l and ra_s are the same but one is defined negative",
Expand All @@ -173,10 +191,10 @@ def test_compute_lensing_angles_astropy():
# coordinate_system conversion
ra_l, dec_l = 161.32, 51.49
ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
thetas_pixel, phis_pixel = da.ops._compute_lensing_angles_astropy(
thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_astropy(
ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean"
)
thetas_sky, phis_sky = da.ops._compute_lensing_angles_astropy(
thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_astropy(
ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial"
)

Expand Down
8 changes: 6 additions & 2 deletions tests/test_mockdata.py
Original file line number Diff line number Diff line change
Expand Up @@ -264,13 +264,17 @@ def test_shapenoise():

# Verify that the shape noise is Gaussian around 0 (for the very small shear here)
sigma = 0.25
data = mock.generate_galaxy_catalog(10**12.0, 0.3, 4, cosmo, 0.8, ngals=50000, shapenoise=sigma)
data = mock.generate_galaxy_catalog(
10**12.0, 0.3, 4, cosmo, 0.8, ngals=50000, shapenoise=sigma
)
# Check that there are no galaxies with |e|>1
assert_equal(np.count_nonzero((data["e1"] > 1) | (data["e1"] < -1)), 0)
assert_equal(np.count_nonzero((data["e2"] > 1) | (data["e2"] < -1)), 0)
    # Check that shape noise is Gaussian with correct std dev
bins = np.arange(-1, 1.1, 0.1)
gauss = 5000 * np.exp(-0.5 * (bins[:-1] + 0.05) ** 2 / sigma**2) / (sigma * np.sqrt(2 * np.pi))
gauss = (
5000 * np.exp(-0.5 * (bins[:-1] + 0.05) ** 2 / sigma**2) / (sigma * np.sqrt(2 * np.pi))
)
assert_allclose(np.histogram(data["e1"], bins=bins)[0], gauss, atol=50, rtol=0.05)
assert_allclose(np.histogram(data["e2"], bins=bins)[0], gauss, atol=50, rtol=0.05)

Expand Down
53 changes: 31 additions & 22 deletions tests/test_theory.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,11 @@
from clmm.constants import Constants as clc
from clmm.galaxycluster import GalaxyCluster
from clmm import GCData
from clmm.utils import compute_beta_s_square_mean_from_distribution, compute_beta_s_mean_from_distribution, compute_beta_s_func
from clmm.utils import (
compute_beta_s_square_mean_from_distribution,
compute_beta_s_mean_from_distribution,
compute_beta_s_func,
)
from clmm.redshift.distributions import chang2013, desc_srd

TOLERANCE = {"rtol": 1.0e-8}
Expand Down Expand Up @@ -213,7 +217,7 @@ def test_compute_reduced_shear(modeling_data):
assert_allclose(
theo.compute_reduced_shear_from_convergence(np.array(shear), np.array(convergence)),
np.array(truth),
**TOLERANCE
**TOLERANCE,
)


Expand Down Expand Up @@ -254,7 +258,7 @@ def helper_profiles(func):
assert_allclose(
func(r3d, mdelta, cdelta, z_cl, cclcosmo, halo_profile_model="nfw"),
defaulttruth,
**TOLERANCE
**TOLERANCE,
)
assert_allclose(
func(r3d, mdelta, cdelta, z_cl, cclcosmo, massdef="mean"), defaulttruth, **TOLERANCE
Expand All @@ -263,7 +267,7 @@ def helper_profiles(func):
assert_allclose(
func(r3d, mdelta, cdelta, z_cl, cclcosmo, halo_profile_model="NFW"),
defaulttruth,
**TOLERANCE
**TOLERANCE,
)
assert_allclose(
func(r3d, mdelta, cdelta, z_cl, cclcosmo, massdef="MEAN"), defaulttruth, **TOLERANCE
Expand Down Expand Up @@ -375,22 +379,25 @@ def test_profiles(modeling_data, profile_init):

# Test use_projected_quad
if mod.backend == "ccl" and profile_init == "einasto":
if hasattr(mod.hdpm, 'projected_quad'):
if hasattr(mod.hdpm, "projected_quad"):
mod.set_projected_quad(True)
assert_allclose(
mod.eval_surface_density(
cfg["SIGMA_PARAMS"]["r_proj"], cfg["SIGMA_PARAMS"]["z_cl"], verbose=True
),
cfg["numcosmo_profiles"]["Sigma"],
reltol*1e-1,
reltol * 1e-1,
)
assert_allclose(
theo.compute_surface_density(
cosmo=cosmo, **cfg["SIGMA_PARAMS"], alpha_ein=alpha_ein, verbose=True,
cosmo=cosmo,
**cfg["SIGMA_PARAMS"],
alpha_ein=alpha_ein,
verbose=True,
use_projected_quad=True,
),
cfg["numcosmo_profiles"]["Sigma"],
reltol*1e-1,
reltol * 1e-1,
)

delattr(mod.hdpm, "projected_quad")
Expand Down Expand Up @@ -547,11 +554,13 @@ def test_shear_convergence_unittests(modeling_data, profile_init):
cfg_inf = load_validation_config()

# compute some values
cfg_inf['GAMMA_PARAMS']['z_src'] = 1000.
cfg_inf["GAMMA_PARAMS"]["z_src"] = 1000.0
beta_s_mean = compute_beta_s_mean_from_distribution(
cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo)
cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo
)
beta_s_square_mean = compute_beta_s_square_mean_from_distribution(
cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo)
cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo
)

gammat_inf = theo.compute_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])
kappa_inf = theo.compute_convergence(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])
Expand Down Expand Up @@ -581,45 +590,45 @@ def test_shear_convergence_unittests(modeling_data, profile_init):
theo.compute_reduced_tangential_shear,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
approx="notvalid"
approx="notvalid",
)
assert_raises(
ValueError,
theo.compute_magnification,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
approx="notvalid"
approx="notvalid",
)
assert_raises(
ValueError,
theo.compute_magnification_bias,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
alpha=alpha,
approx="notvalid"
approx="notvalid",
)
# test KeyError from invalid key in integ_kwargs
assert_raises(
KeyError,
theo.compute_reduced_tangential_shear,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
integ_kwargs={"notavalidkey": 0.0}
integ_kwargs={"notavalidkey": 0.0},
)
assert_raises(
KeyError,
theo.compute_magnification,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
integ_kwargs={"notavalidkey": 0.0}
integ_kwargs={"notavalidkey": 0.0},
)
assert_raises(
KeyError,
theo.compute_magnification_bias,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
alpha=alpha,
integ_kwargs={"notavalidkey": 0.0}
integ_kwargs={"notavalidkey": 0.0},
)
# test ValueError from unsupported z_src_info
cfg_inf["GAMMA_PARAMS"]["z_src_info"] = "notvalid"
Expand All @@ -632,22 +641,22 @@ def test_shear_convergence_unittests(modeling_data, profile_init):
theo.compute_reduced_tangential_shear,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
approx="order1"
approx="order1",
)
assert_raises(
ValueError,
theo.compute_magnification,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
approx="order1"
approx="order1",
)
assert_raises(
ValueError,
theo.compute_magnification_bias,
cosmo=cosmo,
**cfg_inf["GAMMA_PARAMS"],
alpha=2,
approx="order1"
approx="order1",
)

# test z_src_info = 'beta'
Expand Down Expand Up @@ -1065,7 +1074,7 @@ def test_compute_magnification_bias(modeling_data):
assert_allclose(
theo.compute_magnification_bias_from_magnification(magnification[0], alpha[0]),
truth[0][0],
**TOLERANCE
**TOLERANCE,
)
assert_allclose(
theo.compute_magnification_bias_from_magnification(magnification, alpha), truth, **TOLERANCE
Expand All @@ -1075,7 +1084,7 @@ def test_compute_magnification_bias(modeling_data):
np.array(magnification), np.array(alpha)
),
np.array(truth),
**TOLERANCE
**TOLERANCE,
)


Expand Down

0 comments on commit 7bf1696

Please sign in to comment.