diff --git a/clmm/dataops/__init__.py b/clmm/dataops/__init__.py
index 9028b1fac..8b345f7e5 100644
--- a/clmm/dataops/__init__.py
+++ b/clmm/dataops/__init__.py
@@ -1,5 +1,5 @@
 """Data operation for polar/azimuthal averages in radial bins and weights"""
-from .ops import (
+from .data_operations import (
     compute_tangential_and_cross_components,
     compute_background_probability,
     compute_galaxy_weights,
diff --git a/clmm/dataops/ops.py b/clmm/dataops/data_operations.py
similarity index 100%
rename from clmm/dataops/ops.py
rename to clmm/dataops/data_operations.py
diff --git a/tests/test_clusterensemble.py b/tests/test_clusterensemble.py
index 4a49db065..e5ae00d62 100644
--- a/tests/test_clusterensemble.py
+++ b/tests/test_clusterensemble.py
@@ -129,9 +129,9 @@ def test_covariance():
     for i in range(n_catalogs):
         # generate random catalog
         e1, e2 = np.random.randn(ngals) * 0.001, np.random.randn(ngals) * 0.001
-        et, ex = da.ops._compute_tangential_shear(e1, e2, phi), da.ops._compute_cross_shear(
+        et, ex = da.data_operations._compute_tangential_shear(
             e1, e2, phi
-        )
+        ), da.data_operations._compute_cross_shear(e1, e2, phi)
         z_gal = np.random.random(ngals) * (3 - 1.1) + 1.1
         id_gal = np.arange(ngals)
         theta_gal = np.linspace(0, 1, ngals) * (thetamax - thetamin) + thetamin
diff --git a/tests/test_dataops.py b/tests/test_dataops.py
index 784e0d56c..ad2dc445f 100644
--- a/tests/test_dataops.py
+++ b/tests/test_dataops.py
@@ -16,43 +16,53 @@ def test_compute_cross_shear():
     """test compute cross shear"""
     shear1, shear2, phi = 0.15, 0.08, 0.52
     expected_cross_shear = 0.08886301350787848
-    cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi)
+    cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi)
     assert_allclose(cross_shear, expected_cross_shear)
 
     shear1 = np.array([0.15, 0.40])
     shear2 = np.array([0.08, 0.30])
     phi = np.array([0.52, 1.23])
     expected_cross_shear = [0.08886301350787848, 0.48498333705834484]
-    cross_shear = da.ops._compute_cross_shear(shear1, shear2, phi)
+    cross_shear = da.data_operations._compute_cross_shear(shear1, shear2, phi)
     assert_allclose(cross_shear, expected_cross_shear)
 
     # Edge case tests
-    assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE)
-    assert_allclose(da.ops._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE)
-    assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE)
-    assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE)
-    assert_allclose(da.ops._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE)
-    assert_allclose(da.ops._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
+    assert_allclose(da.data_operations._compute_cross_shear(100.0, 0.0, 0.0), 0.0, **TOLERANCE)
+    assert_allclose(
+        da.data_operations._compute_cross_shear(100.0, 0.0, np.pi / 2), 0.0, **TOLERANCE
+    )
+    assert_allclose(da.data_operations._compute_cross_shear(0.0, 100.0, 0.0), -100.0, **TOLERANCE)
+    assert_allclose(
+        da.data_operations._compute_cross_shear(0.0, 100.0, np.pi / 2), 100.0, **TOLERANCE
+    )
+    assert_allclose(
+        da.data_operations._compute_cross_shear(0.0, 100.0, np.pi / 4.0), 0.0, **TOLERANCE
+    )
+    assert_allclose(da.data_operations._compute_cross_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
 
 
 def test_compute_tangential_shear():
     """test compute tangential shear"""
     shear1, shear2, phi = 0.15, 0.08, 0.52
     expected_tangential_shear = -0.14492537676438383
-    tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi)
+    tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi)
     assert_allclose(tangential_shear, expected_tangential_shear)
 
     shear1 = np.array([0.15, 0.40])
     shear2 = np.array([0.08, 0.30])
     phi = np.array([0.52, 1.23])
     expected_tangential_shear = [-0.14492537676438383, 0.1216189244145496]
-    tangential_shear = da.ops._compute_tangential_shear(shear1, shear2, phi)
+    tangential_shear = da.data_operations._compute_tangential_shear(shear1, shear2, phi)
     assert_allclose(tangential_shear, expected_tangential_shear)
 
     # test for reasonable values
-    assert_allclose(da.ops._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE)
-    assert_allclose(da.ops._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE)
-    assert_allclose(da.ops._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
+    assert_allclose(
+        da.data_operations._compute_tangential_shear(100.0, 0.0, 0.0), -100.0, **TOLERANCE
+    )
+    assert_allclose(
+        da.data_operations._compute_tangential_shear(0.0, 100.0, np.pi / 4.0), -100.0, **TOLERANCE
+    )
+    assert_allclose(da.data_operations._compute_tangential_shear(0.0, 0.0, 0.3), 0.0, **TOLERANCE)
 
 
 def test_compute_lensing_angles_flatsky():
@@ -63,7 +73,7 @@
     # Ensure that we throw a warning with >1 deg separation
     assert_warns(
         UserWarning,
-        da.ops._compute_lensing_angles_flatsky,
+        da.data_operations._compute_lensing_angles_flatsky,
         ra_l,
         dec_l,
         np.array([151.32, 161.34]),
@@ -73,7 +83,7 @@
     # Test outputs for reasonable values
     ra_l, dec_l = 161.32, 51.49
     ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
-    thetas, phis = da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)
+    thetas, phis = da.data_operations._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)
 
     assert_allclose(
         thetas,
@@ -91,7 +101,9 @@
 
     # lens and source at the same ra
     assert_allclose(
-        da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([161.32, 161.34]), dec_s),
+        da.data_operations._compute_lensing_angles_flatsky(
+            ra_l, dec_l, np.array([161.32, 161.34]), dec_s
+        ),
         [
             [0.00069813170079771690, 0.00106951489719733675],
             [-1.57079632679489655800, 1.77544123918164542530],
@@ -102,7 +114,9 @@
 
     # lens and source at the same dec
     assert_allclose(
-        da.ops._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, np.array([51.49, 51.55])),
+        da.data_operations._compute_lensing_angles_flatsky(
+            ra_l, dec_l, ra_s, np.array([51.49, 51.55])
+        ),
         [
             [0.00032601941539388962, 0.00106951489719733675],
             [0.00000000000000000000, 1.77544123918164542530],
@@ -113,7 +127,7 @@
 
     # lens and source at the same ra and dec
     assert_allclose(
-        da.ops._compute_lensing_angles_flatsky(
+        da.data_operations._compute_lensing_angles_flatsky(
             ra_l, dec_l, np.array([ra_l, 161.34]), np.array([dec_l, 51.55])
         ),
         [
@@ -126,7 +140,9 @@
 
     # angles over the branch cut between 0 and 360
     assert_allclose(
-        da.ops._compute_lensing_angles_flatsky(0.1, dec_l, np.array([359.9, 359.5]), dec_s),
+        da.data_operations._compute_lensing_angles_flatsky(
+            0.1, dec_l, np.array([359.9, 359.5]), dec_s
+        ),
         [
             [0.0022828333888309108, 0.006603944760273219],
             [-0.31079754672938664, 0.15924369771830643],
@@ -138,15 +154,17 @@
     # coordinate_system conversion
     ra_l, dec_l = 161.32, 51.49
     ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
-    thetas_pixel, phis_pixel = da.ops._compute_lensing_angles_flatsky(
+    thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_flatsky(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean"
     )
-    thetas_sky, phis_sky = da.ops._compute_lensing_angles_flatsky(
+    thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_flatsky(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial"
     )
 
     assert_allclose(
-        da.ops._compute_lensing_angles_flatsky(-180, dec_l, np.array([180.1, 179.7]), dec_s),
+        da.data_operations._compute_lensing_angles_flatsky(
+            -180, dec_l, np.array([180.1, 179.7]), dec_s
+        ),
         [[0.0012916551296819666, 0.003424250083245557], [-2.570568636904587, 0.31079754672944354]],
         TOLERANCE["rtol"],
         err_msg="Failure when ra_l and ra_s are the same but one is defined negative",
@@ -173,10 +191,10 @@ def test_compute_lensing_angles_astropy():
     # coordinate_system conversion
     ra_l, dec_l = 161.32, 51.49
     ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
-    thetas_pixel, phis_pixel = da.ops._compute_lensing_angles_astropy(
+    thetas_pixel, phis_pixel = da.data_operations._compute_lensing_angles_astropy(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="euclidean"
     )
-    thetas_sky, phis_sky = da.ops._compute_lensing_angles_astropy(
+    thetas_sky, phis_sky = da.data_operations._compute_lensing_angles_astropy(
         ra_l, dec_l, ra_s, dec_s, coordinate_system="celestial"
    )
 
diff --git a/tests/test_mockdata.py b/tests/test_mockdata.py
index c075b6e79..d525ca8cc 100644
--- a/tests/test_mockdata.py
+++ b/tests/test_mockdata.py
@@ -264,13 +264,17 @@ def test_shapenoise():
 
     # Verify that the shape noise is Gaussian around 0 (for the very small shear here)
     sigma = 0.25
-    data = mock.generate_galaxy_catalog(10**12.0, 0.3, 4, cosmo, 0.8, ngals=50000, shapenoise=sigma)
+    data = mock.generate_galaxy_catalog(
+        10**12.0, 0.3, 4, cosmo, 0.8, ngals=50000, shapenoise=sigma
+    )
     # Check that there are no galaxies with |e|>1
     assert_equal(np.count_nonzero((data["e1"] > 1) | (data["e1"] < -1)), 0)
     assert_equal(np.count_nonzero((data["e2"] > 1) | (data["e2"] < -1)), 0)
     # Check that shape noise is Guassian with correct std dev
     bins = np.arange(-1, 1.1, 0.1)
-    gauss = 5000 * np.exp(-0.5 * (bins[:-1] + 0.05) ** 2 / sigma**2) / (sigma * np.sqrt(2 * np.pi))
+    gauss = (
+        5000 * np.exp(-0.5 * (bins[:-1] + 0.05) ** 2 / sigma**2) / (sigma * np.sqrt(2 * np.pi))
+    )
     assert_allclose(np.histogram(data["e1"], bins=bins)[0], gauss, atol=50, rtol=0.05)
     assert_allclose(np.histogram(data["e2"], bins=bins)[0], gauss, atol=50, rtol=0.05)
 
diff --git a/tests/test_theory.py b/tests/test_theory.py
index a17ab661d..6a5a7f96d 100644
--- a/tests/test_theory.py
+++ b/tests/test_theory.py
@@ -7,7 +7,11 @@
 from clmm.constants import Constants as clc
 from clmm.galaxycluster import GalaxyCluster
 from clmm import GCData
-from clmm.utils import compute_beta_s_square_mean_from_distribution, compute_beta_s_mean_from_distribution, compute_beta_s_func
+from clmm.utils import (
+    compute_beta_s_square_mean_from_distribution,
+    compute_beta_s_mean_from_distribution,
+    compute_beta_s_func,
+)
 from clmm.redshift.distributions import chang2013, desc_srd
 
 TOLERANCE = {"rtol": 1.0e-8}
@@ -213,7 +217,7 @@ def test_compute_reduced_shear(modeling_data):
     assert_allclose(
         theo.compute_reduced_shear_from_convergence(np.array(shear), np.array(convergence)),
         np.array(truth),
-        **TOLERANCE
+        **TOLERANCE,
     )
 
 
@@ -254,7 +258,7 @@ def helper_profiles(func):
     assert_allclose(
         func(r3d, mdelta, cdelta, z_cl, cclcosmo, halo_profile_model="nfw"),
         defaulttruth,
-        **TOLERANCE
+        **TOLERANCE,
     )
     assert_allclose(
         func(r3d, mdelta, cdelta, z_cl, cclcosmo, massdef="mean"), defaulttruth, **TOLERANCE
@@ -263,7 +267,7 @@ def helper_profiles(func):
     assert_allclose(
         func(r3d, mdelta, cdelta, z_cl, cclcosmo, halo_profile_model="NFW"),
         defaulttruth,
-        **TOLERANCE
+        **TOLERANCE,
     )
     assert_allclose(
         func(r3d, mdelta, cdelta, z_cl, cclcosmo, massdef="MEAN"), defaulttruth, **TOLERANCE
@@ -375,22 +379,25 @@
 
     # Test use_projected_quad
     if mod.backend == "ccl" and profile_init == "einasto":
-        if hasattr(mod.hdpm, 'projected_quad'):
+        if hasattr(mod.hdpm, "projected_quad"):
             mod.set_projected_quad(True)
             assert_allclose(
                 mod.eval_surface_density(
                     cfg["SIGMA_PARAMS"]["r_proj"], cfg["SIGMA_PARAMS"]["z_cl"], verbose=True
                 ),
                 cfg["numcosmo_profiles"]["Sigma"],
-                reltol*1e-1,
+                reltol * 1e-1,
             )
             assert_allclose(
                 theo.compute_surface_density(
-                    cosmo=cosmo, **cfg["SIGMA_PARAMS"], alpha_ein=alpha_ein, verbose=True,
+                    cosmo=cosmo,
+                    **cfg["SIGMA_PARAMS"],
+                    alpha_ein=alpha_ein,
+                    verbose=True,
                     use_projected_quad=True,
                 ),
                 cfg["numcosmo_profiles"]["Sigma"],
-                reltol*1e-1,
+                reltol * 1e-1,
             )
 
             delattr(mod.hdpm, "projected_quad")
@@ -547,11 +554,13 @@
     cfg_inf = load_validation_config()
 
     # compute some values
-    cfg_inf['GAMMA_PARAMS']['z_src'] = 1000.
+    cfg_inf["GAMMA_PARAMS"]["z_src"] = 1000.0
     beta_s_mean = compute_beta_s_mean_from_distribution(
-        cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo)
+        cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo
+    )
     beta_s_square_mean = compute_beta_s_square_mean_from_distribution(
-        cfg_inf['GAMMA_PARAMS']['z_cluster'], cfg_inf['GAMMA_PARAMS']['z_src'], cosmo)
+        cfg_inf["GAMMA_PARAMS"]["z_cluster"], cfg_inf["GAMMA_PARAMS"]["z_src"], cosmo
+    )
 
     gammat_inf = theo.compute_tangential_shear(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])
     kappa_inf = theo.compute_convergence(cosmo=cosmo, **cfg_inf["GAMMA_PARAMS"])
@@ -581,14 +590,14 @@
         theo.compute_reduced_tangential_shear,
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
-        approx="notvalid"
+        approx="notvalid",
     )
     assert_raises(
         ValueError,
         theo.compute_magnification,
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
-        approx="notvalid"
+        approx="notvalid",
     )
     assert_raises(
         ValueError,
@@ -596,7 +605,7 @@
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
         alpha=alpha,
-        approx="notvalid"
+        approx="notvalid",
     )
     # test KeyError from invalid key in integ_kwargs
     assert_raises(
@@ -604,14 +613,14 @@
         theo.compute_reduced_tangential_shear,
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
-        integ_kwargs={"notavalidkey": 0.0}
+        integ_kwargs={"notavalidkey": 0.0},
     )
     assert_raises(
         KeyError,
         theo.compute_magnification,
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
-        integ_kwargs={"notavalidkey": 0.0}
+        integ_kwargs={"notavalidkey": 0.0},
     )
     assert_raises(
         KeyError,
@@ -619,7 +628,7 @@
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
         alpha=alpha,
-        integ_kwargs={"notavalidkey": 0.0}
+        integ_kwargs={"notavalidkey": 0.0},
     )
     # test ValueError from unsupported z_src_info
     cfg_inf["GAMMA_PARAMS"]["z_src_info"] = "notvalid"
     assert_raises(
@@ -632,14 +641,14 @@
         theo.compute_reduced_tangential_shear,
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
-        approx="order1"
+        approx="order1",
     )
     assert_raises(
         ValueError,
         theo.compute_magnification,
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
-        approx="order1"
+        approx="order1",
     )
     assert_raises(
         ValueError,
@@ -647,7 +656,7 @@
         cosmo=cosmo,
         **cfg_inf["GAMMA_PARAMS"],
         alpha=2,
-        approx="order1"
+        approx="order1",
     )
 
     # test z_src_info = 'beta'
@@ -1065,7 +1074,7 @@
     assert_allclose(
         theo.compute_magnification_bias_from_magnification(magnification[0], alpha[0]),
         truth[0][0],
-        **TOLERANCE
+        **TOLERANCE,
     )
     assert_allclose(
         theo.compute_magnification_bias_from_magnification(magnification, alpha), truth, **TOLERANCE
@@ -1075,7 +1084,7 @@
             np.array(magnification), np.array(alpha)
         ),
         np.array(truth),
-        **TOLERANCE
+        **TOLERANCE,
     )
 
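---

Reviewer note: the patch is a pure rename (clmm/dataops/ops.py -> clmm/dataops/data_operations.py) plus black-style reformatting; no function behavior or signatures change. Below is a minimal smoke test of the rename, offered as a sketch rather than part of the patch: it assumes this branch is installed, reuses the tests' alias `import clmm.dataops as da`, and copies its input/output values from test_compute_cross_shear in tests/test_dataops.py above.

    # Sketch: verify the ops -> data_operations rename (not part of the patch).
    import clmm.dataops as da
    from numpy.testing import assert_allclose

    # The public API re-exported by clmm/dataops/__init__.py is unchanged.
    from clmm.dataops import compute_tangential_and_cross_components  # noqa: F401

    # The private helpers now live under the new module name; the expected
    # value is copied from test_compute_cross_shear.
    assert_allclose(
        da.data_operations._compute_cross_shear(0.15, 0.08, 0.52),
        0.08886301350787848,
    )

    # The old module path should be gone after the rename.
    try:
        from clmm.dataops import ops  # noqa: F401
    except ImportError:
        print("ok: clmm.dataops.ops no longer exists")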