#Code adapted from PolyBin O. H. E. Philcox (2023), https://arxiv.org/pdf/2303.08828.pdf
import healpy as hp
import numpy as np

def Tl_numerator(inp, data1, data2, data3, data4,
                 Cl13_th, Cl14_th, Cl23_th, Cl24_th,
                 lmin=0, verb=False,
                 equal12=False, equal13=False, equal14=False,
                 equal23=False, equal24=False, equal34=False,
                 remove_two_point=True):
    """
    Compute the numerator of the idealized trispectrum estimator.
    Note that we weight according to *spin-zero* Gaunt symbols, which differs from Philcox (in prep.).
    This necessarily subtracts off the disconnected pieces, given input theory Cl_th spectra (plus noise, if appropriate).
    Note that we index the output as t[l1,l2,l3,l4,L] for diagonal momentum L.
    We also drop the L=0 pieces, since these would require non-mean-zero correlators.
    If lmin > 0, we don't calculate any pieces with l < lmin; the output array still contains these entries, just filled with zeros.

    ARGUMENTS
    ---------
    inp: Info() object, contains information about input parameters
    data{i}: 1D numpy array, ith map input to the trispectrum
    Cl{i}{j}_th: 1D numpy array, theory power spectrum of data{i} and data{j}
    lmin: int, minimum ell for which to calculate the output
    verb: Bool, whether to print progress
    equal{i}{j}: Bool, whether data{i}==data{j}
    remove_two_point: Bool, whether to subtract the two-point disconnected pieces

    RETURNS
    -------
    t4_num_ideal+t2_num_ideal+t0_num_ideal: 5D numpy array, indexed with [l1,l2,l3,l4,L]
    """
    # Define harmonic-space ell and m arrays, and read ell limits from inp
    lmax_data = 3*inp.nside-1
    l_arr,m_arr = hp.Alm.getlm(lmax_data)
    Nside = inp.nside
    lmax = inp.ellmax
    lmax_sum = inp.ell_sum_max
    ## Transform to harmonic space + compute I maps
    # Map 1
    data1_lm = hp.map2alm(data1)
    I1_map = [hp.alm2map((l_arr==l)*data1_lm,Nside) for l in range(lmin,lmax_sum+1)]
    # Map 2
    if equal12:
        I2_map = I1_map
    else:
        data2_lm = hp.map2alm(data2)
        I2_map = [hp.alm2map((l_arr==l)*data2_lm,Nside) for l in range(lmin,lmax_sum+1)]
    # Map 3
    if equal13:
        I3_map = I1_map
    elif equal23:
        I3_map = I2_map
    else:
        data3_lm = hp.map2alm(data3)
        I3_map = [hp.alm2map((l_arr==l)*data3_lm,Nside) for l in range(lmin,lmax_sum+1)]
    # Map 4
    if equal14:
        I4_map = I1_map
    elif equal24:
        I4_map = I2_map
    elif equal34:
        I4_map = I3_map
    else:
        data4_lm = hp.map2alm(data4)
        I4_map = [hp.alm2map((l_arr==l)*data4_lm,Nside) for l in range(lmin,lmax_sum+1)]
    ## Define maps of A^{ab}_lm = int[dn Y_lm(n) I^a(n)I^b(n)] for two I maps
    if verb: print("Computing A^{ab} maps")
    A12_lm = [[hp.map2alm(I1_map[l1-lmin]*I2_map[l2-lmin]) for l2 in range(lmin,lmax_sum+1)] for l1 in range(lmin,lmax_sum+1)]
    A34_lm = [[hp.map2alm(I3_map[l3-lmin]*I4_map[l4-lmin]) for l4 in range(lmin,lmax_sum+1)] for l3 in range(lmin,lmax_sum+1)]

    # Create output arrays (for 4-field, 2-field and 0-field terms)
    t4_num_ideal = np.zeros((lmax_sum+1,lmax_sum+1,lmax_sum+1,lmax_sum+1,lmax+1), dtype=np.float32)
    t2_num_ideal = np.zeros_like(t4_num_ideal, dtype=np.float32)
    t0_num_ideal = np.zeros_like(t4_num_ideal, dtype=np.float32)
    ## Compute four-field term
    # Iterate over bins
    for l1 in range(lmin,lmax_sum+1):
        for l2 in range(lmin,lmax_sum+1):
            for l3 in range(lmin,lmax_sum+1):
                for l4 in range(lmin,lmax_sum+1):
                    if (-1)**(l1+l2+l3+l4)==-1: continue
                    summand = A12_lm[l1-lmin][l2-lmin]*A34_lm[l3-lmin][l4-lmin].conj()
                    for L in range(max(abs(l1-l2),lmin),min(l1+l2+1,lmax+1)):
                        if L<abs(l3-l4) or L>l3+l4: continue
                        if (-1)**(l1+l2+L)==-1: continue # drop parity-odd modes
                        if (-1)**(l3+l4+L)==-1: continue
                        # Compute four-field term
                        t4_num_ideal[l1,l2,l3,l4,L] = np.sum(summand*(l_arr==L)*(1.+(m_arr>0))).real
    if not remove_two_point:
        return t4_num_ideal
    ## Compute two-field term
    # Compute empirical power spectra
    Cl12 = hp.anafast(data1, data2, lmax=lmax_sum)
    Cl13 = hp.anafast(data1, data3, lmax=lmax_sum)
    Cl14 = hp.anafast(data1, data4, lmax=lmax_sum)
    Cl23 = hp.anafast(data2, data3, lmax=lmax_sum)
    Cl24 = hp.anafast(data2, data4, lmax=lmax_sum)
    Cl34 = hp.anafast(data3, data4, lmax=lmax_sum)
    # Iterate over bins
    for l1 in range(lmin,lmax_sum+1):
        for l2 in range(lmin,lmax_sum+1):
            for l3 in range(lmin,lmax_sum+1):
                for l4 in range(lmin,lmax_sum+1):
                    # second permutation
                    if (l1==l3 and l2==l4):
                        for L in range(max(abs(l1-l2),lmin),min(l1+l2+1,lmax+1)):
                            if L<abs(l3-l4) or L>l3+l4: continue
                            if (-1)**(l1+l2+L)==-1: continue # drop parity-odd modes
                            if (-1)**(l3+l4+L)==-1: continue
                            # Compute two-field term
                            prefactor = (2*l1+1)*(2*l2+1)*(2*L+1)/(4.*np.pi)*inp.wigner3j[l1,l2,L]**2
                            t2_num_ideal[l1,l2,l3,l4,L] += -prefactor*(Cl13_th[l1]*Cl24[l2]+Cl13[l1]*Cl24_th[l2])
                            t0_num_ideal[l1,l2,l3,l4,L] += prefactor*Cl13_th[l1]*Cl24_th[l2]
                    # third permutation
                    if (l1==l4 and l2==l3):
                        for L in range(max(abs(l1-l2),lmin),min(l1+l2+1,lmax+1)):
                            if L<abs(l3-l4) or L>l3+l4: continue
                            if (-1)**(l1+l2+L)==-1: continue # drop parity-odd modes
                            if (-1)**(l3+l4+L)==-1: continue
                            # Compute two-field term
                            prefactor = (2*l1+1)*(2*l2+1)*(2*L+1)/(4.*np.pi)*inp.wigner3j[l1,l2,L]**2
                            t2_num_ideal[l1,l2,l3,l4,L] += -prefactor*(Cl14_th[l1]*Cl23[l2]+Cl14[l1]*Cl23_th[l2])
                            t0_num_ideal[l1,l2,l3,l4,L] += prefactor*Cl14_th[l1]*Cl23_th[l2]
    return t4_num_ideal+t2_num_ideal+t0_num_ideal
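

# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): Tl_numerator reads
# precomputed Wigner 3j symbols from inp.wigner3j[l1, l2, L]. If your Info()
# object does not already carry such a table, something like the helper below
# could build one with SymPy. The (l1, l2, L) indexing and the m1 = m2 = m3 = 0
# convention are assumptions inferred from how the array is used above, and
# the SymPy loop is slow for large lmax.
def build_wigner3j_table(lmax):
    """Hypothetical helper: dense table of zero-m Wigner 3j symbols up to lmax."""
    from sympy.physics.wigner import wigner_3j  # imported locally so the module works without SymPy
    table = np.zeros((lmax+1, lmax+1, lmax+1))
    for l1 in range(lmax+1):
        for l2 in range(lmax+1):
            for L in range(abs(l1-l2), min(l1+l2, lmax)+1):
                if (l1+l2+L) % 2 == 1:
                    continue  # zero-m 3j symbols vanish for odd l1+l2+L
                table[l1, l2, L] = float(wigner_3j(l1, l2, L, 0, 0, 0))
    return table
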
def rho(inp, a_map, w1_map, w2_map, remove_two_point=True):
    '''
    Compute the trispectrum without normalization.

    ARGUMENTS
    ---------
    inp: Info() object, contains information about input parameters
    a_map: 1D numpy array, map of the signal with its average subtracted
    w1_map: 1D numpy array, first weight map with its average subtracted
    w2_map: 1D numpy array, second weight map with its average subtracted
    remove_two_point: Bool, whether to subtract the two-point disconnected pieces

    RETURNS
    -------
    tl_out: 5D numpy array, indexed as tl_out[l2,l4,l3,l5,l1]
    '''
    lmax_data = 3*inp.nside-1
    Cl_aa = hp.anafast(a_map, lmax=lmax_data)
    Cl_w1w2 = hp.anafast(w1_map, w2_map, lmax=lmax_data)
    Cl_aw2 = hp.anafast(a_map, w2_map, lmax=lmax_data)
    Cl_aw1 = hp.anafast(a_map, w1_map, lmax=lmax_data)
    equal24 = np.array_equal(w1_map, w2_map)
    tl_out = Tl_numerator(inp, a_map, w1_map, a_map, w2_map,
                          Cl_aa, Cl_aw2, Cl_aw1, Cl_w1w2,
                          equal13=True, equal24=equal24,
                          remove_two_point=remove_two_point)
    return np.nan_to_num(tl_out)
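

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): build a minimal
# stand-in for the project's Info() object and run rho() on simulated maps.
# The attribute names nside, ellmax, ell_sum_max and wigner3j are exactly the
# ones the functions above read; the values, the SimpleNamespace stand-in, and
# the build_wigner3j_table helper are illustrative assumptions.
if __name__ == "__main__":
    from types import SimpleNamespace

    nside = 32
    ellmax = 10        # maximum diagonal L kept in the output
    ell_sum_max = 10   # maximum l1..l4 summed over

    inp = SimpleNamespace(nside=nside, ellmax=ellmax, ell_sum_max=ell_sum_max,
                          wigner3j=build_wigner3j_table(ell_sum_max))

    # Simulated mean-zero signal and weight maps
    rng = np.random.default_rng(0)
    npix = hp.nside2npix(nside)
    a_map = rng.standard_normal(npix)
    w1_map = rng.standard_normal(npix)
    w2_map = w1_map.copy()  # identical weights exercise the equal24 branch
    a_map -= a_map.mean()
    w1_map -= w1_map.mean()
    w2_map -= w2_map.mean()

    tl = rho(inp, a_map, w1_map, w2_map)
    print("trispectrum numerator shape:", tl.shape)  # (11, 11, 11, 11, 11)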