Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

API(fields,twopoint): compute mixing matrices from fields #95

Merged
merged 4 commits into from
Jan 2, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
115 changes: 76 additions & 39 deletions examples/example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -452,7 +452,9 @@
"* A position field (`\"P\"`) for angular clustering and galaxy-galaxy lensing;\n",
"* A shear field (`\"G\"`) for cosmic shear and galaxy-galaxy lensing.\n",
"\n",
"We also specify that the shear field should flip the sign of the \"G2\" column by adding a minus sign."
"We also specify that the shear field should flip the sign of the \"G2\" column by adding a minus sign.\n",
"\n",
    "Finally, we define the optional names of the weight functions (`\"V\"`, `\"W\"`) of the fields. These will be used further below to compute mixing matrices for the fields."
]
},
{
Expand All @@ -465,8 +467,8 @@
"lonlat = ('RIGHT_ASCENSION', 'DECLINATION')\n",
"\n",
"fields = {\n",
" 'P': Positions(*lonlat),\n",
" 'G': Shears(*lonlat, 'G1', '-G2', 'WEIGHT'),\n",
" 'P': Positions(*lonlat, weight=\"V\"),\n",
" 'G': Shears(*lonlat, 'G1', '-G2', 'WEIGHT', weight=\"W\"),\n",
"}"
]
},
Expand Down Expand Up @@ -520,7 +522,7 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "2a0ea2d6fc02466fb00059925c44032e",
"model_id": "69f552bc18b543a5a39235bda3703ff2",
"version_major": 2,
"version_minor": 0
},
Expand All @@ -534,13 +536,13 @@
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">/Users/ntessore/code/heracles-ec/heracles/heracles/fields.py:247: UserWarning: position and visibility maps have \n",
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">/Users/ntessore/code/heracles-ec/heracles/heracles/fields.py:254: UserWarning: position and visibility maps have \n",
"different NSIDE\n",
" warnings.warn(\"position and visibility maps have different NSIDE\")\n",
"</pre>\n"
],
"text/plain": [
"/Users/ntessore/code/heracles-ec/heracles/heracles/fields.py:247: UserWarning: position and visibility maps have \n",
"/Users/ntessore/code/heracles-ec/heracles/heracles/fields.py:254: UserWarning: position and visibility maps have \n",
"different NSIDE\n",
" warnings.warn(\"position and visibility maps have different NSIDE\")\n"
]
Expand Down Expand Up @@ -690,7 +692,7 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "73b9269235fb47b9b79d26f456f8962e",
"model_id": "a78c0a381db24cfda4bf1521bd78fc4f",
"version_major": 2,
"version_minor": 0
},
Expand Down Expand Up @@ -923,7 +925,7 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "7eea7c123c304cfeb5f2150ab6db9012",
"model_id": "6e4d3dc39afc46a9b31d03ccd3fb7b14",
"version_major": 2,
"version_minor": 0
},
Expand Down Expand Up @@ -1027,7 +1029,7 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "32abfabf091a441b9c0b716b3d476aae",
"model_id": "34f59966f570417880c30c9fd306d745",
"version_major": 2,
"version_minor": 0
},
Expand Down Expand Up @@ -1105,31 +1107,68 @@
"execution_count": 36,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "230f11f3d8e741858a241369d682cc43",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Output()"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\n"
]
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"></pre>\n"
],
"text/plain": []
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">\n",
"</pre>\n"
],
"text/plain": [
"\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"mms = mixing_matrices(cls_mm, l1max=lmax, l2max=lmax, l3max=lmax_mm)"
"mms = mixing_matrices(fields, cls_mm, l1max=lmax, l2max=lmax, l3max=lmax_mm, progress=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The mixing matrices are returned in a toc dict with slightly different names: They use a two letter code that indicates which angular power spectra are mixed by a given matrix:\n",
"The mixing matrices are returned in a toc dict with familiar names: They correspond to the _output_ angular power spectra after mixing by a given matrix:\n",
"\n",
"* `00` for `(P, P) → (P, P)`;\n",
"* `0+` for `(P, E) → (P, E)` and `(P, B) → (P, B)`;\n",
"* `++` for `(E, E) → (E, E)` and `(B, B) → (B, B)`;\n",
"* `--` for `(E, E) → (B, B)` and `(B, B) → (E, E)`;\n",
"* `+-` for `(E, B) → (E, B)`.\n",
"* `P, P` for `(P, P) → (P, P)`;\n",
"* `P, G_E` for `(P, G_E) → (P, G_E)`, as well as `(P, G_B) → (P, G_B)`;\n",
"* `G_E, G_E` for `(G_E, G_E) → (G_E, G_E)`, as well as `(G_B, G_B) → (G_B, G_B)`;\n",
"* `G_B, G_B` for `(G_E, G_E) → (G_B, G_B)`, as well as `(G_B, G_B) → (G_E, G_E)`;\n",
"* `G_E, G_B` for `(G_E, G_B) → (G_E, G_B)`.\n",
"\n",
"For more details, see the paper by Brown, Castro & Taylor (2005). Note that the `+-` mixing matrix here is not the $W^{+-}$ matrix from said paper."
"For more details, see e.g. the paper by Brown, Castro & Taylor (2005)."
]
},
{
Expand All @@ -1140,21 +1179,19 @@
{
"data": {
"text/plain": [
"[('00', 0, 0),\n",
" ('0+', 0, 0),\n",
" ('00', 0, 1),\n",
" ('0+', 0, 1),\n",
" ('00', 0, 2),\n",
" ('0+', 0, 2),\n",
" ('00', 0, 3),\n",
" ('0+', 0, 3),\n",
" ('00', 0, 4),\n",
" ('0+', 0, 4),\n",
" ('00', 0, 5),\n",
" ('0+', 0, 5),\n",
" ('00', 0, 6),\n",
" ('0+', 0, 6),\n",
" ('00', 0, 7)]"
"[('P', 'P', 0, 0),\n",
" ('P', 'G_E', 0, 0),\n",
" ('P', 'P', 0, 1),\n",
" ('P', 'G_E', 0, 1),\n",
" ('P', 'P', 0, 2),\n",
" ('P', 'G_E', 0, 2),\n",
" '...',\n",
" ('G_E', 'G_B', 8, 9),\n",
" ('P', 'P', 9, 9),\n",
" ('P', 'G_E', 9, 9),\n",
" ('G_E', 'G_E', 9, 9),\n",
" ('G_B', 'G_B', 9, 9),\n",
" ('G_E', 'G_B', 9, 9)]"
]
},
"execution_count": 37,
Expand All @@ -1163,7 +1200,7 @@
}
],
"source": [
"list(mms.keys())[:15]"
"list(mms.keys())[:6] + ['...'] + list(mms.keys())[-6:]"
]
},
{
Expand Down Expand Up @@ -1234,7 +1271,7 @@
}
],
"source": [
"plt.imshow(mms['++', i, i], cmap='binary',\n",
"plt.imshow(mms['G_E', 'G_E', i, i], cmap='binary',\n",
" norm=mpl.colors.LogNorm(vmin=1e-7))\n",
"plt.colorbar(pad=0.025, fraction=0.0465)\n",
"plt.show()"
Expand Down Expand Up @@ -1569,15 +1606,15 @@
" cl = camb_cls[f'W{i+1}xW{i+j+1}']\n",
"\n",
" if s1.source_type == 'counts' and s2.source_type == 'counts':\n",
" theory_cls['P', 'P', i1, i2] = mms['00', i1, i2] @ cl\n",
" theory_cls['P', 'P', i1, i2] = mms['P', 'P', i1, i2] @ cl\n",
" elif s1.source_type == 'lensing' and s2.source_type == 'lensing':\n",
" theory_cls['G_E', 'G_E', i1, i2] = mms['++', i1, i2] @ (cl*fl**2)\n",
" theory_cls['G_B', 'G_B', i1, i2] = mms['--', i1, i2] @ (cl*fl**2)\n",
" theory_cls['G_E', 'G_E', i1, i2] = mms['G_E', 'G_E', i1, i2] @ (cl*fl**2)\n",
" theory_cls['G_B', 'G_B', i1, i2] = mms['G_B', 'G_B', i1, i2] @ (cl*fl**2)\n",
" elif s1.source_type == 'counts' and s2.source_type == 'lensing':\n",
" theory_cls['P', 'G_E', i1, i2] = mms['0+', i1, i2] @ (cl*fl)\n",
" theory_cls['P', 'G_E', i1, i2] = mms['P', 'G_E', i1, i2] @ (cl*fl)\n",
" theory_cls['P', 'G_B', i1, i2] = np.zeros_like(cl)\n",
" elif s1.source_type == 'lensing' and s2.source_type == 'counts':\n",
" theory_cls['P', 'G_E', i2, i1] = mms['0+', i2, i1] @ (cl*fl)\n",
" theory_cls['P', 'G_E', i2, i1] = mms['P', 'G_E', i2, i1] @ (cl*fl)\n",
" theory_cls['P', 'G_B', i2, i1] = np.zeros_like(cl)"
]
},
Expand Down
11 changes: 9 additions & 2 deletions heracles/fields.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,10 +82,11 @@ def __init_subclass__(cls, *, spin: int | None = None) -> None:
break
cls.__ncol = (ncol - nopt, ncol)

def __init__(self, *columns: str) -> None:
def __init__(self, *columns: str, weight: str | None = None) -> None:
"""Initialise the field."""
super().__init__()
self.__columns = self._init_columns(*columns) if columns else None
self.__weight = weight
self._metadata: dict[str, Any] = {}
if (spin := self.__spin) is not None:
self._metadata["spin"] = spin
Expand Down Expand Up @@ -140,6 +141,11 @@ def spin(self) -> int:
raise ValueError(msg)
return spin

@property
def weight(self) -> str | None:
    """Name of the weight function for this field.

    Returns the column name passed as the ``weight`` keyword at
    construction time, or ``None`` if no weight function was given.
    """
    return self.__weight

@abstractmethod
async def __call__(
self,
Expand Down Expand Up @@ -187,9 +193,10 @@ def __init__(
*columns: str,
overdensity: bool = True,
nbar: float | None = None,
weight: str | None = None,
) -> None:
"""Create a position field."""
super().__init__(*columns)
super().__init__(*columns, weight=weight)
self.__overdensity = overdensity
self.__nbar = nbar

Expand Down
Loading
Loading