diff --git a/.devcontainer/apt.txt b/.devcontainer/apt.txt
new file mode 100644
index 00000000..6c1009bf
--- /dev/null
+++ b/.devcontainer/apt.txt
@@ -0,0 +1,4 @@
+git
+ncdu
+wget
+curl
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 00000000..1ca5f58e
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,21 @@
+// For format details, see https://aka.ms/devcontainer.json.
+{
+ "image": "quay.io/pangeo/pytorch-notebook:latest",
+
+ "customizations": {
+ "vscode": {
+ "extensions": [
+ "ms-toolsai.jupyter",
+ "ms-python.python",
+ "ms-vsliveshare.vsliveshare",
+ "DavidAnson.vscode-markdownlint",
+ "GitHub.copilot"
+ ]
+ }
+ },
+ "postCreateCommand": "sh .devcontainer/postBuild.sh",
+ "features": {
+ "ghcr.io/devcontainers-contrib/features/black:2": {},
+ "ghcr.io/devcontainers-contrib/features/pylint:2": {}
+ }
+}
diff --git a/.devcontainer/environment.yml b/.devcontainer/environment.yml
new file mode 100644
index 00000000..4a8c6d53
--- /dev/null
+++ b/.devcontainer/environment.yml
@@ -0,0 +1,16 @@
+channels:
+ - conda-forge
+dependencies:
+ - python>=3.10
+ - astropy
+ - jupyterlab
+ - matplotlib
+ - numpy
+ - pandas
+ - scipy
+ - h5py
+ - nox
+ - sphinx
+ - myst-parser
+ - jupyter-book
+ - pip
diff --git a/.devcontainer/postBuild.sh b/.devcontainer/postBuild.sh
new file mode 100644
index 00000000..743f496f
--- /dev/null
+++ b/.devcontainer/postBuild.sh
@@ -0,0 +1,5 @@
+# For writing commands that will be executed after the container is created
+
+# Installs `caustics` as a local library without resolving dependencies (--no-deps)
+python3 -m pip install -e /workspaces/caustics --no-deps
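+# Install the dev extras on top (editable install from the current directory)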
+python3 -m pip install -e ".[dev]"
diff --git a/.devcontainer/start b/.devcontainer/start
new file mode 100644
index 00000000..5b46c1ac
--- /dev/null
+++ b/.devcontainer/start
@@ -0,0 +1,12 @@
+#!/bin/bash -l
+
+# ==== ONLY EDIT WITHIN THIS BLOCK =====
+
+export CAUSTICS_ENV="caustics"
+if ! [[ -z "${CAUSTICS_SCRATCH_PREFIX}" ]] && ! [[ -z "${JUPYTERHUB_USER}" ]]; then
+ export CAUSTICS_SCRATCH="${CAUSTICS_SCRATCH_PREFIX}/${JUPYTERHUB_USER}/"
+fi
+
+# ==== ONLY EDIT WITHIN THIS BLOCK =====
+
+exec "$@"
diff --git a/.git_archival.txt b/.git_archival.txt
new file mode 100644
index 00000000..8fb235d7
--- /dev/null
+++ b/.git_archival.txt
@@ -0,0 +1,4 @@
+node: $Format:%H$
+node-date: $Format:%cI$
+describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
+ref-names: $Format:%D$
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..00a7b00c
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
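+# Expand the $Format:...$ placeholders in .git_archival.txt when exporting archives (e.g. GitHub tarballs)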
+.git_archival.txt export-subst
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
new file mode 100644
index 00000000..78484a86
--- /dev/null
+++ b/.github/CONTRIBUTING.md
@@ -0,0 +1,113 @@
+See the [Scientific Python Developer Guide][spc-dev-intro] for a detailed
+description of best practices for developing scientific packages.
+
+[spc-dev-intro]: https://scientific-python-cookie.readthedocs.io/guide/intro
+
+# Quick development
+
+The fastest way to start with development is to use `nox`. This will set up a
+virtual environment for you to run all the checks and tests. There are two
+ways to install `nox`:
+
+## Codespaces
+
+Nox is pre-installed in the Codespaces environment, so after launching a
+Codespace you can simply open the terminal and run `nox` to run all the checks
+and tests.
+
+## Local
+
+If you don't already have `nox`, you can install it with pip:
+
+```bash
+pip install nox
+```
+
+On macOS, `nox` is also available via Homebrew:
+
+```bash
+brew install nox
+```
+
+## Nox basics
+
+### What is it?
+
+`nox` is a command-line tool that automates testing in multiple Python
+environments, similar to tox. Unlike tox, Nox uses a standard Python file for
+configuration; you can find this project's configuration in
+[`noxfile.py`](../noxfile.py).
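+
+As a minimal sketch, the `tests` session from this repository's
+[`noxfile.py`](../noxfile.py) looks roughly like this:
+
+```python
+import nox
+
+
+@nox.session
+def tests(session: nox.Session) -> None:
+    """Run the test suite in a fresh, temporary virtual environment."""
+    session.install(".[dev]")  # install the package plus its dev extras
+    session.run("pytest", *session.posargs)  # extra CLI args go to pytest
+```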
+
+### How do I use it?
+
+To use, run `nox`. This will lint and test using every supported version of
+Python on your system, skipping versions that are not installed. You can also
+run specific jobs:
+
+```bash
+nox -s lint # Lint only
+nox -s tests # Python tests
+nox -s build # Make an SDist and wheel
+```
+
+Nox handles everything for you, including setting up a temporary virtual
+environment for each run.
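+Arguments after `--` are forwarded to the session, so, for example,
+`nox -s tests -- -k cosmology` passes the (illustrative) `-k cosmology`
+selector through to `pytest`, since the `tests` session forwards
+`session.posargs`.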
+
+# Setting up a development environment manually
+
+You can set up a development environment by running:
+
+```bash
+python3 -m venv .venv
+source ./.venv/bin/activate
+pip install -v -e .[dev]
+```
+
+If you have the
+[Python Launcher for Unix](https://github.com/brettcannon/python-launcher), you
+can instead do:
+
+```bash
+py -m venv .venv
+py -m pip install -v -e .[dev]
+```
+
+# Post setup
+
+You should prepare pre-commit, which will help you by checking that commits pass
+required checks:
+
+```bash
+pip install pre-commit # or brew install pre-commit on macOS
+pre-commit install # Will install a pre-commit hook into the git repo
+```
+
+You can also run `pre-commit run` (changed files only) or
+`pre-commit run --all-files` to run the checks without installing the hook.
+
+# Testing
+
+Use pytest to run the unit tests:
+
+```bash
+pytest
+```
+
+# Coverage
+
+Use pytest-cov to generate coverage reports:
+
+```bash
+pytest --cov=caustics
+```
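+
+CI additionally produces an XML coverage report for Codecov; you can mirror it
+locally with `pytest --cov=caustics --cov-report=xml --cov-report=term`, the
+same flags used in the CI workflow.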
+
+# Pre-commit
+
+This project uses pre-commit for all style checking. While you can run it with
+nox, this is such an important tool that it deserves to be installed on its own.
+Install pre-commit and run:
+
+```bash
+pre-commit run -a
+```
+
+to check all files.
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..6fddca0d
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+version: 2
+updates:
+ # Maintain dependencies for GitHub Actions
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/.github/matchers/pylint.json b/.github/matchers/pylint.json
new file mode 100644
index 00000000..e3a6bd16
--- /dev/null
+++ b/.github/matchers/pylint.json
@@ -0,0 +1,32 @@
+{
+ "problemMatcher": [
+ {
+ "severity": "warning",
+ "pattern": [
+ {
+ "regexp": "^([^:]+):(\\d+):(\\d+): ([A-DF-Z]\\d+): \\033\\[[\\d;]+m([^\\033]+).*$",
+ "file": 1,
+ "line": 2,
+ "column": 3,
+ "code": 4,
+ "message": 5
+ }
+ ],
+ "owner": "pylint-warning"
+ },
+ {
+ "severity": "error",
+ "pattern": [
+ {
+ "regexp": "^([^:]+):(\\d+):(\\d+): (E\\d+): \\033\\[[\\d;]+m([^\\033]+).*$",
+ "file": 1,
+ "line": 2,
+ "column": 3,
+ "code": 4,
+ "message": 5
+ }
+ ],
+ "owner": "pylint-error"
+ }
+ ]
+}
diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
new file mode 100644
index 00000000..9f1560c9
--- /dev/null
+++ b/.github/workflows/cd.yml
@@ -0,0 +1,99 @@
+name: CD
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+ - dev
+ release:
+ types:
+ - published
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+env:
+ FORCE_COLOR: 3
+
+jobs:
+ dist:
+ name: Distribution build
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Build sdist and wheel
+ run: pipx run build
+
+ - uses: actions/upload-artifact@v3
+ with:
+ path: dist
+
+ - name: Check products
+ run: pipx run twine check dist/*
+
+ test-built-dist:
+ needs: [dist]
+ name: Test built distribution
+ runs-on: ubuntu-latest
+ permissions:
+ id-token: write
+ steps:
+ - uses: actions/setup-python@v4.7.0
+ name: Install Python
+ with:
+ python-version: "3.10"
+ - uses: actions/download-artifact@v3
+ with:
+ name: artifact
+ path: dist
+ - name: List contents of built dist
+ run: |
+ ls -ltrh
+ ls -ltrh dist
+ - name: Publish to Test PyPI
+ uses: pypa/gh-action-pypi-publish@v1.8.10
+ with:
+ repository-url: https://test.pypi.org/legacy/
+ verbose: true
+ skip-existing: true
+ - name: Check pypi packages
+ run: |
+ sleep 3
+ python -m pip install --upgrade pip
+
+ echo "=== Testing wheel file ==="
+ # Install wheel to get dependencies and check import
+ python -m pip install --extra-index-url https://test.pypi.org/simple --upgrade --pre caustics
+ python -c "import caustics; print(caustics.__version__)"
+ echo "=== Done testing wheel file ==="
+
+ echo "=== Testing source tar file ==="
+ # Install tar gz and check import
+ python -m pip uninstall --yes caustics
+ python -m pip install --extra-index-url https://test.pypi.org/simple --upgrade --pre --no-binary=:all: caustics
+ python -c "import caustics; print(caustics.__version__)"
+ echo "=== Done testing source tar file ==="
+
+ publish:
+ needs: [dist, test-built-dist]
+ name: Publish to PyPI
+ environment: pypi
+ permissions:
+ id-token: write
+ runs-on: ubuntu-latest
+ if: github.event_name == 'release' && github.event.action == 'published'
+
+ steps:
+ - uses: actions/download-artifact@v3
+ with:
+ name: artifact
+ path: dist
+
+ - uses: pypa/gh-action-pypi-publish@v1.8.10
+ if: startsWith(github.ref, 'refs/tags')
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..27441588
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,74 @@
+# This workflow will install Python dependencies and run the tests with multiple versions of Python across operating systems
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: CI
+
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - main
+ - dev
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+env:
+ FORCE_COLOR: 3
+ PROJECT_NAME: "caustics"
+
+jobs:
+ build:
+ runs-on: ${{matrix.os}}
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.9", "3.10", "3.11"]
+ os: [ubuntu-latest, windows-latest, macOS-latest]
+
+ steps:
+ - name: Checkout caustics
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ allow-prereleases: true
+
+ - name: Record State
+ run: |
+ pwd
+ echo github.ref is: ${{ github.ref }}
+ echo GITHUB_SHA is: $GITHUB_SHA
+ echo github.event_name is: ${{ github.event_name }}
+ echo github workspace: ${{ github.workspace }}
+ pip --version
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install pytest pytest-cov torch wheel
+
+ # We only want to install this on one run, because otherwise we'll have
+ # duplicate annotations.
+ - name: Install error reporter
+ if: ${{ matrix.python-version == '3.10' }}
+ run: |
+ python -m pip install pytest-github-actions-annotate-failures
+
+ - name: Install Caustics
+ run: |
+ pip install -e ".[dev]"
+ pip show ${{ env.PROJECT_NAME }}
+
+ - name: Test with pytest
+ run: |
+ pytest -vvv --cov=${{ env.PROJECT_NAME }} --cov-report=xml --cov-report=term tests/
+
+ - name: Upload coverage reports to Codecov with GitHub Action
+ uses: codecov/codecov-action@v3
diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml
deleted file mode 100644
index 13c8bd44..00000000
--- a/.github/workflows/coverage.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Code Coverage
-
-on:
- push:
- branches:
- - main
- - dev
- pull_request:
- branches:
- - main
- - dev
- workflow_dispatch:
-
-jobs:
- coverage:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout caustics
- uses: actions/checkout@v3
-
- - name: Set up Python 3.10
- uses: actions/setup-python@v4
- with:
- python-version: '3.10'
- - name: Record State
- run: |
- pwd
- echo github.ref is: ${{ github.ref }}
- echo GITHUB_SHA is: $GITHUB_SHA
- echo github.event_name is: ${{ github.event_name }}
- echo github workspace: ${{ github.workspace }}
- pip --version
-
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install pytest pytest-cov torch wheel
- # Install deps
- cd $GITHUB_WORKSPACE
- pip install -r requirements.txt
- shell: bash
-
- - name: Install Caustics
- run: |
- cd $GITHUB_WORKSPACE
- pip install -e .[dev]
- pip show caustics
- shell: bash
- - name: Test with pytest
- run: |
- cd $GITHUB_WORKSPACE
- pwd
- pytest --cov-report=xml --cov=caustics test/
- cat coverage.xml
- shell: bash
- - name: Upload coverage report to Codecov
- uses: codecov/codecov-action@v3
- with:
- files: ${{ github.workspace }}/coverage.xml
- fail_ci_if_error: false
diff --git a/.github/workflows/documentation.yaml b/.github/workflows/documentation.yaml
index 6498c184..cd2adec2 100644
--- a/.github/workflows/documentation.yaml
+++ b/.github/workflows/documentation.yaml
@@ -4,7 +4,7 @@ on:
branches:
- main
workflow_dispatch:
-
+
jobs:
docs:
runs-on: ubuntu-latest
@@ -12,38 +12,39 @@ jobs:
- uses: actions/checkout@master
- uses: actions/setup-python@v4
with:
- python-version: '3.9'
- cache: 'pip'
-
+ python-version: "3.9"
+ cache: "pip"
+
- run: |
- pip install torch wheel
+ pip install torch wheel
pip install -r requirements.txt
-
+
- name: Install dependencies
run: |
sudo apt-get install -y pandoc
pip install sphinx sphinx_rtd_theme nbsphinx
-
+
- name: Install Caustics
run: |
cd $GITHUB_WORKSPACE
pip install -e .[dev]
pip show caustics
shell: bash
-
+
- name: Clone external Jupyter Notebook repository
run: |
cd $GITHUB_WORKSPACE/docs/
python pull_notebooks.py
-
+
- name: Sphinx build
run: |
sphinx-apidoc -f -o docs/ caustics/
sphinx-build docs _build
-
+
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
- if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
+ if:
+ ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
with:
publish_branch: gh-pages
github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
deleted file mode 100644
index 0f9cec17..00000000
--- a/.github/workflows/python-app.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-# This workflow will install Python dependencies, run tests and lint with a single version of Python
-# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
-
-name: Unit Tests
-
-on:
- push:
- branches:
- - main
- - dev
- pull_request:
- branches:
- - main
- - dev
- workflow_dispatch:
-
-jobs:
- build:
-
- runs-on: ${{matrix.os}}
- strategy:
- matrix:
- python-version: ["3.9", "3.10"]
- os: [ubuntu-latest, windows-latest, macOS-latest]
-
- steps:
- - name: Checkout caustics
- uses: actions/checkout@v3
-
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
- with:
- python-version: ${{ matrix.python-version }}
- - name: Record State
- run: |
- pwd
- echo github.ref is: ${{ github.ref }}
- echo GITHUB_SHA is: $GITHUB_SHA
- echo github.event_name is: ${{ github.event_name }}
- echo github workspace: ${{ github.workspace }}
- pip --version
-
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install pytest pytest-cov torch wheel
- # Install deps
- cd $GITHUB_WORKSPACE
- pip install -r requirements.txt
- shell: bash
-
- - name: Install Caustics
- run: |
- cd $GITHUB_WORKSPACE
- pip install -e .[dev]
- pip show caustics
- shell: bash
-
- - name: Test with pytest
- run: |
- cd $GITHUB_WORKSPACE
- pytest test
- shell: bash
-
diff --git a/.gitignore b/.gitignore
index 0d07421a..12601fd5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -134,3 +134,6 @@ package-lock.json
package.json
.idea/
+
+# version
+_version.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..9b8e5dfd
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,85 @@
+exclude: |
+ (?x)^(
+ tests/utils.py |
+ .devcontainer/ |
+ docs/source/
+ )
+
+ci:
+ autoupdate_commit_msg: "chore: update pre-commit hooks"
+ autofix_commit_msg: "style: pre-commit fixes"
+
+repos:
+ - repo: https://github.com/psf/black
+ rev: "23.7.0"
+ hooks:
+ - id: black-jupyter
+
+ - repo: https://github.com/asottile/blacken-docs
+ rev: "1.15.0"
+ hooks:
+ - id: blacken-docs
+ additional_dependencies: [black==23.7.0]
+
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: "v4.4.0"
+ hooks:
+ - id: check-added-large-files
+ - id: check-case-conflict
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-yaml
+ - id: debug-statements
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ - id: name-tests-test
+ args: ["--pytest-test-first"]
+ - id: requirements-txt-fixer
+ - id: trailing-whitespace
+
+ - repo: https://github.com/pre-commit/pygrep-hooks
+ rev: "v1.10.0"
+ hooks:
+ - id: rst-backticks
+ - id: rst-directive-colons
+ - id: rst-inline-touching-normal
+
+ - repo: https://github.com/pre-commit/mirrors-prettier
+ rev: "v3.0.0"
+ hooks:
+ - id: prettier
+ types_or: [yaml, markdown, html, css, scss, javascript, json]
+ args: [--prose-wrap=always]
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: "v0.0.277"
+ hooks:
+ - id: ruff
+ args: ["--fix", "--show-fixes"]
+
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: "v1.4.1"
+ hooks:
+ - id: mypy
+ files: src|tests
+ args: []
+ additional_dependencies:
+ - pytest
+
+ - repo: https://github.com/codespell-project/codespell
+ rev: "v2.2.5"
+ hooks:
+ - id: codespell
+
+ - repo: https://github.com/shellcheck-py/shellcheck-py
+ rev: "v0.9.0.5"
+ hooks:
+ - id: shellcheck
+
+ - repo: local
+ hooks:
+ - id: disallow-caps
+ name: Disallow improper capitalization
+ language: pygrep
+ entry: PyBind|Numpy|Cmake|CCache|Github|PyTest
+ exclude: .pre-commit-config.yaml
diff --git a/.readthedocs.yaml b/.readthedocs.yml
similarity index 53%
rename from .readthedocs.yaml
rename to .readthedocs.yml
index fe6a07a9..05041f5a 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yml
@@ -5,19 +5,9 @@
# Required
version: 2
-# Set the OS, Python version and other tools you might need
-build:
- os: ubuntu-22.04
- tools:
- python: "3.12"
- # You can also specify other tool versions:
- # nodejs: "19"
- # rust: "1.64"
- # golang: "1.19"
-
# Build documentation in the "docs/" directory with Sphinx
sphinx:
- configuration: docs/conf.py
+ configuration: docs/source/conf.py
# Optionally build your docs in additional formats such as PDF and ePub
# formats:
@@ -34,15 +24,11 @@ build:
tools:
python: "3.9"
apt_packages:
- - pandoc # Specify pandoc to be installed via apt-get
+ - pandoc # Specify pandoc to be installed via apt-get
python:
install:
- - requirements: requirements.txt # Path to your requirements.txt file
- - requirements: docs/requirements.txt # Path to your requirements.txt file
+ - requirements: requirements.txt # Path to your requirements.txt file
+ - requirements: docs/requirements.txt # Path to your requirements.txt file
- method: pip
- path: . # Install the package itself
-
-# python:
-# install:
-# - requirements: docs/requirements.txt
\ No newline at end of file
+ path: . # Install the package itself
diff --git a/README.md b/README.md
index 7af68fb7..d2e61002 100644
--- a/README.md
+++ b/README.md
@@ -4,41 +4,56 @@
-
+[![ssec](https://img.shields.io/badge/SSEC-Project-purple?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA0AAAAOCAQAAABedl5ZAAAACXBIWXMAAAHKAAABygHMtnUxAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAAMNJREFUGBltwcEqwwEcAOAfc1F2sNsOTqSlNUopSv5jW1YzHHYY/6YtLa1Jy4mbl3Bz8QIeyKM4fMaUxr4vZnEpjWnmLMSYCysxTcddhF25+EvJia5hhCudULAePyRalvUteXIfBgYxJufRuaKuprKsbDjVUrUj40FNQ11PTzEmrCmrevPhRcVQai8m1PRVvOPZgX2JttWYsGhD3atbHWcyUqX4oqDtJkJiJHUYv+R1JbaNHJmP/+Q1HLu2GbNoSm3Ft0+Y1YMdPSTSwQAAAABJRU5ErkJggg==&style=plastic)](https://escience.washington.edu/wetai/)
[![tests](https://github.com/Ciela-Institute/caustics/actions/workflows/python-app.yml/badge.svg?branch=main)](https://github.com/Ciela-Institute/caustics/actions)
[![Docs](https://github.com/Ciela-Institute/caustics/actions/workflows/documentation.yaml/badge.svg)](https://github.com/Ciela-Institute/caustics/actions/workflows/documentation.yaml)
[![PyPI version](https://badge.fury.io/py/caustics.svg)](https://pypi.org/project/caustics/)
[![coverage](https://img.shields.io/codecov/c/github/Ciela-Institute/caustics)](https://app.codecov.io/gh/Ciela-Institute/caustics)
+
# caustics
-The lensing pipeline of the future: GPU-accelerated, automatically-differentiable,
-highly modular. Currently under heavy development: expect interface changes and
-some imprecise/untested calculations.
+The lensing pipeline of the future: GPU-accelerated,
+automatically-differentiable, highly modular. Currently under heavy development:
+expect interface changes and some imprecise/untested calculations.
-## Installation
+## Installation
Simply install caustics from PyPI:
+
```bash
pip install caustics
```
## Documentation
-Please see our [documentation page](Ciela-Institute.github.io/caustics/) for more detailed information.
+Please see our [documentation page](https://caustics.readthedocs.io/en/latest/) for
+more detailed information.
-## Contributing
+## Contribution
-Please reach out to one of us if you're interested in contributing!
+We welcome contributions from collaborators and researchers interested in our
+work. If you have improvements, suggestions, or new findings to share, please
+submit an issue or pull request. Your contributions help advance our research and analysis
+efforts.
-To start, follow the installation instructions, replacing the last line with
-```bash
-pip install -e ".[dev]"
-```
-This creates an editable install and installs the dev dependencies.
+To get started with your development (or fork), click the "Open with GitHub
+Codespaces" button below to launch a fully configured development environment
+with all the necessary tools and extensions.
+
+[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/uw-ssec/caustics?quickstart=1)
+
+Instructions on how to contribute to this project can be found in
+CONTRIBUTING.md.
Some guidelines:
+
- Please use `isort` and `black` to format your code.
-- Use `CamelCase` for class names and `snake_case` for variable and method names.
+- Use `CamelCase` for class names and `snake_case` for variable and method
+ names.
- Open up issues for bugs/missing features.
- Use pull requests for additions to the code.
- Write tests that can be run by [`pytest`](https://docs.pytest.org/).
+
+Thanks to our contributors so far!
+
+[![Contributors](https://contrib.rocks/image?repo=Ciela-Institute/caustics)](https://github.com/Ciela-Institute/caustics/graphs/contributors)
diff --git a/docs/Makefile b/docs/Makefile
index 298ea9e2..51285967 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -16,4 +16,4 @@ help:
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/requirements.txt b/docs/requirements.txt
index a25ab063..80e683f8 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,4 +1,4 @@
-wheel
+nbsphinx
sphinx
sphinx_rtd_theme
-nbsphinx
+wheel
diff --git a/docs/citation.rst b/docs/source/citation.rst
similarity index 100%
rename from docs/citation.rst
rename to docs/source/citation.rst
diff --git a/docs/conf.py b/docs/source/conf.py
similarity index 81%
rename from docs/conf.py
rename to docs/source/conf.py
index 4cf235a0..2011b358 100644
--- a/docs/conf.py
+++ b/docs/source/conf.py
@@ -12,21 +12,20 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
-import os
-import sys
-#sys.path.insert(0, os.path.abspath('../src'))
+from caustics import __version__
+# sys.path.insert(0, os.path.abspath('../src'))
# -- Project information -----------------------------------------------------
-project = 'caustics'
-copyright = '2023, Ciela Institute'
-author = 'Ciela Institute'
+project = "caustics"
+copyright = "2023, Ciela Institute"
+author = "Ciela Institute"
# The short X.Y version
-version = '0.5'
+version = __version__
# The full version, including alpha/beta/rc tags
-release = 'v0.5.0'
+release = version
# -- General configuration ---------------------------------------------------
@@ -39,40 +38,40 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'nbsphinx',
- 'sphinx.ext.autodoc',
+ "nbsphinx",
+ "sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
- 'sphinx.ext.doctest',
- 'sphinx.ext.coverage',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.ifconfig',
- 'sphinx.ext.viewcode',
+ "sphinx.ext.doctest",
+ "sphinx.ext.coverage",
+ "sphinx.ext.mathjax",
+ "sphinx.ext.ifconfig",
+ "sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = 'en'
+language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
@@ -84,7 +83,7 @@
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
-#html_theme = 'alabaster'
+# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -95,7 +94,7 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
@@ -112,7 +111,7 @@
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
-htmlhelp_basename = 'causticsdoc'
+htmlhelp_basename = "causticsdoc"
# -- Options for LaTeX output ------------------------------------------------
@@ -121,15 +120,12 @@
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
-
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
-
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
-
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
@@ -139,8 +135,7 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'caustics.tex', 'caustics Documentation',
- 'Ciela Institute', 'manual'),
+ (master_doc, "caustics.tex", "caustics Documentation", "Ciela Institute", "manual"),
]
@@ -148,10 +143,7 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'caustics', 'caustics Documentation',
- [author], 1)
-]
+man_pages = [(master_doc, "caustics", "caustics Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
@@ -160,9 +152,15 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'caustics', 'caustics Documentation',
- author, 'caustics', 'One line description of project.',
- 'Miscellaneous'),
+ (
+ master_doc,
+ "caustics",
+ "caustics Documentation",
+ author,
+ "caustics",
+ "The gravitational lensing pipeline of the future.",
+ "Miscellaneous",
+ ),
]
@@ -181,9 +179,9 @@
# epub_uid = ''
# A list of files that should not be packed into the epub file.
-epub_exclude_files = ['search.html']
+epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for nbsphinx --------------------------------------------------
-nbsphinx_execute = 'never'
+nbsphinx_execute = "never"
diff --git a/docs/contributing.rst b/docs/source/contributing.rst
similarity index 100%
rename from docs/contributing.rst
rename to docs/source/contributing.rst
diff --git a/docs/getting_started.rst b/docs/source/getting_started.rst
similarity index 100%
rename from docs/getting_started.rst
rename to docs/source/getting_started.rst
diff --git a/docs/illustrative_examples.rst b/docs/source/illustrative_examples.rst
similarity index 100%
rename from docs/illustrative_examples.rst
rename to docs/source/illustrative_examples.rst
diff --git a/docs/index.rst b/docs/source/index.rst
similarity index 99%
rename from docs/index.rst
rename to docs/source/index.rst
index 0b4ca666..10c6ab50 100644
--- a/docs/index.rst
+++ b/docs/source/index.rst
@@ -6,7 +6,7 @@
.. image:: https://github.com/Ciela-Institute/caustics/blob/main/media/AP_logo.png?raw=true
:width: 100 %
:target: https://github.com/Ciela-Institute/caustics
-
+
|br|
Welcome to Caustics' documentation!
@@ -40,7 +40,7 @@ Read The Docs
contributing.rst
citation.rst
license.rst
-
+
Indices and tables
==================
@@ -48,7 +48,7 @@ Indices and tables
:maxdepth: 1
modules.rst
-
+
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
diff --git a/docs/install.rst b/docs/source/install.rst
similarity index 100%
rename from docs/install.rst
rename to docs/source/install.rst
diff --git a/docs/license.rst b/docs/source/license.rst
similarity index 100%
rename from docs/license.rst
rename to docs/source/license.rst
diff --git a/docs/modules.rst b/docs/source/modules.rst
similarity index 100%
rename from docs/modules.rst
rename to docs/source/modules.rst
diff --git a/docs/pull_notebooks.py b/docs/source/pull_notebooks.py
similarity index 100%
rename from docs/pull_notebooks.py
rename to docs/source/pull_notebooks.py
diff --git a/docs/tutorials.rst b/docs/source/tutorials.rst
similarity index 90%
rename from docs/tutorials.rst
rename to docs/source/tutorials.rst
index a5997413..e9dea14b 100644
--- a/docs/tutorials.rst
+++ b/docs/source/tutorials.rst
@@ -14,10 +14,3 @@ version of each tutorial is available here.
VisualizeCaustics
MultiplaneDemo
InvertLensEquation
-
-
-
-
-
-
-
diff --git a/noxfile.py b/noxfile.py
new file mode 100644
index 00000000..5dc68404
--- /dev/null
+++ b/noxfile.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+import argparse
+import shutil
+from pathlib import Path
+
+import nox
+
+DIR = Path(__file__).parent.resolve()
+
+nox.options.sessions = ["lint", "pylint", "tests", "build"]
+
+
+@nox.session
+def lint(session: nox.Session) -> None:
+ """
+ Run the linter.
+ """
+ session.install("pre-commit")
+ session.run("pre-commit", "run", "--all-files", *session.posargs)
+
+
+@nox.session
+def pylint(session: nox.Session) -> None:
+ """
+ Run PyLint.
+ """
+ # This needs to be installed into the package environment, and is slower
+ # than a pre-commit check
+ session.install(".", "pylint")
+ session.run("pylint", "src", *session.posargs)
+
+
+@nox.session
+def tests(session: nox.Session) -> None:
+ """
+ Run the unit and regular tests. Use --cov to activate coverage.
+ """
+ session.install(".[dev]")
+ session.run("pytest", *session.posargs)
+
+
+@nox.session
+def build(session: nox.Session) -> None:
+ """
+ Build an SDist and wheel.
+ """
+
+ build_p = DIR.joinpath("build")
+ if build_p.exists():
+ shutil.rmtree(build_p)
+
+ session.install("build")
+ session.run("python", "-m", "build")
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..b3216a9c
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,59 @@
+[build-system]
+requires = ["hatchling", "hatch-requirements-txt", "hatch-vcs"]
+build-backend = "hatchling.build"
+
+[project]
+name = "caustics"
+dynamic = [
+ "dependencies",
+ "version"
+]
+authors = [
+ { name="Connor Stone", email="connor.stone@mila.quebec" },
+ { name="Alexandre Adam", email="alexandre.adam@mila.quebec" },
+ { name="UW SSEC", email="ssec@uw.edu" }
+]
+description = "The lensing pipeline of the future: GPU-accelerated, automatically-differentiable, highly modular. Currently under heavy development: expect interface changes and some imprecise/untested calculations."
+readme = "README.md"
+requires-python = ">=3.9"
+license = {file = "LICENSE"}
+keywords = [
+ "caustics",
+ "lensing",
+ "astronomy",
+ "strong lensing",
+ "gravitational lensing",
+ "astrophysics",
+ "differentiable programming",
+ "pytorch"
+]
+classifiers = [
+ "Development Status :: 1 - Planning",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python :: 3"
+]
+
+[project.urls]
+Homepage = "https://mila.quebec/en/"
+Documentation = "https://caustics.readthedocs.io/en/latest/"
+Repository = "https://github.com/Ciela-Institute/caustics"
+Issues = "https://github.com/Ciela-Institute/caustics/issues"
+
+[project.optional-dependencies]
+dev = [
+ "lenstronomy==1.11.1"
+]
+
+[tool.hatch.metadata.hooks.requirements_txt]
+files = ["requirements.txt"]
+
+[tool.hatch.version]
+source = "vcs"
+
+[tool.hatch.build.hooks.vcs]
+version-file = "src/caustics/_version.py"
+
+[tool.hatch.version.raw-options]
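+# Drop the "+local" version segment; (Test)PyPI rejects local version identifiers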
+local_scheme = "no-local-version"
diff --git a/requirements.txt b/requirements.txt
index 7301ffee..85e4c715 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
+astropy>=5.2.1,<6.0.0
graphviz==0.20.1
h5py>=3.8.0
levmarq_torch==0.0.1
numpy>=1.23.5
scipy>=1.8.0
torch>=2.0.0
-astropy>=5.2.1,<6.0.0
diff --git a/setup.py b/setup.py
index 22606b40..2bfa1336 100644
--- a/setup.py
+++ b/setup.py
@@ -2,20 +2,23 @@
from setuptools import setup, find_packages
import caustics.__init__ as caustics
+
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
+
+
def read_lines(fname):
-
ret = list(open(os.path.join(os.path.dirname(__file__), fname)).readlines())
print(ret)
return ret
+
setup(
- name = "caustics",
+ name="caustics",
version=caustics.__version__,
description="A gravitational lensing simulator for the future",
long_description=read("README.md"),
- long_description_content_type='text/markdown',
+ long_description_content_type="text/markdown",
url="https://github.com/Ciela-Institute/caustics",
author=caustics.__author__,
license="MIT license",
@@ -27,7 +30,7 @@ def read_lines(fname):
"setuptools>=67.2.0",
],
},
- keywords = [
+ keywords=[
"gravitational lensing",
"astrophysics",
"differentiable programming",
@@ -36,8 +39,8 @@ def read_lines(fname):
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Science/Research",
- "License :: OSI Approved :: MIT License",
+ "License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
- ],
+ ],
)
diff --git a/caustics/__init__.py b/src/caustics/__init__.py
similarity index 75%
rename from caustics/__init__.py
rename to src/caustics/__init__.py
index b83a1af1..5bc6f757 100644
--- a/caustics/__init__.py
+++ b/src/caustics/__init__.py
@@ -1,5 +1,4 @@
-__version__ = '0.5.0'
-__author__ = "Ciela"
+from ._version import version as VERSION # noqa
from .constants import *
from .lenses import *
@@ -9,4 +8,8 @@
from .light import *
from .utils import *
from .sims import *
+
# from .demo import *
+
+__version__ = VERSION
+__author__ = "Ciela"
diff --git a/caustics/constants.py b/src/caustics/constants.py
similarity index 60%
rename from caustics/constants.py
rename to src/caustics/constants.py
index c40b5268..c95a96ba 100644
--- a/caustics/constants.py
+++ b/src/caustics/constants.py
@@ -3,12 +3,20 @@
from astropy.constants.codata2018 import G as _G_astropy
from astropy.constants.codata2018 import c as _c_astropy
-__all__ = ("rad_to_arcsec", "arcsec_to_rad", "c_km_s", "G", "G_over_c2", "c_Mpc_s", "km_to_Mpc")
+__all__ = (
+ "rad_to_arcsec",
+ "arcsec_to_rad",
+ "c_km_s",
+ "G",
+ "G_over_c2",
+ "c_Mpc_s",
+ "km_to_Mpc",
+)
-rad_to_arcsec = 180 / pi * 60 ** 2
+rad_to_arcsec = 180 / pi * 60**2
arcsec_to_rad = 1 / rad_to_arcsec
c_km_s = float(_c_astropy.to("km/s").value)
G = float(_G_astropy.to("pc * km^2 / (s^2 * solMass)").value)
-G_over_c2 = float((_G_astropy / _c_astropy ** 2).to("Mpc/solMass").value) # type: ignore
+G_over_c2 = float((_G_astropy / _c_astropy**2).to("Mpc/solMass").value) # type: ignore
c_Mpc_s = float(_c_astropy.to("Mpc/s").value)
km_to_Mpc = 3.2407792896664e-20 # TODO: use astropy
diff --git a/caustics/cosmology.py b/src/caustics/cosmology.py
similarity index 82%
rename from caustics/cosmology.py
rename to src/caustics/cosmology.py
index 502218ec..6b55112d 100644
--- a/caustics/cosmology.py
+++ b/src/caustics/cosmology.py
@@ -1,6 +1,6 @@
from abc import abstractmethod
from math import pi
-from typing import Any, Optional
+from typing import Optional
import torch
from astropy.cosmology import default_cosmology
@@ -30,7 +30,7 @@
_comoving_distance_helper_x_grid = 10 ** torch.linspace(-3, 1, 500, dtype=torch.float64)
_comoving_distance_helper_y_grid = torch.as_tensor(
_comoving_distance_helper_x_grid
- * hyp2f1(1 / 3, 1 / 2, 4 / 3, -(_comoving_distance_helper_x_grid ** 3)),
+ * hyp2f1(1 / 3, 1 / 2, 4 / 3, -(_comoving_distance_helper_x_grid**3)),
dtype=torch.float64,
)
@@ -75,7 +75,9 @@ def critical_density(self, z: Tensor, params: Optional["Packed"] = None) -> Tens
@abstractmethod
@unpack(1)
- def comoving_distance(self, z: Tensor, *args, params: Optional["Packed"] = None) -> Tensor:
+ def comoving_distance(
+ self, z: Tensor, *args, params: Optional["Packed"] = None
+ ) -> Tensor:
"""
Compute the comoving distance to redshift z.
@@ -90,7 +92,9 @@ def comoving_distance(self, z: Tensor, *args, params: Optional["Packed"] = None)
@abstractmethod
@unpack(1)
- def transverse_comoving_distance(self, z: Tensor, *args, params: Optional["Packed"] = None) -> Tensor:
+ def transverse_comoving_distance(
+ self, z: Tensor, *args, params: Optional["Packed"] = None
+ ) -> Tensor:
"""
Compute the transverse comoving distance to redshift z (Mpc).
@@ -105,7 +109,7 @@ def transverse_comoving_distance(self, z: Tensor, *args, params: Optional["Packe
@unpack(2)
def comoving_distance_z1z2(
- self, z1: Tensor, z2: Tensor, *args, params: Optional["Packed"] = None
+ self, z1: Tensor, z2: Tensor, *args, params: Optional["Packed"] = None
) -> Tensor:
"""
Compute the comoving distance between two redshifts.
@@ -122,7 +126,7 @@ def comoving_distance_z1z2(
@unpack(2)
def transverse_comoving_distance_z1z2(
- self, z1: Tensor, z2: Tensor, *args, params: Optional["Packed"] = None
+ self, z1: Tensor, z2: Tensor, *args, params: Optional["Packed"] = None
) -> Tensor:
"""
Compute the transverse comoving distance between two redshifts (Mpc).
@@ -135,10 +139,14 @@ def transverse_comoving_distance_z1z2(
Returns:
Tensor: The transverse comoving distance between each pair of redshifts in Mpc.
"""
- return self.transverse_comoving_distance(z2, params) - self.transverse_comoving_distance(z1, params)
+ return self.transverse_comoving_distance(
+ z2, params
+ ) - self.transverse_comoving_distance(z1, params)
@unpack(1)
- def angular_diameter_distance(self, z: Tensor, *args, params: Optional["Packed"] = None) -> Tensor:
+ def angular_diameter_distance(
+ self, z: Tensor, *args, params: Optional["Packed"] = None
+ ) -> Tensor:
"""
Compute the angular diameter distance to redshift z.
@@ -153,7 +161,7 @@ def angular_diameter_distance(self, z: Tensor, *args, params: Optional["Packed"]
@unpack(2)
def angular_diameter_distance_z1z2(
- self, z1: Tensor, z2: Tensor, *args, params: Optional["Packed"] = None
+ self, z1: Tensor, z2: Tensor, *args, params: Optional["Packed"] = None
) -> Tensor:
"""
Compute the angular diameter distance between two redshifts.
@@ -170,7 +178,7 @@ def angular_diameter_distance_z1z2(
@unpack(2)
def time_delay_distance(
- self, z_l: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None
+ self, z_l: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None
) -> Tensor:
"""
Compute the time delay distance between lens and source planes.
@@ -190,7 +198,7 @@ def time_delay_distance(
@unpack(2)
def critical_surface_density(
- self, z_l: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None
+ self, z_l: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None
) -> Tensor:
"""
Compute the critical surface density between lens and source planes.
@@ -242,11 +250,17 @@ def __init__(
self._comoving_distance_helper_y_grid = _comoving_distance_helper_y_grid.to(
dtype=torch.float32
)
-
- def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
+
+ def to(
+ self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None
+ ):
super().to(device, dtype)
- self._comoving_distance_helper_y_grid = self._comoving_distance_helper_y_grid.to(device, dtype)
- self._comoving_distance_helper_x_grid = self._comoving_distance_helper_x_grid.to(device, dtype)
+ self._comoving_distance_helper_y_grid = (
+ self._comoving_distance_helper_y_grid.to(device, dtype)
+ )
+ self._comoving_distance_helper_x_grid = (
+ self._comoving_distance_helper_x_grid.to(device, dtype)
+ )
def hubble_distance(self, h0):
"""
@@ -261,7 +275,15 @@ def hubble_distance(self, h0):
return c_Mpc_s / (100 * km_to_Mpc) / h0
@unpack(1)
- def critical_density(self, z: Tensor, h0, central_critical_density, Om0, *args, params: Optional["Packed"] = None) -> torch.Tensor:
+ def critical_density(
+ self,
+ z: Tensor,
+ h0,
+ central_critical_density,
+ Om0,
+ *args,
+ params: Optional["Packed"] = None,
+ ) -> torch.Tensor:
"""
Calculate the critical density at redshift z.
@@ -276,7 +298,9 @@ def critical_density(self, z: Tensor, h0, central_critical_density, Om0, *args,
return central_critical_density * (Om0 * (1 + z) ** 3 + Ode0)
@unpack(1)
- def _comoving_distance_helper(self, x: Tensor, *args, params: Optional["Packed"] = None) -> Tensor:
+ def _comoving_distance_helper(
+ self, x: Tensor, *args, params: Optional["Packed"] = None
+ ) -> Tensor:
"""
Helper method for computing comoving distances.
@@ -293,7 +317,15 @@ def _comoving_distance_helper(self, x: Tensor, *args, params: Optional["Packed"]
).reshape(x.shape)
@unpack(1)
- def comoving_distance(self, z: Tensor, h0, central_critical_density, Om0, *args, params: Optional["Packed"] = None) -> Tensor:
+ def comoving_distance(
+ self,
+ z: Tensor,
+ h0,
+ central_critical_density,
+ Om0,
+ *args,
+ params: Optional["Packed"] = None,
+ ) -> Tensor:
"""
Calculate the comoving distance to redshift z.
@@ -317,7 +349,12 @@ def comoving_distance(self, z: Tensor, h0, central_critical_density, Om0, *args,
@unpack(1)
def transverse_comoving_distance(
- self, z: Tensor, h0, central_critical_density, Om0, *args, params: Optional["Packed"] = None
+ self,
+ z: Tensor,
+ h0,
+ central_critical_density,
+ Om0,
+ *args,
+ params: Optional["Packed"] = None,
) -> Tensor:
-
return self.comoving_distance(z, params)
diff --git a/caustics/data/__init__.py b/src/caustics/data/__init__.py
similarity index 100%
rename from caustics/data/__init__.py
rename to src/caustics/data/__init__.py
diff --git a/caustics/data/hdf5dataset.py b/src/caustics/data/hdf5dataset.py
similarity index 100%
rename from caustics/data/hdf5dataset.py
rename to src/caustics/data/hdf5dataset.py
diff --git a/caustics/data/illustris_kappa.py b/src/caustics/data/illustris_kappa.py
similarity index 100%
rename from caustics/data/illustris_kappa.py
rename to src/caustics/data/illustris_kappa.py
diff --git a/caustics/data/probes.py b/src/caustics/data/probes.py
similarity index 100%
rename from caustics/data/probes.py
rename to src/caustics/data/probes.py
diff --git a/caustics/lenses/__init__.py b/src/caustics/lenses/__init__.py
similarity index 100%
rename from caustics/lenses/__init__.py
rename to src/caustics/lenses/__init__.py
diff --git a/caustics/lenses/base.py b/src/caustics/lenses/base.py
similarity index 66%
rename from caustics/lenses/base.py
rename to src/caustics/lenses/base.py
index 1a349192..768205f1 100644
--- a/caustics/lenses/base.py
+++ b/src/caustics/lenses/base.py
@@ -1,5 +1,5 @@
from abc import abstractmethod
-from typing import Any, Optional, Union
+from typing import Optional, Union
from functools import partial
import warnings
@@ -8,16 +8,18 @@
from ..constants import arcsec_to_rad, c_Mpc_s
from ..cosmology import Cosmology
-from ..parametrized import Parametrized, unpack
+from ..parametrized import Parametrized, unpack
from .utils import get_magnification
from ..utils import batch_lm
__all__ = ("ThinLens", "ThickLens")
+
class Lens(Parametrized):
"""
Base class for all lenses
"""
+
def __init__(self, cosmology: Cosmology, name: str = None):
"""
Initializes a new instance of the Lens class.
@@ -31,8 +33,16 @@ def __init__(self, cosmology: Cosmology, name: str = None):
@unpack(3)
def jacobian_lens_equation(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, method = "autograd", pixelscale = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ method="autograd",
+ pixelscale=None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the lensing equation at specified points. This equates to a (2,2) matrix at each (x,y) point.
@@ -43,13 +53,25 @@ def jacobian_lens_equation(
return self._jacobian_lens_equation_autograd(x, y, z_s, params, **kwargs)
elif method == "finitediff":
if pixelscale is None:
- raise ValueError("Finite differences lensing jacobian requires regular grid and known pixelscale. Please include the pixelscale argument")
- return self._jacobian_lens_equation_finitediff(x, y, z_s, pixelscale, params, **kwargs)
+ raise ValueError(
+ "Finite differences lensing jacobian requires regular grid and known pixelscale. Please include the pixelscale argument"
+ )
+ return self._jacobian_lens_equation_finitediff(
+ x, y, z_s, pixelscale, params, **kwargs
+ )
else:
raise ValueError("method should be one of: autograd, finitediff")
@unpack(3)
- def magnification(self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def magnification(
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> Tensor:
"""
Compute the gravitational magnification at the given coordinates.
@@ -62,11 +84,20 @@ def magnification(self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Option
Returns:
Tensor: Gravitational magnification at the given coordinates.
"""
- return get_magnification(partial(self.raytrace, params = params), x, y, z_s)
+ return get_magnification(partial(self.raytrace, params=params), x, y, z_s)
@unpack(3)
def forward_raytrace(
- self, bx: Tensor, by: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, epsilon = 1e-2, n_init = 50, fov = 5., **kwargs
+ self,
+ bx: Tensor,
+ by: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ epsilon=1e-2,
+ n_init=50,
+ fov=5.0,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Perform a forward ray-tracing operation which maps from the source plane to the image plane.
@@ -83,38 +114,42 @@ def forward_raytrace(
Returns:
tuple[Tensor, Tensor]: Ray-traced coordinates in the x and y directions.
"""
-
- bxy = torch.stack((bx, by)).repeat(n_init,1) # has shape (n_init, Dout:2)
+
+ bxy = torch.stack((bx, by)).repeat(n_init, 1) # has shape (n_init, Dout:2)
# TODO make FOV more general so that it doesn't have to be centered on (0, 0)
if fov is None:
raise ValueError("fov must be given to generate initial guesses")
# Random starting points in image plane
- guesses = torch.as_tensor(fov) * (torch.rand(n_init, 2) - 0.5) # Has shape (n_init, Din:2)
+ guesses = torch.as_tensor(fov) * (
+ torch.rand(n_init, 2) - 0.5
+ ) # Has shape (n_init, Din:2)
# Optimize guesses in image plane
x, l, c = batch_lm(
guesses,
bxy,
- lambda *a, **k: torch.stack(self.raytrace(a[0][...,0], a[0][...,1], *a[1:], **k), dim = -1),
- f_args = (z_s, params)
+ lambda *a, **k: torch.stack(
+ self.raytrace(a[0][..., 0], a[0][..., 1], *a[1:], **k), dim=-1
+ ),
+ f_args=(z_s, params),
)
# Clip points that didn't converge
- x = x[c < 1e-2*epsilon**2]
+ x = x[c < 1e-2 * epsilon**2]
# Cluster results into n-images
res = []
while len(x) > 0:
res.append(x[0])
- d = torch.linalg.norm(x - x[0], dim = -1)
+ d = torch.linalg.norm(x - x[0], dim=-1)
x = x[d > epsilon]
- res = torch.stack(res,dim = 0)
- return res[...,0], res[...,1]
+ res = torch.stack(res, dim=0)
+ return res[..., 0], res[..., 1]
+
-
class ThickLens(Lens):
"""
Base class for modeling gravitational lenses that cannot be treated using the thin lens approximation.
@@ -126,11 +161,17 @@ class ThickLens(Lens):
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
ThickLens objects do not have a reduced deflection angle since the distance D_ls is undefined
-
+
Args:
x (Tensor): Tensor of x coordinates in the lens plane.
y (Tensor): Tensor of y coordinates in the lens plane.
@@ -140,12 +181,20 @@ def reduced_deflection_angle(
Raises:
NotImplementedError
"""
- warnings.warn("ThickLens objects do not have a reduced deflection angle since they have no unique lens redshift. The distance D_{ls} is undefined in the equation $\alpha_{reduced} = \frac{D_{ls}}{D_s}\alpha_{physical}$. See `effective_reduced_deflection_angle`. Now using effective_reduced_deflection_angle, please switch functions to remove this warning")
+ warnings.warn(
+ "ThickLens objects do not have a reduced deflection angle since they have no unique lens redshift. The distance D_{ls} is undefined in the equation $\alpha_{reduced} = \frac{D_{ls}}{D_s}\alpha_{physical}$. See `effective_reduced_deflection_angle`. Now using effective_reduced_deflection_angle, please switch functions to remove this warning"
+ )
return self.effective_reduced_deflection_angle(x, y, z_s, params)
@unpack(3)
def effective_reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""ThickLens objects do not have a reduced deflection angle since the
distance D_ls is undefined. Instead we define an effective
@@ -154,7 +203,7 @@ def effective_reduced_deflection_angle(
effective reduced deflection angle, $\theta$ are the observed
angular coordinates, and $\beta$ are the angular coordinates
to the source plane.
-
+
Args:
x (Tensor): Tensor of x coordinates in the lens plane.
y (Tensor): Tensor of y coordinates in the lens plane.
@@ -167,7 +216,13 @@ def effective_reduced_deflection_angle(
@unpack(3)
def physical_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""Physical deflection angles are computed with respect to a lensing
plane. ThickLens objects have no unique definition of a lens
@@ -183,12 +238,20 @@ def physical_deflection_angle(
tuple[Tensor, Tensor]: Tuple of Tensors representing the x and y components of the deflection angle, respectively.
"""
- raise NotImplementedError("Physical deflection angles are computed with respect to a lensing plane. ThickLens objects have no unique definition of a lens plane and so cannot compute a physical_deflection_angle")
+ raise NotImplementedError(
+ "Physical deflection angles are computed with respect to a lensing plane. ThickLens objects have no unique definition of a lens plane and so cannot compute a physical_deflection_angle"
+ )
@abstractmethod
@unpack(3)
def raytrace(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""Performs ray tracing by computing the angular position on the
source plane associated with a given input observed angular
@@ -209,7 +272,13 @@ def raytrace(
@abstractmethod
@unpack(3)
def surface_density(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Computes the projected mass density at given coordinates.
@@ -228,7 +297,13 @@ def surface_density(
@abstractmethod
@unpack(3)
def time_delay(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Computes the gravitational time delay at given coordinates.
@@ -246,8 +321,15 @@ def time_delay(
@unpack(4)
def _jacobian_effective_deflection_angle_finitediff(
- self, x: Tensor, y: Tensor, z_s: Tensor, pixelscale: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ pixelscale: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the effective reduced deflection angle vector field. This equates to a (2,2) matrix at each (x,y) point.
"""
@@ -256,14 +338,20 @@ def _jacobian_effective_deflection_angle_finitediff(
# Build Jacobian
J = torch.zeros((*ax.shape, 2, 2), device=ax.device, dtype=ax.dtype)
- J[...,0,1], J[...,0,0] = torch.gradient(ax, spacing = pixelscale)
- J[...,1,1], J[...,1,0] = torch.gradient(ay, spacing = pixelscale)
+ J[..., 0, 1], J[..., 0, 0] = torch.gradient(ax, spacing=pixelscale)
+ J[..., 1, 1], J[..., 1, 0] = torch.gradient(ay, spacing=pixelscale)
return J
-
+
@unpack(3)
def _jacobian_effective_deflection_angle_autograd(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the effective reduced deflection angle vector field. This equates to a (2,2) matrix at each (x,y) point.
"""
@@ -276,16 +364,32 @@ def _jacobian_effective_deflection_angle_autograd(
# Build Jacobian
J = torch.zeros((*ax.shape, 2, 2), device=ax.device, dtype=ax.dtype)
- J[...,0,0], = torch.autograd.grad(ax, x, grad_outputs = torch.ones_like(ax), create_graph = True)
- J[...,0,1], = torch.autograd.grad(ax, y, grad_outputs = torch.ones_like(ax), create_graph = True)
- J[...,1,0], = torch.autograd.grad(ay, x, grad_outputs = torch.ones_like(ay), create_graph = True)
- J[...,1,1], = torch.autograd.grad(ay, y, grad_outputs = torch.ones_like(ay), create_graph = True)
+ (J[..., 0, 0],) = torch.autograd.grad(
+ ax, x, grad_outputs=torch.ones_like(ax), create_graph=True
+ )
+ (J[..., 0, 1],) = torch.autograd.grad(
+ ax, y, grad_outputs=torch.ones_like(ax), create_graph=True
+ )
+ (J[..., 1, 0],) = torch.autograd.grad(
+ ay, x, grad_outputs=torch.ones_like(ay), create_graph=True
+ )
+ (J[..., 1, 1],) = torch.autograd.grad(
+ ay, y, grad_outputs=torch.ones_like(ay), create_graph=True
+ )
return J.detach()
-
+
@unpack(3)
def jacobian_effective_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, method = "autograd", pixelscale = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ method="autograd",
+ pixelscale=None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the effective reduced deflection angle vector field. This equates to a (2,2) matrix at each (x,y) point.
@@ -296,52 +400,85 @@ def jacobian_effective_deflection_angle(
return self._jacobian_effective_deflection_angle_autograd(x, y, z_s, params)
elif method == "finitediff":
if pixelscale is None:
- raise ValueError("Finite differences lensing jacobian requires regular grid and known pixelscale. Please include the pixelscale argument")
- return self._jacobian_effective_deflection_angle_finitediff(x, y, z_s, pixelscale, params)
+ raise ValueError(
+                "Finite-difference lensing Jacobian requires a regular grid and a known pixelscale. Please include the pixelscale argument"
+ )
+ return self._jacobian_effective_deflection_angle_finitediff(
+ x, y, z_s, pixelscale, params
+ )
else:
raise ValueError("method should be one of: autograd, finitediff")
@unpack(4)
def _jacobian_lens_equation_finitediff(
- self, x: Tensor, y: Tensor, z_s: Tensor, pixelscale: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ pixelscale: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the lensing equation at specified points. This equates to a (2,2) matrix at each (x,y) point.
"""
# Build Jacobian
- J = self._jacobian_effective_deflection_angle_finitediff(x, y, z_s, pixelscale, params, **kwargs)
+ J = self._jacobian_effective_deflection_angle_finitediff(
+ x, y, z_s, pixelscale, params, **kwargs
+ )
return torch.eye(2) - J
-
+
@unpack(3)
def _jacobian_lens_equation_autograd(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the lensing equation at specified points. This equates to a (2,2) matrix at each (x,y) point.
"""
# Build Jacobian
- J = self._jacobian_effective_deflection_angle_autograd(x, y, z_s, params, **kwargs)
+ J = self._jacobian_effective_deflection_angle_autograd(
+ x, y, z_s, params, **kwargs
+ )
return torch.eye(2) - J.detach()
-
+
@unpack(3)
def effective_convergence_div(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
        Using the divergence of the effective reduced deflection angle we can compute the divergence component of the effective convergence field. This field produces a single-plane convergence field which reproduces as much of the deflection field as possible for a single plane. See https://arxiv.org/pdf/2006.07383.pdf and the `effective_convergence_curl` method.
"""
J = self.jacobian_effective_deflection_angle(x, y, z_s, params, **kwargs)
- return 0.5*(J[...,0,0] + J[...,1,1])
+ return 0.5 * (J[..., 0, 0] + J[..., 1, 1])
@unpack(3)
def effective_convergence_curl(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
        Use the curl of the effective reduced deflection angle vector field to compute an effective convergence which derives specifically from the curl of the deflection field. This field is purely a result of multiplane lensing and cannot occur in single-plane lensing. See https://arxiv.org/pdf/2006.07383.pdf
"""
J = self.jacobian_effective_deflection_angle(x, y, z_s, params, **kwargs)
- return 0.5 * (J[...,1,0] - J[...,0,1])
+ return 0.5 * (J[..., 1, 0] - J[..., 0, 1])
class ThinLens(Lens):
@@ -360,13 +497,25 @@ class ThinLens(Lens):
"""
- def __init__(self, cosmology: Cosmology, z_l: Optional[Union[Tensor, float]] = None, name: str = None):
- super().__init__(cosmology = cosmology, name = name)
+ def __init__(
+ self,
+ cosmology: Cosmology,
+ z_l: Optional[Union[Tensor, float]] = None,
+ name: str = None,
+ ):
+ super().__init__(cosmology=cosmology, name=name)
self.add_param("z_l", z_l)
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Computes the reduced deflection angle of the lens at given coordinates [arcsec].
@@ -382,12 +531,21 @@ def reduced_deflection_angle(
"""
d_s = self.cosmology.angular_diameter_distance(z_s, params)
d_ls = self.cosmology.angular_diameter_distance_z1z2(z_l, z_s, params)
- deflection_angle_x, deflection_angle_y = self.physical_deflection_angle(x, y, z_s, params)
+ deflection_angle_x, deflection_angle_y = self.physical_deflection_angle(
+ x, y, z_s, params
+ )
return (d_ls / d_s) * deflection_angle_x, (d_ls / d_s) * deflection_angle_y
@unpack(3)
def physical_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Computes the physical deflection angle immediately after passing through this lens's plane.
@@ -403,13 +561,21 @@ def physical_deflection_angle(
"""
d_s = self.cosmology.angular_diameter_distance(z_s, params)
d_ls = self.cosmology.angular_diameter_distance_z1z2(z_l, z_s, params)
- deflection_angle_x, deflection_angle_y = self.reduced_deflection_angle(x, y, z_s, params)
+ deflection_angle_x, deflection_angle_y = self.reduced_deflection_angle(
+ x, y, z_s, params
+ )
return (d_s / d_ls) * deflection_angle_x, (d_s / d_ls) * deflection_angle_y
@abstractmethod
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Computes the convergence of the lens at given coordinates.
@@ -428,7 +594,13 @@ def convergence(
@abstractmethod
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Computes the gravitational lensing potential at given coordinates.
@@ -445,7 +617,14 @@ def potential(
@unpack(3)
def surface_density(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Computes the surface mass density of the lens at given coordinates.
@@ -459,12 +638,20 @@ def surface_density(
Returns:
Tensor: Surface mass density at the given coordinates in solar masses per Mpc^2.
"""
- critical_surface_density = self.cosmology.critical_surface_density(z_l, z_s, params)
+ critical_surface_density = self.cosmology.critical_surface_density(
+ z_l, z_s, params
+ )
return self.convergence(x, y, z_s, params) * critical_surface_density
@unpack(3)
def raytrace(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Perform a ray-tracing operation by subtracting the deflection angles from the input coordinates.
@@ -483,7 +670,14 @@ def raytrace(
@unpack(3)
def time_delay(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
):
"""
Compute the gravitational time delay for light passing through the lens at given coordinates.
@@ -508,8 +702,15 @@ def time_delay(
@unpack(4)
def _jacobian_deflection_angle_finitediff(
- self, x: Tensor, y: Tensor, z_s: Tensor, pixelscale: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ pixelscale: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the deflection angle vector. This equates to a (2,2) matrix at each (x,y) point.
"""
@@ -518,14 +719,20 @@ def _jacobian_deflection_angle_finitediff(
# Build Jacobian
J = torch.zeros((*ax.shape, 2, 2), device=ax.device, dtype=ax.dtype)
- J[...,0,1], J[...,0,0] = torch.gradient(ax, spacing = pixelscale)
- J[...,1,1], J[...,1,0] = torch.gradient(ay, spacing = pixelscale)
+ J[..., 0, 1], J[..., 0, 0] = torch.gradient(ax, spacing=pixelscale)
+ J[..., 1, 1], J[..., 1, 0] = torch.gradient(ay, spacing=pixelscale)
return J
-
+
@unpack(3)
def _jacobian_deflection_angle_autograd(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the deflection angle vector. This equates to a (2,2) matrix at each (x,y) point.
"""
@@ -538,16 +745,32 @@ def _jacobian_deflection_angle_autograd(
# Build Jacobian
J = torch.zeros((*ax.shape, 2, 2), device=ax.device, dtype=ax.dtype)
- J[...,0,0], = torch.autograd.grad(ax, x, grad_outputs = torch.ones_like(ax), create_graph = True)
- J[...,0,1], = torch.autograd.grad(ax, y, grad_outputs = torch.ones_like(ax), create_graph = True)
- J[...,1,0], = torch.autograd.grad(ay, x, grad_outputs = torch.ones_like(ay), create_graph = True)
- J[...,1,1], = torch.autograd.grad(ay, y, grad_outputs = torch.ones_like(ay), create_graph = True)
+ (J[..., 0, 0],) = torch.autograd.grad(
+ ax, x, grad_outputs=torch.ones_like(ax), create_graph=True
+ )
+ (J[..., 0, 1],) = torch.autograd.grad(
+ ax, y, grad_outputs=torch.ones_like(ax), create_graph=True
+ )
+ (J[..., 1, 0],) = torch.autograd.grad(
+ ay, x, grad_outputs=torch.ones_like(ay), create_graph=True
+ )
+ (J[..., 1, 1],) = torch.autograd.grad(
+ ay, y, grad_outputs=torch.ones_like(ay), create_graph=True
+ )
return J.detach()
-
+
@unpack(3)
def jacobian_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, method = "autograd", pixelscale = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ method="autograd",
+ pixelscale=None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the deflection angle vector. This equates to a (2,2) matrix at each (x,y) point.
@@ -558,30 +781,48 @@ def jacobian_deflection_angle(
return self._jacobian_deflection_angle_autograd(x, y, z_s, params)
elif method == "finitediff":
if pixelscale is None:
- raise ValueError("Finite differences lensing jacobian requires regular grid and known pixelscale. Please include the pixelscale argument")
- return self._jacobian_deflection_angle_finitediff(x, y, z_s, pixelscale, params)
+ raise ValueError(
+                "Finite-difference lensing Jacobian requires a regular grid and a known pixelscale. Please include the pixelscale argument"
+ )
+ return self._jacobian_deflection_angle_finitediff(
+ x, y, z_s, pixelscale, params
+ )
else:
raise ValueError("method should be one of: autograd, finitediff")
-
+
@unpack(4)
def _jacobian_lens_equation_finitediff(
- self, x: Tensor, y: Tensor, z_s: Tensor, pixelscale: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ pixelscale: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the lensing equation at specified points. This equates to a (2,2) matrix at each (x,y) point.
"""
# Build Jacobian
- J = self._jacobian_deflection_angle_finitediff(x, y, z_s, pixelscale, params, **kwargs)
+ J = self._jacobian_deflection_angle_finitediff(
+ x, y, z_s, pixelscale, params, **kwargs
+ )
return torch.eye(2) - J
-
+
@unpack(3)
def _jacobian_lens_equation_autograd(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
- ) -> tuple[tuple[Tensor, Tensor],tuple[Tensor, Tensor]]:
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
"""
Return the jacobian of the lensing equation at specified points. This equates to a (2,2) matrix at each (x,y) point.
"""
# Build Jacobian
J = self._jacobian_deflection_angle_autograd(x, y, z_s, params, **kwargs)
return torch.eye(2) - J.detach()
-
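The autograd Jacobian pattern above is easier to follow outside the class machinery. A minimal, self-contained sketch in plain PyTorch (not the caustics API; the SIS-like deflection field, grid size, and softening are illustrative assumptions):

```python
# Sketch: build the (2,2) lensing Jacobian of a toy deflection field with
# torch.autograd.grad, then read the effective convergence off the trace,
# mirroring _jacobian_effective_deflection_angle_autograd and
# effective_convergence_div above. All numbers are illustrative.
import torch

theta_e = 1.0  # hypothetical Einstein radius [arcsec]

grid = torch.linspace(-2.0, 2.0, 32)
x, y = torch.meshgrid(grid, grid, indexing="xy")
x = x.clone().requires_grad_(True)
y = y.clone().requires_grad_(True)

r = (x**2 + y**2).sqrt() + 1e-6  # softened radius
ax, ay = theta_e * x / r, theta_e * y / r  # SIS-like deflection field

J = torch.zeros(*x.shape, 2, 2)
(J[..., 0, 0],) = torch.autograd.grad(ax, x, torch.ones_like(ax), create_graph=True)
(J[..., 0, 1],) = torch.autograd.grad(ax, y, torch.ones_like(ax), create_graph=True)
(J[..., 1, 0],) = torch.autograd.grad(ay, x, torch.ones_like(ay), create_graph=True)
(J[..., 1, 1],) = torch.autograd.grad(ay, y, torch.ones_like(ay), create_graph=True)

kappa = 0.5 * (J[..., 0, 0] + J[..., 1, 1])  # ~ theta_e / (2 r) for an SIS
```

The `grad_outputs=torch.ones_like(...)` trick works here because the field is elementwise in the grid, so the vector-Jacobian product collapses to the pointwise partial derivative.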
diff --git a/caustics/lenses/epl.py b/src/caustics/lenses/epl.py
similarity index 90%
rename from caustics/lenses/epl.py
rename to src/caustics/lenses/epl.py
index 789bd8c9..4945527e 100644
--- a/caustics/lenses/epl.py
+++ b/src/caustics/lenses/epl.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional, Union
+from typing import Optional, Union
import torch
from torch import Tensor
@@ -15,13 +15,13 @@ class EPL(ThinLens):
"""
Elliptical power law (EPL, aka singular power-law ellipsoid) profile.
- This class represents a thin gravitational lens model with an elliptical power law profile. The lensing equations are solved
+ This class represents a thin gravitational lens model with an elliptical power law profile. The lensing equations are solved
iteratively using an approach based on Tessore et al. 2015.
Attributes:
n_iter (int): Number of iterations for the iterative solver.
s (float): Softening length for the elliptical power-law profile.
-
+
Parameters:
z_l (Optional[Union[Tensor, float]]): This is the redshift of the lens. In the context of gravitational lensing, the lens is the galaxy or other mass distribution that is bending the light from a more distant source.
x0 and y0 (Optional[Union[Tensor, float]]): These are the coordinates of the lens center in the lens plane. The lens plane is the plane perpendicular to the line of sight in which the deflection of light by the lens is considered.
@@ -76,7 +76,20 @@ def __init__(
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, q, phi, b, t, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ q,
+ phi,
+ b,
+ t,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Compute the reduced deflection angles of the lens.
@@ -133,7 +146,20 @@ def _r_omega(self, z, t, q):
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, q, phi, b, t, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ q,
+ phi,
+ b,
+ t,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
):
"""
Compute the lensing potential of the lens.
@@ -154,7 +180,20 @@ def potential(
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, q, phi, b, t, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ q,
+ phi,
+ b,
+ t,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
):
"""
Compute the convergence of the lens, which describes the local density of the lens.
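For reference, the EPL convergence evaluated by `convergence` has a compact closed form in the Tessore & Metcalf (2015) convention. A hedged sketch in plain PyTorch (the convention, parameter names, and grid are assumptions of the sketch, not read from the elided method body):

```python
# Assumed EPL convergence: kappa = (2 - t) / 2 * (b / R)^t with elliptical
# radius R = sqrt(q x^2 + y^2 / q); t = 1 recovers the isothermal (SIE) slope.
import torch

def epl_convergence(x, y, q=0.7, b=1.0, t=1.0, s=1e-6):
    R = (q * x**2 + y**2 / q).sqrt() + s  # softened elliptical radius
    return (2 - t) / 2 * (b / R) ** t

grid = torch.linspace(-2.0, 2.0, 64)
x, y = torch.meshgrid(grid, grid, indexing="xy")
kappa = epl_convergence(x, y)
```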
diff --git a/caustics/lenses/external_shear.py b/src/caustics/lenses/external_shear.py
similarity index 78%
rename from caustics/lenses/external_shear.py
rename to src/caustics/lenses/external_shear.py
index 88f4cb98..b1f41450 100644
--- a/caustics/lenses/external_shear.py
+++ b/src/caustics/lenses/external_shear.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional, Union
+from typing import Optional, Union
from torch import Tensor
@@ -21,9 +21,10 @@ class ExternalShear(ThinLens):
x0, y0 (Optional[Union[Tensor, float]]): Coordinates of the shear center in the lens plane.
gamma_1, gamma_2 (Optional[Union[Tensor, float]]): Shear components.
- Note: The shear components gamma_1 and gamma_2 represent an external shear, a gravitational
- distortion that can be caused by nearby structures outside of the main lens galaxy.
+ Note: The shear components gamma_1 and gamma_2 represent an external shear, a gravitational
+ distortion that can be caused by nearby structures outside of the main lens galaxy.
"""
+
def __init__(
self,
cosmology: Cosmology,
@@ -35,7 +36,6 @@ def __init__(
s: float = 0.0,
name: str = None,
):
-
super().__init__(cosmology, z_l, name=name)
self.add_param("x0", x0)
@@ -46,7 +46,18 @@ def __init__(
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, gamma_1, gamma_2, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ gamma_1,
+ gamma_2,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Calculates the reduced deflection angle.
@@ -63,17 +74,28 @@ def reduced_deflection_angle(
x, y = translate_rotate(x, y, x0, y0)
# Meneghetti eq 3.83
# TODO, why is it not:
- #th = (x**2 + y**2).sqrt() + self.s
- #a1 = x/th + x * gamma_1 + y * gamma_2
- #a2 = y/th + x * gamma_2 - y * gamma_1
+ # th = (x**2 + y**2).sqrt() + self.s
+ # a1 = x/th + x * gamma_1 + y * gamma_2
+ # a2 = y/th + x * gamma_2 - y * gamma_1
a1 = x * gamma_1 + y * gamma_2
a2 = x * gamma_2 - y * gamma_1
-
+
return a1, a2 # I'm not sure but I think no derotation necessary
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, gamma_1, gamma_2, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ gamma_1,
+ gamma_2,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Calculates the lensing potential.
@@ -93,7 +115,18 @@ def potential(
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, gamma_1, gamma_2, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ gamma_1,
+ gamma_2,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
The convergence is undefined for an external shear.
@@ -105,7 +138,7 @@ def convergence(
params (Packed, optional): Dynamic parameter container.
Raises:
- NotImplementedError: This method is not implemented as the convergence is not defined
+ NotImplementedError: This method is not implemented as the convergence is not defined
for an external shear.
"""
raise NotImplementedError("convergence undefined for external shear")
diff --git a/caustics/lenses/mass_sheet.py b/src/caustics/lenses/mass_sheet.py
similarity index 76%
rename from caustics/lenses/mass_sheet.py
rename to src/caustics/lenses/mass_sheet.py
index c386c52a..4e8e5a58 100644
--- a/caustics/lenses/mass_sheet.py
+++ b/src/caustics/lenses/mass_sheet.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional, Union
+from typing import Optional, Union
import torch
from torch import Tensor
@@ -22,9 +22,10 @@ class MassSheet(ThinLens):
        x0, y0 (Optional[Union[Tensor, float]]): Coordinates of the sheet center in the lens plane.
        surface_density (Optional[Union[Tensor, float]]): Surface density of the sheet, expressed as a convergence (i.e. in units of the critical surface density).

    Note: a mass sheet applies a uniform convergence across the whole image plane;
    on its own it rescales magnifications without changing image shapes.
"""
+
def __init__(
self,
cosmology: Cosmology,
@@ -34,7 +35,6 @@ def __init__(
surface_density: Optional[Union[Tensor, float]] = None,
name: str = None,
):
-
super().__init__(cosmology, z_l, name=name)
self.add_param("x0", x0)
@@ -43,7 +43,17 @@ def __init__(
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, surface_density, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ surface_density,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Calculates the reduced deflection angle.
@@ -65,16 +75,34 @@ def reduced_deflection_angle(
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, surface_density, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ surface_density,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
-
# Meneghetti eq 3.81
return surface_density * 0.5 * (x**2 + y**2)
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, surface_density, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ surface_density,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
-
# Essentially by definition
return surface_density * torch.ones_like(x)
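The mass-sheet methods above are mutually consistent: the potential `surface_density * 0.5 * (x**2 + y**2)` has gradient `surface_density * (x, y)` and a Laplacian of `2 * surface_density`, matching the constant convergence. A minimal check in plain PyTorch:

```python
# Check that the mass-sheet potential (Meneghetti eq 3.81, as in potential()
# above) has the expected gradient kappa * (x, y).
import torch

kappa = 0.3  # plays the role of surface_density in the code above
x = torch.tensor(1.2, requires_grad=True)
y = torch.tensor(0.4, requires_grad=True)

psi = kappa * 0.5 * (x**2 + y**2)
ax, ay = torch.autograd.grad(psi, (x, y))

assert torch.isclose(ax, kappa * x.detach())
assert torch.isclose(ay, kappa * y.detach())
```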
diff --git a/caustics/lenses/multiplane.py b/src/caustics/lenses/multiplane.py
similarity index 83%
rename from caustics/lenses/multiplane.py
rename to src/caustics/lenses/multiplane.py
index 7c0e471f..e24ae2da 100644
--- a/caustics/lenses/multiplane.py
+++ b/src/caustics/lenses/multiplane.py
@@ -1,5 +1,5 @@
from operator import itemgetter
-from typing import Any, Optional
+from typing import Optional
import torch
from torch import Tensor
@@ -24,6 +24,7 @@ class Multiplane(ThickLens):
cosmology (Cosmology): Cosmological parameters used for calculations.
lenses (list[ThinLens]): List of thin lenses.
"""
+
def __init__(self, cosmology: Cosmology, lenses: list[ThinLens], name: str = None):
super().__init__(cosmology, name=name)
self.lenses = lenses
@@ -31,7 +32,9 @@ def __init__(self, cosmology: Cosmology, lenses: list[ThinLens], name: str = Non
self.add_parametrized(lens)
@unpack(0)
- def get_z_ls(self, *args, params: Optional["Packed"] = None, **kwargs) -> list[Tensor]:
+ def get_z_ls(
+ self, *args, params: Optional["Packed"] = None, **kwargs
+ ) -> list[Tensor]:
"""
Get the redshifts of each lens in the multiplane.
@@ -47,7 +50,13 @@ def get_z_ls(self, *args, params: Optional["Packed"] = None, **kwargs) -> list[T
@unpack(3)
def raytrace(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""Calculate the angular source positions corresponding to the
observer positions x,y. See Margarita et al. 2013 for the
@@ -68,7 +77,7 @@ def raytrace(
\vec{\theta}^{i+1} = \vec{\theta}^{i} - \alpha^i(\vec{x}^{i+1})
        Here we set as initialization :math:`\vec{\theta}^0 = \vec{\theta}` the observation angular coordinates and :math:`\vec{x}^0 = 0` the initial physical coordinates (i.e. the observation rays come from a point at the observer). The indexing of :math:`\vec{x}^i` and :math:`\vec{\theta}^i` indicates the properties at plane :math:`i`, where 0 means the observer, 1 is the first lensing plane (infinitesimally after the plane, since the deflection has been applied), and so on. Note that in the actual implementation we start at :math:`\vec{x}^1` and :math:`\vec{\theta}^0` and begin at the second step in the recursion formula.
-
+
Args:
x (Tensor): angular x-coordinates from the observer perspective.
y (Tensor): angular y-coordinates from the observer perspective.
@@ -83,10 +92,17 @@ def raytrace(
@unpack(4)
def raytrace_z1z2(
- self, x: Tensor, y: Tensor, z_start: Tensor, z_end: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_start: Tensor,
+ z_end: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
- Method to do multiplane ray tracing from arbitrary start/end redshift.
+        Method for multiplane ray tracing between arbitrary start and end redshifts.
"""
# Collect lens redshifts and ensure proper order
@@ -94,15 +110,19 @@ def raytrace_z1z2(
lens_planes = [i for i, _ in sorted(enumerate(z_ls), key=itemgetter(1))]
# Compute physical position on first lens plane
- D = self.cosmology.transverse_comoving_distance_z1z2(z_start, z_ls[lens_planes[0]], params)
+ D = self.cosmology.transverse_comoving_distance_z1z2(
+ z_start, z_ls[lens_planes[0]], params
+ )
X, Y = x * arcsec_to_rad * D, y * arcsec_to_rad * D
    # Initial angles are observation angles (negative needed because of negative in propagation term)
theta_x, theta_y = x, y
-
+
for i in lens_planes:
# Compute deflection angle at current ray positions
- D_l = self.cosmology.transverse_comoving_distance_z1z2(z_start, z_ls[i], params)
+ D_l = self.cosmology.transverse_comoving_distance_z1z2(
+ z_start, z_ls[i], params
+ )
alpha_x, alpha_y = self.lenses[i].physical_deflection_angle(
X * rad_to_arcsec / D_l,
Y * rad_to_arcsec / D_l,
@@ -115,8 +135,10 @@ def raytrace_z1z2(
theta_y = theta_y - alpha_y
        # Propagate rays to next plane (basically eq 18)
- z_next = z_ls[i+1] if i != lens_planes[-1] else z_end
- D = self.cosmology.transverse_comoving_distance_z1z2(z_ls[i], z_next, params)
+ z_next = z_ls[i + 1] if i != lens_planes[-1] else z_end
+ D = self.cosmology.transverse_comoving_distance_z1z2(
+ z_ls[i], z_next, params
+ )
X = X + D * theta_x * arcsec_to_rad
Y = Y + D * theta_y * arcsec_to_rad
@@ -126,14 +148,26 @@ def raytrace_z1z2(
@unpack(3)
def effective_reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
bx, by = self.raytrace(x, y, z_s, params)
return x - bx, y - by
@unpack(3)
def surface_density(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Calculate the projected mass density.
@@ -155,7 +189,13 @@ def surface_density(
@unpack(3)
def time_delay(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the time delay of light caused by the lensing.
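Stripped of the cosmology calls, the recursion in `raytrace_z1z2` alternates propagation and deflection. A toy sketch in plain Python (all distances and deflections are hypothetical numbers; the flat-cosmology angle recovery at the end is an assumption of the sketch):

```python
arcsec_to_rad = 4.84813681e-6  # matches the constant caustics imports

# Hypothetical comoving distances [Mpc] for each leg of the path
# (observer -> plane 1 -> plane 2 -> plane 3 -> source)
D = [1000.0, 800.0, 1200.0, 900.0]
# Hypothetical physical deflections [arcsec] picked up at the three planes
alphas = [(0.3, 0.0), (0.1, -0.2), (0.0, 0.05)]

theta_x, theta_y = 1.0, 0.5  # observed angles [arcsec]
X = Y = 0.0                  # comoving transverse positions at the observer

for (ax, ay), d in zip(alphas, D):
    X += d * theta_x * arcsec_to_rad   # propagate to the next plane (eq 18)
    Y += d * theta_y * arcsec_to_rad
    theta_x, theta_y = theta_x - ax, theta_y - ay  # deflect at that plane

X += D[-1] * theta_x * arcsec_to_rad   # final leg to the source plane
Y += D[-1] * theta_y * arcsec_to_rad

# For a flat cosmology the source angle is position over total comoving distance
beta_x = X / (sum(D) * arcsec_to_rad)  # [arcsec]
```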
diff --git a/caustics/lenses/nfw.py b/src/caustics/lenses/nfw.py
similarity index 83%
rename from caustics/lenses/nfw.py
rename to src/caustics/lenses/nfw.py
index 05357159..2a1d0297 100644
--- a/caustics/lenses/nfw.py
+++ b/src/caustics/lenses/nfw.py
@@ -1,5 +1,5 @@
from math import pi
-from typing import Any, Optional, Union
+from typing import Optional, Union
import torch
from torch import Tensor
@@ -14,21 +14,22 @@
__all__ = ("NFW",)
+
class NFW(ThinLens):
"""
NFW lens class. This class models a lens using the Navarro-Frenk-White (NFW) profile.
- The NFW profile is a spatial density profile of dark matter halo that arises in
+    The NFW profile is a spatial density profile of a dark matter halo that arises in
cosmological simulations.
Attributes:
z_l (Optional[Tensor]): Redshift of the lens. Default is None.
- x0 (Optional[Tensor]): x-coordinate of the lens center in the lens plane.
+ x0 (Optional[Tensor]): x-coordinate of the lens center in the lens plane.
Default is None.
- y0 (Optional[Tensor]): y-coordinate of the lens center in the lens plane.
+ y0 (Optional[Tensor]): y-coordinate of the lens center in the lens plane.
Default is None.
m (Optional[Tensor]): Mass of the lens. Default is None.
c (Optional[Tensor]): Concentration parameter of the lens. Default is None.
- s (float): Softening parameter to avoid singularities at the center of the lens.
+ s (float): Softening parameter to avoid singularities at the center of the lens.
Default is 0.0.
        use_case (str): Due to an idiosyncratic behaviour of PyTorch, the NFW/TNFW profile
            specifically can't be both batchable and differentiable. You may select which version
@@ -46,6 +47,7 @@ class NFW(ThinLens):
convergence: Computes the convergence (dimensionless surface mass density).
potential: Computes the lensing potential.
"""
+
def __init__(
self,
cosmology: Cosmology,
@@ -55,7 +57,7 @@ def __init__(
m: Optional[Union[Tensor, float]] = None,
c: Optional[Union[Tensor, float]] = None,
s: float = 0.0,
- use_case = "batchable",
+ use_case="batchable",
name: str = None,
):
"""
@@ -63,16 +65,16 @@ def __init__(
Args:
name (str): Name of the lens instance.
- cosmology (Cosmology): An instance of the Cosmology class which contains
+ cosmology (Cosmology): An instance of the Cosmology class which contains
information about the cosmological model and parameters.
z_l (Optional[Union[Tensor, float]]): Redshift of the lens. Default is None.
- x0 (Optional[Union[Tensor, float]]): x-coordinate of the lens center in the lens plane.
+ x0 (Optional[Union[Tensor, float]]): x-coordinate of the lens center in the lens plane.
Default is None.
- y0 (Optional[Union[Tensor, float]]): y-coordinate of the lens center in the lens plane.
+ y0 (Optional[Union[Tensor, float]]): y-coordinate of the lens center in the lens plane.
Default is None.
m (Optional[Union[Tensor, float]]): Mass of the lens. Default is None.
c (Optional[Union[Tensor, float]]): Concentration parameter of the lens. Default is None.
- s (float): Softening parameter to avoid singularities at the center of the lens.
+ s (float): Softening parameter to avoid singularities at the center of the lens.
Default is 0.0.
"""
super().__init__(cosmology, z_l, name=name)
@@ -94,7 +96,9 @@ def __init__(
raise ValueError("use case should be one of: batchable, differentiable")
@unpack(0)
- def get_scale_radius(self, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def get_scale_radius(
+ self, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs
+ ) -> Tensor:
"""
Calculate the scale radius of the lens.
@@ -112,7 +116,9 @@ def get_scale_radius(self, z_l, x0, y0, m, c, *args, params: Optional["Packed"]
return 1 / c * r_delta
@unpack(0)
- def get_scale_density(self, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def get_scale_density(
+ self, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs
+ ) -> Tensor:
"""
Calculate the scale density of the lens.
@@ -133,7 +139,9 @@ def get_scale_density(self, z_l, x0, y0, m, c, *args, params: Optional["Packed"]
)
@unpack(1)
- def get_convergence_s(self, z_s, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def get_convergence_s(
+ self, z_s, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs
+ ) -> Tensor:
"""
Calculate the dimensionless surface mass density of the lens.
@@ -147,8 +155,14 @@ def get_convergence_s(self, z_s, z_l, x0, y0, m, c, *args, params: Optional["Pac
Returns:
Tensor: The dimensionless surface mass density of the lens.
"""
- critical_surface_density = self.cosmology.critical_surface_density(z_l, z_s, params)
- return self.get_scale_density(params) * self.get_scale_radius(params) / critical_surface_density
+ critical_surface_density = self.cosmology.critical_surface_density(
+ z_l, z_s, params
+ )
+ return (
+ self.get_scale_density(params)
+ * self.get_scale_radius(params)
+ / critical_surface_density
+ )
@staticmethod
def _f_differentiable(x: Tensor) -> Tensor:
@@ -163,9 +177,20 @@ def _f_differentiable(x: Tensor) -> Tensor:
"""
# TODO: generalize beyond torch, or patch Tensor
f = torch.zeros_like(x)
- f[x > 1] = 1 - 2 / (x[x > 1]**2 - 1).sqrt() * ((x[x > 1] - 1) / (x[x > 1] + 1)).sqrt().arctan()
- f[x < 1] = 1 - 2 / (1 - x[x < 1]**2).sqrt() * ((1 - x[x < 1]) / (1 + x[x < 1])).sqrt().arctanh()
+ f[x > 1] = (
+ 1
+ - 2
+ / (x[x > 1] ** 2 - 1).sqrt()
+ * ((x[x > 1] - 1) / (x[x > 1] + 1)).sqrt().arctan()
+ )
+ f[x < 1] = (
+ 1
+ - 2
+ / (1 - x[x < 1] ** 2).sqrt()
+ * ((1 - x[x < 1]) / (1 + x[x < 1])).sqrt().arctanh()
+ )
return f
+
@staticmethod
def _f_batchable(x: Tensor) -> Tensor:
"""
@@ -184,8 +209,8 @@ def _f_batchable(x: Tensor) -> Tensor:
torch.where(
x < 1,
1 - 2 / (1 - x**2).sqrt() * ((1 - x) / (1 + x)).sqrt().arctanh(),
- torch.zeros_like(x), # x == 1
- )
+ torch.zeros_like(x), # x == 1
+ ),
)
@staticmethod
@@ -205,6 +230,7 @@ def _g_differentiable(x: Tensor) -> Tensor:
term_2[x > 1] = (1 / x[x > 1]).arccos() ** 2
term_2[x < 1] = -(1 / x[x < 1]).arccosh() ** 2
return term_1 + term_2
+
@staticmethod
def _g_batchable(x: Tensor) -> Tensor:
"""
@@ -224,8 +250,8 @@ def _g_batchable(x: Tensor) -> Tensor:
torch.where(
x < 1,
-(1 / x).arccosh() ** 2,
- torch.zeros_like(x), # x == 1
- )
+ torch.zeros_like(x), # x == 1
+ ),
)
return term_1 + term_2
@@ -242,9 +268,10 @@ def _h_differentiable(x: Tensor) -> Tensor:
"""
term_1 = (x / 2).log()
term_2 = torch.ones_like(x)
- term_2[x > 1] = (1 / x[x > 1]).arccos() * 1 / (x[x > 1]**2 - 1).sqrt()
- term_2[x < 1] = (1 / x[x < 1]).arccosh() * 1 / (1 - x[x < 1]**2).sqrt()
+ term_2[x > 1] = (1 / x[x > 1]).arccos() * 1 / (x[x > 1] ** 2 - 1).sqrt()
+ term_2[x < 1] = (1 / x[x < 1]).arccosh() * 1 / (1 - x[x < 1] ** 2).sqrt()
return term_1 + term_2
+
@staticmethod
def _h_batchable(x: Tensor) -> Tensor:
"""
@@ -261,17 +288,25 @@ def _h_batchable(x: Tensor) -> Tensor:
x > 1,
(1 / x).arccos() * 1 / (x**2 - 1).sqrt(),
torch.where(
- x < 1,
- (1 / x).arccosh() * 1 / (1 - x**2).sqrt(),
- torch.ones_like(x)
- )
+ x < 1, (1 / x).arccosh() * 1 / (1 - x**2).sqrt(), torch.ones_like(x)
+ ),
)
return term_1 + term_2
-
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ m,
+ c,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Compute the reduced deflection angle.
@@ -311,7 +346,18 @@ def reduced_deflection_angle(
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ m,
+ c,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the convergence (dimensionless surface mass density).
@@ -336,7 +382,18 @@ def convergence(
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, m, c, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ m,
+ c,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the lensing potential.
@@ -357,4 +414,10 @@ def potential(
xi = d_l * th * arcsec_to_rad
r = xi / scale_radius # xi / xi_0
convergence_s = self.get_convergence_s(z_s, params)
- return 2 * convergence_s * self._g(r) * scale_radius**2 / (d_l**2 * arcsec_to_rad**2)
+ return (
+ 2
+ * convergence_s
+ * self._g(r)
+ * scale_radius**2
+ / (d_l**2 * arcsec_to_rad**2)
+ )
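The `batchable`/`differentiable` split in NFW (and TNFW) comes from a genuine PyTorch pitfall: `torch.where` evaluates both branches, so NaNs from the dead branch leak into gradients, while masked assignment only computes the live branch but relies on data-dependent indexing that does not batch. A minimal demonstration:

```python
import torch

x = torch.tensor([0.5], requires_grad=True)
# "Batchable" style: torch.where evaluates BOTH branches, and the dead
# branch's sqrt of a negative number poisons the gradient.
f = torch.where(x > 1, (x**2 - 1).sqrt(), (1 - x**2).sqrt())
f.sum().backward()
print(x.grad)  # tensor([nan])

x = torch.tensor([0.5], requires_grad=True)
# "Differentiable" style: masked assignment only ever computes the live
# branch, so the gradient stays finite (but the masking breaks batching).
f = torch.zeros_like(x)
f[x < 1] = (1 - x[x < 1] ** 2).sqrt()
f[x > 1] = (x[x > 1] ** 2 - 1).sqrt()
f.sum().backward()
print(x.grad)  # tensor([-0.5774])
```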
diff --git a/caustics/lenses/pixelated_convergence.py b/src/caustics/lenses/pixelated_convergence.py
similarity index 81%
rename from caustics/lenses/pixelated_convergence.py
rename to src/caustics/lenses/pixelated_convergence.py
index 320cacea..84477eb1 100644
--- a/caustics/lenses/pixelated_convergence.py
+++ b/src/caustics/lenses/pixelated_convergence.py
@@ -1,5 +1,5 @@
from math import pi
-from typing import Any, Optional
+from typing import Optional
import torch
import torch.nn.functional as F
@@ -13,6 +13,7 @@
__all__ = ("PixelatedConvergence",)
+
class PixelatedConvergence(ThinLens):
def __init__(
self,
@@ -26,7 +27,7 @@ def __init__(
shape: Optional[tuple[int, ...]] = None,
convolution_mode: str = "fft",
use_next_fast_len: bool = True,
- padding = "zero",
+ padding="zero",
name: str = None,
):
"""Strong lensing with user provided kappa map
@@ -59,7 +60,7 @@ def __init__(
or "tile".
"""
-
+
super().__init__(cosmology, z_l, name=name)
if convergence_map is not None and convergence_map.ndim != 2:
@@ -67,9 +68,7 @@ def __init__(
f"convergence_map must be 2D. Received a {convergence_map.ndim}D tensor)"
)
elif shape is not None and len(shape) != 2:
- raise ValueError(
- f"shape must specify a 2D tensor. Received shape={shape}"
- )
+ raise ValueError(f"shape must specify a 2D tensor. Received shape={shape}")
self.add_param("x0", x0)
self.add_param("y0", y0)
@@ -112,7 +111,7 @@ def to(
Args:
device (Optional[torch.device]): The target device to move the tensors to.
dtype (Optional[torch.dtype]): The target data type to cast the tensors to.
- """
+ """
super().to(device, dtype)
self.potential_kernel = self.potential_kernel.to(device=device, dtype=dtype)
self.ax_kernel = self.ax_kernel.to(device=device, dtype=dtype)
@@ -138,16 +137,18 @@ def _fft2_padded(self, x: Tensor) -> Tensor:
if self.use_next_fast_len:
pad = next_fast_len(pad)
self._s = (pad, pad)
-
+
if self.padding == "zero":
pass
elif self.padding in ["reflect", "circular"]:
- x = F.pad(x[None,None], (0, self.n_pix-1, 0, self.n_pix-1), mode = self.padding).squeeze()
+ x = F.pad(
+ x[None, None], (0, self.n_pix - 1, 0, self.n_pix - 1), mode=self.padding
+ ).squeeze()
elif self.padding == "tile":
- x = torch.tile(x, (2,2))
+ x = torch.tile(x, (2, 2))
return torch.fft.rfft2(x, self._s)
-
+
def _unpad_fft(self, x: Tensor) -> Tensor:
"""
Remove padding from the result of a 2D FFT.
@@ -158,7 +159,9 @@ def _unpad_fft(self, x: Tensor) -> Tensor:
Returns:
Tensor: The input tensor without padding.
"""
- return torch.roll(x, (-self._s[0]//2,-self._s[1]//2), dims = (-2,-1))[..., :self.n_pix, :self.n_pix]
+ return torch.roll(x, (-self._s[0] // 2, -self._s[1] // 2), dims=(-2, -1))[
+ ..., : self.n_pix, : self.n_pix
+ ]
def _unpad_conv2d(self, x: Tensor) -> Tensor:
"""
@@ -170,7 +173,7 @@ def _unpad_conv2d(self, x: Tensor) -> Tensor:
Returns:
Tensor: The input tensor without padding.
"""
- return x # torch.roll(x, (-self.padding_range * self.ax_kernel.shape[0]//4,-self.padding_range * self.ax_kernel.shape[1]//4), dims = (-2,-1))[..., :self.n_pix, :self.n_pix] #[..., 1:, 1:]
+ return x # torch.roll(x, (-self.padding_range * self.ax_kernel.shape[0]//4,-self.padding_range * self.ax_kernel.shape[1]//4), dims = (-2,-1))[..., :self.n_pix, :self.n_pix] #[..., 1:, 1:]
@property
def convolution_mode(self):
@@ -207,7 +210,17 @@ def convolution_mode(self, convolution_mode: str):
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, convergence_map, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ convergence_map,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Compute the deflection angles at the specified positions using the given convergence map.
@@ -222,9 +235,14 @@ def reduced_deflection_angle(
tuple[Tensor, Tensor]: The x and y components of the deflection angles at the specified positions.
"""
if self.convolution_mode == "fft":
- deflection_angle_x_map, deflection_angle_y_map = self._deflection_angle_fft(convergence_map)
+ deflection_angle_x_map, deflection_angle_y_map = self._deflection_angle_fft(
+ convergence_map
+ )
else:
- deflection_angle_x_map, deflection_angle_y_map = self._deflection_angle_conv2d(convergence_map)
+ (
+ deflection_angle_x_map,
+ deflection_angle_y_map,
+ ) = self._deflection_angle_conv2d(convergence_map)
# Scale is distance from center of image to center of pixel on the edge
scale = self.fov / 2
deflection_angle_x = interp2d(
@@ -246,15 +264,17 @@ def _deflection_angle_fft(self, convergence_map: Tensor) -> tuple[Tensor, Tensor
tuple[Tensor, Tensor]: The x and y components of the deflection angles.
"""
convergence_tilde = self._fft2_padded(convergence_map)
- deflection_angle_x = torch.fft.irfft2(convergence_tilde * self.ax_kernel_tilde, self._s).real * (
- self.pixelscale**2 / pi
- )
- deflection_angle_y = torch.fft.irfft2(convergence_tilde * self.ay_kernel_tilde, self._s).real * (
- self.pixelscale**2 / pi
- )
+ deflection_angle_x = torch.fft.irfft2(
+ convergence_tilde * self.ax_kernel_tilde, self._s
+ ).real * (self.pixelscale**2 / pi)
+ deflection_angle_y = torch.fft.irfft2(
+ convergence_tilde * self.ay_kernel_tilde, self._s
+ ).real * (self.pixelscale**2 / pi)
return self._unpad_fft(deflection_angle_x), self._unpad_fft(deflection_angle_y)
- def _deflection_angle_conv2d(self, convergence_map: Tensor) -> tuple[Tensor, Tensor]:
+ def _deflection_angle_conv2d(
+ self, convergence_map: Tensor
+ ) -> tuple[Tensor, Tensor]:
"""
Compute the deflection angles using the 2D convolution method.
@@ -266,20 +286,34 @@ def _deflection_angle_conv2d(self, convergence_map: Tensor) -> tuple[Tensor, Ten
"""
# Use convergence_map as kernel since the kernel is twice as large. Flip since
# we actually want the cross-correlation.
-
+
pad = 2 * self.n_pix
- convergence_map_flipped = convergence_map.flip((-1, -2))[None, None] # F.pad(, ((pad - self.n_pix)//2, (pad - self.n_pix)//2, (pad - self.n_pix)//2, (pad - self.n_pix)//2), mode = self.padding_mode)
- deflection_angle_x = F.conv2d(self.ax_kernel[None, None], convergence_map_flipped, padding = "same").squeeze() * (
- self.pixelscale**2 / pi
+ convergence_map_flipped = convergence_map.flip((-1, -2))[
+ None, None
+ ] # F.pad(, ((pad - self.n_pix)//2, (pad - self.n_pix)//2, (pad - self.n_pix)//2, (pad - self.n_pix)//2), mode = self.padding_mode)
+ deflection_angle_x = F.conv2d(
+ self.ax_kernel[None, None], convergence_map_flipped, padding="same"
+ ).squeeze() * (self.pixelscale**2 / pi)
+ deflection_angle_y = F.conv2d(
+ self.ay_kernel[None, None], convergence_map_flipped, padding="same"
+ ).squeeze() * (self.pixelscale**2 / pi)
+ return self._unpad_conv2d(deflection_angle_x), self._unpad_conv2d(
+ deflection_angle_y
)
- deflection_angle_y = F.conv2d(self.ay_kernel[None, None], convergence_map_flipped, padding = "same").squeeze() * (
- self.pixelscale**2 / pi
- )
- return self._unpad_conv2d(deflection_angle_x), self._unpad_conv2d(deflection_angle_y)
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, convergence_map, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ convergence_map,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the lensing potential at the specified positions using the given convergence map.
@@ -307,56 +341,68 @@ def potential(
def _potential_fft(self, convergence_map: Tensor) -> Tensor:
"""
Compute the lensing potential using the Fast Fourier Transform (FFT) method.
-
+
Args:
convergence_map (Tensor): The 2D tensor representing the convergence map.
-
+
Returns:
Tensor: The lensing potential.
"""
convergence_tilde = self._fft2_padded(convergence_map)
- potential = torch.fft.irfft2(convergence_tilde * self.potential_kernel_tilde, self._s) * (
- self.pixelscale**2 / pi
- )
+ potential = torch.fft.irfft2(
+ convergence_tilde * self.potential_kernel_tilde, self._s
+ ) * (self.pixelscale**2 / pi)
return self._unpad_fft(potential)
def _potential_conv2d(self, convergence_map: Tensor) -> Tensor:
"""
Compute the lensing potential using the 2D convolution method.
-
+
Args:
convergence_map (Tensor): The 2D tensor representing the convergence map.
-
+
Returns:
Tensor: The lensing potential.
"""
# Use convergence_map as kernel since the kernel is twice as large. Flip since
# we actually want the cross-correlation.
convergence_map_flipped = convergence_map.flip((-1, -2))[None, None]
- potential = F.conv2d(self.potential_kernel[None, None], convergence_map_flipped, padding = "same").squeeze() * (
- self.pixelscale**2 / pi
- )
+ potential = F.conv2d(
+ self.potential_kernel[None, None], convergence_map_flipped, padding="same"
+ ).squeeze() * (self.pixelscale**2 / pi)
return self._unpad_conv2d(potential)
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, convergence_map, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ convergence_map,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
        Compute the convergence at the specified positions by interpolating the stored convergence map.
-
+
Args:
x (Tensor): The x-coordinates of the positions to compute the convergence for.
y (Tensor): The y-coordinates of the positions to compute the convergence for.
z_s (Tensor): The source redshift.
params (Packed, optional): A dictionary containing additional parameters.
-
+
Returns:
Tensor: The convergence at the specified positions.
-
+
"""
return interp2d(
- convergence_map, (x - x0).view(-1) / self.fov*2, (y - y0).view(-1) / self.fov*2
+ convergence_map,
+ (x - x0).view(-1) / self.fov * 2,
+ (y - y0).view(-1) / self.fov * 2,
).reshape(x.shape)
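The FFT path above computes the deflection as a convolution of the convergence map with the kernel theta / |theta|^2, scaled by `pixelscale**2 / pi`. A self-contained sketch of the kernel construction and padded FFT convolution (plain PyTorch; the map size, pixel scale, and point-mass test map are illustrative, and the roll-and-crop unpadding of `_unpad_fft` is left out):

```python
import torch
from math import pi

n, pixelscale = 64, 0.05  # hypothetical map size [pix] and scale [arcsec/pix]

# Kernel grid twice the map size, so the zero-padded convolution covers every
# pair of map pixels; the x-kernel is theta_x / |theta|^2.
grid = torch.arange(-n, n, dtype=torch.float64) * pixelscale
gy, gx = torch.meshgrid(grid, grid, indexing="ij")
r2 = gx**2 + gy**2
r2[n, n] = 1.0                  # dodge the central singularity (kernel -> 0)
ax_kernel = gx / r2

kappa = torch.zeros(n, n, dtype=torch.float64)
kappa[n // 2, n // 2] = 1.0     # a single "point mass" pixel

s = (3 * n, 3 * n)              # padded FFT size, as in _fft2_padded
ax = torch.fft.irfft2(
    torch.fft.rfft2(kappa, s) * torch.fft.rfft2(ax_kernel, s), s
) * (pixelscale**2 / pi)
# caustics then rolls and crops this back to the n x n map (_unpad_fft) and
# bilinearly interpolates it at the requested positions (interp2d).
```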
diff --git a/caustics/lenses/point.py b/src/caustics/lenses/point.py
similarity index 85%
rename from caustics/lenses/point.py
rename to src/caustics/lenses/point.py
index 41967021..a0efca85 100644
--- a/caustics/lenses/point.py
+++ b/src/caustics/lenses/point.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional, Union
+from typing import Optional, Union
import torch
from torch import Tensor
@@ -10,6 +10,7 @@
__all__ = ("Point",)
+
class Point(ThinLens):
"""
Class representing a point mass lens in strong gravitational lensing.
@@ -23,6 +24,7 @@ class Point(ThinLens):
th_ein (Optional[Union[Tensor, float]]): Einstein radius of the lens.
s (float): Softening parameter to prevent numerical instabilities.
"""
+
def __init__(
self,
cosmology: Cosmology,
@@ -54,7 +56,17 @@ def __init__(
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, th_ein, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ th_ein,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Compute the deflection angles.
@@ -76,7 +88,17 @@ def reduced_deflection_angle(
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, th_ein, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ th_ein,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the lensing potential.
@@ -96,7 +118,17 @@ def potential(
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, th_ein, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ th_ein,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the convergence (dimensionless surface mass density).
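A point mass obeys psi = th_ein**2 * ln|theta| and alpha = th_ein**2 * theta / |theta|^2. A quick autograd check in plain PyTorch (these standard relations are assumed rather than read from the elided method bodies):

```python
import torch

th_ein = 1.5  # illustrative Einstein radius [arcsec]
x = torch.tensor(0.8, requires_grad=True)
y = torch.tensor(0.6, requires_grad=True)

psi = th_ein**2 * (x**2 + y**2).sqrt().log()  # assumed point-mass potential
ax, ay = torch.autograd.grad(psi, (x, y))

r2 = (x**2 + y**2).detach()
assert torch.isclose(ax, th_ein**2 * x.detach() / r2)
assert torch.isclose(ay, th_ein**2 * y.detach() / r2)
```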
diff --git a/caustics/lenses/pseudo_jaffe.py b/src/caustics/lenses/pseudo_jaffe.py
similarity index 70%
rename from caustics/lenses/pseudo_jaffe.py
rename to src/caustics/lenses/pseudo_jaffe.py
index d1490537..cca8ab09 100644
--- a/caustics/lenses/pseudo_jaffe.py
+++ b/src/caustics/lenses/pseudo_jaffe.py
@@ -1,11 +1,11 @@
from math import pi
-from typing import Any, Optional, Union
+from typing import Optional, Union
import torch
from torch import Tensor
from ..cosmology import Cosmology
-from ..constants import arcsec_to_rad, rad_to_arcsec
+from ..constants import arcsec_to_rad
from ..utils import translate_rotate
from .base import ThinLens
from ..parametrized import unpack
@@ -15,8 +15,8 @@
class PseudoJaffe(ThinLens):
"""
- Class representing a Pseudo Jaffe lens in strong gravitational lensing,
- based on `Eliasdottir et al 2007 `_ and
+ Class representing a Pseudo Jaffe lens in strong gravitational lensing,
+ based on `Eliasdottir et al 2007 `_ and
the `lenstronomy` source code.
Attributes:
@@ -67,13 +67,45 @@ def __init__(
self.s = s
@unpack(1)
- def get_convergence_0(self, z_s, z_l, x0, y0, mass, core_radius, scale_radius, *args, params: Optional["Packed"] = None, **kwargs):
+ def get_convergence_0(
+ self,
+ z_s,
+ z_l,
+ x0,
+ y0,
+ mass,
+ core_radius,
+ scale_radius,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ):
d_l = self.cosmology.angular_diameter_distance(z_l, params)
sigma_crit = self.cosmology.critical_surface_density(z_l, z_s, params)
- return mass / (2 * torch.pi * sigma_crit * core_radius * scale_radius * (d_l * arcsec_to_rad)**2)
-
+ return mass / (
+ 2
+ * torch.pi
+ * sigma_crit
+ * core_radius
+ * scale_radius
+ * (d_l * arcsec_to_rad) ** 2
+ )
+
@unpack(2)
- def mass_enclosed_2d(self, theta, z_s, z_l, x0, y0, mass, core_radius, scale_radius, *args, params: Optional["Packed"] = None, **kwargs):
+ def mass_enclosed_2d(
+ self,
+ theta,
+ z_s,
+ z_l,
+ x0,
+ y0,
+ mass,
+ core_radius,
+ scale_radius,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ):
"""
Calculate the mass enclosed within a two-dimensional radius.
@@ -87,14 +119,16 @@ def mass_enclosed_2d(self, theta, z_s, z_l, x0, y0, mass, core_radius, scale_rad
"""
theta = theta + self.s
d_l = self.cosmology.angular_diameter_distance(z_l, params)
- surface_density_0 = self.get_convergence_0(z_s, params) * self.cosmology.critical_surface_density(z_l, z_s, params)
+ surface_density_0 = self.get_convergence_0(
+ z_s, params
+ ) * self.cosmology.critical_surface_density(z_l, z_s, params)
return (
2
* pi
* surface_density_0
* core_radius
* scale_radius
- * (d_l * arcsec_to_rad)**2
+ * (d_l * arcsec_to_rad) ** 2
/ (scale_radius - core_radius)
* (
(core_radius**2 + theta**2).sqrt()
@@ -138,74 +172,130 @@ def central_convergence(
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, mass, core_radius, scale_radius, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ mass,
+ core_radius,
+ scale_radius,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
- """ Calculate the deflection angle.
+ """Calculate the deflection angle.
Args:
x (Tensor): x-coordinate of the lens.
y (Tensor): y-coordinate of the lens.
z_s (Tensor): Source redshift.
params (Packed, optional): Dynamic parameter container.
-
+
Returns:
Tuple[Tensor, Tensor]: The deflection angle in the x and y directions.
"""
x, y = translate_rotate(x, y, x0, y0)
R = (x**2 + y**2).sqrt() + self.s
- f = R / core_radius / (1 + (1 + (R / core_radius) ** 2).sqrt()) - R / scale_radius / (
- 1 + (1 + (R / scale_radius) ** 2).sqrt()
+ f = R / core_radius / (
+ 1 + (1 + (R / core_radius) ** 2).sqrt()
+ ) - R / scale_radius / (1 + (1 + (R / scale_radius) ** 2).sqrt())
+ alpha = (
+ 2
+ * self.get_convergence_0(z_s, params)
+ * core_radius
+ * scale_radius
+ / (scale_radius - core_radius)
+ * f
)
- alpha = 2 * self.get_convergence_0(z_s, params) * core_radius * scale_radius / (scale_radius - core_radius) * f
ax = alpha * x / R
ay = alpha * y / R
return ax, ay
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, mass, core_radius, scale_radius, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ mass,
+ core_radius,
+ scale_radius,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the lensing potential. This calculation is based on equation A18.
-
+
Args:
x (Tensor): x-coordinate of the lens.
y (Tensor): y-coordinate of the lens.
z_s (Tensor): Source redshift.
params (Packed, optional): Dynamic parameter container.
-
+
Returns:
Tensor: The lensing potential.
"""
x, y = translate_rotate(x, y, x0, y0)
R_squared = x**2 + y**2 + self.s
- coeff = -2 * self.get_convergence_0(z_s, params) * core_radius * scale_radius / (scale_radius - core_radius)
+ coeff = (
+ -2
+ * self.get_convergence_0(z_s, params)
+ * core_radius
+ * scale_radius
+ / (scale_radius - core_radius)
+ )
return coeff * (
(scale_radius**2 + R_squared).sqrt()
- (core_radius**2 + R_squared).sqrt()
+ core_radius * (core_radius + (core_radius**2 + R_squared).sqrt()).log()
- - scale_radius * (scale_radius + (scale_radius**2 + R_squared).sqrt()).log()
+ - scale_radius
+ * (scale_radius + (scale_radius**2 + R_squared).sqrt()).log()
)
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, mass, core_radius, scale_radius, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ mass,
+ core_radius,
+ scale_radius,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Calculate the projected mass density, based on equation A6.
-
+
Args:
x (Tensor): x-coordinate of the lens.
y (Tensor): y-coordinate of the lens.
z_s (Tensor): Source redshift.
params (Packed, optional): Dynamic parameter container.
-
+
Returns:
Tensor: The projected mass density.
"""
x, y = translate_rotate(x, y, x0, y0)
R_squared = x**2 + y**2 + self.s
- coeff = self.get_convergence_0(z_s, params) * core_radius * scale_radius / (scale_radius - core_radius)
+ coeff = (
+ self.get_convergence_0(z_s, params)
+ * core_radius
+ * scale_radius
+ / (scale_radius - core_radius)
+ )
return coeff * (
- 1 / (core_radius**2 + R_squared).sqrt() - 1 / (scale_radius**2 + R_squared).sqrt()
+ 1 / (core_radius**2 + R_squared).sqrt()
+ - 1 / (scale_radius**2 + R_squared).sqrt()
)
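The potential (eq A18) and deflection above are mutually consistent: each term of `f` simplifies, e.g. `R / core_radius / (1 + sqrt(1 + (R / core_radius)**2))` equals `R / (core_radius + sqrt(core_radius**2 + R**2))`, and differentiating the potential reproduces `alpha`. A check with `get_convergence_0` replaced by 1 and softening s = 0 (plain PyTorch sketch):

```python
import torch

rc, rs = 0.2, 2.0  # core_radius, scale_radius [arcsec], illustrative values
x = torch.tensor(0.9, requires_grad=True)
y = torch.tensor(-0.4, requires_grad=True)

R = (x**2 + y**2).sqrt()
coeff = 2 * rc * rs / (rs - rc)  # with convergence_0 = 1

psi = -coeff * (
    (rs**2 + R**2).sqrt()
    - (rc**2 + R**2).sqrt()
    + rc * (rc + (rc**2 + R**2).sqrt()).log()
    - rs * (rs + (rs**2 + R**2).sqrt()).log()
)
ax, ay = torch.autograd.grad(psi, (x, y))

# f simplifies to R/(rc + sqrt(rc^2 + R^2)) - R/(rs + sqrt(rs^2 + R^2))
f = R / (rc + (rc**2 + R**2).sqrt()) - R / (rs + (rs**2 + R**2).sqrt())
alpha = coeff * f
assert torch.isclose(ax, (alpha * x / R).detach())
assert torch.isclose(ay, (alpha * y / R).detach())
```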
diff --git a/caustics/lenses/sie.py b/src/caustics/lenses/sie.py
similarity index 83%
rename from caustics/lenses/sie.py
rename to src/caustics/lenses/sie.py
index c187a27a..f5bcd917 100644
--- a/caustics/lenses/sie.py
+++ b/src/caustics/lenses/sie.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional, Union
+from typing import Optional, Union
from torch import Tensor
@@ -12,9 +12,9 @@
class SIE(ThinLens):
"""
- A class representing a Singular Isothermal Ellipsoid (SIE) strong gravitational lens model.
+ A class representing a Singular Isothermal Ellipsoid (SIE) strong gravitational lens model.
This model is based on Keeton 2001, which can be found at https://arxiv.org/abs/astro-ph/0102341.
-
+
Attributes:
name (str): The name of the lens.
cosmology (Cosmology): An instance of the Cosmology class.
@@ -33,7 +33,7 @@ def __init__(
z_l: Optional[Union[Tensor, float]] = None,
x0: Optional[Union[Tensor, float]] = None,
y0: Optional[Union[Tensor, float]] = None,
- q: Optional[Union[Tensor, float]] = None,# TODO change to true axis ratio
+ q: Optional[Union[Tensor, float]] = None, # TODO change to true axis ratio
phi: Optional[Union[Tensor, float]] = None,
b: Optional[Union[Tensor, float]] = None,
s: float = 0.0,
@@ -67,7 +67,19 @@ def _get_potential(self, x, y, q):
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, q, phi, b, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ q,
+ phi,
+ b,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Calculate the physical deflection angle.
@@ -90,8 +102,20 @@ def reduced_deflection_angle(
return derotate(ax, ay, phi)
@unpack(3)
- def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, q, phi, b, *args, params: Optional["Packed"] = None, **kwargs
+ def potential(
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ q,
+ phi,
+ b,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the lensing potential.
@@ -112,7 +136,19 @@ def potential(
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, q, phi, b, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ q,
+ phi,
+ b,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Calculate the projected mass density.
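For orientation, the SIE deflection that `reduced_deflection_angle` computes (up to the `derotate` step) has a closed form in lens-aligned coordinates. A hedged sketch in plain PyTorch (this is the standard Kormann/Keeton result, stated as an assumption rather than read from the elided body):

```python
# Assumed SIE deflection, lens-aligned frame: psi = sqrt(q^2 x^2 + y^2),
# alpha_x = b q / sqrt(1 - q^2) * arctan(sqrt(1 - q^2) x / psi),
# alpha_y = b q / sqrt(1 - q^2) * arctanh(sqrt(1 - q^2) y / psi).
import torch

def sie_deflection(x, y, q=0.7, b=1.0, s=1e-6):
    psi = (q**2 * x**2 + y**2).sqrt() + s
    e = (1 - q**2) ** 0.5
    ax = b * q / e * torch.atan(e * x / psi)
    ay = b * q / e * torch.atanh(e * y / psi)
    return ax, ay

ax, ay = sie_deflection(torch.tensor(0.5), torch.tensor(0.3))
```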
diff --git a/caustics/lenses/singleplane.py b/src/caustics/lenses/singleplane.py
similarity index 83%
rename from caustics/lenses/singleplane.py
rename to src/caustics/lenses/singleplane.py
index f9cad5f4..7c6ff11a 100644
--- a/caustics/lenses/singleplane.py
+++ b/src/caustics/lenses/singleplane.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional
+from typing import Optional
import torch
from torch import Tensor
@@ -12,16 +12,18 @@
class SinglePlane(ThinLens):
"""
- A class for combining multiple thin lenses into a single lensing plane.
+ A class for combining multiple thin lenses into a single lensing plane.
This model inherits from the base `ThinLens` class.
-
+
Attributes:
name (str): The name of the single plane lens.
cosmology (Cosmology): An instance of the Cosmology class.
lenses (List[ThinLens]): A list of ThinLens objects that are being combined into a single lensing plane.
"""
- def __init__(self, cosmology: Cosmology, lenses: list[ThinLens], name: str = None, **kwargs):
+ def __init__(
+ self, cosmology: Cosmology, lenses: list[ThinLens], name: str = None, **kwargs
+ ):
"""
Initialize the SinglePlane lens model.
"""
@@ -33,7 +35,13 @@ def __init__(self, cosmology: Cosmology, lenses: list[ThinLens], name: str = Non
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Calculate the total deflection angle by summing the deflection angles of all individual lenses.
@@ -57,7 +65,13 @@ def reduced_deflection_angle(
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Calculate the total projected mass density by summing the mass densities of all individual lenses.
@@ -79,7 +93,13 @@ def convergence(
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the total lensing potential by summing the lensing potentials of all individual lenses.
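A minimal sketch of the summing behaviour (assuming `SIS` and `SinglePlane` are exported from `caustics.lenses`, matching the module layout in this diff):

```python
import torch

from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIS, SinglePlane

cosmology = FlatLambdaCDM(name="cosmo")
lens_a = SIS(name="sis_a", cosmology=cosmology, z_l=0.5, x0=-0.5, y0=0.0, th_ein=0.8)
lens_b = SIS(name="sis_b", cosmology=cosmology, z_l=0.5, x0=0.5, y0=0.0, th_ein=0.6)
plane = SinglePlane(cosmology=cosmology, lenses=[lens_a, lens_b], name="plane")

x = torch.linspace(-1.0, 1.0, 32).repeat(32, 1)
y = x.T.clone()
z_s = torch.tensor(1.0)
# The combined deflection field is the elementwise sum over the component lenses.
ax, ay = plane.reduced_deflection_angle(x, y, z_s)
ax_a, ay_a = lens_a.reduced_deflection_angle(x, y, z_s)
ax_b, ay_b = lens_b.reduced_deflection_angle(x, y, z_s)
assert torch.allclose(ax, ax_a + ax_b)
```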
diff --git a/caustics/lenses/sis.py b/src/caustics/lenses/sis.py
similarity index 82%
rename from caustics/lenses/sis.py
rename to src/caustics/lenses/sis.py
index f6ac43e4..244fe6ea 100644
--- a/caustics/lenses/sis.py
+++ b/src/caustics/lenses/sis.py
@@ -1,6 +1,5 @@
-from typing import Any, Optional, Union
+from typing import Optional, Union
-import torch
from torch import Tensor
from ..cosmology import Cosmology
@@ -13,7 +12,7 @@
class SIS(ThinLens):
"""
- A class representing the Singular Isothermal Sphere (SIS) model.
+ A class representing the Singular Isothermal Sphere (SIS) model.
This model inherits from the base `ThinLens` class.
Attributes:
@@ -25,6 +24,7 @@ class SIS(ThinLens):
th_ein (Optional[Union[Tensor, float]]): The Einstein radius of the lens.
s (float): A smoothing factor, default is 0.0.
"""
+
def __init__(
self,
cosmology: Cosmology,
@@ -33,7 +33,7 @@ def __init__(
y0: Optional[Union[Tensor, float]] = None,
th_ein: Optional[Union[Tensor, float]] = None,
s: float = 0.0,
- name: str = None
+ name: str = None,
):
"""
Initialize the SIS lens model.
@@ -47,7 +47,17 @@ def __init__(
@unpack(3)
def reduced_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, th_ein, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ th_ein,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""
Calculate the deflection angle of the SIS lens.
@@ -69,7 +79,17 @@ def reduced_deflection_angle(
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, th_ein, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ th_ein,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the lensing potential of the SIS lens.
@@ -89,7 +109,17 @@ def potential(
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, th_ein, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ th_ein,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Calculate the projected mass density of the SIS lens.
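As a sanity check on the SIS model, the deflection magnitude should equal `th_ein` everywhere away from the center; a hedged sketch, not a library test:

```python
import torch

from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIS

cosmology = FlatLambdaCDM(name="cosmo")
sis = SIS(name="sis", cosmology=cosmology, z_l=0.5, x0=0.0, y0=0.0, th_ein=1.2)

x = torch.tensor([0.3, -0.7, 1.1])
y = torch.tensor([0.4, 0.2, -0.9])
ax, ay = sis.reduced_deflection_angle(x, y, torch.tensor(1.0))
# For an SIS the deflection magnitude is th_ein, independent of radius.
assert torch.allclose((ax**2 + ay**2).sqrt(), torch.tensor(1.2), atol=1e-5)
```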
diff --git a/caustics/lenses/tnfw.py b/src/caustics/lenses/tnfw.py
similarity index 76%
rename from caustics/lenses/tnfw.py
rename to src/caustics/lenses/tnfw.py
index 5f41f61f..093ee53f 100644
--- a/caustics/lenses/tnfw.py
+++ b/src/caustics/lenses/tnfw.py
@@ -1,5 +1,5 @@
from math import pi
-from typing import Any, Optional, Union
+from typing import Optional, Union
import torch
from torch import Tensor
@@ -14,9 +14,10 @@
__all__ = ("TNFW",)
+
class TNFW(ThinLens):
"""Truncated Navaro-Frenk-White profile
-
+
TNFW lens class. This class models a lens using the truncated
Navarro-Frenk-White (NFW) profile. The NFW profile is a spatial
     density profile of dark matter halos that arises in cosmological
@@ -25,7 +26,7 @@ class TNFW(ThinLens):
     infinity. This is based on the paper by Baltz et al. 2009:
https://arxiv.org/abs/0705.0682
-
+
https://ui.adsabs.harvard.edu/abs/2009JCAP...01..015B/abstract
Note:
@@ -40,15 +41,15 @@ class TNFW(ThinLens):
Args:
name (str): Name of the lens instance.
- cosmology (Cosmology): An instance of the Cosmology class which contains
+ cosmology (Cosmology): An instance of the Cosmology class which contains
information about the cosmological model and parameters.
z_l (Optional[Tensor]): Redshift of the lens.
- x0 (Optional[Tensor]): Center of lens position on x-axis (arcsec).
- y0 (Optional[Tensor]): Center of lens position on y-axis (arcsec).
+ x0 (Optional[Tensor]): Center of lens position on x-axis (arcsec).
+ y0 (Optional[Tensor]): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
- s (float): Softening parameter to avoid singularities at the center of the lens.
+ s (float): Softening parameter to avoid singularities at the center of the lens.
Default is 0.0.
interpret_m_total_mass (bool): Indicates how to interpret the mass variable "m". If true
         the mass is interpreted as the total mass of the halo (good because it makes sense). If
@@ -59,6 +60,7 @@ class TNFW(ThinLens):
you wish to use by setting this parameter to one of: batchable, differentiable.
"""
+
def __init__(
self,
cosmology: Cosmology,
@@ -70,7 +72,7 @@ def __init__(
tau: Optional[Union[Tensor, float]] = None,
s: float = 0.0,
interpret_m_total_mass: bool = True,
- use_case = "batchable",
+ use_case="batchable",
name: str = None,
):
"""
@@ -92,21 +94,32 @@ def __init__(
self._F = self._F_differentiable
else:
raise ValueError("use case should be one of: batchable, differentiable")
-
+
@staticmethod
def _F_batchable(x):
"""
Helper method from Baltz et al. 2009 equation A.5
"""
- return torch.where(x == 1, torch.ones_like(x), ((1 / x.to(dtype=torch.cdouble)).arccos() / (x.to(dtype=torch.cdouble)**2 - 1).sqrt()).abs())
+ return torch.where(
+ x == 1,
+ torch.ones_like(x),
+ (
+ (1 / x.to(dtype=torch.cdouble)).arccos()
+ / (x.to(dtype=torch.cdouble) ** 2 - 1).sqrt()
+ ).abs(),
+ )
@staticmethod
def _F_differentiable(x):
f = torch.ones_like(x)
- f[x < 1] = torch.arctanh((1. - x[x < 1]**2).sqrt()) / (1. - x[x < 1]**2).sqrt()
- f[x > 1] = torch.arctan((x[x > 1]**2 - 1.).sqrt()) / (x[x > 1]**2 - 1.).sqrt()
+ f[x < 1] = (
+ torch.arctanh((1.0 - x[x < 1] ** 2).sqrt()) / (1.0 - x[x < 1] ** 2).sqrt()
+ )
+ f[x > 1] = (
+ torch.arctan((x[x > 1] ** 2 - 1.0).sqrt()) / (x[x > 1] ** 2 - 1.0).sqrt()
+ )
return f
-
+
@staticmethod
def _L(x, tau):
"""
@@ -115,14 +128,25 @@ def _L(x, tau):
return (x / (tau + (tau**2 + x**2).sqrt())).log()
@unpack(0)
- def get_concentration(self, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def get_concentration(
+ self,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> Tensor:
"""
Calculate the scale radius of the lens. This is the same formula used for the classic NFW profile.
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -132,19 +156,30 @@ def get_concentration(self, z_l, x0, y0, mass, scale_radius, tau, *args, params:
Tensor: The scale radius of the lens in Mpc.
"""
critical_density = self.cosmology.critical_density(z_l, params)
- d_l = self.cosmology.angular_diameter_distance(z_l, params)
+ d_l = self.cosmology.angular_diameter_distance(z_l, params)
r_delta = (3 * mass / (4 * pi * DELTA * critical_density)) ** (1 / 3)
return r_delta / (scale_radius * d_l * arcsec_to_rad)
@unpack(0)
- def get_truncation_radius(self, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def get_truncation_radius(
+ self,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> Tensor:
"""
Calculate the truncation radius of the TNFW lens.
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -156,14 +191,25 @@ def get_truncation_radius(self, z_l, x0, y0, mass, scale_radius, tau, *args, par
return tau * scale_radius
@unpack(0)
- def get_M0(self, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def get_M0(
+ self,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> Tensor:
"""
Calculate the reference mass. This is an abstract reference mass used internally in the equations from Baltz et al. 2009.
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -173,21 +219,43 @@ def get_M0(self, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["
Tensor: The reference mass of the lens in Msol.
"""
if self.interpret_m_total_mass:
- return mass * (tau**2 + 1)**2 / (tau**2 * ((tau**2 - 1) * tau.log() + torch.pi * tau - (tau**2 + 1)))
+ return (
+ mass
+ * (tau**2 + 1) ** 2
+ / (
+ tau**2
+ * ((tau**2 - 1) * tau.log() + torch.pi * tau - (tau**2 + 1))
+ )
+ )
else:
d_l = self.cosmology.angular_diameter_distance(z_l, params)
- return 4 * torch.pi * (scale_radius * d_l * arcsec_to_rad)**3 * self.get_scale_density(params)
-
+ return (
+ 4
+ * torch.pi
+ * (scale_radius * d_l * arcsec_to_rad) ** 3
+ * self.get_scale_density(params)
+ )
@unpack(0)
- def get_scale_density(self, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs) -> Tensor:
+ def get_scale_density(
+ self,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ) -> Tensor:
"""
Calculate the scale density of the lens.
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -207,15 +275,27 @@ def get_scale_density(self, z_l, x0, y0, mass, scale_radius, tau, *args, params:
@unpack(3)
def convergence(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
TNFW convergence as given in Baltz et al. 2009. This is unitless since it is Sigma(x) / Sigma_crit.
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -223,7 +303,7 @@ def convergence(
Returns:
Tensor: unitless convergence at requested position
-
+
"""
x, y = translate_rotate(x, y, x0, y0)
r = (x**2 + y**2).sqrt() + self.s
@@ -233,27 +313,40 @@ def convergence(
L = self._L(g, tau)
critical_density = self.cosmology.critical_surface_density(z_l, z_s, params)
- S = self.get_M0(params) / (2 * torch.pi * (scale_radius * d_l * arcsec_to_rad)**2)
-
+ S = self.get_M0(params) / (
+ 2 * torch.pi * (scale_radius * d_l * arcsec_to_rad) ** 2
+ )
+
t2 = tau**2
- a1 = t2 / (t2 + 1)**2
- a2 = torch.where(g == 1, (t2 + 1) / 3., (t2 + 1) * (1 - F) / (g**2 - 1))
+ a1 = t2 / (t2 + 1) ** 2
+ a2 = torch.where(g == 1, (t2 + 1) / 3.0, (t2 + 1) * (1 - F) / (g**2 - 1))
a3 = 2 * F
- a4 = - torch.pi / (t2 + g**2).sqrt()
+ a4 = -torch.pi / (t2 + g**2).sqrt()
a5 = (t2 - 1) * L / (tau * (t2 + g**2).sqrt())
return a1 * (a2 + a3 + a4 + a5) * S / critical_density
@unpack(2)
def mass_enclosed_2d(
- self, r: Tensor, z_s: Tensor, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ r: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Total projected mass (Msol) within a radius r (arcsec).
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -266,17 +359,29 @@ def mass_enclosed_2d(
t2 = tau**2
F = self._F(g)
L = self._L(g, tau)
- a1 = t2 / (t2 + 1)**2
- a2 = (t2 + 1 + 2*(g**2 - 1)) * F
+ a1 = t2 / (t2 + 1) ** 2
+ a2 = (t2 + 1 + 2 * (g**2 - 1)) * F
a3 = tau * torch.pi
a4 = (t2 - 1) * tau.log()
a5 = (t2 + g**2).sqrt() * (-torch.pi + (t2 - 1) * L / tau)
S = self.get_M0(params)
return S * a1 * (a2 + a3 + a4 + a5)
-
+
@unpack(3)
def physical_deflection_angle(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> tuple[Tensor, Tensor]:
"""Compute the physical deflection angle (arcsec) for this lens at
the requested position. Note that the NFW/TNFW profile is more
@@ -285,8 +390,8 @@ def physical_deflection_angle(
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -298,17 +403,31 @@ def physical_deflection_angle(
"""
d_l = self.cosmology.angular_diameter_distance(z_l, params)
x, y = translate_rotate(x, y, x0, y0)
- r = ((x**2 + y**2).sqrt() + self.s)
- theta = torch.arctan2(y,x)
+ r = (x**2 + y**2).sqrt() + self.s
+ theta = torch.arctan2(y, x)
         # The expression below also follows from eq 2.13 in the Meneghetti lensing notes
- dr = self.mass_enclosed_2d(r, z_s, params) / (r * d_l * arcsec_to_rad) # note dpsi(u)/du = 2x*dpsi(x)/dx when u = x^2
+ dr = self.mass_enclosed_2d(r, z_s, params) / (
+ r * d_l * arcsec_to_rad
+ ) # note dpsi(u)/du = 2x*dpsi(x)/dx when u = x^2
S = 4 * G_over_c2 * rad_to_arcsec
return S * dr * theta.cos(), S * dr * theta.sin()
@unpack(3)
def potential(
- self, x: Tensor, y: Tensor, z_s: Tensor, z_l, x0, y0, mass, scale_radius, tau, *args, params: Optional["Packed"] = None, **kwargs
+ self,
+ x: Tensor,
+ y: Tensor,
+ z_s: Tensor,
+ z_l,
+ x0,
+ y0,
+ mass,
+ scale_radius,
+ tau,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
) -> Tensor:
"""
Compute the lensing potential. Note that this is not a unitless potential! This is the potential as given in Baltz et al. 2009.
@@ -317,8 +436,8 @@ def potential(
Args:
z_l (Tensor): Redshift of the lens.
- x0 (Tensor): Center of lens position on x-axis (arcsec).
- y0 (Tensor): Center of lens position on y-axis (arcsec).
+ x0 (Tensor): Center of lens position on x-axis (arcsec).
+ y0 (Tensor): Center of lens position on y-axis (arcsec).
mass (Optional[Tensor]): Mass of the lens (Msol).
scale_radius (Optional[Tensor]): Scale radius of the TNFW lens (arcsec).
tau (Optional[Tensor]): Truncation scale. Ratio of truncation radius to scale radius (rt/rs).
@@ -335,15 +454,24 @@ def potential(
F = self._F(g)
L = self._L(g, tau)
- #d_l = self.cosmology.angular_diameter_distance(z_l, params)
- S = 2 * self.get_M0(params) * G_over_c2 # * rad_to_arcsec * d_l**2
- a1 = 1 / (t2 + 1)**2
- a2 = 2 * torch.pi * t2 * (tau - (t2 + u).sqrt() + tau * (tau + (t2 + u).sqrt()).log())
+ # d_l = self.cosmology.angular_diameter_distance(z_l, params)
+ S = 2 * self.get_M0(params) * G_over_c2 # * rad_to_arcsec * d_l**2
+ a1 = 1 / (t2 + 1) ** 2
+ a2 = (
+ 2
+ * torch.pi
+ * t2
+ * (tau - (t2 + u).sqrt() + tau * (tau + (t2 + u).sqrt()).log())
+ )
a3 = 2 * (t2 - 1) * tau * (t2 + u).sqrt() * L
a4 = t2 * (t2 - 1) * L**2
a5 = 4 * t2 * (u - 1) * F
- a6 = t2 * (t2 - 1) * (1 / g.to(dtype=torch.cdouble)).arccos().abs()**2
+ a6 = t2 * (t2 - 1) * (1 / g.to(dtype=torch.cdouble)).arccos().abs() ** 2
a7 = t2 * ((t2 - 1) * tau.log() - t2 - 1) * u.log()
- a8 = t2 * ((t2 - 1) * tau.log() * (4*tau).log() + 2 * (tau/2).log() - 2 * tau * (tau - torch.pi) * (2*tau).log())
+ a8 = t2 * (
+ (t2 - 1) * tau.log() * (4 * tau).log()
+ + 2 * (tau / 2).log()
+ - 2 * tau * (tau - torch.pi) * (2 * tau).log()
+ )
return S * a1 * (a2 + a3 + a4 + a5 + a6 + a7 - a8)
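A hedged construction sketch; `use_case` selects between the batchable and differentiable `_F` implementations above, and `interpret_m_total_mass` controls the mass convention (assumes `TNFW` is exported from `caustics.lenses`):

```python
import torch

from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import TNFW

cosmology = FlatLambdaCDM(name="cosmo")
halo = TNFW(
    name="halo",
    cosmology=cosmology,
    z_l=0.5,
    x0=0.0,
    y0=0.0,
    mass=1e12,  # Msol; interpreted as the total halo mass by default
    scale_radius=1.0,  # arcsec
    tau=3.0,  # truncation radius / scale radius
    interpret_m_total_mass=True,
    use_case="batchable",  # or "differentiable" if gradients through _F are needed
)

z_s = torch.tensor(1.0)
x = torch.linspace(0.1, 2.0, 16)
kappa = halo.convergence(x, torch.zeros_like(x), z_s)  # unitless convergence
m2d = halo.mass_enclosed_2d(torch.tensor(1.0), z_s)  # Msol within 1 arcsec
```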
diff --git a/caustics/lenses/utils.py b/src/caustics/lenses/utils.py
similarity index 96%
rename from caustics/lenses/utils.py
rename to src/caustics/lenses/utils.py
index 4912a6c8..4ccb2b54 100644
--- a/caustics/lenses/utils.py
+++ b/src/caustics/lenses/utils.py
@@ -50,7 +50,7 @@ def get_pix_magnification(raytrace, x, y, z_s) -> Tensor:
def get_magnification(raytrace, x, y, z_s) -> Tensor:
"""
- Computes the magnification over a grid on the lensing plane. This is done by calling `get_pix_magnification`
+ Computes the magnification over a grid on the lensing plane. This is done by calling `get_pix_magnification`
for each point on the grid.
Args:
@@ -62,6 +62,4 @@ def get_magnification(raytrace, x, y, z_s) -> Tensor:
Returns:
A tensor representing the magnification at each point on the grid.
"""
- return vmap_n(get_pix_magnification, 2, (None, 0, 0, None))(
- raytrace, x, y, z_s
- )
+ return vmap_n(get_pix_magnification, 2, (None, 0, 0, None))(raytrace, x, y, z_s)
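A hedged usage sketch for `get_magnification`; it assumes the lens exposes a `raytrace` callable mapping image-plane coordinates to the source plane (as the `ThinLens` base class is expected to provide):

```python
import torch

from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIE
from caustics.lenses.utils import get_magnification
from caustics.utils import get_meshgrid

cosmology = FlatLambdaCDM(name="cosmo")
lens = SIE(name="sie", cosmology=cosmology, z_l=0.5, x0=0.0, y0=0.0, q=0.7, phi=0.0, b=1.0)

x, y = get_meshgrid(0.05, 64, 64)
z_s = torch.tensor(1.0)
# Magnification per pixel from the Jacobian of the raytracing map.
mu = get_magnification(lens.raytrace, x, y, z_s)
```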
diff --git a/caustics/light/__init__.py b/src/caustics/light/__init__.py
similarity index 100%
rename from caustics/light/__init__.py
rename to src/caustics/light/__init__.py
diff --git a/caustics/light/base.py b/src/caustics/light/base.py
similarity index 71%
rename from caustics/light/base.py
rename to src/caustics/light/base.py
index 40e9da3e..f6a0e915 100644
--- a/caustics/light/base.py
+++ b/src/caustics/light/base.py
@@ -1,5 +1,5 @@
from abc import abstractmethod
-from typing import Any, Optional
+from typing import Optional
from torch import Tensor
@@ -10,38 +10,39 @@
class Source(Parametrized):
"""
- This is an abstract base class used to represent a source in a strong gravitational lensing system.
- It provides the basic structure and required methods that any derived source class should implement.
- The Source class inherits from the Parametrized class, implying that it contains parameters that can
+ This is an abstract base class used to represent a source in a strong gravitational lensing system.
+ It provides the basic structure and required methods that any derived source class should implement.
+ The Source class inherits from the Parametrized class, implying that it contains parameters that can
be optimized or manipulated.
-
- The class introduces one abstract method, `brightness`, that must be implemented in any concrete
+
+ The class introduces one abstract method, `brightness`, that must be implemented in any concrete
subclass. This method calculates the brightness of the source at given coordinates.
"""
+
@abstractmethod
@unpack(2)
def brightness(
- self, x: Tensor, y: Tensor, *args, params: Optional["Packed"] = None, **kwargs
+ self, x: Tensor, y: Tensor, *args, params: Optional["Packed"] = None, **kwargs
) -> Tensor:
"""
- Abstract method that calculates the brightness of the source at the given coordinates.
+ Abstract method that calculates the brightness of the source at the given coordinates.
This method is expected to be implemented in any class that derives from Source.
-
+
Args:
- x (Tensor): The x-coordinate(s) at which to calculate the source brightness.
+ x (Tensor): The x-coordinate(s) at which to calculate the source brightness.
This could be a single value or a tensor of values.
-
- y (Tensor): The y-coordinate(s) at which to calculate the source brightness.
+
+ y (Tensor): The y-coordinate(s) at which to calculate the source brightness.
This could be a single value or a tensor of values.
-
- params (Packed, optional): Dynamic parameter container that might be required to calculate
- the brightness. The exact contents will depend on the specific implementation in derived classes.
+
+ params (Packed, optional): Dynamic parameter container that might be required to calculate
+ the brightness. The exact contents will depend on the specific implementation in derived classes.
Returns:
- Tensor: The brightness of the source at the given coordinate(s). The exact form of the output
+ Tensor: The brightness of the source at the given coordinate(s). The exact form of the output
will depend on the specific implementation in the derived class.
-
- Note:
+
+ Note:
This method must be overridden in any class that inherits from `Source`.
"""
...
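To make the abstract contract concrete, here is a hypothetical minimal subclass (the `GaussianBlob` name and profile are illustrative only; the `add_param`/`@unpack` pattern follows the concrete sources in `caustics.light`, and `unpack` is importable per the `__all__` of `caustics.parametrized`):

```python
from typing import Optional, Union

import torch
from torch import Tensor

from caustics.light.base import Source
from caustics.parametrized import unpack


class GaussianBlob(Source):
    """Hypothetical source with a circular Gaussian brightness profile."""

    def __init__(
        self,
        x0: Optional[Union[Tensor, float]] = None,
        y0: Optional[Union[Tensor, float]] = None,
        sigma: Optional[Union[Tensor, float]] = None,
        name: str = None,
    ):
        super().__init__(name)
        self.add_param("x0", x0)
        self.add_param("y0", y0)
        self.add_param("sigma", sigma)

    @unpack(2)
    def brightness(
        self, x, y, x0, y0, sigma, *args, params: Optional["Packed"] = None, **kwargs
    ):
        # Peak-normalized Gaussian centered on (x0, y0).
        return torch.exp(-0.5 * ((x - x0) ** 2 + (y - y0) ** 2) / sigma**2)


blob = GaussianBlob(x0=0.0, y0=0.0, sigma=0.5, name="blob")
```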
diff --git a/caustics/light/pixelated.py b/src/caustics/light/pixelated.py
similarity index 81%
rename from caustics/light/pixelated.py
rename to src/caustics/light/pixelated.py
index 72a82790..142e53b0 100644
--- a/caustics/light/pixelated.py
+++ b/src/caustics/light/pixelated.py
@@ -1,7 +1,6 @@
from typing import Optional, Union
from torch import Tensor
-from torch import vmap
from ..utils import interp2d
from .base import Source
@@ -12,23 +11,24 @@
class Pixelated(Source):
"""
- `Pixelated` is a subclass of the abstract class `Source`. It representes the brightness profile of
+ `Pixelated` is a subclass of the abstract class `Source`. It represents the brightness profile of
 a source with a pixelated grid of intensity values.
-
- This class provides a concrete implementation of the `brightness` method required by the `Source`
- superclass. In this implementation, brightness is determined by interpolating values from the
+
+ This class provides a concrete implementation of the `brightness` method required by the `Source`
+ superclass. In this implementation, brightness is determined by interpolating values from the
provided source image.
Attributes:
- x0 (Optional[Tensor]): The x-coordinate of the source image's center.
+ x0 (Optional[Tensor]): The x-coordinate of the source image's center.
y0 (Optional[Tensor]): The y-coordinate of the source image's center.
image (Optional[Tensor]): The source image from which brightness values will be interpolated.
pixelscale (Optional[Tensor]): The pixelscale of the source image in the lens plane in units of arcsec/pixel.
shape (Optional[tuple[int, ...]]): The shape of the source image.
"""
+
def __init__(
self,
- image: Optional[Tensor] = None,
+ image: Optional[Tensor] = None,
x0: Optional[Union[Tensor, float]] = None,
y0: Optional[Union[Tensor, float]] = None,
pixelscale: Optional[Union[Tensor, float]] = None,
@@ -36,7 +36,7 @@ def __init__(
name: str = None,
):
"""
- Constructs the `Pixelated` object with the given parameters.
+ Constructs the `Pixelated` object with the given parameters.
Args:
name (str): The name of the source.
@@ -61,25 +61,38 @@ def __init__(
self.add_param("pixelscale", pixelscale)
@unpack(2)
- def brightness(self, x, y, x0, y0, image, pixelscale, *args, params: Optional["Packed"] = None, **kwargs):
+ def brightness(
+ self,
+ x,
+ y,
+ x0,
+ y0,
+ image,
+ pixelscale,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ):
"""
- Implements the `brightness` method for `Pixelated`. The brightness at a given point is
+ Implements the `brightness` method for `Pixelated`. The brightness at a given point is
determined by interpolating values from the source image.
Args:
- x (Tensor): The x-coordinate(s) at which to calculate the source brightness.
+ x (Tensor): The x-coordinate(s) at which to calculate the source brightness.
This could be a single value or a tensor of values.
- y (Tensor): The y-coordinate(s) at which to calculate the source brightness.
+ y (Tensor): The y-coordinate(s) at which to calculate the source brightness.
This could be a single value or a tensor of values.
- params (Optional[Packed]): A dictionary containing additional parameters that might be required to
- calculate the brightness.
+ params (Optional[Packed]): A dictionary containing additional parameters that might be required to
+ calculate the brightness.
Returns:
- Tensor: The brightness of the source at the given coordinate(s). The brightness is
+ Tensor: The brightness of the source at the given coordinate(s). The brightness is
determined by interpolating values from the source image.
"""
fov_x = pixelscale * image.shape[0]
fov_y = pixelscale * image.shape[1]
return interp2d(
- image, (x - x0).view(-1) / fov_x*2, (y - y0).view(-1) / fov_y*2 # make coordinates bounds at half the fov
+ image,
+ (x - x0).view(-1) / fov_x * 2,
+ (y - y0).view(-1) / fov_y * 2,  # normalize so points at half the fov map to the interpolation bounds
).reshape(x.shape)
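A hedged usage sketch (assumes `Pixelated` is exported from `caustics.light`, as `Sersic` is):

```python
import torch

from caustics.light import Pixelated

# Small synthetic source image; pixelscale is arcsec/pixel in the lens plane.
image = torch.zeros(32, 32)
image[12:20, 12:20] = 1.0
source = Pixelated(image=image, x0=0.0, y0=0.0, pixelscale=0.1, name="pix")

x = torch.linspace(-1.0, 1.0, 64).repeat(64, 1)
y = x.T.clone()
# Brightness is interpolated from `image`; points at half the fov map to the image edges.
b = source.brightness(x, y)
```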
diff --git a/caustics/light/probes.py b/src/caustics/light/probes.py
similarity index 100%
rename from caustics/light/probes.py
rename to src/caustics/light/probes.py
diff --git a/caustics/light/sersic.py b/src/caustics/light/sersic.py
similarity index 89%
rename from caustics/light/sersic.py
rename to src/caustics/light/sersic.py
index e35ea8f1..4e37263a 100644
--- a/caustics/light/sersic.py
+++ b/src/caustics/light/sersic.py
@@ -11,14 +11,14 @@
class Sersic(Source):
"""
- `Sersic` is a subclass of the abstract class `Source`. It represents a source in a strong
- gravitational lensing system that follows a Sersic profile, a mathematical function that describes
+ `Sersic` is a subclass of the abstract class `Source`. It represents a source in a strong
+ gravitational lensing system that follows a Sersic profile, a mathematical function that describes
how the intensity I of a galaxy varies with distance r from its center.
-
+
The Sersic profile is often used to describe elliptical galaxies and spiral galaxies' bulges.
Attributes:
- x0 (Optional[Tensor]): The x-coordinate of the Sersic source's center.
+ x0 (Optional[Tensor]): The x-coordinate of the Sersic source's center.
y0 (Optional[Tensor]): The y-coordinate of the Sersic source's center.
q (Optional[Tensor]): The axis ratio of the Sersic source.
phi (Optional[Tensor]): The orientation of the Sersic source (position angle).
@@ -28,6 +28,7 @@ class Sersic(Source):
s (float): A small constant for numerical stability.
lenstronomy_k_mode (bool): A flag indicating whether to use lenstronomy to compute the value of k.
"""
+
def __init__(
self,
x0: Optional[Union[Tensor, float]] = None,
@@ -42,7 +43,7 @@ def __init__(
name: str = None,
):
"""
- Constructs the `Sersic` object with the given parameters.
+ Constructs the `Sersic` object with the given parameters.
Args:
name (str): The name of the source.
@@ -69,15 +70,29 @@ def __init__(
self.lenstronomy_k_mode = use_lenstronomy_k
@unpack(2)
- def brightness(self, x, y, x0, y0, q, phi, n, Re, Ie, *args, params: Optional["Packed"] = None, **kwargs):
+ def brightness(
+ self,
+ x,
+ y,
+ x0,
+ y0,
+ q,
+ phi,
+ n,
+ Re,
+ Ie,
+ *args,
+ params: Optional["Packed"] = None,
+ **kwargs,
+ ):
"""
- Implements the `brightness` method for `Sersic`. The brightness at a given point is
+ Implements the `brightness` method for `Sersic`. The brightness at a given point is
determined by the Sersic profile formula.
Args:
- x (Tensor): The x-coordinate(s) at which to calculate the source brightness.
+ x (Tensor): The x-coordinate(s) at which to calculate the source brightness.
This could be a single value or a tensor of values.
- y (Tensor): The y-coordinate(s) at which to calculate the source brightness.
+ y (Tensor): The y-coordinate(s) at which to calculate the source brightness.
This could be a single value or a tensor of values.
params (Packed, optional): Dynamic parameter container.
@@ -85,13 +100,13 @@ def brightness(self, x, y, x0, y0, q, phi, n, Re, Ie, *args, params: Optional["P
Tensor: The brightness of the source at the given point(s). The output tensor has the same shape as `x` and `y`.
Notes:
- The Sersic profile is defined as: I(r) = Ie * exp(-k * ((r / r_e)^(1/n) - 1)),
- where Ie is the intensity at the effective radius r_e, n is the Sersic index
- that describes the concentration of the source, and k is a parameter that
- depends on n. In this implementation, we use elliptical coordinates ex and ey,
+ The Sersic profile is defined as: I(r) = Ie * exp(-k * ((r / r_e)^(1/n) - 1)),
+ where Ie is the intensity at the effective radius r_e, n is the Sersic index
+ that describes the concentration of the source, and k is a parameter that
+ depends on n. In this implementation, we use elliptical coordinates ex and ey,
and the transformation from Cartesian coordinates is handled by `to_elliptical`.
- The value of k can be calculated in two ways, controlled by `lenstronomy_k_mode`.
- If `lenstronomy_k_mode` is True, we use the approximation from Lenstronomy,
+ The value of k can be calculated in two ways, controlled by `lenstronomy_k_mode`.
+ If `lenstronomy_k_mode` is True, we use the approximation from Lenstronomy,
otherwise, we use the approximation from Ciotti & Bertin (1999).
"""
x, y = translate_rotate(x, y, x0, y0, phi)
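A usage sketch mirroring the constructor arguments of the removed `test_simulator.py` below:

```python
from math import pi

import torch

from caustics.light import Sersic

source = Sersic(name="src", x0=0.0, y0=0.0, q=0.6, phi=pi / 4, n=2.0, Re=0.5, Ie=1.0)

x = torch.linspace(-1.0, 1.0, 32).repeat(32, 1)
y = x.T.clone()
# Evaluates I(r) = Ie * exp(-k * ((r / Re)^(1/n) - 1)) in elliptical coordinates.
b = source.brightness(x, y)
```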
diff --git a/caustics/namespace_dict.py b/src/caustics/namespace_dict.py
similarity index 93%
rename from caustics/namespace_dict.py
rename to src/caustics/namespace_dict.py
index 44e10738..6e0ac77d 100644
--- a/caustics/namespace_dict.py
+++ b/src/caustics/namespace_dict.py
@@ -6,6 +6,7 @@ class NamespaceDict(OrderedDict):
"""
Add support for attributes on top of an OrderedDict
"""
+
def __getattr__(self, key):
if key in self:
return self[key]
@@ -32,14 +33,16 @@ class _NestedNamespaceDict(NamespaceDict):
"""
Abstract method for NestedNamespaceDict and its Proxy
"""
+
def flatten(self) -> NamespaceDict:
"""
Flatten the nested dictionary into a NamespaceDict
-
+
Returns:
NamespaceDict: Flattened dictionary as a NamespaceDict
"""
flattened_dict = NamespaceDict()
+
def _flatten_dict(dictionary, parent_key=""):
for key, value in dictionary.items():
new_key = f"{parent_key}.{key}" if parent_key else key
@@ -47,24 +50,27 @@ def _flatten_dict(dictionary, parent_key=""):
_flatten_dict(value, new_key)
else:
flattened_dict[new_key] = value
+
_flatten_dict(self)
return flattened_dict
def collapse(self) -> NamespaceDict:
"""
- Flatten the nested dictionary and collapse keys into the first level
+ Flatten the nested dictionary and collapse keys into the first level
of the NamespaceDict
-
+
Returns:
NamespaceDict: Flattened dictionary as a NamespaceDict
"""
flattened_dict = NamespaceDict()
+
def _flatten_dict(dictionary):
for key, value in dictionary.items():
if isinstance(value, dict):
_flatten_dict(value)
else:
flattened_dict[key] = value
+
_flatten_dict(self)
return flattened_dict
@@ -74,6 +80,7 @@ class _NestedNamespaceProxy(_NestedNamespaceDict):
Proxy for NestedNamespaceDict in order to allow recursion in
the class attributes
"""
+
def __init__(self, parent, key_path):
# Add new private keys to give us a ladder back to root node
self._parent = parent
@@ -81,7 +88,7 @@ def __init__(self, parent, key_path):
super().__init__(parent[key_path])
def __setattr__(self, key, value):
- if key.startswith('_'):
+ if key.startswith("_"):
# We are in a child node, we need to recurse up
super().__setattr__(key, value)
else:
@@ -91,15 +98,15 @@ def __setattr__(self, key, value):
# Hide the private keys from common usage
def keys(self):
return [key for key in super().keys() if not key.startswith("_")]
-
+
def items(self):
for key, value in super().items():
- if not key.startswith('_'):
+ if not key.startswith("_"):
yield (key, value)
def values(self):
return [v for k, v in super().items() if not k.startswith("_")]
-
+
def __len__(self):
# make sure hidden keys don't count in the length of the object
return len(self.keys())
@@ -108,7 +115,7 @@ def __len__(self):
class NestedNamespaceDict(_NestedNamespaceDict):
"""
Example usage
- ```python
+ ```python
nested_namespace = NestedNamespaceDict()
nested_namespace.foo = 'Hello'
nested_namespace.bar = {'baz': 'World'}
@@ -119,31 +126,32 @@ class NestedNamespaceDict(_NestedNamespaceDict):
print(nested_namespace)
# Output:
# {'foo': 'Hello', 'bar': {'baz': 'World', 'qux': 42 }}
-
+
#==============================
# Flattened key access
#==============================
 print(nested_namespace['foo']) # Output: Hello
 print(nested_namespace['bar.baz']) # Output: World
 print(nested_namespace['bar.qux']) # Output: 42
-
+
#==============================
# Nested namespace access
#==============================
 print(nested_namespace.bar.qux) # Output: 42
-
+
#==============================
# Flatten and collapse method
#==============================
 print(nested_namespace.flatten())
# Output:
# {'foo': 'Hello', 'bar.baz': 'World', 'bar.qux': 42}
-
+
 print(nested_namespace.collapse())
# Output:
# {'foo': 'Hello', 'baz': 'World', 'qux': 42}
-
+
"""
+
def __getattr__(self, key):
if key in self:
value = super().__getitem__(key)
@@ -152,7 +160,9 @@ def __getattr__(self, key):
else:
return value
else:
- raise AttributeError(f"'NestedNamespaceDict' object has no attribute '{key}'")
+ raise AttributeError(
+ f"'NestedNamespaceDict' object has no attribute '{key}'"
+ )
def __getitem__(self, key):
if "." in key:
@@ -171,8 +181,9 @@ def __setitem__(self, key, value):
if root not in self:
self[root] = NestedNamespaceDict()
elif not isinstance(self[root], dict):
- raise ValueError("Can't assign a NestedNamespaceDict to a non-dict entry")
+ raise ValueError(
+ "Can't assign a NestedNamespaceDict to a non-dict entry"
+ )
self[root].__setitem__(childs, value)
else:
super().__setitem__(key, value)
-
diff --git a/caustics/packed.py b/src/caustics/packed.py
similarity index 100%
rename from caustics/packed.py
rename to src/caustics/packed.py
diff --git a/caustics/parameter.py b/src/caustics/parameter.py
similarity index 85%
rename from caustics/parameter.py
rename to src/caustics/parameter.py
index c87d9be8..d49a490d 100644
--- a/caustics/parameter.py
+++ b/src/caustics/parameter.py
@@ -2,7 +2,6 @@
import torch
from torch import Tensor
-from .namespace_dict import NamespaceDict
__all__ = ("Parameter",)
@@ -19,7 +18,9 @@ class Parameter:
"""
def __init__(
- self, value: Optional[Union[Tensor, float]] = None, shape: Optional[tuple[int, ...]] = ()
+ self,
+ value: Optional[Union[Tensor, float]] = None,
+ shape: Optional[tuple[int, ...]] = (),
):
# Must assign one of value or shape
if value is None:
@@ -45,16 +46,18 @@ def dynamic(self) -> bool:
@property
def value(self) -> Optional[Tensor]:
return self._value
-
+
@value.setter
def value(self, value: Union[None, Tensor, float]):
if value is not None:
value = torch.as_tensor(value)
if value.shape != self.shape:
- raise ValueError(f"Cannot set Parameter value with a different shape. Received {value.shape}, expected {self.shape}")
+ raise ValueError(
+ f"Cannot set Parameter value with a different shape. Received {value.shape}, expected {self.shape}"
+ )
self._value = value
self._dtype = None if value is None else value.dtype
-
+
@property
def dtype(self):
return self._dtype
@@ -62,11 +65,13 @@ def dtype(self):
@property
def shape(self) -> tuple[int, ...]:
return self._shape
-
+
def set_static(self):
self.value = None
- def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
+ def to(
+ self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None
+ ):
"""
Moves and/or casts the values of the parameter.
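The static/dynamic split in practice: a `Parameter` constructed with a value is static, while one constructed with only a shape is dynamic and must be supplied at call time. A minimal sketch:

```python
import torch

from caustics.parameter import Parameter

static = Parameter(1.0)  # has a value -> static
dynamic = Parameter(shape=(2,))  # no value -> dynamic, supplied at call time
assert not static.dynamic and dynamic.dynamic

# The value setter enforces the declared shape.
try:
    static.value = torch.tensor([1.0, 2.0])  # shape (2,) != ()
except ValueError as err:
    print(err)  # Cannot set Parameter value with a different shape...
```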
diff --git a/caustics/parametrized.py b/src/caustics/parametrized.py
similarity index 86%
rename from caustics/parametrized.py
rename to src/caustics/parametrized.py
index d0940c6a..753207e5 100644
--- a/caustics/parametrized.py
+++ b/src/caustics/parametrized.py
@@ -1,4 +1,4 @@
-from collections import OrderedDict, defaultdict
+from collections import OrderedDict
from math import prod
from typing import Optional, Union
import functools
@@ -13,13 +13,15 @@
from .namespace_dict import NamespaceDict, NestedNamespaceDict
from .parameter import Parameter
-__all__ = ("Parametrized","unpack")
+__all__ = ("Parametrized", "unpack")
def check_valid_name(name):
if keyword.iskeyword(name) or not bool(re.match("^[a-zA-Z_][a-zA-Z0-9_]*$", name)):
- raise NameError(f"The string {name} contains illegal characters (like space or '-'). "\
- "Please use snake case or another valid python variable naming style.")
+ raise NameError(
+ f"The string {name} contains illegal characters (like space or '-'). "
+ "Please use snake case or another valid python variable naming style."
+ )
class Parametrized:
@@ -56,16 +58,18 @@ def __init__(self, name: str = None):
self._params: OrderedDict[str, Parameter] = NamespaceDict()
self._childs: OrderedDict[str, Parametrized] = NamespaceDict()
self._module_key_map = {}
-
+
def _default_name(self):
return re.search("([A-Z])\w+", str(self.__class__)).group()
-
+
def __getattribute__(self, key):
try:
return super().__getattribute__(key)
except AttributeError as e:
# Check if key refers to a parametrized module name (different from its attribute key)
- _map = super().__getattribute__("_module_key_map") # use super to avoid recursion error
+ _map = super().__getattribute__(
+ "_module_key_map"
+ ) # use super to avoid recursion error
if key in _map.keys():
return super().__getattribute__(_map[key])
else:
@@ -82,18 +86,18 @@ def __setattr__(self, key, value):
elif isinstance(value, Parametrized):
# Update map from attribute key to module name for __getattribute__ method
self._module_key_map[value.name] = key
- self.add_parametrized(value, set_attr=False)
+ self.add_parametrized(value, set_attr=False)
# set attr only to user defined key, not module name (self.{module.name} is still accessible, see __getattribute__ method)
super().__setattr__(key, value)
else:
super().__setattr__(key, value)
- except AttributeError: # _params or another attribute in here do not exist yet
- super().__setattr__(key, value)
+ except AttributeError: # _params or another attribute in here do not exist yet
+ super().__setattr__(key, value)
@property
def name(self) -> str:
return self._name
-
+
@name.setter
def name(self, new_name: str):
check_valid_name(new_name)
@@ -106,7 +110,9 @@ def name(self, new_name: str):
child._parents[new_name] = self
self._name = new_name
- def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
+ def to(
+ self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None
+ ):
"""
Moves static Params for this component and its childs to the specified device and casts them to the specified data type.
"""
@@ -122,7 +128,7 @@ def _generate_unique_name(name, module_names):
while f"{name}_{i}" in module_names:
i += 1
return f"{name}_{i}"
-
+
def add_parametrized(self, p: "Parametrized", set_attr=True):
"""
Add a child to this module, and create edges for the DAG
@@ -130,7 +136,7 @@ def add_parametrized(self, p: "Parametrized", set_attr=True):
# If self.name is already in the module parents, we need to update self.name
if self.name in p._parents.keys():
new_name = self._generate_unique_name(self.name, p._parents.keys())
- self.name = new_name # from name.setter, this updates the DAG edges as well
+ self.name = new_name # from name.setter, this updates the DAG edges as well
p._parents[self.name] = self
# If the module name is already in self._childs, we need to update module name
         if p.name in self._childs.keys():
@@ -156,7 +162,7 @@ def add_param(
"""
self._params[name] = Parameter(value, shape)
# __setattr__ inside add_param to catch all uses of this method
- super().__setattr__(name, self._params[name])
+ super().__setattr__(name, self._params[name])
@property
def n_dynamic(self) -> int:
@@ -180,7 +186,7 @@ def pack(
) -> Packed:
"""
Converts a list or tensor into a dict that can subsequently be unpacked
- into arguments to this component and its childs. Also, add a batch dimension
+ into arguments to this component and its childs. Also, add a batch dimension
to each Tensor without such a dimension.
Args:
@@ -197,14 +203,15 @@ def pack(
ValueError: If the input is a tensor and the shape does not match the expected shape.
"""
if isinstance(x, (dict, Packed)):
- missing_names = [name for name in self.params.dynamic.keys() if name not in x]
+ missing_names = [
+ name for name in self.params.dynamic.keys() if name not in x
+ ]
if len(missing_names) > 0:
raise ValueError(f"missing x keys for {missing_names}")
# TODO: check structure!
return Packed(x)
-
-
+
elif isinstance(x, (list, tuple)):
n_passed = len(x)
n_dynamic_params = len(self.params.dynamic.flatten())
@@ -217,17 +224,19 @@ def pack(
cur_offset += module.n_dynamic
elif n_passed == n_dynamic_modules:
for i, name in enumerate(self.dynamic_modules.keys()):
- x_repacked[name] = x[i]
+ x_repacked[name] = x[i]
else:
raise ValueError(
f"{n_passed} dynamic args were passed, but {n_dynamic_params} parameters or "
f"{n_dynamic_modules} Tensor (1 per dynamic module) are required"
)
return Packed(x_repacked)
-
+
elif isinstance(x, Tensor):
n_passed = x.shape[-1]
- n_expected = sum([module.dynamic_size for module in self.dynamic_modules.values()])
+ n_expected = sum(
+ [module.dynamic_size for module in self.dynamic_modules.values()]
+ )
if n_passed != n_expected:
# TODO: give component and arg names
raise ValueError(
@@ -250,7 +259,7 @@ def unpack(
) -> list[Tensor]:
"""
Unpacks a dict of kwargs, list of args or flattened vector of args to retrieve
- this object's static and dynamic parameters.
+ this object's static and dynamic parameters.
Args:
x (Optional[dict[str, Union[list[Tensor], dict[str, Tensor], Tensor]]]):
@@ -268,11 +277,13 @@ def unpack(
# Check if module has dynamic parameters
if self.module_params.dynamic:
dynamic_x = x[self.name]
- else: # all parameters are static and module is not present in x
+ else: # all parameters are static and module is not present in x
dynamic_x = []
if isinstance(x, dict):
if self.name in x.keys() and x.get(self.name, {}):
- print(f"Module {self.name} is static, the parameters {' '.join(x[self.name].keys())} passed dynamically will be ignored ignored")
+ print(
+ f"Module {self.name} is static, the parameters {' '.join(x[self.name].keys())} passed dynamically will be ignored ignored"
+ )
unpacked_x = []
offset = 0
for name, param in self._params.items():
@@ -284,17 +295,23 @@ def unpack(
offset += 1
elif isinstance(dynamic_x, Tensor):
size = prod(param.shape)
- param_value = dynamic_x[..., offset: offset + size].reshape(param.shape)
+ param_value = dynamic_x[..., offset : offset + size].reshape(
+ param.shape
+ )
offset += size
else:
- raise ValueError(f"Invalid data type found when unpacking parameters for {self.name}."
- f"Expected argument of unpack to be a list/tuple/dict of Tensor, or simply a flattened tensor"
- f"but found {type(dynamic_x)}.")
- else: # param is static
+ raise ValueError(
+ f"Invalid data type found when unpacking parameters for {self.name}."
+ f"Expected argument of unpack to be a list/tuple/dict of Tensor, or simply a flattened tensor"
+ f"but found {type(dynamic_x)}."
+ )
+ else: # param is static
param_value = param.value
if not isinstance(param_value, Tensor):
- raise ValueError(f"Invalid data type found when unpacking parameters for {self.name}."
- f"Argument of unpack must contain Tensor, but found {type(param_value)}")
+ raise ValueError(
+ f"Invalid data type found when unpacking parameters for {self.name}."
+ f"Argument of unpack must contain Tensor, but found {type(param_value)}"
+ )
unpacked_x.append(param_value)
return unpacked_x
@@ -308,12 +325,13 @@ def module_params(self) -> NestedNamespaceDict:
else:
dynamic[name] = param
return NestedNamespaceDict([("static", static), ("dynamic", dynamic)])
-
+
@property
def params(self) -> NestedNamespaceDict:
# todo make this an ordinary dict and reorder at the end.
static = NestedNamespaceDict()
dynamic = NestedNamespaceDict()
+
def _get_params(module):
if module.module_params.static:
static[module.name] = module.module_params.static
@@ -321,6 +339,7 @@ def _get_params(module):
dynamic[module.name] = module.module_params.dynamic
for child in module._childs.values():
_get_params(child)
+
_get_params(self)
# TODO reorder
return NestedNamespaceDict([("static", static), ("dynamic", dynamic)])
@@ -328,7 +347,10 @@ def _get_params(module):
@property
def dynamic_modules(self) -> NamespaceDict[str, "Parametrized"]:
# Only catch modules with dynamic parameters
- modules = NamespaceDict() # todo make this an ordinary dict and reorder at the end.
+ modules = (
+ NamespaceDict()
+ ) # todo make this an ordinary dict and reorder at the end.
+
def _get_childs(module):
# Start from root, and move down the DAG
if module.module_params.dynamic:
@@ -336,6 +358,7 @@ def _get_childs(module):
if module._childs != {}:
for child in module._childs.values():
_get_childs(child)
+
_get_childs(self)
# TODO reorder
return modules
@@ -355,7 +378,9 @@ def __str__(self) -> str:
for n, d in self._childs.items():
if d.n_dynamic > 0:
- desc_dynamic_strs.append(f"('{n}': {list(d.module_params.dynamic.keys())})")
+ desc_dynamic_strs.append(
+ f"('{n}': {list(d.module_params.dynamic.keys())})"
+ )
desc_dynamic_str = ", ".join(desc_dynamic_strs)
@@ -419,6 +444,7 @@ def add_params(p: Parametrized, dot):
return dot
+
def unpack(n_leading_args=0):
def decorator(method):
sig = inspect.signature(method)
@@ -436,7 +462,7 @@ def wrapped(self, *args, **kwargs):
leading_args.append(kwargs.pop(param))
elif args:
leading_args.append(args.pop(0))
-
+
# Collect module parameters passed in argument (dynamic or otherwise)
if args and isinstance(args[0], Packed):
# Case 1: Params is already Packed (or no params were passed)
@@ -453,7 +479,9 @@ def wrapped(self, *args, **kwargs):
trailing_args.append(kwargs.pop(param))
elif args:
trailing_args.append(args.pop(0))
- if not trailing_args or (len(trailing_args) == 1 and trailing_args[0] is None):
+ if not trailing_args or (
+ len(trailing_args) == 1 and trailing_args[0] is None
+ ):
# No params were passed, module is static and was expecting no params
x = Packed()
elif isinstance(trailing_args[0], (list, dict)):
@@ -463,7 +491,7 @@ def wrapped(self, *args, **kwargs):
# all parameters were passed individually in args or kwargs
x = self.pack(trailing_args)
unpacked_args = self.unpack(x)
- kwargs['params'] = x
+ kwargs["params"] = x
return method(self, *leading_args, *unpacked_args, **kwargs)
return wrapped
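Putting `pack` and `unpack` together: leaving a constructor argument as `None` makes it dynamic, and the `@unpack` decorator then accepts the missing values as a trailing list, dict, or flattened tensor. A hedged sketch:

```python
from math import pi

import torch

from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIE

cosmology = FlatLambdaCDM(name="cosmo")
# b=None leaves the Einstein radius dynamic; everything else is static.
lens = SIE(
    name="lens", cosmology=cosmology, z_l=0.5, x0=0.0, y0=0.0, q=0.7, phi=pi / 4, b=None
)

x = torch.tensor([0.3])
y = torch.tensor([0.4])
z_s = torch.tensor(1.0)
# The dynamic value can be passed as a list (one entry per dynamic parameter) ...
k1 = lens.convergence(x, y, z_s, [torch.tensor(1.0)])
# ... or as a dict keyed by module name.
k2 = lens.convergence(x, y, z_s, {"lens": [torch.tensor(1.0)]})
assert torch.allclose(k1, k2)
```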
diff --git a/caustics/sims/__init__.py b/src/caustics/sims/__init__.py
similarity index 100%
rename from caustics/sims/__init__.py
rename to src/caustics/sims/__init__.py
diff --git a/caustics/sims/lens_source.py b/src/caustics/sims/lens_source.py
similarity index 72%
rename from caustics/sims/lens_source.py
rename to src/caustics/sims/lens_source.py
index d17b6e42..e6238be8 100644
--- a/caustics/sims/lens_source.py
+++ b/src/caustics/sims/lens_source.py
@@ -2,16 +2,17 @@
from scipy.fft import next_fast_len
from torch.nn.functional import avg_pool2d, conv2d
-from typing import Tuple, Optional
+from typing import Optional
import torch
from .simulator import Simulator
__all__ = ("Lens_Source",)
+
class Lens_Source(Simulator):
"""Lens image of a source.
-
+
     Straightforward simulator to sample a lensed image of a source
object. Constructs a sampling grid internally based on the
pixelscale and gridding parameters. It can automatically upscale
@@ -28,7 +29,7 @@ class Lens_Source(Simulator):
lens = caustics.lenses.SIS(cosmology = cosmo, x0 = 0., y0 = 0., th_ein = 1.)
         source = caustics.light.Sersic(x0 = 0., y0 = 0., q = 0.5, phi = 0.4, n = 2., Re = 1., Ie = 1.)
         sim = caustics.sims.Lens_Source(lens, source, pixelscale = 0.05, pixels_x = 100, pixels_y = 100, upsample_factor = 2, z_s = 1.)
-
+
img = sim()
plt.imshow(img, origin = "lower")
plt.show()
@@ -47,19 +48,20 @@ class Lens_Source(Simulator):
name (default "sim"): a name for this simulator in the parameter DAG.
"""
+
def __init__(
self,
lens,
source,
pixelscale: float,
pixels_x: int,
- lens_light = None,
- psf = None,
+ lens_light=None,
+ psf=None,
pixels_y: Optional[int] = None,
upsample_factor: int = 1,
- psf_pad = True,
- psf_mode = "fft",
- z_s = None,
+ psf_pad=True,
+ psf_mode="fft",
+ z_s=None,
name: str = "sim",
):
super().__init__(name)
@@ -72,7 +74,7 @@ def __init__(
self.psf = None
else:
self.psf = torch.as_tensor(psf)
- self.psf /= psf.sum() # ensure normalized
+ self.psf /= psf.sum() # ensure normalized
self.add_param("z_s", z_s)
# Image grid
@@ -83,20 +85,31 @@ def __init__(
# PSF padding if needed
self.psf_mode = psf_mode
if psf_pad and self.psf is not None:
- self.psf_pad = (self.psf.shape[1]//2 + 1, self.psf.shape[0]//2 + 1)
+ self.psf_pad = (self.psf.shape[1] // 2 + 1, self.psf.shape[0] // 2 + 1)
else:
- self.psf_pad = (0,0)
+ self.psf_pad = (0, 0)
# Build the imaging grid
self.upsample_factor = upsample_factor
- self.n_pix = (self.gridding[0] + self.psf_pad[0]*2, self.gridding[1] + self.psf_pad[1]*2)
- tx = torch.linspace(-0.5 * (pixelscale * self.n_pix[0]), 0.5 * (pixelscale * self.n_pix[0]), self.n_pix[0]*upsample_factor)
- ty = torch.linspace(-0.5 * (pixelscale * self.n_pix[1]), 0.5 * (pixelscale * self.n_pix[1]), self.n_pix[1]*upsample_factor)
- self.grid = torch.meshgrid(tx, ty, indexing = "xy")
+ self.n_pix = (
+ self.gridding[0] + self.psf_pad[0] * 2,
+ self.gridding[1] + self.psf_pad[1] * 2,
+ )
+ tx = torch.linspace(
+ -0.5 * (pixelscale * self.n_pix[0]),
+ 0.5 * (pixelscale * self.n_pix[0]),
+ self.n_pix[0] * upsample_factor,
+ )
+ ty = torch.linspace(
+ -0.5 * (pixelscale * self.n_pix[1]),
+ 0.5 * (pixelscale * self.n_pix[1]),
+ self.n_pix[1] * upsample_factor,
+ )
+ self.grid = torch.meshgrid(tx, ty, indexing="xy")
if self.psf is not None:
- self.psf_fft = self._fft2_padded(self.psf)
-
+ self.psf_fft = self._fft2_padded(self.psf)
+
def _fft2_padded(self, x):
"""
Compute the 2D Fast Fourier Transform (FFT) of a tensor with zero-padding.
@@ -110,9 +123,9 @@ def _fft2_padded(self, x):
npix = copy(self.n_pix)
npix = (next_fast_len(npix[0]), next_fast_len(npix[1]))
self._s = npix
-
+
return torch.fft.rfft2(x, self._s)
-
+
def _unpad_fft(self, x):
"""
Remove padding from the result of a 2D FFT.
@@ -123,17 +136,26 @@ def _unpad_fft(self, x):
Returns:
Tensor: The input tensor without padding.
"""
- return torch.roll(x, (-self.psf_pad[0],-self.psf_pad[1]), dims = (-2,-1))[..., :self.n_pix[0], :self.n_pix[1]]
+ return torch.roll(x, (-self.psf_pad[0], -self.psf_pad[1]), dims=(-2, -1))[
+ ..., : self.n_pix[0], : self.n_pix[1]
+ ]
- def forward(self, params, source_light=True, lens_light=True, lens_source=True, psf_convolve=True):
+ def forward(
+ self,
+ params,
+ source_light=True,
+ lens_light=True,
+ lens_source=True,
+ psf_convolve=True,
+ ):
"""
params: Packed object
source_light: when true the source light will be sampled
lens_light: when true the lens light will be sampled
- lens_source: when true, the source light model will be lensed by the lens mass distribution
+ lens_source: when true, the source light model will be lensed by the lens mass distribution
psf_convolve: when true the image will be convolved with the psf
"""
- z_s, = self.unpack(params)
+ (z_s,) = self.unpack(params)
# Sample the source light
if source_light:
@@ -156,13 +178,24 @@ def forward(self, params, source_light=True, lens_light=True, lens_source=True,
if psf_convolve and self.psf is not None:
if self.psf_mode == "fft":
mu_fft = self._fft2_padded(mu)
- mu = self._unpad_fft(torch.fft.irfft2(mu_fft * self.psf_fft, self._s).real)
+ mu = self._unpad_fft(
+ torch.fft.irfft2(mu_fft * self.psf_fft, self._s).real
+ )
elif self.psf_mode == "conv2d":
- mu = conv2d(mu[None, None], self.psf[None, None], padding = "same").squeeze()
+ mu = conv2d(
+ mu[None, None], self.psf[None, None], padding="same"
+ ).squeeze()
else:
- raise ValueError(f"psf_mode should be one of 'fft' or 'conv2d', not {self.psf_mode}")
+ raise ValueError(
+ f"psf_mode should be one of 'fft' or 'conv2d', not {self.psf_mode}"
+ )
# Return to the desired image
- mu_native_resolution = avg_pool2d(mu[None, None], self.upsample_factor, divisor_override = 1).squeeze()
- mu_clipped = mu_native_resolution[self.psf_pad[1]:self.gridding[1] + self.psf_pad[1], self.psf_pad[0]:self.gridding[0] + self.psf_pad[0]]
+ mu_native_resolution = avg_pool2d(
+ mu[None, None], self.upsample_factor, divisor_override=1
+ ).squeeze()
+ mu_clipped = mu_native_resolution[
+ self.psf_pad[1] : self.gridding[1] + self.psf_pad[1],
+ self.psf_pad[0] : self.gridding[0] + self.psf_pad[0],
+ ]
return mu_clipped
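Since `test_simulator.py` is removed below, a compact version of its setup is kept here as a usage reference (same constructor arguments as the deleted test):

```python
from math import pi

from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIE
from caustics.light import Sersic
from caustics.sims import Lens_Source
from caustics.utils import gaussian

cosmology = FlatLambdaCDM(name="cosmo")
lens = SIE(
    name="lens", cosmology=cosmology, z_l=1.0, x0=0.0, y0=0.01, q=0.5, phi=pi / 3, b=1.0
)
source = Sersic(name="source", x0=0.01, y0=-0.03, q=0.6, phi=-pi / 4, n=2.0, Re=0.5, Ie=1.0)
lenslight = Sersic(name="lenslight", x0=0.0, y0=0.01, q=0.7, phi=pi / 4, n=3.0, Re=0.7, Ie=1.0)
psf = gaussian(0.05, 11, 11, 0.2, upsample=2)

sim = Lens_Source(
    lens=lens,
    source=source,
    pixelscale=0.05,
    pixels_x=50,
    lens_light=lenslight,
    psf=psf,
    z_s=2.0,
)
img = sim()  # all parameters are static, so no Packed argument is required
```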
diff --git a/caustics/sims/simulator.py b/src/caustics/sims/simulator.py
similarity index 97%
rename from caustics/sims/simulator.py
rename to src/caustics/sims/simulator.py
index ae3ecd9d..d24f5260 100644
--- a/caustics/sims/simulator.py
+++ b/src/caustics/sims/simulator.py
@@ -23,7 +23,5 @@ def __call__(self, *args, **kwargs):
else:
packed_args = self.pack()
rest_args = tuple()
-
-
- return self.forward(packed_args, *rest_args, **kwargs)
+ return self.forward(packed_args, *rest_args, **kwargs)
diff --git a/caustics/utils.py b/src/caustics/utils.py
similarity index 90%
rename from caustics/utils.py
rename to src/caustics/utils.py
index 6101c12d..f3fb94c7 100644
--- a/caustics/utils.py
+++ b/src/caustics/utils.py
@@ -7,6 +7,7 @@
from torch.func import jacfwd
import numpy as np
+
def flip_axis_ratio(q, phi):
"""
Makes the value of 'q' positive, then swaps x and y axes if 'q' is larger than 1.
@@ -100,12 +101,22 @@ def get_meshgrid(
Returns:
Tuple[Tensor, Tensor]: The generated meshgrid as a tuple of Tensors.
"""
- xs = torch.linspace(-1, 1, nx, device=device, dtype=dtype) * pixelscale * (nx - 1) / 2
- ys = torch.linspace(-1, 1, ny, device=device, dtype=dtype) * pixelscale * (ny - 1) / 2
+ xs = (
+ torch.linspace(-1, 1, nx, device=device, dtype=dtype)
+ * pixelscale
+ * (nx - 1)
+ / 2
+ )
+ ys = (
+ torch.linspace(-1, 1, ny, device=device, dtype=dtype)
+ * pixelscale
+ * (ny - 1)
+ / 2
+ )
return torch.meshgrid([xs, ys], indexing="xy")
-def safe_divide(num, denom, places = 7):
+def safe_divide(num, denom, places=7):
"""
Safely divides two tensors, returning zero where the denominator is zero.
@@ -342,28 +353,28 @@ def get_cluster_means(xs: Tensor, k: int):
return torch.stack(means)
-def _lm_step(f, X, Y, Cinv, L, Lup, Ldn, epsilon):
+def _lm_step(f, X, Y, Cinv, L, Lup, Ldn, epsilon):
# Forward
fY = f(X)
dY = Y - fY
-
+
# Jacobian
J = jacfwd(f)(X)
- J = J.to(dtype = X.dtype)
+ J = J.to(dtype=X.dtype)
chi2 = (dY @ Cinv @ dY).sum(-1)
-
+
# Gradient
grad = J.T @ Cinv @ dY
-
+
# Hessian
hess = J.T @ Cinv @ J
- hess_perturb = L * (torch.diag(hess) + 0.1*torch.eye(hess.shape[0]))
+ hess_perturb = L * (torch.diag(hess) + 0.1 * torch.eye(hess.shape[0]))
hess = hess + hess_perturb
# Step
h = torch.linalg.solve(hess, grad)
-
+
# New chi^2
fYnew = f(X + h)
dYnew = Y - fYnew
@@ -379,32 +390,33 @@ def _lm_step(f, X, Y, Cinv, L, Lup, Ldn, epsilon):
return X, L, chi2
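# Editor's note: the step above is a standard Levenberg-Marquardt update.
# With H = J^T Cinv J and g = J^T Cinv (Y - f(X)), it solves (H + damping) h = g
# and compares chi^2 at X + h to chi^2 at X; presumably (the acceptance branch
# is elided from this hunk) L shrinks by Ldn when the step improves chi^2 and
# grows by Lup otherwise.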
+
def batch_lm(
- X, # B, Din
- Y, # B, Dout
- f, # Din -> Dout
- C = None, # B, Dout, Dout
- epsilon = 1e-1,
- L = 1e0,
- L_dn = 11.,
- L_up = 9.,
- max_iter = 50,
- L_min = 1e-9,
- L_max = 1e9,
- stopping = 1e-4,
- f_args = (),
- f_kwargs = {},
+ X, # B, Din
+ Y, # B, Dout
+ f, # Din -> Dout
+ C=None, # B, Dout, Dout
+ epsilon=1e-1,
+ L=1e0,
+ L_dn=11.0,
+ L_up=9.0,
+ max_iter=50,
+ L_min=1e-9,
+ L_max=1e9,
+ stopping=1e-4,
+ f_args=(),
+ f_kwargs={},
):
B, Din = X.shape
B, Dout = Y.shape
-
+
if len(X) != len(Y):
raise ValueError("x and y must having matching batch dimension")
if C is None:
C = torch.eye(Dout).repeat(B, 1, 1)
Cinv = torch.linalg.inv(C)
-
+
v_lm_step = torch.vmap(partial(_lm_step, lambda x: f(x, *f_args, **f_kwargs)))
L = L * torch.ones(B)
Lup = L_up * torch.ones(B)
@@ -412,22 +424,33 @@ def batch_lm(
e = epsilon * torch.ones(B)
for _ in range(max_iter):
Xnew, L, C = v_lm_step(X, Y, Cinv, L, Lup, Ldn, e)
- if torch.all((Xnew - X).abs() < stopping) and torch.sum(L < 1e-2).item() > B/3:
+ if (
+ torch.all((Xnew - X).abs() < stopping)
+ and torch.sum(L < 1e-2).item() > B / 3
+ ):
break
X = Xnew
-
+
return X, L, C
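# Editor's usage sketch (assumption: the linear model below is illustrative,
# not from the patch). batch_lm fits B parameter vectors at once:
import torch

A = torch.randn(3, 2)       # maps Din=2 parameters to Dout=3 outputs
X_true = torch.randn(4, 2)  # B=4 ground-truth parameter vectors
Y = X_true @ A.T            # targets, shape (B, Dout)
X_fit, L_fit, chi2 = batch_lm(torch.zeros(4, 2), Y, lambda x: A @ x)
# X_fit should approach X_true (A almost surely has full column rank)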
-def gaussian(pixelscale, nx, ny, sigma, upsample = 1, dtype = torch.float32, device = None):
-
+
+def gaussian(pixelscale, nx, ny, sigma, upsample=1, dtype=torch.float32, device=None):
X, Y = np.meshgrid(
- np.linspace(-(nx*upsample - 1) * pixelscale / 2, (nx*upsample - 1) * pixelscale / 2, nx*upsample),
- np.linspace(-(ny*upsample - 1) * pixelscale / 2, (ny*upsample - 1) * pixelscale / 2, ny*upsample),
- indexing = "xy",
+ np.linspace(
+ -(nx * upsample - 1) * pixelscale / 2,
+ (nx * upsample - 1) * pixelscale / 2,
+ nx * upsample,
+ ),
+ np.linspace(
+ -(ny * upsample - 1) * pixelscale / 2,
+ (ny * upsample - 1) * pixelscale / 2,
+ ny * upsample,
+ ),
+ indexing="xy",
)
- Z = np.exp(- 0.5 * (X**2 + Y**2) / sigma**2)
-
+ Z = np.exp(-0.5 * (X**2 + Y**2) / sigma**2)
+
Z = Z.reshape(ny, upsample, nx, upsample).sum(axis=(1, 3))
- return torch.tensor(Z / np.sum(Z), dtype = dtype, device = device)
+ return torch.tensor(Z / np.sum(Z), dtype=dtype, device=device)
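# Usage as in tests/test_simulator.py below (names `gaussian` and `torch`
# come from the file above): an 11x11 PSF kernel sampled at 0.05"/pixel on a
# 2x supersampled grid, binned back down and normalized to sum to one.
psf = gaussian(0.05, 11, 11, 0.2, upsample=2)
assert psf.shape == (11, 11)
assert torch.isclose(psf.sum(), torch.tensor(1.0))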
diff --git a/test/test_simulator.py b/test/test_simulator.py
deleted file mode 100644
index 562306ee..00000000
--- a/test/test_simulator.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from math import pi
-
-import torch
-
-from caustics.sims import Lens_Source
-from caustics.cosmology import FlatLambdaCDM
-from caustics.lenses import SIE
-from caustics.light import Sersic
-from caustics.utils import get_meshgrid, gaussian
-
-def test_simulator_runs():
-
- # Model
- cosmology = FlatLambdaCDM(name="cosmo")
- lensmass = SIE(name="lens", cosmology=cosmology, z_l = 1., x0 = 0., y0 = 0.01, q = 0.5, phi = pi/3., b = 1.)
-
- source = Sersic(name="source", x0 = 0.01, y0 = -0.03, q = 0.6, phi = -pi/4, n = 2., Re = 0.5, Ie = 1.)
- lenslight = Sersic(name="lenslight", x0 = 0.0, y0 = 0.01, q = 0.7, phi = pi/4, n = 3., Re = 0.7, Ie = 1.)
-
- psf = gaussian(0.05, 11, 11, 0.2, upsample = 2)
-
- sim = Lens_Source(lens = lensmass, source = source, pixelscale = 0.05, pixels_x = 50, lens_light = lenslight, psf = psf, z_s = 2.)
-
- assert torch.all(torch.isfinite(sim()))
- assert torch.all(torch.isfinite(sim({}, source_light=True, lens_light=True, lens_source=True, psf_convolve=False)))
- assert torch.all(torch.isfinite(sim({}, source_light=True, lens_light=True, lens_source=False, psf_convolve=True)))
- assert torch.all(torch.isfinite(sim({}, source_light=True, lens_light=False, lens_source=True, psf_convolve=True)))
- assert torch.all(torch.isfinite(sim({}, source_light=False, lens_light=True, lens_source=True, psf_convolve=True)))
diff --git a/test/test_base.py b/tests/test_base.py
similarity index 69%
rename from test/test_base.py
rename to tests/test_base.py
index 60068931..320ea69e 100644
--- a/test/test_base.py
+++ b/tests/test_base.py
@@ -1,38 +1,36 @@
-from math import pi
-
import torch
import numpy as np
from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIE
-def test():
+def test():
z_l = torch.tensor(0.5, dtype=torch.float32)
z_s = torch.tensor(1.5, dtype=torch.float32)
-
+
# Model
cosmology = FlatLambdaCDM(name="cosmo")
lens = SIE(
name="sie",
cosmology=cosmology,
- z_l = z_l,
- x0 = torch.tensor(0.),
- y0 = torch.tensor(0.),
- q = torch.tensor(0.4),
- phi = torch.tensor(np.pi/5),
- b = torch.tensor(1.),
+ z_l=z_l,
+ x0=torch.tensor(0.0),
+ y0=torch.tensor(0.0),
+ q=torch.tensor(0.4),
+ phi=torch.tensor(np.pi / 5),
+ b=torch.tensor(1.0),
)
-
+
# Point in the source plane
sp_x = torch.tensor(0.2)
sp_y = torch.tensor(0.2)
-
+
# Points in image plane
x, y = lens.forward_raytrace(sp_x, sp_y, z_s)
-
+
# Raytrace to check
bx, by = lens.raytrace(x, y, z_s)
assert torch.all((sp_x - bx).abs() < 1e-3)
- assert torch.all((sp_y - by).abs() < 1e-3)
+ assert torch.all((sp_y - by).abs() < 1e-3)
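# Editor's note: the round trip above is the lens equation
# beta = theta - alpha(theta). forward_raytrace solves it for image-plane
# positions theta given the source-plane point (sp_x, sp_y), and raytrace
# maps those positions back, so bx, by should recover sp_x, sp_y within
# tolerance.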
diff --git a/test/test_batching.py b/tests/test_batching.py
similarity index 77%
rename from test/test_batching.py
rename to tests/test_batching.py
index 7671b569..78004aea 100644
--- a/test/test_batching.py
+++ b/tests/test_batching.py
@@ -1,59 +1,77 @@
import torch
from torch import vmap
from utils import setup_image_simulator, setup_simulator
-import pytest
+
def test_vmapped_simulator():
- sim, (sim_params, cosmo_params, lens_params, source_params) = setup_simulator(batched_params=True)
+ sim, (sim_params, cosmo_params, lens_params, source_params) = setup_simulator(
+ batched_params=True
+ )
n_pix = sim.n_pix
print(sim.params)
-
+
# test list input
x = sim_params + cosmo_params + lens_params + source_params
print(x[0].shape)
assert vmap(sim)(x).shape == torch.Size([2, n_pix, n_pix])
-
+
# test tensor input
x_tensor = torch.stack(x, dim=1)
print(x_tensor.shape)
assert vmap(sim)(x_tensor).shape == torch.Size([2, n_pix, n_pix])
-
+
    # Test dictionary input: only modules with dynamic parameters are required
- x_dict = {"simulator": sim_params, "cosmo": cosmo_params, "source": source_params, "lens": lens_params}
+ x_dict = {
+ "simulator": sim_params,
+ "cosmo": cosmo_params,
+ "source": source_params,
+ "lens": lens_params,
+ }
print(x_dict)
assert vmap(sim)(x_dict).shape == torch.Size([2, n_pix, n_pix])
-
+
# Test semantic list (one tensor per module)
x_semantic = [sim_params, cosmo_params, lens_params, source_params]
assert vmap(sim)(x_semantic).shape == torch.Size([2, n_pix, n_pix])
def test_vmapped_simulator_with_pixelated_modules():
- sim, (cosmo_params, lens_params, kappa, source) = setup_image_simulator(batched_params=True)
+ sim, (cosmo_params, lens_params, kappa, source) = setup_image_simulator(
+ batched_params=True
+ )
n_pix = sim.n_pix
print(sim.params)
-
+
# test list input
- x = cosmo_params + lens_params + kappa + source
+ x = cosmo_params + lens_params + kappa + source
print(x[2].shape)
assert vmap(sim)(x).shape == torch.Size([2, n_pix, n_pix])
-
+
# test tensor input: Does not work well with images since it would require unflattening the images in caustics
# x_tensor = torch.concat([_x.view(2, -1) for _x in x], dim=1)
# print(x_tensor.shape)
# assert vmap(sim)(x_tensor).shape == torch.Size([2, n_pix, n_pix])
-
+
    # Test dictionary input: only modules with dynamic parameters are required
- x_dict = {"cosmo": cosmo_params, "source": source, "lens": lens_params, "kappa": kappa}
+ x_dict = {
+ "cosmo": cosmo_params,
+ "source": source,
+ "lens": lens_params,
+ "kappa": kappa,
+ }
print(x_dict)
assert vmap(sim)(x_dict).shape == torch.Size([2, n_pix, n_pix])
-
+
# Test passing tensor in source and kappa instead of list
- x_dict = {"cosmo": cosmo_params, "source": source[0], "lens": lens_params, "kappa": kappa[0]}
+ x_dict = {
+ "cosmo": cosmo_params,
+ "source": source[0],
+ "lens": lens_params,
+ "kappa": kappa[0],
+ }
print(x_dict)
assert vmap(sim)(x_dict).shape == torch.Size([2, n_pix, n_pix])
-
+
# Test semantic list (one tensor per module)
x_semantic = [cosmo_params, lens_params, kappa, source]
assert vmap(sim)(x_semantic).shape == torch.Size([2, n_pix, n_pix])
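# Editor's mini-example (stand-in function, not the actual simulator):
# torch.vmap maps over the leading batch dimension of every tensor it is
# given, so a batch of two parameter sets yields a batch of two images.
import torch

f = lambda x: x.sum() * torch.ones(4, 4)
assert torch.vmap(f)(torch.randn(2, 3)).shape == torch.Size([2, 4, 4])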
-
diff --git a/test/test_cosmology.py b/tests/test_cosmology.py
similarity index 100%
rename from test/test_cosmology.py
rename to tests/test_cosmology.py
diff --git a/test/test_epl.py b/tests/test_epl.py
similarity index 100%
rename from test/test_epl.py
rename to tests/test_epl.py
diff --git a/test/test_external_shear.py b/tests/test_external_shear.py
similarity index 100%
rename from test/test_external_shear.py
rename to tests/test_external_shear.py
diff --git a/test/test_interpolate_image.py b/tests/test_interpolate_image.py
similarity index 89%
rename from test/test_interpolate_image.py
rename to tests/test_interpolate_image.py
index 06056e46..c292878f 100644
--- a/test/test_interpolate_image.py
+++ b/tests/test_interpolate_image.py
@@ -59,14 +59,14 @@ def test_pixelated_source():
n = 32
x, y = get_meshgrid(res, n, n)
image = torch.ones(n, n)
- source = Pixelated(image=image, x0=0., y0=0., pixelscale=res)
+ source = Pixelated(image=image, x0=0.0, y0=0.0, pixelscale=res)
im = source.brightness(x, y)
print(im)
assert torch.all(im == image)
-
+
# Check smaller res
- source = Pixelated(image=image, x0=0., y0=0., pixelscale=res/2)
+ source = Pixelated(image=image, x0=0.0, y0=0.0, pixelscale=res / 2)
im = source.brightness(x, y)
- expected_im = torch.nn.functional.pad(torch.ones(n//2, n//2), pad=[n//4]*4)
+ expected_im = torch.nn.functional.pad(torch.ones(n // 2, n // 2), pad=[n // 4] * 4)
print(im)
assert torch.all(im == expected_im)
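# Worked numbers for the second check: n=32, so halving the source
# pixelscale shrinks the unit image to the central 16x16 pixels of the
# 32x32 grid, i.e. torch.ones(16, 16) padded with 8 zero pixels per side.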
diff --git a/test/test_jacobian_lens_equation.py b/tests/test_jacobian_lens_equation.py
similarity index 70%
rename from test/test_jacobian_lens_equation.py
rename to tests/test_jacobian_lens_equation.py
index 9b0bd94e..ef126551 100644
--- a/test/test_jacobian_lens_equation.py
+++ b/tests/test_jacobian_lens_equation.py
@@ -1,29 +1,32 @@
from math import pi
-import lenstronomy.Util.param_util as param_util
import torch
-from lenstronomy.LensModel.lens_model import LensModel
-from utils import lens_test_helper
from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIE, Multiplane
from caustics.utils import get_meshgrid
+
def test_jacobian_autograd_vs_finitediff():
# Models
cosmology = FlatLambdaCDM(name="cosmo")
lens = SIE(name="sie", cosmology=cosmology)
thx, thy = get_meshgrid(0.01, 20, 20)
-
+
# Parameters
z_s = torch.tensor(1.2)
x = torch.tensor([0.5, 0.912, -0.442, 0.7, pi / 3, 1.4])
# Evaluate Jacobian
J_autograd = lens.jacobian_lens_equation(thx, thy, z_s, lens.pack(x))
- J_finitediff = lens.jacobian_lens_equation(thx, thy, z_s, lens.pack(x), method = "finitediff", pixelscale = torch.tensor(0.01))
-
- assert torch.sum(((J_autograd - J_finitediff)/J_autograd).abs() < 1e-3) > 0.8 * J_autograd.numel()
+ J_finitediff = lens.jacobian_lens_equation(
+ thx, thy, z_s, lens.pack(x), method="finitediff", pixelscale=torch.tensor(0.01)
+ )
+
+ assert (
+ torch.sum(((J_autograd - J_finitediff) / J_autograd).abs() < 1e-3)
+ > 0.8 * J_autograd.numel()
+ )
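# Editor's note: the quantity under test is the lens-equation Jacobian
# A = d(beta)/d(theta) = I - d(alpha)/d(theta), a 2x2 matrix per grid point,
# which is why the multiplane tests below assert shape (10, 10, 2, 2).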
def test_multiplane_jacobian():
@@ -41,16 +44,19 @@ def test_multiplane_jacobian():
x = torch.tensor([p for _xs in xs for p in _xs], dtype=torch.float32)
lens = Multiplane(
- name="multiplane", cosmology=cosmology, lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))]
+ name="multiplane",
+ cosmology=cosmology,
+ lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))],
)
thx, thy = get_meshgrid(0.1, 10, 10)
-
+
# Parameters
z_s = torch.tensor(1.2)
x = torch.tensor(xs).flatten()
A = lens.jacobian_lens_equation(thx, thy, z_s, lens.pack(x))
- assert A.shape == (10,10,2,2)
-
+ assert A.shape == (10, 10, 2, 2)
+
+
def test_multiplane_jacobian_autograd_vs_finitediff():
# Setup
z_s = torch.tensor(1.5, dtype=torch.float32)
@@ -66,21 +72,28 @@ def test_multiplane_jacobian_autograd_vs_finitediff():
x = torch.tensor([p for _xs in xs for p in _xs], dtype=torch.float32)
lens = Multiplane(
- name="multiplane", cosmology=cosmology, lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))]
+ name="multiplane",
+ cosmology=cosmology,
+ lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))],
)
thx, thy = get_meshgrid(0.01, 10, 10)
-
+
# Parameters
z_s = torch.tensor(1.2)
x = torch.tensor(xs).flatten()
# Evaluate Jacobian
J_autograd = lens.jacobian_lens_equation(thx, thy, z_s, lens.pack(x))
- J_finitediff = lens.jacobian_lens_equation(thx, thy, z_s, lens.pack(x), method = "finitediff", pixelscale = torch.tensor(0.01))
-
- assert torch.sum(((J_autograd - J_finitediff)/J_autograd).abs() < 1e-2) > 0.5 * J_autograd.numel()
+ J_finitediff = lens.jacobian_lens_equation(
+ thx, thy, z_s, lens.pack(x), method="finitediff", pixelscale=torch.tensor(0.01)
+ )
+
+ assert (
+ torch.sum(((J_autograd - J_finitediff) / J_autograd).abs() < 1e-2)
+ > 0.5 * J_autograd.numel()
+ )
+
-
def test_multiplane_effective_convergence():
# Setup
z_s = torch.tensor(1.5, dtype=torch.float32)
@@ -96,7 +109,9 @@ def test_multiplane_effective_convergence():
x = torch.tensor([p for _xs in xs for p in _xs], dtype=torch.float32)
lens = Multiplane(
- name="multiplane", cosmology=cosmology, lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))]
+ name="multiplane",
+ cosmology=cosmology,
+ lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))],
)
thx, thy = get_meshgrid(0.1, 10, 10)
@@ -104,9 +119,10 @@ def test_multiplane_effective_convergence():
z_s = torch.tensor(1.2)
x = torch.tensor(xs).flatten()
C = lens.effective_convergence_div(thx, thy, z_s, lens.pack(x))
- assert C.shape == (10,10)
+ assert C.shape == (10, 10)
curl = lens.effective_convergence_curl(thx, thy, z_s, lens.pack(x))
- assert curl.shape == (10,10)
+ assert curl.shape == (10, 10)
+
if __name__ == "__main__":
test()
diff --git a/test/test_kappa_grid.py b/tests/test_kappa_grid.py
similarity index 88%
rename from test/test_kappa_grid.py
rename to tests/test_kappa_grid.py
index be5b1d61..f1a2c4b2 100644
--- a/test/test_kappa_grid.py
+++ b/tests/test_kappa_grid.py
@@ -5,9 +5,9 @@
from caustics.utils import get_meshgrid
-def _setup(n_pix, mode, use_next_fast_len, padding = "zero"):
+def _setup(n_pix, mode, use_next_fast_len, padding="zero"):
# TODO understand why this test fails for resolutions != 0.025
- res = 0.025
+ res = 0.025
thx, thy = get_meshgrid(res, n_pix, n_pix)
z_l = torch.tensor(0.5)
@@ -25,16 +25,25 @@ def _setup(n_pix, mode, use_next_fast_len, padding = "zero"):
rho_0 = torch.tensor(1.0)
d_l = cosmology.angular_diameter_distance(z_l)
- arcsec_to_rad = 1 / (180 / torch.pi * 60 ** 2)
-
- kappa_0 = lens_pj.central_convergence(z_l, z_s, rho_0, th_core * d_l * arcsec_to_rad, th_s * d_l * arcsec_to_rad, cosmology.critical_surface_density(z_l, z_s))
+ arcsec_to_rad = 1 / (180 / torch.pi * 60**2)
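# Sanity check on the conversion above: 1 / (180 / pi * 60**2) equals
# pi / 648000, one arcsecond in radians (about 4.8481e-6).
import math

assert math.isclose(1 / (180 / math.pi * 60**2), math.radians(1 / 3600))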
+
+ kappa_0 = lens_pj.central_convergence(
+ z_l,
+ z_s,
+ rho_0,
+ th_core * d_l * arcsec_to_rad,
+ th_s * d_l * arcsec_to_rad,
+ cosmology.critical_surface_density(z_l, z_s),
+ )
# z_l, thx0, thy0, kappa_0, th_core, th_s
x_pj = torch.tensor([z_l, thx0, thy0, kappa_0, th_core, th_s])
# Exact calculations
Psi = lens_pj.potential(thx, thy, z_l, lens_pj.pack(x_pj))
Psi -= Psi.min()
- alpha_x, alpha_y = lens_pj.reduced_deflection_angle(thx, thy, z_l, lens_pj.pack(x_pj))
+ alpha_x, alpha_y = lens_pj.reduced_deflection_angle(
+ thx, thy, z_l, lens_pj.pack(x_pj)
+ )
# Approximate calculations
lens_kap = PixelatedConvergence(
@@ -92,20 +101,27 @@ def test_consistency():
assert torch.allclose(alpha_x_fft, alpha_x_conv2d, atol=1e-20, rtol=0)
assert torch.allclose(alpha_y_fft, alpha_y_conv2d, atol=1e-20, rtol=0)
+
def test_padoptions():
"""
    Checks whether the circular and tile padding options give the same results.
"""
_, Psi_fft_circ, _, alpha_x_fft_circ, _, alpha_y_fft_circ = _setup(
- 100, "fft", True, "circular",
+ 100,
+ "fft",
+ True,
+ "circular",
)
_, Psi_fft_tile, _, alpha_x_fft_tile, _, alpha_y_fft_tile = _setup(
- 100, "fft", True, "tile",
+ 100,
+ "fft",
+ True,
+ "tile",
)
assert torch.allclose(Psi_fft_circ, Psi_fft_tile, atol=1e-20, rtol=0)
assert torch.allclose(alpha_x_fft_circ, alpha_x_fft_tile, atol=1e-20, rtol=0)
assert torch.allclose(alpha_y_fft_circ, alpha_y_fft_tile, atol=1e-20, rtol=0)
-
+
def _check_center(
x, x_approx, center_c, center_r, rtol=1e-5, atol=1e-8, half_buffer=20
@@ -128,4 +144,3 @@ def _check_center(
rtol,
atol,
)
-
diff --git a/test/test_masssheet.py b/tests/test_masssheet.py
similarity index 56%
rename from test/test_masssheet.py
rename to tests/test_masssheet.py
index 539cbf9e..d6167b33 100644
--- a/test/test_masssheet.py
+++ b/tests/test_masssheet.py
@@ -1,9 +1,4 @@
-from math import pi
-
-import lenstronomy.Util.param_util as param_util
import torch
-from lenstronomy.LensModel.lens_model import LensModel
-from utils import lens_test_helper
from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import MassSheet
@@ -11,21 +6,18 @@
def test():
- atol = 1e-5
- rtol = 1e-5
-
# Models
cosmology = FlatLambdaCDM(name="cosmo")
lens = MassSheet(name="sheet", cosmology=cosmology)
# Parameters
z_s = torch.tensor(1.2)
- x = torch.tensor([0.5, 0., 0., 0.7])
+ x = torch.tensor([0.5, 0.0, 0.0, 0.7])
thx, thy = get_meshgrid(0.01, 10, 10)
-
+
ax, ay = lens.reduced_deflection_angle(thx, thy, z_s, *x)
- p = lens.potential(thx, thy, z_s, *x)
+ lens.potential(thx, thy, z_s, *x)
- c = lens.convergence(thx, thy, z_s, *x)
+ lens.convergence(thx, thy, z_s, *x)
diff --git a/test/test_multiplane.py b/tests/test_multiplane.py
similarity index 81%
rename from test/test_multiplane.py
rename to tests/test_multiplane.py
index f00d4a2a..ce015697 100644
--- a/test/test_multiplane.py
+++ b/tests/test_multiplane.py
@@ -1,5 +1,4 @@
from math import pi
-from operator import mul
import lenstronomy.Util.param_util as param_util
import torch
@@ -12,6 +11,7 @@
from caustics.lenses import SIE, Multiplane, PixelatedConvergence
from caustics.utils import get_meshgrid
+
def test():
rtol = 0
atol = 5e-3
@@ -30,9 +30,11 @@ def test():
x = torch.tensor([p for _xs in xs for p in _xs], dtype=torch.float32)
lens = Multiplane(
- name="multiplane", cosmology=cosmology, lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))]
+ name="multiplane",
+ cosmology=cosmology,
+ lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))],
)
- #lens.effective_reduced_deflection_angle = lens.raytrace
+ # lens.effective_reduced_deflection_angle = lens.raytrace
# lenstronomy
kwargs_ls = []
@@ -73,40 +75,46 @@ def test_params():
planes = []
for p in range(n_planes):
lens = PixelatedConvergence(
- name=f"plane_{p}",
- pixelscale=pixel_size,
- n_pix=pixels,
- cosmology=cosmology,
- z_l=z[p],
- x0=0.,
- y0=0.,
- shape=(pixels, pixels),
- padding="tile"
- )
- planes.append(lens)
+ name=f"plane_{p}",
+ pixelscale=pixel_size,
+ n_pix=pixels,
+ cosmology=cosmology,
+ z_l=z[p],
+ x0=0.0,
+ y0=0.0,
+ shape=(pixels, pixels),
+ padding="tile",
+ )
+ planes.append(lens)
multiplane_lens = Multiplane(cosmology=cosmology, lenses=planes)
z_s = torch.tensor(z_s)
x, y = get_meshgrid(pixel_size, 32, 32)
params = [torch.randn(pixels, pixels) for i in range(10)]
# Test out the computation of a few quantities to make sure params are passed correctly
-
+
# First case, params as list of tensors
kappa_eff = multiplane_lens.effective_convergence_div(x, y, z_s, params)
assert kappa_eff.shape == torch.Size([32, 32])
- alphax, alphay = multiplane_lens.effective_reduced_deflection_angle(x, y, z_s, params)
+ alphax, alphay = multiplane_lens.effective_reduced_deflection_angle(
+ x, y, z_s, params
+ )
- # Second case, params given as a kwargs
+    # Second case, params given as kwargs
kappa_eff = multiplane_lens.effective_convergence_div(x, y, z_s, params=params)
assert kappa_eff.shape == torch.Size([32, 32])
- alphax, alphay = multiplane_lens.effective_reduced_deflection_angle(x, y, z_s, params=params)
+ alphax, alphay = multiplane_lens.effective_reduced_deflection_angle(
+ x, y, z_s, params=params
+ )
# Test that we can pass a dictionary
params = {f"plane_{p}": torch.randn(pixels, pixels) for p in range(n_planes)}
kappa_eff = multiplane_lens.effective_convergence_div(x, y, z_s, params)
assert kappa_eff.shape == torch.Size([32, 32])
- alphax, alphay = multiplane_lens.effective_reduced_deflection_angle(x, y, z_s, params)
+ alphax, alphay = multiplane_lens.effective_reduced_deflection_angle(
+ x, y, z_s, params
+ )
+
-
if __name__ == "__main__":
test()
diff --git a/test/test_namespace_dict.py b/tests/test_namespace_dict.py
similarity index 99%
rename from test/test_namespace_dict.py
rename to tests/test_namespace_dict.py
index c5854e74..0ce3075e 100644
--- a/test/test_namespace_dict.py
+++ b/tests/test_namespace_dict.py
@@ -78,7 +78,6 @@ def test_nested_namespace_dict_collapse_shared_node():
nested_namespace["bar.qux.corge"] = 98
-
def test_nested_namespace_errors():
nested_namespace = NestedNamespaceDict()
with raises(AttributeError):
@@ -95,4 +94,3 @@ def test_nested_namespace_errors():
with raises(KeyError):
nested_namespace.foo = {"bar": 42}
print(nested_namespace["qux.baz"])
-
diff --git a/test/test_nfw.py b/tests/test_nfw.py
similarity index 98%
rename from test/test_nfw.py
rename to tests/test_nfw.py
index 9b2c0ea1..b2a8093a 100644
--- a/test/test_nfw.py
+++ b/tests/test_nfw.py
@@ -39,7 +39,7 @@ def test():
m = 1e12
c = 8.0
x = torch.tensor([thx0, thy0, m, c])
-
+
# Lenstronomy
cosmo = FlatLambdaCDM_AP(H0=h0_default * 100, Om0=Om0_default, Ob0=Ob0_default)
lens_cosmo = LensCosmo(z_lens=z_l.item(), z_source=z_s.item(), cosmo=cosmo)
@@ -52,11 +52,12 @@ def test():
lens_test_helper(lens, lens_ls, z_s, x, kwargs_ls, atol, rtol)
+
def test_runs():
cosmology = CausticFlatLambdaCDM(name="cosmo")
z_l = torch.tensor(0.1)
- lens = NFW(name="nfw", cosmology=cosmology, z_l=z_l, use_case = "differentiable")
-
+ lens = NFW(name="nfw", cosmology=cosmology, z_l=z_l, use_case="differentiable")
+
# Parameters
z_s = torch.tensor(0.5)
@@ -65,9 +66,9 @@ def test_runs():
m = 1e12
rs = 8.0
x = torch.tensor([thx0, thy0, m, rs])
-
+
thx, thy, thx_ls, thy_ls = setup_grids()
-
+
Psi = lens.potential(thx, thy, z_s, lens.pack(x))
assert torch.all(torch.isfinite(Psi))
alpha = lens.reduced_deflection_angle(thx, thy, z_s, lens.pack(x))
@@ -75,7 +76,7 @@ def test_runs():
assert torch.all(torch.isfinite(alpha[1]))
kappa = lens.convergence(thx, thy, z_s, lens.pack(x))
assert torch.all(torch.isfinite(kappa))
-
+
if __name__ == "__main__":
test()
diff --git a/test/test_parameter.py b/tests/test_parameter.py
similarity index 53%
rename from test/test_parameter.py
rename to tests/test_parameter.py
index c879988b..5c6f90f1 100644
--- a/test/test_parameter.py
+++ b/tests/test_parameter.py
@@ -1,33 +1,31 @@
-import torch
-from caustics.lenses import EPL, PixelatedConvergence
-from caustics.sims import Simulator
+from caustics.lenses import PixelatedConvergence
from caustics.cosmology import FlatLambdaCDM
-from caustics.light import Pixelated
import pytest
# For a future PR: currently this test fails
# def test_static_parameter_init():
- # module = EPL(FlatLambdaCDM(h0=0.7, Om0=0.3))
- # print(module.params)
- # module.to(dtype=torch.float16)
- # assert module.params.static.FlatLambdaCDM.h0.value.dtype == torch.float16
+# module = EPL(FlatLambdaCDM(h0=0.7, Om0=0.3))
+# print(module.params)
+# module.to(dtype=torch.float16)
+# assert module.params.static.FlatLambdaCDM.h0.value.dtype == torch.float16
+
def test_shape_error_messages():
# with pytest.raises(TypeError):
- # # User cannot enter a list, only a tuple for type checking and consistency with torch
- # module = Pixelated(shape=[8, 8])
-
+ # # User cannot enter a list, only a tuple for type checking and consistency with torch
+ # module = Pixelated(shape=[8, 8])
+
# with pytest.raises(ValueError):
- # module = Pixelated(shape=(8,))
-
+ # module = Pixelated(shape=(8,))
+
fov = 7.8
n_pix = 20
cosmo = FlatLambdaCDM()
with pytest.raises(TypeError):
# User cannot enter a list, only a tuple (because of type checking and consistency with torch)
PixelatedConvergence(fov, n_pix, cosmo, shape=[8, 8])
-
- with pytest.raises(ValueError):
+
+ with pytest.raises(ValueError):
# wrong number of dimensions
PixelatedConvergence(fov, n_pix, cosmo, shape=(8,))
@@ -35,6 +33,9 @@ def test_shape_error_messages():
def test_repr():
cosmo = FlatLambdaCDM()
print(cosmo.h0)
- assert cosmo.h0.__repr__() == f"Param(value={cosmo.h0.value}, dtype={str(cosmo.h0.dtype)})"
+ assert (
+ cosmo.h0.__repr__()
+ == f"Param(value={cosmo.h0.value}, dtype={str(cosmo.h0.dtype)})"
+ )
cosmo = FlatLambdaCDM(h0=None)
assert cosmo.h0.__repr__() == f"Param(shape={cosmo.h0.shape})"
diff --git a/test/test_parametrized.py b/tests/test_parametrized.py
similarity index 82%
rename from test/test_parametrized.py
rename to tests/test_parametrized.py
index fa2592a0..9deed57e 100644
--- a/test/test_parametrized.py
+++ b/tests/test_parametrized.py
@@ -1,5 +1,4 @@
import torch
-from torch import vmap
import pytest
import numpy as np
from caustics.sims import Simulator
@@ -19,15 +18,15 @@ def __init__(self):
self.epl = EPL(self.cosmo)
self.sersic = Sersic()
self.add_param("z_s", 1.0)
-
+
sim = Sim()
- assert len(sim.module_params) == 2 # dynamic and static
- assert len(sim.module_params.dynamic) == 0 # simulator has no dynmaic params
- assert len(sim.module_params.static) == 1 # and 1 static param (z_s)
- assert len(sim.params) == 2 # dynamic and static
- assert len(sim.params.dynamic) == 3 # cosmo, epl and sersic
- assert len(sim.params.static) == 2 # simulator and cosmo have static params
- assert len(sim.params.dynamic.flatten()) == 15 # total number of params
+ assert len(sim.module_params) == 2 # dynamic and static
+    assert len(sim.module_params.dynamic) == 0  # simulator has no dynamic params
+ assert len(sim.module_params.static) == 1 # and 1 static param (z_s)
+ assert len(sim.params) == 2 # dynamic and static
+ assert len(sim.params.dynamic) == 3 # cosmo, epl and sersic
+ assert len(sim.params.static) == 2 # simulator and cosmo have static params
+ assert len(sim.params.dynamic.flatten()) == 15 # total number of params
def test_graph():
@@ -39,43 +38,53 @@ def test_graph():
def test_unpack_all_modules_dynamic():
sim, (sim_params, cosmo_params, lens_params, source_params) = setup_simulator()
n_pix = sim.n_pix
-
+
# test list input
x = sim_params + cosmo_params + lens_params + source_params
assert sim(x).shape == torch.Size([n_pix, n_pix])
-
+
# test tensor input
x_tensor = torch.stack(x)
assert sim(x_tensor).shape == torch.Size([n_pix, n_pix])
-
+
    # Test dictionary input: only modules with dynamic parameters are required
- x_dict = {"simulator": sim_params, "cosmo": cosmo_params, "source": source_params, "lens": lens_params}
-
+ x_dict = {
+ "simulator": sim_params,
+ "cosmo": cosmo_params,
+ "source": source_params,
+ "lens": lens_params,
+ }
+
assert sim(x_dict).shape == torch.Size([n_pix, n_pix])
-
+
# Test semantic list (one tensor per module)
- x_semantic = [torch.stack(module) for module in [sim_params, cosmo_params, lens_params, source_params]]
+ x_semantic = [
+ torch.stack(module)
+ for module in [sim_params, cosmo_params, lens_params, source_params]
+ ]
assert sim(x_semantic).shape == torch.Size([n_pix, n_pix])
def test_unpack_some_modules_static():
    # same test as above, but cosmo is completely static so it is not fed to the forward method
- sim, (_, _, lens_params, source_params) = setup_simulator(cosmo_static=True, simulator_static=True)
+ sim, (_, _, lens_params, source_params) = setup_simulator(
+ cosmo_static=True, simulator_static=True
+ )
n_pix = sim.n_pix
-
+
# test list input
x = lens_params + source_params
assert sim(x).shape == torch.Size([n_pix, n_pix])
-
+
# test tensor input
x_tensor = torch.stack(x)
assert sim(x_tensor).shape == torch.Size([n_pix, n_pix])
-
+
    # Test dictionary input: only modules with dynamic parameters are required
x_dict = {"source": source_params, "lens": lens_params}
-
+
assert sim(x_dict).shape == torch.Size([n_pix, n_pix])
-
+
# Test semantic list (one tensor per module)
x_semantic = [torch.stack(module) for module in [lens_params, source_params]]
assert sim(x_semantic).shape == torch.Size([n_pix, n_pix])
@@ -97,7 +106,7 @@ def __init__(self):
self.cosmo = FlatLambdaCDM()
self.lens = EPL(self.cosmo, name="lens")
self.source = Sersic(name="source")
-
+
sim = Sim()
assert sim.name == "Sim"
sim.name = "Test"
@@ -121,6 +130,7 @@ def test_parametrized_name_setter_bad_names():
with pytest.raises(NameError):
module = Sersic(name=name)
+
def test_parametrized_name_collision():
# Case 1: Name collision in children of simulator
class Sim(Simulator):
@@ -130,7 +140,7 @@ def __init__(self):
            # These two modules are identical and will create a name collision
self.lens1 = EPL(self.cosmo)
self.lens2 = EPL(self.cosmo)
-
+
sim = Sim()
    # Current way names are updated. Could be changed so that all params in collision
    # get a number
@@ -140,11 +150,12 @@ def __init__(self):
# Case 2: name collision in parents of a module
cosmo = FlatLambdaCDM(h0=None)
lens = EPL(cosmo)
+
class Sim(Simulator):
def __init__(self):
super().__init__()
self.lens = lens
-
+
sim1 = Sim()
sim2 = Sim()
assert sim1.name == "Sim"
@@ -153,13 +164,13 @@ def __init__(self):
assert "Sim" in lens._parents.keys()
-
-
# TODO make the params attribute -> parameters and make it more intuitive
def test_to_method():
- sim, (sim_params, cosmo_params, lens_params, source_params) = setup_simulator(batched_params=True)
-
- # Check that static params have correct type
+ sim, (sim_params, cosmo_params, lens_params, source_params) = setup_simulator(
+ batched_params=True
+ )
+
+ # Check that static params have correct type
module = Sersic(x0=0.5)
assert module.x0.dtype == torch.float32
@@ -167,18 +178,20 @@ def test_to_method():
assert module.x0.dtype == torch.float32
module = Sersic(x0=np.array(0.5))
- assert module.x0.dtype == torch.float64 # Decided against default type, so gets numpy type here
-
+ assert (
+ module.x0.dtype == torch.float64
+ ) # Decided against default type, so gets numpy type here
+
# Check that all parameters are converted to correct type
sim.to(dtype=torch.float16)
- assert sim.z_s.dtype is None # dynamic parameter
+ assert sim.z_s.dtype is None # dynamic parameter
assert sim.lens.cosmo.Om0.dtype == torch.float16
assert sim.cosmo.Om0.dtype == torch.float16
-
+
def test_parameter_redefinition():
sim, _ = setup_simulator()
-
+
# Make sure the __getattribute__ method works as intended
assert isinstance(sim.z_s, Parameter)
# Now test __setattr__ method, we need to catch the redefinition here and keep z_s a parameter
@@ -189,4 +202,3 @@ def test_parameter_redefinition():
sim.z_s = None
assert sim.z_s.value is None
assert sim.z_s.dynamic is True
-
diff --git a/test/test_pixel_grid.py b/tests/test_pixel_grid.py
similarity index 100%
rename from test/test_pixel_grid.py
rename to tests/test_pixel_grid.py
diff --git a/test/test_point.py b/tests/test_point.py
similarity index 100%
rename from test/test_point.py
rename to tests/test_point.py
diff --git a/test/test_pseudo_jaffe.py b/tests/test_pseudo_jaffe.py
similarity index 61%
rename from test/test_pseudo_jaffe.py
rename to tests/test_pseudo_jaffe.py
index 89c64f95..1cf408dd 100644
--- a/test/test_pseudo_jaffe.py
+++ b/tests/test_pseudo_jaffe.py
@@ -1,5 +1,3 @@
-from collections import defaultdict
-
import torch
from lenstronomy.LensModel.lens_model import LensModel
from utils import lens_test_helper
@@ -22,11 +20,24 @@ def test():
z_s = torch.tensor(2.1)
x = torch.tensor([0.5, 0.071, 0.023, -1e100, 0.5, 1.5])
d_l = cosmology.angular_diameter_distance(x[0])
- arcsec_to_rad = 1 / (180 / torch.pi * 60 ** 2)
+ arcsec_to_rad = 1 / (180 / torch.pi * 60**2)
kappa_0 = lens.central_convergence(
- x[0], z_s, torch.tensor(2e11), x[4] * d_l * arcsec_to_rad, x[5] * d_l * arcsec_to_rad, cosmology.critical_surface_density(x[0], z_s)
+ x[0],
+ z_s,
+ torch.tensor(2e11),
+ x[4] * d_l * arcsec_to_rad,
+ x[5] * d_l * arcsec_to_rad,
+ cosmology.critical_surface_density(x[0], z_s),
+ )
+ x[3] = (
+ 2
+ * torch.pi
+ * kappa_0
+ * cosmology.critical_surface_density(x[0], z_s)
+ * x[4]
+ * x[5]
+ * (d_l * arcsec_to_rad) ** 2
)
- x[3] = 2 * torch.pi * kappa_0 * cosmology.critical_surface_density(x[0], z_s) * x[4] * x[5] * (d_l * arcsec_to_rad)**2
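# Editor's note: x[3] is the profile's mass normalization, rebuilt from the
# central convergence kappa_0, the critical surface density, and the two
# angular scale radii x[4], x[5]; the (d_l * arcsec_to_rad)**2 factor
# converts the angular area to a physical one.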
kwargs_ls = [
{
"sigma0": kappa_0.item(),
@@ -39,22 +50,36 @@ def test():
lens_test_helper(lens, lens_ls, z_s, x, kwargs_ls, rtol, atol)
+
def test_massenclosed():
cosmology = FlatLambdaCDM(name="cosmo")
lens = PseudoJaffe(name="pj", cosmology=cosmology)
z_s = torch.tensor(2.1)
x = torch.tensor([0.5, 0.071, 0.023, -1e100, 0.5, 1.5])
d_l = cosmology.angular_diameter_distance(x[0])
- arcsec_to_rad = 1 / (180 / torch.pi * 60 ** 2)
+ arcsec_to_rad = 1 / (180 / torch.pi * 60**2)
kappa_0 = lens.central_convergence(
- x[0], z_s, torch.tensor(2e11), x[4] * d_l * arcsec_to_rad, x[5] * d_l * arcsec_to_rad, cosmology.critical_surface_density(x[0], z_s)
+ x[0],
+ z_s,
+ torch.tensor(2e11),
+ x[4] * d_l * arcsec_to_rad,
+ x[5] * d_l * arcsec_to_rad,
+ cosmology.critical_surface_density(x[0], z_s),
)
- x[3] = 2 * torch.pi * kappa_0 * cosmology.critical_surface_density(x[0], z_s) * x[4] * x[5] * (d_l * arcsec_to_rad)**2
- xx = torch.linspace(0,10,10)
+ x[3] = (
+ 2
+ * torch.pi
+ * kappa_0
+ * cosmology.critical_surface_density(x[0], z_s)
+ * x[4]
+ * x[5]
+ * (d_l * arcsec_to_rad) ** 2
+ )
+ xx = torch.linspace(0, 10, 10)
masses = lens.mass_enclosed_2d(xx, z_s, lens.pack(x))
assert torch.all(masses < x[3])
-
+
if __name__ == "__main__":
test()
diff --git a/test/test_sersic.py b/tests/test_sersic.py
similarity index 81%
rename from test/test_sersic.py
rename to tests/test_sersic.py
index cecdfe67..e4c5e9a8 100644
--- a/test/test_sersic.py
+++ b/tests/test_sersic.py
@@ -1,5 +1,3 @@
-from math import pi
-
import lenstronomy.Util.param_util as param_util
import numpy as np
import torch
@@ -35,7 +33,7 @@ def test():
# Parameters
thx0_src = 0.05
thy0_src = 0.01
- phi_src = 0.
+ phi_src = 0.0
q_src = 0.5
index_src = 1.5
th_e_src = 0.1
@@ -44,7 +42,17 @@ def test():
# the definition used by lenstronomy. This only works when phi = 0.
    # Any other angle will not give the same results between the
# two codes.
- x = torch.tensor([thx0_src * np.sqrt(q_src), thy0_src, np.sqrt(q_src), phi_src, index_src, th_e_src, I_e_src])
+ x = torch.tensor(
+ [
+ thx0_src * np.sqrt(q_src),
+ thy0_src,
+ np.sqrt(q_src),
+ phi_src,
+ index_src,
+ th_e_src,
+ I_e_src,
+ ]
+ )
e1, e2 = param_util.phi_q2_ellipticity(phi=phi_src, q=q_src)
kwargs_light_source = [
{
@@ -58,11 +66,9 @@ def test():
}
]
- brightness = sersic.brightness(thx*np.sqrt(q_src), thy, sersic.pack(x))
+ brightness = sersic.brightness(thx * np.sqrt(q_src), thy, sersic.pack(x))
x_ls, y_ls = pixel_grid.coordinate_grid(nx, ny)
- brightness_ls = sersic_ls.surface_brightness(
- x_ls, y_ls, kwargs_light_source
- )
+ brightness_ls = sersic_ls.surface_brightness(x_ls, y_ls, kwargs_light_source)
assert np.allclose(brightness.numpy(), brightness_ls)
diff --git a/test/test_sie.py b/tests/test_sie.py
similarity index 95%
rename from test/test_sie.py
rename to tests/test_sie.py
index c9827bb3..51860001 100644
--- a/test/test_sie.py
+++ b/tests/test_sie.py
@@ -7,7 +7,6 @@
from caustics.cosmology import FlatLambdaCDM
from caustics.lenses import SIE
-from caustics.utils import get_meshgrid
def test():
@@ -36,6 +35,6 @@ def test():
lens_test_helper(lens, lens_ls, z_s, x, kwargs_ls, rtol, atol)
-
+
if __name__ == "__main__":
test()
diff --git a/tests/test_simulator.py b/tests/test_simulator.py
new file mode 100644
index 00000000..8fbf5720
--- /dev/null
+++ b/tests/test_simulator.py
@@ -0,0 +1,89 @@
+from math import pi
+
+import torch
+
+from caustics.sims import Lens_Source
+from caustics.cosmology import FlatLambdaCDM
+from caustics.lenses import SIE
+from caustics.light import Sersic
+from caustics.utils import gaussian
+
+
+def test_simulator_runs():
+ # Model
+ cosmology = FlatLambdaCDM(name="cosmo")
+ lensmass = SIE(
+ name="lens",
+ cosmology=cosmology,
+ z_l=1.0,
+ x0=0.0,
+ y0=0.01,
+ q=0.5,
+ phi=pi / 3.0,
+ b=1.0,
+ )
+
+ source = Sersic(
+ name="source", x0=0.01, y0=-0.03, q=0.6, phi=-pi / 4, n=2.0, Re=0.5, Ie=1.0
+ )
+ lenslight = Sersic(
+ name="lenslight", x0=0.0, y0=0.01, q=0.7, phi=pi / 4, n=3.0, Re=0.7, Ie=1.0
+ )
+
+ psf = gaussian(0.05, 11, 11, 0.2, upsample=2)
+
+ sim = Lens_Source(
+ lens=lensmass,
+ source=source,
+ pixelscale=0.05,
+ pixels_x=50,
+ lens_light=lenslight,
+ psf=psf,
+ z_s=2.0,
+ )
+
+ assert torch.all(torch.isfinite(sim()))
+ assert torch.all(
+ torch.isfinite(
+ sim(
+ {},
+ source_light=True,
+ lens_light=True,
+ lens_source=True,
+ psf_convolve=False,
+ )
+ )
+ )
+ assert torch.all(
+ torch.isfinite(
+ sim(
+ {},
+ source_light=True,
+ lens_light=True,
+ lens_source=False,
+ psf_convolve=True,
+ )
+ )
+ )
+ assert torch.all(
+ torch.isfinite(
+ sim(
+ {},
+ source_light=True,
+ lens_light=False,
+ lens_source=True,
+ psf_convolve=True,
+ )
+ )
+ )
+ assert torch.all(
+ torch.isfinite(
+ sim(
+ {},
+ source_light=False,
+ lens_light=True,
+ lens_source=True,
+ psf_convolve=True,
+ )
+ )
+ )
diff --git a/test/test_sis.py b/tests/test_sis.py
similarity index 100%
rename from test/test_sis.py
rename to tests/test_sis.py
diff --git a/test/test_tnfw.py b/tests/test_tnfw.py
similarity index 82%
rename from test/test_tnfw.py
rename to tests/test_tnfw.py
index c721aafa..bfb5138f 100644
--- a/test/test_tnfw.py
+++ b/tests/test_tnfw.py
@@ -25,7 +25,7 @@ def test():
# Models
cosmology = CausticFlatLambdaCDM(name="cosmo")
z_l = torch.tensor(0.1)
- lens = TNFW(name="tnfw", cosmology=cosmology, z_l=z_l, interpret_m_total_mass = False)
+ lens = TNFW(name="tnfw", cosmology=cosmology, z_l=z_l, interpret_m_total_mass=False)
lens_model_list = ["TNFW"]
lens_ls = LensModel(lens_model_list=lens_model_list)
@@ -40,7 +40,7 @@ def test():
c = 8.0
t = 3.0
x = torch.tensor([thx0, thy0, m, c, t])
-
+
# Lenstronomy
cosmo = FlatLambdaCDM_AP(H0=h0_default * 100, Om0=Om0_default, Ob0=Ob0_default)
lens_cosmo = LensCosmo(z_lens=z_l.item(), z_source=z_s.item(), cosmo=cosmo)
@@ -49,16 +49,34 @@ def test():
# lenstronomy params ['Rs', 'alpha_Rs', 'center_x', 'center_y']
kwargs_ls = [
- {"Rs": Rs_angle, "alpha_Rs": alpha_Rs, "r_trunc": Rs_angle * t, "center_x": thx0, "center_y": thy0}
+ {
+ "Rs": Rs_angle,
+ "alpha_Rs": alpha_Rs,
+ "r_trunc": Rs_angle * t,
+ "center_x": thx0,
+ "center_y": thy0,
+ }
]
- lens_test_helper(lens, lens_ls, z_s, x, kwargs_ls, atol, rtol, test_alpha = True, test_Psi = False, test_kappa = True)
+ lens_test_helper(
+ lens,
+ lens_ls,
+ z_s,
+ x,
+ kwargs_ls,
+ atol,
+ rtol,
+ test_alpha=True,
+ test_Psi=False,
+ test_kappa=True,
+ )
+
def test_runs():
cosmology = CausticFlatLambdaCDM(name="cosmo")
z_l = torch.tensor(0.1)
- lens = TNFW(name="tnfw", cosmology=cosmology, z_l=z_l, use_case = "differentiable")
-
+ lens = TNFW(name="tnfw", cosmology=cosmology, z_l=z_l, use_case="differentiable")
+
# Parameters
z_s = torch.tensor(0.5)
@@ -68,9 +86,9 @@ def test_runs():
rs = 8.0
t = 3.0
x = torch.tensor([thx0, thy0, m, rs, t])
-
+
thx, thy, thx_ls, thy_ls = setup_grids()
-
+
Psi = lens.potential(thx, thy, z_s, lens.pack(x))
assert torch.all(torch.isfinite(Psi))
diff --git a/test/utils.py b/tests/utils.py
similarity index 82%
rename from test/utils.py
rename to tests/utils.py
index 72a9ef68..6ab48e0b 100644
--- a/test/utils.py
+++ b/tests/utils.py
@@ -12,8 +12,12 @@
from caustics.sims import Simulator
from caustics.cosmology import FlatLambdaCDM
-def setup_simulator(cosmo_static=False, use_nfw=True, simulator_static=False, batched_params=False):
+
+def setup_simulator(
+ cosmo_static=False, use_nfw=True, simulator_static=False, batched_params=False
+):
n_pix = 20
+
class Sim(Simulator):
def __init__(self, name="simulator"):
super().__init__(name)
@@ -24,7 +28,9 @@ def __init__(self, name="simulator"):
z_l = 0.5
self.cosmo = FlatLambdaCDM(h0=0.7 if cosmo_static else None, name="cosmo")
if use_nfw:
- self.lens = NFW(self.cosmo, z_l=z_l, name="lens") # NFW wactually depend on cosmology, so a better test for Parametrized
+ self.lens = NFW(
+ self.cosmo, z_l=z_l, name="lens"
+            )  # NFW actually depends on cosmology, so a better test for Parametrized
else:
self.lens = EPL(self.cosmo, z_l=z_l, name="lens")
self.sersic = Sersic(name="source")
@@ -32,8 +38,10 @@ def __init__(self, name="simulator"):
self.n_pix = n_pix
def forward(self, params):
- z_s, = self.unpack(params)
- alphax, alphay = self.lens.reduced_deflection_angle(x=self.thx, y=self.thy, z_s=z_s, params=params)
+ (z_s,) = self.unpack(params)
+ alphax, alphay = self.lens.reduced_deflection_angle(
+ x=self.thx, y=self.thy, z_s=z_s, params=params
+ )
bx = self.thx - alphax
by = self.thy - alphay
return self.sersic.brightness(bx, by, params)
@@ -44,10 +52,10 @@ def forward(self, params):
# default cosmo params
h0 = torch.tensor([0.68, 0.75])
cosmo_params = [h0]
- # default lens params
+ # default lens params
if use_nfw:
- x0 = torch.tensor([0., 0.1])
- y0 = torch.tensor([0., 0.1])
+ x0 = torch.tensor([0.0, 0.1])
+ y0 = torch.tensor([0.0, 0.1])
m = torch.tensor([1e12, 1e13])
c = torch.tensor([10, 5])
lens_params = [x0, y0, m, c]
@@ -59,16 +67,16 @@ def forward(self, params):
b = torch.tensor([1.5, 1.2])
t = torch.tensor([1.2, 1.0])
lens_params = [x0, y0, q, phi, b, t]
- # default source params
+ # default source params
x0s = torch.tensor([0, 0.1])
y0s = torch.tensor([0, 0.1])
qs = torch.tensor([0.9, 0.8])
phis = torch.tensor([-0.56, 0.8])
- n = torch.tensor([1., 4.])
- Re = torch.tensor([.2, .5])
- Ie = torch.tensor([1.2, 10.])
+ n = torch.tensor([1.0, 4.0])
+ Re = torch.tensor([0.2, 0.5])
+ Ie = torch.tensor([1.2, 10.0])
source_params = [x0s, y0s, qs, phis, n, Re, Ie]
-
+
if not batched_params:
sim_params = [_x[0] for _x in sim_params]
cosmo_params = [_x[0] for _x in cosmo_params]
@@ -79,6 +87,7 @@ def forward(self, params):
def setup_image_simulator(cosmo_static=False, batched_params=False):
n_pix = 20
+
class Sim(Simulator):
def __init__(self, name="test"):
super().__init__(name)
@@ -87,21 +96,38 @@ def __init__(self, name="test"):
self.z_s = torch.tensor(1.0)
self.cosmo = FlatLambdaCDM(h0=0.7 if cosmo_static else None, name="cosmo")
self.epl = EPL(self.cosmo, z_l=z_l, name="lens")
- self.kappa = PixelatedConvergence(pixel_scale, n_pix, self.cosmo, z_l=z_l, shape=(n_pix, n_pix), name="kappa")
- self.source = Pixelated(x0=0., y0=0., pixelscale=pixel_scale/2, shape=(n_pix, n_pix), name="source")
+ self.kappa = PixelatedConvergence(
+ pixel_scale,
+ n_pix,
+ self.cosmo,
+ z_l=z_l,
+ shape=(n_pix, n_pix),
+ name="kappa",
+ )
+ self.source = Pixelated(
+ x0=0.0,
+ y0=0.0,
+ pixelscale=pixel_scale / 2,
+ shape=(n_pix, n_pix),
+ name="source",
+ )
self.thx, self.thy = get_meshgrid(pixel_scale, n_pix, n_pix)
self.n_pix = n_pix
def forward(self, params):
- alphax, alphay = self.epl.reduced_deflection_angle(x=self.thx, y=self.thy, z_s=self.z_s, params=params)
- alphax_h, alphay_h = self.kappa.reduced_deflection_angle(x=self.thx, y=self.thy, z_s=self.z_s, params=params)
+ alphax, alphay = self.epl.reduced_deflection_angle(
+ x=self.thx, y=self.thy, z_s=self.z_s, params=params
+ )
+ alphax_h, alphay_h = self.kappa.reduced_deflection_angle(
+ x=self.thx, y=self.thy, z_s=self.z_s, params=params
+ )
bx = self.thx - alphax - alphax_h
by = self.thy - alphay - alphay_h
return self.source.brightness(bx, by, params)
# default cosmo params
h0 = torch.tensor([0.68, 0.75])
- # default lens params
+ # default lens params
x0 = torch.tensor([0, 0.1])
y0 = torch.tensor([0, 0.1])
q = torch.tensor([0.9, 0.8])