From 1a587f12e934a03df4ab2637490ba1e4f0781c6d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Maximilien=20Le=20Cle=C3=AF?=
<34255811+MaximilienLC@users.noreply.github.com>
Date: Wed, 17 Jan 2024 15:16:13 -0500
Subject: [PATCH] Adding Neuroevolution (#183)
* neuroevo push 1 + devcontainer git no verify
* read write utils
* initialize functions
* add exchange_agents & fix init_common
* update_exchange_and_mutate_info & type fixes
* readwrite numpy -> lists
* evolve utils + hydra config restructure start
* restructuring + docstring bleeding into DL module
* update recommended git workflow (#133)
* DL doc beautified
* compute utils + split fit/config
* added hybrid ne+dl module, restructured to match
* update readme
* utils fixup
* docstring dot paths
* utils touchups, start porting envs
* touchups + imitation
* misc changes
* documentation update + high level config docu
* more doc updates + clean
* replaced autoapi w/ autosummary in docs/ for greater customizability
* config fixes for all but DL + NE
* added paramlink color when targeted
* add documentation on how to document
* finished fixing DL docs, resumed NE
* mostly neuroevo docu
* hydra yaml inheritance, more stable paths, better intro
* reorganized run & store_configs, big doc add root API
* full transition to hydra-zen, big more stable structure
* messy commit, good hydra zen progress
* full hydra-zen (+ yaml configs if wished) more or less figured
* file shuffling
* small cleaning
* doc cleanup + starting finalizing neuroevo
* general done, dl done, ne almost done
* neuroevolution example
* more ne fixes
* almost finish neuroevo
* pr ready
* missing req
* formatting + docker logic typecheck unittest
* more workflow patching
---
.devcontainer.json | 30 ++-
...build-push.yaml => docker-build-push.yaml} | 12 +-
...container-build.yaml => docker-build.yaml} | 12 +-
.github/workflows/typecheck-unittest.yaml | 12 +-
.gitignore | 4 +-
Containerfile => Dockerfile | 2 +-
README.md | 141 +++++++---
cneuromax/__init__.py | 181 ++++++++++++-
cneuromax/__main__.py | 9 +
cneuromax/config.py | 65 +++++
cneuromax/fitting/__init__.py | 2 +-
cneuromax/fitting/common/__init__.py | 43 ---
cneuromax/fitting/common/fitter.py | 27 --
cneuromax/fitting/config.py | 24 ++
cneuromax/fitting/deeplearning/__init__.py | 127 +--------
cneuromax/fitting/deeplearning/__main__.py | 146 ----------
cneuromax/fitting/deeplearning/config.py | 58 ++++
cneuromax/fitting/deeplearning/config.yaml | 17 --
.../deeplearning/datamodule/__init__.py | 7 +-
.../fitting/deeplearning/datamodule/base.py | 139 +++++-----
cneuromax/fitting/deeplearning/fitter.py | 245 -----------------
.../deeplearning/litmodule/__init__.py | 7 +-
.../fitting/deeplearning/litmodule/base.py | 131 +++++----
.../litmodule/classification/__init__.py | 6 +-
.../litmodule/classification/base.py | 59 ++--
.../litmodule/nnmodule/__init__.py | 7 +
.../{ => litmodule}/nnmodule/mlp.py | 49 ++--
.../fitting/deeplearning/litmodule/store.py | 64 +++++
.../fitting/deeplearning/nnmodule/__init__.py | 23 --
cneuromax/fitting/deeplearning/runner.py | 75 ++++++
cneuromax/fitting/deeplearning/store.py | 26 ++
cneuromax/fitting/deeplearning/train.py | 70 +++++
.../fitting/deeplearning/utils/__init__.py | 2 +-
.../fitting/deeplearning/utils/lightning.py | 252 +++++++++++++-----
cneuromax/fitting/deeplneuroevo/__init__.py | 1 +
cneuromax/fitting/neuroevolution/__init__.py | 2 +-
.../fitting/neuroevolution/agent/__init__.py | 7 +
.../fitting/neuroevolution/agent/base.py | 128 +++++++++
cneuromax/fitting/neuroevolution/config.py | 101 +++++++
cneuromax/fitting/neuroevolution/evolve.py | 194 ++++++++++++++
.../fitting/neuroevolution/net/__init__.py | 1 +
.../neuroevolution/net/cpu/__init__.py | 1 +
.../neuroevolution/net/cpu/static/__init__.py | 4 +
.../neuroevolution/net/cpu/static/rnnfc.py | 64 +++++
cneuromax/fitting/neuroevolution/runner.py | 75 ++++++
.../fitting/neuroevolution/space/__init__.py | 14 +
.../fitting/neuroevolution/space/base.py | 66 +++++
.../neuroevolution/space/reinforcement.py | 156 +++++++++++
.../fitting/neuroevolution/utils/__init__.py | 1 +
.../fitting/neuroevolution/utils/compute.py | 223 ++++++++++++++++
.../neuroevolution/utils/compute_test.py | 42 +++
.../fitting/neuroevolution/utils/evolve.py | 161 +++++++++++
.../fitting/neuroevolution/utils/exchange.py | 177 ++++++++++++
.../neuroevolution/utils/initialize.py | 170 ++++++++++++
.../fitting/neuroevolution/utils/readwrite.py | 112 ++++++++
.../fitting/neuroevolution/utils/type.py | 37 +++
.../fitting/neuroevolution/utils/validate.py | 23 ++
.../fitting/neuroevolution/utils/wandb.py | 19 ++
cneuromax/fitting/runner.py | 23 ++
cneuromax/fitting/store.py | 29 ++
cneuromax/fitting/utils/__init__.py | 1 +
cneuromax/fitting/utils/hydra.py | 57 ++++
cneuromax/projects/__init__.py | 1 +
cneuromax/projects/classify_mnist/__init__.py | 44 +++
.../classify_mnist/datamodule.py | 37 ++-
.../classify_mnist/datamodule_test.py | 72 +++++
.../projects/classify_mnist/litmodule.py | 34 +++
.../classify_mnist/task}/mlp.yaml | 10 -
.../classify_mnist/task/mlp_beluga.yaml | 18 ++
.../projects/neuroevorl_control/__init__.py | 41 +++
.../projects/neuroevorl_control/agent.py | 159 +++++++++++
.../projects/neuroevorl_control/space.py | 38 +++
.../neuroevorl_control/task/acrobot.yaml | 17 ++
cneuromax/runner.py | 70 +++++
cneuromax/serving/__init__.py | 2 +-
cneuromax/store.py | 38 +++
cneuromax/task/__init__.py | 1 -
cneuromax/task/classify_mnist/__init__.py | 39 ---
.../task/classify_mnist/datamodule_test.py | 60 -----
cneuromax/task/classify_mnist/litmodule.py | 31 ---
cneuromax/task/classify_mnist/mlp_beluga.yaml | 43 ---
cneuromax/testing/__init__.py | 1 +
cneuromax/utils/__init__.py | 2 +-
.../utils/{annotations.py => beartype.py} | 71 ++---
cneuromax/utils/gymnasium_test.py | 12 +
cneuromax/utils/hydra.py | 31 ---
cneuromax/utils/hydra_zen.py | 74 +++++
cneuromax/utils/misc.py | 32 +++
cneuromax/utils/mpi4py.py | 20 ++
cneuromax/utils/runner.py | 95 +++++++
cneuromax/utils/torch.py | 44 +++
cneuromax/utils/wandb.py | 24 ++
docs/Contribution.rst | 60 ++++-
docs/Execution_On_a_Slurm_cluster.rst | 2 +-
docs/Execution_On_an_Ubuntu_machine.rst | 11 +-
docs/Installation_On_a_Slurm_cluster.rst | 2 +-
docs/__init__.py | 2 +-
docs/_static/paramlink_target_color.css | 3 +
docs/_templates/module.rst | 20 ++
docs/conf.py | 48 ++--
docs/genetic.pdf | Bin 0 -> 67502 bytes
docs/index.rst | 43 +--
docs/requirements.txt | 6 +-
pyproject.toml | 115 ++++----
104 files changed, 4078 insertions(+), 1358 deletions(-)
rename .github/workflows/{container-build-push.yaml => docker-build-push.yaml} (54%)
rename .github/workflows/{container-build.yaml => docker-build.yaml} (54%)
rename Containerfile => Dockerfile (95%)
create mode 100644 cneuromax/__main__.py
create mode 100644 cneuromax/config.py
delete mode 100644 cneuromax/fitting/common/__init__.py
delete mode 100644 cneuromax/fitting/common/fitter.py
create mode 100644 cneuromax/fitting/config.py
delete mode 100644 cneuromax/fitting/deeplearning/__main__.py
create mode 100644 cneuromax/fitting/deeplearning/config.py
delete mode 100644 cneuromax/fitting/deeplearning/config.yaml
delete mode 100644 cneuromax/fitting/deeplearning/fitter.py
create mode 100644 cneuromax/fitting/deeplearning/litmodule/nnmodule/__init__.py
rename cneuromax/fitting/deeplearning/{ => litmodule}/nnmodule/mlp.py (58%)
create mode 100644 cneuromax/fitting/deeplearning/litmodule/store.py
delete mode 100644 cneuromax/fitting/deeplearning/nnmodule/__init__.py
create mode 100644 cneuromax/fitting/deeplearning/runner.py
create mode 100644 cneuromax/fitting/deeplearning/store.py
create mode 100644 cneuromax/fitting/deeplearning/train.py
create mode 100644 cneuromax/fitting/deeplneuroevo/__init__.py
create mode 100644 cneuromax/fitting/neuroevolution/agent/__init__.py
create mode 100644 cneuromax/fitting/neuroevolution/agent/base.py
create mode 100644 cneuromax/fitting/neuroevolution/config.py
create mode 100644 cneuromax/fitting/neuroevolution/evolve.py
create mode 100644 cneuromax/fitting/neuroevolution/net/__init__.py
create mode 100644 cneuromax/fitting/neuroevolution/net/cpu/__init__.py
create mode 100644 cneuromax/fitting/neuroevolution/net/cpu/static/__init__.py
create mode 100644 cneuromax/fitting/neuroevolution/net/cpu/static/rnnfc.py
create mode 100644 cneuromax/fitting/neuroevolution/runner.py
create mode 100644 cneuromax/fitting/neuroevolution/space/__init__.py
create mode 100644 cneuromax/fitting/neuroevolution/space/base.py
create mode 100644 cneuromax/fitting/neuroevolution/space/reinforcement.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/__init__.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/compute.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/compute_test.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/evolve.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/exchange.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/initialize.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/readwrite.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/type.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/validate.py
create mode 100644 cneuromax/fitting/neuroevolution/utils/wandb.py
create mode 100644 cneuromax/fitting/runner.py
create mode 100644 cneuromax/fitting/store.py
create mode 100644 cneuromax/fitting/utils/__init__.py
create mode 100644 cneuromax/fitting/utils/hydra.py
create mode 100644 cneuromax/projects/__init__.py
create mode 100644 cneuromax/projects/classify_mnist/__init__.py
rename cneuromax/{task => projects}/classify_mnist/datamodule.py (71%)
create mode 100644 cneuromax/projects/classify_mnist/datamodule_test.py
create mode 100644 cneuromax/projects/classify_mnist/litmodule.py
rename cneuromax/{task/classify_mnist => projects/classify_mnist/task}/mlp.yaml (66%)
create mode 100644 cneuromax/projects/classify_mnist/task/mlp_beluga.yaml
create mode 100644 cneuromax/projects/neuroevorl_control/__init__.py
create mode 100644 cneuromax/projects/neuroevorl_control/agent.py
create mode 100644 cneuromax/projects/neuroevorl_control/space.py
create mode 100644 cneuromax/projects/neuroevorl_control/task/acrobot.yaml
create mode 100644 cneuromax/runner.py
create mode 100644 cneuromax/store.py
delete mode 100644 cneuromax/task/__init__.py
delete mode 100644 cneuromax/task/classify_mnist/__init__.py
delete mode 100644 cneuromax/task/classify_mnist/datamodule_test.py
delete mode 100644 cneuromax/task/classify_mnist/litmodule.py
delete mode 100644 cneuromax/task/classify_mnist/mlp_beluga.yaml
create mode 100644 cneuromax/testing/__init__.py
rename cneuromax/utils/{annotations.py => beartype.py} (52%)
create mode 100644 cneuromax/utils/gymnasium_test.py
delete mode 100644 cneuromax/utils/hydra.py
create mode 100644 cneuromax/utils/hydra_zen.py
create mode 100644 cneuromax/utils/misc.py
create mode 100644 cneuromax/utils/mpi4py.py
create mode 100644 cneuromax/utils/runner.py
create mode 100644 cneuromax/utils/torch.py
create mode 100644 cneuromax/utils/wandb.py
create mode 100644 docs/_static/paramlink_target_color.css
create mode 100644 docs/_templates/module.rst
create mode 100644 docs/genetic.pdf
diff --git a/.devcontainer.json b/.devcontainer.json
index 533f8746..664a17ab 100644
--- a/.devcontainer.json
+++ b/.devcontainer.json
@@ -14,30 +14,32 @@
"matangover.mypy"
],
"settings": {
- "ruff.lint.arg": [
- "--config=pyproject.toml"
- ],
"black-formatter.args": [
"--config=pyproject.toml"
],
- "[python]": {
- "editor.defaultFormatter": "ms-python.black-formatter",
- "editor.formatOnSave": true,
- "editor.codeActionsOnSave": {
- "source.organizeImports": true
- }
- },
- "mypy.configFile": "pyproject.toml",
+ "esbonio.server.hideSphinxOutput": true,
"esbonio.sphinx.buildDir": "${workspaceFolder}/docs/_build/html",
"esbonio.sphinx.confDir": "${workspaceFolder}/docs/",
"esbonio.sphinx.srcDir": "${workspaceFolder}/docs/",
- "restructuredtext.linter.doc8.executablePath": "/usr/local/bin/doc8",
- "restructuredtext.syntaxHighlighting.disabled": true,
"files.insertFinalNewline": true,
"files.trimTrailingWhitespace": true,
+ "git.allowNoVerifyCommit": true,
+ "mypy.configFile": "pyproject.toml",
"python.analysis.ignore": [
"/**"
- ]
+ ],
+ "restructuredtext.linter.doc8.executablePath": "/usr/local/bin/doc8",
+ "restructuredtext.syntaxHighlighting.disabled": true,
+ "ruff.lint.arg": [
+ "--config=pyproject.toml"
+ ],
+ "[python]": {
+ "editor.defaultFormatter": "ms-python.black-formatter",
+ "editor.formatOnSave": true,
+ "editor.codeActionsOnSave": {
+ "source.organizeImports": true
+ }
+ }
}
}
},
diff --git a/.github/workflows/container-build-push.yaml b/.github/workflows/docker-build-push.yaml
similarity index 54%
rename from .github/workflows/container-build-push.yaml
rename to .github/workflows/docker-build-push.yaml
index 6a12dad5..6c1c08ff 100644
--- a/.github/workflows/container-build-push.yaml
+++ b/.github/workflows/docker-build-push.yaml
@@ -1,21 +1,21 @@
-# Builds the container image with run dependencies & push it to the registry
-name: Container Build & Push
+# Builds the Docker image & pushes it to Docker Hub.
+name: Docker Build & Push
on:
push:
branches: [main]
paths:
- - Containerfile
+ - Dockerfile
- pyproject.toml
- - .github/workflows/container-build-push.yaml
+ - .github/workflows/docker-build-push.yaml
jobs:
- container-build-push:
+ docker-build-push:
runs-on: self-hosted
steps:
- name: Checkout the GitHub repo
uses: actions/checkout@v4
- name: Build the CNeuroMax image
- run: podman build -t cneuromod/cneuromax:latest -f Containerfile .
+ run: podman build -t cneuromod/cneuromax:latest -f Dockerfile .
- name: Push the CNeuroMax image to Docker Hub
run: podman push docker.io/cneuromod/cneuromax:latest
diff --git a/.github/workflows/container-build.yaml b/.github/workflows/docker-build.yaml
similarity index 54%
rename from .github/workflows/container-build.yaml
rename to .github/workflows/docker-build.yaml
index c4b858e9..8a16ecd5 100644
--- a/.github/workflows/container-build.yaml
+++ b/.github/workflows/docker-build.yaml
@@ -1,19 +1,19 @@
-# Builds the container image with run dependencies
-name: Container Build
+# Builds the Docker image
+name: Docker Build
on:
pull_request:
branches: [main]
paths:
- - Containerfile
+ - Dockerfile
- pyproject.toml
- - .github/workflows/container-build.yaml
+ - .github/workflows/docker-build.yaml
jobs:
- container-build:
+ docker-build:
runs-on: self-hosted
steps:
- name: Checkout the GitHub repo
uses: actions/checkout@v4
- name: Build the tentative CNeuroMax image
- run: podman build -f Containerfile .
+ run: podman build -f Dockerfile .
diff --git a/.github/workflows/typecheck-unittest.yaml b/.github/workflows/typecheck-unittest.yaml
index b08aa4dc..96b43388 100644
--- a/.github/workflows/typecheck-unittest.yaml
+++ b/.github/workflows/typecheck-unittest.yaml
@@ -22,19 +22,23 @@ jobs:
typecheck-unittest:
runs-on: self-hosted
steps:
- - name: Pull the container image
- run: podman pull docker.io/cneuromod/cneuromax:latest
- name: Checkout the GitHub repo
uses: actions/checkout@v4
+ # Waiting for `Docker Build` to complete to not build the image twice
+ - name: Stall the job for 5 minutes if it is a pull request
+ if: github.event_name == 'pull_request'
+ run: sleep 300
+ - name: Build the CNeuroMax image
+ run: podman build -t cneuromod/cneuromax:test -f Dockerfile .
- name: Run mypy
run: >
podman run --rm -v $PWD:/cneuromax -w /cneuromax
- docker.io/cneuromod/cneuromax:latest
+ cneuromod/cneuromax:test
mypy --config-file=pyproject.toml cneuromax
- name: Run pytest
run: >
podman run --rm -v $PWD:/cneuromax -w /cneuromax
- docker.io/cneuromod/cneuromax:latest
+ cneuromod/cneuromax:test
pytest --cov cneuromax
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
diff --git a/.gitignore b/.gitignore
index e9e74564..91f08270 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
**/_build/
-**/autoapi/
+**/_autosummary/
**/cneuromax.egg-info/
**/.coverage
**/data/
@@ -11,3 +11,5 @@
**/.ruff_cache/
**/.vscode/
WANDB_KEY.txt
+test.ipynb
+cneuromax/fitting/neuroevolution/space/imitation.py
diff --git a/Containerfile b/Dockerfile
similarity index 95%
rename from Containerfile
rename to Dockerfile
index 65b81c8f..c258f052 100644
--- a/Containerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
# ----------------------------------------------------------------------------#
-# This Containerfile (a.k.a. Dockerfile) is used to build the Docker image
+# This Dockerfile is used to build the Docker image
# (which can turn into an Apptainer image) shared by all CNeuroMax projects.
# It installs all of the dependencies but does not install CNeuroMax itself,
# for development purposes.
diff --git a/README.md b/README.md
index 01258cc9..d2209231 100644
--- a/README.md
+++ b/README.md
@@ -19,80 +19,137 @@
https://img.shields.io/badge/code%20style-black-000000.svg)](
https://github.com/psf/black)
-Full documentation available at [https://courtois-neuromod.github.io/cneuromax](
- https://courtois-neuromod.github.io/cneuromax/).
+
Overview
-Introduction
+CNeuroMax is a Machine Learning workspace for model fitting
+([Deep Learning](https://en.wikipedia.org/wiki/Deep_learning) +
+[Neuroevolution](https://en.wikipedia.org/wiki/Neuroevolution) +
+[HPO](https://en.wikipedia.org/wiki/Hyperparameter_optimization)
+w/ [Oríon](https://github.com/Epistimio/orion)), testing
+and serving (with [Lightning Apps](https://lightning.ai/docs/app/stable/))
+AI/ML models. CNeuroMax aims to:
-CNeuroMax is a framework for large-scale training of machine learning models,
-with an emphasis on easy deployment in academic high-performance computing
-environments. CNeuroMax aims to:
+**1. Reduce code & configuration boilerplate with:**
+* [Hydra](https://github.com/facebookresearch/hydra) for task/experiment
+configuration.
+* [Hydra-zen](https://github.com/mit-ll-responsible-ai/hydra-zen) for
+[Hydra](https://github.com/facebookresearch/hydra) structured configuration
+management.
+* [Lightning](https://github.com/Lightning-AI/pytorch-lightning) for
+[PyTorch](https://github.com/pytorch/pytorch) code.
-1. **Facilitate the configuration of complex models and training runs through
- tools like:** Hydra, Hydra-Zen, Lightning etc.
+**2. Simplify machine learning workflows:**
+* Hyperparameter optimization with [Orion](https://github.com/Epistimio/orion)
+through its
+[Hydra Sweeper plugin](https://github.com/Epistimio/hydra_orion_sweeper).
+* SLURM job definition, queuing and monitoring with
+[Submitit](https://github.com/facebookincubator/submitit) through its
+[Hydra Launcher plugin](https://hydra.cc/docs/plugins/submitit_launcher/).
+* [Docker](https://www.docker.com/) / [Apptainer](https://apptainer.org/)
+environment containerization for both regular & SLURM-based execution.
+* Transition from regular execution to SLURM-based execution by only swapping
+container technology and as little as a single
+[Hydra](https://github.com/facebookresearch/hydra)
+configuration field.
-2. **Automate much of the process of deployment in a high-performance computing
- environment:** creating SLURM scripts, monitoring SLURM jobs, setting up
- virtual environments, upgrading packages, tuning hyperparameters, etc.
+**3. Automate workspace & coding processes:**
+* Package upgrades through
+[Renovate](https://github.com/renovatebot/renovate).
+* Docstring documentation generation with
+[Sphinx](https://github.com/sphinx-doc/sphinx).
+* Pre-commit formatting & linting hooks with
+[pre-commit](https://pre-commit.com/).
+* Documentation/Docker image validation/deployment, formatting, linting,
+type-checking & unit tests upon contribution to the ``main`` branch using
+[GitHub Actions](https://github.com/features/actions).
-3. **Provide a space for researchers to share their code and experiment
- results:** a central repository with a common solid and well-tested
- object-oriented structure for Lightning Modules, subdirectories for each
- experiment, Weights & Biases working both locally and on SLURM with support
- for team-shared logging etc.
+**4. Facilitate researcher collaboration through:**
+* An object-oriented structure for code sharing & reusability.
+* A mono-repository workspace with task/experiment-specific subdirectories.
+* A very informative & clear to navigate Python API reference.
+* Shared logging with a [Weights & Biases](https://wandb.ai/site) team space.
-4. **Offer optional tools to strengthen code quality and reproducibility:**
- code linting and formatting, unit testing, static & dynamic type checking
- that supports tensor shapes and dtypes, documentation auto-generation and
- auto-deployment, precommit hooks etc.
+**5. Promote high-quality and reproducible code by:**
+* Linting with [Ruff](https://github.com/astral-sh/ruff),
+formatting with [Black](https://github.com/psf/black),
+unit-testing with [pytest](https://github.com/pytest-dev/pytest).
+* Type-checking with [Mypy](https://github.com/python/mypy) (static)
+& [Beartype](https://github.com/beartype/beartype) (dynamic).
+* DType & Shape type hinting for [PyTorch](https://github.com/pytorch/pytorch)
+tensors using [jaxtyping](https://github.com/google/jaxtyping) &
+[NumPy](https://github.com/numpy/numpy) arrays using
+[nptyping](https://github.com/ramonhagenaars/nptyping). Fully type checkable
+at runtime with [Beartype](https://github.com/beartype/beartype).
+* Providing a common [Development Container](https://containers.dev/)
+recipe with the above features enabled + documentation preview
+with [esbonio](https://github.com/swyddfa/esbonio) &
+[GitHub Copilot](https://github.com/features/copilot).
-Repository structure:
+**6. Smoothen up rough edges by providing:**
+* Extensive documentation on how to install/execute on regular & SLURM-based
+systems.
+* Unassuming guides on how to contribute to the codebase.
+* Tutorials on i) how to facilitate code transport across machines & ii) how
+to prune unrelated components of the library for paper publication.
+* Offline [Weights & Biases](https://wandb.ai/site) support with
+[wandb-osh](https://github.com/klieret/wandb-offline-sync-hook).
+
+High-level repository tree:
```
cneuromax/
-├─ .github/ <-- Config files for GitHub automation (tests, containers, etc)
-├─ cneuromax/ <-- Machine Learning code
-│ ├─ fitting/ <-- ML model fitting code
-│ │ ├─ common/ <-- Code common to all fitting workflows
-│ │ │ ├─ __init__.py <-- Stores common Hydra configs
-│ │ │ └─ fitter.py <-- Base Hydra config common to all fitting workflows
+├─ .github/ <-- Config files for GitHub Actions (tests, containers, etc)
+├─ cneuromax/ <-- Root
+│ ├─ fitting/ <-- Model fitting code
│ │ ├─ deeplearning/ <-- Deep Learning code
│ │ │ ├─ datamodule/ <-- Lightning DataModules
-│ │ │ │ ├─ base.py <-- Base Lightning DataModule to build upon
│ │ │ ├─ litmodule/ <-- Lightning Modules
-│ │ │ │ ├─ base.py <-- Base Lightning Module to build upon
-│ │ │ ├─ nnmodule/ <-- PyTorch Modules & Hydra configs
+│ │ │ ├─ nnmodule/ <-- PyTorch Modules
│ │ │ ├─ utils/ <-- Deep Learning utilities
-│ │ │ ├─ __init__.py <-- Stores Deep Learning Hydra configs
-│ │ │ ├─ __main__.py <-- Entrypoint when calling `python cneuromax.fitting.deeplearning`
-│ │ │ ├─ config.yaml <-- Default Hydra configs & settings
-│ │ │ └─ fitter.py <-- Deep Learning fitting
-│ │ └─ neuroevolution/ <-- Neuroevolution code
-│ ├─ serving/ <-- Contains the code to create applications (cozmo inference, etc)
-│ ├─ task/ <-- Contains the Deep Learning tasks
+│ │ │ ├─ config.py <-- Deep Learning structured configs
+│ │ │ ├─ runner.py <-- Deep Learning task runner
+│ │ │ └─ train.py <-- Deep Learning training function
+│ │ ├─ neuroevolution/ <-- Neuroevolution code
+│ │ │ ├─ agent/ <-- Neuroevolution agents (encapsulate networks)
+│ │ │ ├─ net/ <-- Neuroevolution networks
+│ │ │ ├─ space/ <-- Neuroevolution spaces (where agents get evaluated)
+│ │ │ ├─ utils/ <-- Neuroevolution utilities
+│ │ │ ├─ config.py <-- Neuroevolution structured configs
+│ │ │ ├─ evolve.py <-- Neuroevolution evolution function
+│ │ │ └─ runner.py <-- Neuroevolution task runner
+│ │ ├─ config.py <-- Fitting structured configs
+│ │ └─ runner.py <-- Fitting task runner
+│ ├─ serving/ <-- Contains the code to create apps (cozmo inference, etc)
+│ ├─ projects/ <-- Contains all existing projects
│ │ │
│ │ │ ******************************************
-│ │ └─ my_new_task/ <-- *** Your new Deep Learning task folder ***
-│ │ ├─ __init__.py <-- ********** Your Hydra Configs ************
+│ │ └─ my_new_project/ <-- ******** Your new project folder *********
+│ │ ├─ task/ <-- *********** Your task folder *************
+│ │ │ └─ config.yaml <-- ****** Your task configuration file ******
+│ │ ├─ __main__.py <-- **** Your Hydra Configs & entrypoint *****
│ │ ├─ datamodule.py <-- ******* Your Lightning DataModule ********
│ │ ├─ litmodule.py <-- ********* Your Lightning Module **********
│ │ ├─ nnmodule.py <-- ********** Your PyTorch Module ***********
-│ │ └─ config.yaml <-- ****** Your Hydra configuration file *****
│ │ ******************************************
│ │
-│ └─ utils/ <-- CNeuroMax utilities
+│ ├─ utils/ <-- CNeuroMax utilities
+│ ├─ __init__.py <-- Sets up Beartype
+│ └─ config.py <-- Base structured configs
+│ └─ runner.py <-- Base task runner
├─ docs/ <-- Documentation files
├─ .devcontainer.json <-- VSCode container development config
├─ .gitignore <-- Files to not track with Git/GitHub
├─ .pre-commit-config.yaml <-- Pre-"git commit" actions config (format, lint, etc)
├─ .yamllint.yaml <-- YAML files config
-├─ Containerfile <-- To build the Docker image
+├─ Dockerfile <-- To build the Docker image
├─ LICENSE <-- MIT License file
├─ README.md <-- Repository description file
├─ pyproject.toml <-- Python code & dependencies config
└─ renovate.json <-- Renovate Bot config (keeps dependencies up-to-date)
```
+Additional information:
+
CNeuroMax is developed in the context of the
[Courtois Project on Neuronal Modelling](https://cneuromod.ca), also known as
CNeuroMod. Launched in 2018, CNeuroMod aims to create more human-like AI models
diff --git a/cneuromax/__init__.py b/cneuromax/__init__.py
index 282f0b02..d6b707a8 100644
--- a/cneuromax/__init__.py
+++ b/cneuromax/__init__.py
@@ -1,11 +1,184 @@
-"""."""
+""":mod:`cneuromax` package.
+Execution
+=========
+
+``python -m cneuromax project=PROJECT_NAME task=TASK_NAME``.
+
+Terminology
+===========
+
+1. Quick definitions
+~~~~~~~~~~~~~~~~~~~~
+
+``subtask``: Sub-work unit of a ``task`` (ex: a model training run
+with a specific set of hyper-parameters).
+
+``task``: Some work unit specified by a :mod:`hydra-core` config
+``.yaml`` file or a :mod:`hydra-zen` Python config that specifies
+its execution (ex: the training of the same type of model with various
+hyper-parameters).
+
+``project``: A collection of ``tasks`` + cross-``task``
+functionality (ex: a custom :mod:`lightning` ``datamodule``).
+
+``service``: Contains cross-``project`` functionality (ex: base
+:mod:`lightning` sub-classes).
+
+``interface``: Contains cross-``service`` functionality (ex:
+:mod:`hydra-core` base configs).
+
+2. Interface
+~~~~~~~~~~~~
+
+a. Interface overview
+---------------------
+
+An ``interface`` refers to a Python package located at
+``cneuromax/INTERFACE_PATH/``.
+
+.. note::
+
+ Interfaces can be nested, ex: :mod:`cneuromax.serving`.
+
+b. Example interfaces
+---------------------
+
+Root interface: :mod:`cneuromax` (`source folder `_)
+
+Fitting: :mod:`cneuromax.fitting` (`source folder `_)
+
+c. Creating a new interface
+---------------------------
+
+To create ``INTERFACE_NAME`` at path
+``cneuromax/.../PARENT_INTERFACE_NAME/INTERFACE_NAME``, create a class
+to inherit from the :class:`.BaseTaskRunner` class/sub-class implemented
+by ``PARENT_INTERFACE_NAME`` (ex:
+:class:`cneuromax.fitting.runner.FittingTaskRunner`).
+
+3. Service
+~~~~~~~~~~
+
+a. Service overview
+-------------------
+
+A ``service`` refers to a Python package located at
+``cneuromax/INTERFACE_PATH/SERVICE_NAME/``.
+
+b. Example services
+-------------------
+
+Deep Learning: :mod:`cneuromax.fitting.deeplearning` (`source folder
+`_)
+
+Neuroevolution: :mod:`cneuromax.fitting.neuroevolution` (`source folder
+`_)
+
+Model serving (in progress): :mod:`cneuromax.serving` (`source folder
+`_)
+
+c. Creating a new service
+-------------------------
+
+To create ``SERVICE_NAME`` at path
+``cneuromax/.../INTERFACE_LATEST_NAME/SERVICE_NAME``, create a class
+to inherit from the :class:`.BaseTaskRunner` class/sub-class implemented
+by ``INTERFACE_LATEST_NAME`` and implement as little as
+:meth:`.BaseTaskRunner.run_subtask` (ex:
+:class:`cneuromax.fitting.deeplearning.runner.DeepLearningTaskRunner`).
+
+4. Project
+~~~~~~~~~~
+
+a. Project overview
+-------------------
+
+A ``project`` refers to a Python package located at
+``cneuromax/projects/PROJECT_NAME/``.
+
+b. Example projects
+-------------------
+
+MNIST classification: :mod:`cneuromax.projects.classify_mnist` (`source
+folder `_)
+
+Control tasks neuroevolution: :mod:`cneuromax.projects.neuroevorl_control`
+(`source folder `_)
+
+c. Creating a new project
+-------------------------
+
+To create ``PROJECT_NAME`` at path
+``cneuromax/projects/PROJECT_NAME/``, create a class to inherit from
+the :class:`.BaseTaskRunner` class/sub-class implemented by the
+``service`` or other ``project`` of your choice (ex:
+:class:`cneuromax.fitting.deeplearning.runner.DeepLearningTaskRunner`).
+You probably will want to override
+:meth:`~.BaseTaskRunner.store_configs`.
+
+For succinctness (will reduce your command length), we suggest writing
+the above class in the ``__init__.py`` file of your ``project``.
+
+5. Task
+~~~~~~~
+
+a. Task overview
+----------------
+
+A ``task`` is a work unit specified by a :mod:`hydra-core` configuration
+``.yaml`` file located in
+``cneuromax/projects/PROJECT_NAME/task/TASK_NAME.yaml`` or a
+:mod:`hydra-zen` Python config implemented in your overwritten
+:meth:`.BaseTaskRunner.store_configs`.
+
+b. Example tasks
+----------------
+
+MLP MNIST classification: ``cneuromax/projects/classify_mnist/task/\
+mlp.yaml`` (`source file `_)
+
+Acrobot neuroevolution: Check out the contents of
+:func:`cneuromax.projects.neuroevorl_control.TaskRunner.store_configs`.
+
+c. Creating a new task
+----------------------
+
+Create ``TASK_NAME.yaml`` at path
+``cneuromax/projects/PROJECT_NAME/task/TASK_NAME.yaml`` and include
+``# @package _global_`` at the top of the file (as shown
+in the first above example). Otherwise, you can create a
+:mod:`hydra-zen` Python config that specifies its execution (as shown
+in the second above example).
+
+__main__.py
+===========
+
+.. highlight:: python
+.. code-block:: python
+
+ from cneuromax.runner import BaseTaskRunner
+ from cneuromax.utils.runner import get_task_runner_class
+ from cneuromax.utils.wandb import login_wandb
+
+ if __name__ == "__main__":
+ TaskRunner: type[BaseTaskRunner] = get_task_runner_class()
+ login_wandb()
+ TaskRunner.store_configs_and_run_task()
+"""
import warnings
from beartype import BeartypeConf
from beartype.claw import beartype_this_package
beartype_this_package(conf=BeartypeConf(is_pep484_tower=True))
-
-warnings.filterwarnings("ignore", module="beartype")
-warnings.filterwarnings("ignore", module="lightning")
+warnings.filterwarnings(action="ignore", module="beartype")
+warnings.filterwarnings(action="ignore", module="lightning")
+warnings.filterwarnings(action="ignore", module="gymnasium")
diff --git a/cneuromax/__main__.py b/cneuromax/__main__.py
new file mode 100644
index 00000000..65fa0a0f
--- /dev/null
+++ b/cneuromax/__main__.py
@@ -0,0 +1,9 @@
+""":mod:`cneuromax` entrypoint."""
+from cneuromax.runner import BaseTaskRunner
+from cneuromax.utils.runner import get_task_runner_class
+from cneuromax.utils.wandb import login_wandb
+
+if __name__ == "__main__":
+ TaskRunner: type[BaseTaskRunner] = get_task_runner_class()
+ login_wandb()
+ TaskRunner.store_configs_and_run_task()
diff --git a/cneuromax/config.py b/cneuromax/config.py
new file mode 100644
index 00000000..f33e2761
--- /dev/null
+++ b/cneuromax/config.py
@@ -0,0 +1,65 @@
+""":class:`BaseSubtaskConfig` & :class:`BaseHydraConfig`.
+
+Check-out the `hydra docs \
+<https://hydra.cc/docs/intro/>`_
+& `omegaconf docs \
+<https://omegaconf.readthedocs.io/>`_
+for more information on how structured configurations work and how to
+best utilize them.
+
+This module also makes use of :func:`hydra_zen.make_config` to
+simplify config creation (`reference \
+<https://mit-ll-responsible-ai.github.io/hydra-zen/generated/hydra_zen.make_config.html>`_).
+"""
+from dataclasses import dataclass
+from typing import Annotated as An
+
+from hydra import conf as hc
+from hydra import types as ht
+from hydra.experimental.callbacks import LogJobReturnCallback
+from hydra_zen import make_config
+
+from cneuromax.utils.beartype import not_empty
+from cneuromax.utils.hydra_zen import fs_builds
+
+
+@dataclass
+class BaseSubtaskConfig:
+ """Base ``subtask`` config.
+
+ Args:
+ output_dir: Path to the ``subtask`` output directory. Every\
+ artifact generated during the ``subtask`` will be stored\
+ in this directory.
+ data_dir: Path to the data directory. This directory is\
+ shared between ``task`` runs. It is used to store\
+ datasets, pre-trained models, etc.
+ """
+
+ output_dir: An[str, not_empty()] = "${hydra:runtime.output_dir}"
+ data_dir: An[str, not_empty()] = "${oc.env:CNEUROMAX_PATH}/data/"
+
+
+@dataclass
+class BaseHydraConfig(
+ make_config( # type: ignore[misc]
+ bases=(hc.HydraConf,),
+ callbacks={"log_job_return": fs_builds(LogJobReturnCallback)},
+ job=hc.JobConf(
+ config=hc.JobConf.JobConfig(
+ override_dirname=hc.JobConf.JobConfig.OverrideDirname(
+ kv_sep=".",
+ item_sep="~",
+ exclude_keys=["task", "project"],
+ ),
+ ),
+ ),
+ mode=ht.RunMode.MULTIRUN,
+ sweep=hc.SweepDir(
+ dir="${oc.env:CNEUROMAX_PATH}/data/${project}/${task}/",
+ subdir="${hydra:job.override_dirname}",
+ ),
+ ),
+):
+ """Base :mod:`hydra.conf.HydraConf` config."""
diff --git a/cneuromax/fitting/__init__.py b/cneuromax/fitting/__init__.py
index fbfb7f63..dcc605b8 100644
--- a/cneuromax/fitting/__init__.py
+++ b/cneuromax/fitting/__init__.py
@@ -1 +1 @@
-"""Fitting module."""
+"""Model fitting."""
diff --git a/cneuromax/fitting/common/__init__.py b/cneuromax/fitting/common/__init__.py
deleted file mode 100644
index 1953ebe8..00000000
--- a/cneuromax/fitting/common/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""Fitting common Hydra configuration creation & storage."""
-
-from typing import Any
-
-from hydra.core.config_store import ConfigStore
-from hydra_plugins.hydra_submitit_launcher.config import SlurmQueueConf
-
-from cneuromax.fitting.common.fitter import BaseFitterHydraConfig
-
-__all__ = ["BaseFitterHydraConfig", "store_configs"]
-
-
-def store_configs(cs: ConfigStore) -> None:
- """Stores all common Hydra fitting related configs.
-
- Args:
- cs: .
- """
- store_launcher_configs(cs)
- cs.store(name="base_fitter", node=BaseFitterHydraConfig)
-
-
-def store_launcher_configs(cs: ConfigStore) -> None:
- """Stores Hydra ``hydra/launcher`` group configs.
-
- Names: ``submitit_slurm_acan``, ``submitit_slurm_acan_simexp``.
-
- Args:
- cs: .
- """
- cs.store(name="setup_apptainer_acan", node=["module load apptainer"])
- setup: Any = "${merge:${setup_apptainer_acan},${copy_data_commands}}"
- python = "apptainer --nv exec ${oc.env:SCRATCH}/cneuromax.sif python"
- cs.store(
- group="hydra/launcher",
- name="submitit_slurm_acan",
- node=SlurmQueueConf(python=python, setup=setup),
- )
- cs.store(
- group="hydra/launcher",
- name="submitit_slurm_acan_simexp",
- node=SlurmQueueConf(account="rrg-pbellec", setup=setup),
- )
diff --git a/cneuromax/fitting/common/fitter.py b/cneuromax/fitting/common/fitter.py
deleted file mode 100644
index a4d3b41a..00000000
--- a/cneuromax/fitting/common/fitter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""."""
-
-from dataclasses import dataclass
-from typing import Annotated as An
-
-from cneuromax.utils.annotations import not_empty, one_of
-
-
-@dataclass
-class BaseFitterHydraConfig:
- """Root Fitter Hydra configuration.
-
- Attributes:
- data_dir: .
- device: Computing device to use for large matrix operations.
- load_path: Path to the model to load.
- load_path_pbt: Path to the HPO checkpoint to load for PBT.
- save_path: Path to save the model.
- copy_data_commands: Commands to copy data to the cluster.
- """
-
- data_dir: An[str, not_empty()] = "data/example_run/"
- device: An[str, one_of("cpu", "gpu")] = "cpu"
- load_path: An[str, not_empty()] | None = None
- load_path_pbt: An[str, not_empty()] | None = None
- save_path: An[str, not_empty()] = "${data_dir}/lightning/final.ckpt"
- copy_data_commands: list[str] | None = None
diff --git a/cneuromax/fitting/config.py b/cneuromax/fitting/config.py
new file mode 100644
index 00000000..b97db9e7
--- /dev/null
+++ b/cneuromax/fitting/config.py
@@ -0,0 +1,24 @@
+""":class:`FittingSubtaskConfig`."""
+from dataclasses import dataclass
+from typing import Annotated as An
+
+from cneuromax.config import BaseSubtaskConfig
+from cneuromax.utils.beartype import one_of
+
+
+@dataclass
+class FittingSubtaskConfig(BaseSubtaskConfig):
+ """Fitting ``subtask`` config.
+
+ Args:
+ device: Computing device to use for large matrix operations.
+ copy_data_commands: List of commands to execute to transfer the\
+ training data to the
+ :paramref:`~.BaseSubtaskConfig.data_dir` directory.\
+ This is useful when the training data is originally stored\
+ in a different location than
+ :paramref:`~.BaseSubtaskConfig.data_dir`.
+ """
+
+ device: An[str, one_of("cpu", "gpu")] = "cpu"
+ copy_data_commands: list[str] | None = None
diff --git a/cneuromax/fitting/deeplearning/__init__.py b/cneuromax/fitting/deeplearning/__init__.py
index 594de145..d7523b0b 100644
--- a/cneuromax/fitting/deeplearning/__init__.py
+++ b/cneuromax/fitting/deeplearning/__init__.py
@@ -1,126 +1 @@
-"""Fitting with Deep Learning.
-
-Stores several useful Hydra configs.
-"""
-
-from hydra.core.config_store import ConfigStore
-from lightning.pytorch.loggers.wandb import WandbLogger
-from lightning.pytorch.trainer import Trainer
-from omegaconf import MISSING
-from torch.optim import SGD, Adam, AdamW
-from transformers import (
- get_constant_schedule,
- get_constant_schedule_with_warmup,
-)
-
-from cneuromax.fitting.deeplearning.fitter import (
- DeepLearningFitter,
- DeepLearningFitterHydraConfig,
-)
-from cneuromax.fitting.deeplearning.nnmodule import (
- store_configs as store_nnmodule_configs,
-)
-from cneuromax.utils.hydra import fs_builds, pfs_builds
-
-__all__ = [
- "DeepLearningFitter",
- "DeepLearningFitterHydraConfig",
- "store_configs",
- "store_logger_configs",
- "store_optimizer_configs",
- "store_scheduler_configs",
- "store_trainer_configs",
-]
-
-
-def store_configs(cs: ConfigStore) -> None:
- """Store configs for the Deep Learning module.
-
- Args:
- cs (ConfigStore): The ConfigStore instance.
- """
- store_logger_configs(cs)
- store_nnmodule_configs(cs)
- store_optimizer_configs(cs)
- store_scheduler_configs(cs)
- store_trainer_configs(cs)
-
- cs.store(name="dl_fitter", node=DeepLearningFitterHydraConfig)
-
-
-def store_logger_configs(cs: ConfigStore) -> None:
- """Stores Hydra ``logger`` group configs.
-
- Names: ``wandb``, ``wandb_simexp``.
-
- Args:
- cs: .
- """
- base_args = {
- "name": MISSING,
- "save_dir": "${data_dir}",
- "project": MISSING,
- }
- cs.store(
- group="logger",
- name="wandb",
- node=fs_builds(WandbLogger, **base_args, entity=MISSING),
- )
-
- cs.store(
- group="logger",
- name="wandb_simexp",
- node=fs_builds(WandbLogger, **base_args, entity="cneuroml"),
- )
-
-
-def store_optimizer_configs(cs: ConfigStore) -> None:
- """Stores Hydra ``litmodule/optimizer`` group configs.
-
- Names: ``adam``, ``adamw``, ``sgd``.
-
- Args:
- cs: .
- """
- cs.store(group="litmodule/optimizer", name="adam", node=pfs_builds(Adam))
- cs.store(group="litmodule/optimizer", name="adamw", node=pfs_builds(AdamW))
- cs.store(group="litmodule/optimizer", name="sgd", node=pfs_builds(SGD))
-
-
-def store_scheduler_configs(cs: ConfigStore) -> None:
- """Stores Hydra ``litmodule/scheduler`` group configs.
-
- Names: ``constant``, ``linear_warmup``.
-
- Args:
- cs: .
- """
- cs.store(
- group="litmodule/scheduler",
- name="constant",
- node=pfs_builds(get_constant_schedule),
- )
- cs.store(
- group="litmodule/scheduler",
- name="linear_warmup",
- node=pfs_builds(get_constant_schedule_with_warmup),
- )
-
-
-def store_trainer_configs(cs: ConfigStore) -> None:
- """Stores Hydra ``trainer`` group configs.
-
- Names: ``base``.
-
- Args:
- cs: .
- """
- cs.store(
- group="trainer",
- name="base",
- node=fs_builds(
- Trainer,
- accelerator="${device}",
- default_root_dir="${data_dir}/lightning/",
- ),
- )
+"""Deep Learning."""
diff --git a/cneuromax/fitting/deeplearning/__main__.py b/cneuromax/fitting/deeplearning/__main__.py
deleted file mode 100644
index 756344c0..00000000
--- a/cneuromax/fitting/deeplearning/__main__.py
+++ /dev/null
@@ -1,146 +0,0 @@
-"""Entry point for fitting with Deep Learning."""
-
-import logging
-import os
-import sys
-from importlib import import_module
-from pathlib import Path
-
-import hydra
-import torch
-import wandb
-from hydra.core.config_store import ConfigStore
-from omegaconf import DictConfig, OmegaConf
-
-from cneuromax.fitting.common import store_configs as store_base_fitter_configs
-from cneuromax.fitting.deeplearning import (
- store_configs as store_deep_learning_configs,
-)
-from cneuromax.fitting.deeplearning.fitter import (
- DeepLearningFitter,
- DeepLearningFitterHydraConfig,
-)
-
-
-def store_task_configs(cs: ConfigStore) -> None:
- """Store pre-defined task Hydra configurations.
-
- Parse the task config path from the script arguments, import
- its ``store_configs`` function if it exists, and call it.
-
- Args:
- cs: .
-
- Raises:
- ModuleNotFoundError: If the task module cannot be found.
- AttributeError: If the task module does not have a
- ``store_configs`` function.
- """
- for arg in sys.argv:
- if "task" in arg:
- try:
- task_module = import_module(
- "cneuromax.task." + arg.split("=")[1].split("/")[0],
- )
- except ModuleNotFoundError:
- logging.exception(
- "The task module cannot be found. Make sure it exists in "
- "``cneuromax/task`` and is spelled correctly.",
- )
-
- try:
- task_module.store_configs(cs)
- except AttributeError:
- logging.exception(
- "The task module must have a ``store_configs`` function. "
- "Check-out ``cneuromax/tasks/classify_mnist/__init__.py``"
- "for an example.",
- )
-
- return
-
- module_not_found_error_2 = (
- "The task module must be specified in the script "
- "arguments. Example: ``python -m "
- "cneuromax.fitting.deeplearning task=classify_mnist/mlp``."
- )
- raise ModuleNotFoundError(module_not_found_error_2)
-
-
-def store_configs() -> None:
- """Store configs for the Deep Learning module."""
- cs = ConfigStore.instance()
- store_base_fitter_configs(cs)
- store_deep_learning_configs(cs)
- store_task_configs(cs)
-
-
-def verify_config(config: DictConfig) -> None:
- """Verifies that various config elements are set correctly.
-
- Currently, it only makes sure that the ``device`` is set correctly.
-
- Args:
- config: .
- """
- # Verify device
- if not torch.cuda.is_available():
- logging.info("CUDA is not available, setting device to CPU.")
- config.device = "cpu"
-
-
-def process_config(config: DictConfig) -> DeepLearningFitterHydraConfig:
- """Process the Hydra config.
-
- Args:
- config: .
-
- Returns:
- The processed Hydra config.
- """
- OmegaConf.resolve(config)
- OmegaConf.set_struct(config, value=True)
- dl_config = OmegaConf.to_object(config)
- if not isinstance(dl_config, DeepLearningFitterHydraConfig):
- raise TypeError
- return dl_config
-
-
-@hydra.main(config_name="config", config_path=".", version_base=None)
-def run(config: DictConfig) -> None:
- """.
-
- Args:
- config: .
-
- Returns:
- The validation loss.
- """
- verify_config(config)
- dl_config = process_config(config)
- fitter = DeepLearningFitter(dl_config)
- fitter.fit()
-
-
-def login_wandb() -> None:
- """Login to W&B using the key stored in ``WANDB_KEY.txt``."""
- wandb_key_path = Path(
- str(os.environ.get("CNEUROMAX_PATH")) + "/WANDB_KEY.txt",
- )
- if wandb_key_path.exists():
- with wandb_key_path.open("r") as f:
- key = f.read().strip()
- wandb.login(key=key)
- else:
- logging.info(
- "W&B key not found, proceeding without. You can retrieve your key "
- "from ``https://wandb.ai/settings`` and store it in a file named "
- "``WANDB_KEY.txt`` in the root directory of the project. Discard "
- "this message if you meant not to use W&B.",
- )
-
-
-if __name__ == "__main__":
- store_configs()
- login_wandb()
- run()
diff --git a/cneuromax/fitting/deeplearning/config.py b/cneuromax/fitting/deeplearning/config.py
new file mode 100644
index 00000000..39fe49fa
--- /dev/null
+++ b/cneuromax/fitting/deeplearning/config.py
@@ -0,0 +1,58 @@
+""":class:`DeepLearningTaskConfig`."""
+from dataclasses import dataclass, field
+from typing import Any
+
+from hydra_zen import make_config
+from lightning.pytorch import Trainer
+from lightning.pytorch.loggers.wandb import WandbLogger
+
+from cneuromax.fitting.config import (
+ FittingSubtaskConfig,
+)
+from cneuromax.fitting.deeplearning.datamodule import (
+ BaseDataModule,
+ BaseDataModuleConfig,
+)
+from cneuromax.fitting.deeplearning.litmodule import BaseLitModule
+from cneuromax.utils.hydra_zen import (
+ fs_builds,
+ pfs_builds,
+)
+
+
+@dataclass
+class DeepLearningTaskConfig(
+ make_config( # type: ignore[misc]
+ trainer=pfs_builds(Trainer),
+ datamodule=fs_builds(BaseDataModule, config=BaseDataModuleConfig()),
+ litmodule=fs_builds(BaseLitModule),
+ logger=pfs_builds(WandbLogger),
+ config=fs_builds(FittingSubtaskConfig),
+ ),
+):
+ """Deep Learning ``task`` config.
+
+ Args:
+ defaults: Hydra defaults.
+ trainer: See :class:`~lightning.pytorch.Trainer`.
+ datamodule: See :class:`.BaseDataModule`.
+ litmodule: See :class:`.BaseLitModule`.
+ logger: See\
+ :class:`~lightning.pytorch.loggers.wandb.WandbLogger`.
+ config: See :class:`.FittingSubtaskConfig`.
+ """
+
+ defaults: list[Any] = field(
+ default_factory=lambda: [
+ "_self_",
+ {"trainer": "base"},
+ {"litmodule/nnmodule": "mlp"},
+ {"litmodule/scheduler": "constant"},
+ {"litmodule/optimizer": "adamw"},
+ {"logger": "wandb_simexp"},
+ "project",
+ "task",
+ {"task": None},
+ {"override hydra/launcher": "submitit_local"},
+ ],
+ )
diff --git a/cneuromax/fitting/deeplearning/config.yaml b/cneuromax/fitting/deeplearning/config.yaml
deleted file mode 100644
index d87e6ddf..00000000
--- a/cneuromax/fitting/deeplearning/config.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-hydra:
- searchpath:
- - file://${oc.env:CNEUROMAX_PATH}/cneuromax/
- run:
- dir: ${data_dir}/hydra/
- sweep:
- dir: ${data_dir}/
- subdir: hydra/
-
-defaults:
- - dl_fitter
- - trainer: base
- - litmodule/scheduler: constant
- - litmodule/optimizer: adamw
- - logger: wandb
- - _self_
- - task: null
diff --git a/cneuromax/fitting/deeplearning/datamodule/__init__.py b/cneuromax/fitting/deeplearning/datamodule/__init__.py
index 55701eed..ef3304f5 100644
--- a/cneuromax/fitting/deeplearning/datamodule/__init__.py
+++ b/cneuromax/fitting/deeplearning/datamodule/__init__.py
@@ -1,9 +1,8 @@
-"""Lightning DataModules."""
-
+r""":class:`~lightning.pytorch.LightningDataModule`\s."""
from cneuromax.fitting.deeplearning.datamodule.base import (
BaseDataModule,
BaseDataModuleConfig,
- BaseDataset,
+ Datasets,
)
-__all__ = ["BaseDataModule", "BaseDataModuleConfig", "BaseDataset"]
+__all__ = ["Datasets", "BaseDataModuleConfig", "BaseDataModule"]
diff --git a/cneuromax/fitting/deeplearning/datamodule/base.py b/cneuromax/fitting/deeplearning/datamodule/base.py
index c6eee302..3ec3b222 100644
--- a/cneuromax/fitting/deeplearning/datamodule/base.py
+++ b/cneuromax/fitting/deeplearning/datamodule/base.py
@@ -1,5 +1,4 @@
-"""Base DataModule class & related utilities."""
-
+""":class:`BaseDataModule` + its datasets/config classes."""
from abc import ABCMeta
from dataclasses import dataclass
from typing import Annotated as An
@@ -9,18 +8,18 @@
from torch import Tensor
from torch.utils.data import DataLoader, Dataset
-from cneuromax.utils.annotations import not_empty, one_of
+from cneuromax.utils.beartype import not_empty, one_of
@dataclass
-class BaseDataset:
- """.
-
- Attributes:
- train: .
- val: .
- test: .
- predict: .
+class Datasets:
+ """Holds stage-specific :class:`~torch.utils.data.Dataset` objects.
+
+ Args:
+ train: Training dataset.
+ val: Validation dataset.
+ test: Testing dataset.
+ predict: Prediction dataset.
"""
train: Dataset[Tensor] | None = None
@@ -31,48 +30,48 @@ class BaseDataset:
@dataclass
class BaseDataModuleConfig:
- """.
+ """Holds :class:`BaseDataModule` config values.
- Attributes:
- data_dir: .
- device: .
+ Args:
+ data_dir: See :paramref:`~.BaseSubtaskConfig.data_dir`.
+ device: See :paramref:`~.FittingSubtaskConfig.device`.
"""
- data_dir: An[str, not_empty()] = "${data_dir}"
- device: An[str, one_of("cpu", "gpu")] = "${device}"
+ data_dir: An[str, not_empty()] = "${config.data_dir}"
+ device: An[str, one_of("cpu", "gpu")] = "${config.device}"
class BaseDataModule(LightningDataModule, metaclass=ABCMeta):
- """Root Lightning ``DataModule`` class.
+ """Base :mod:`lightning` ``DataModule``.
- With ``stage`` being any of ``"train"``, ``"val"``, ``"test"`` or
- ``"predict"``, subclasses need to properly define the
- ``dataset[stage]`` instance attribute(s) for each desired ``stage``.
+ With ``<stage>`` being any of ``train``, ``val``, ``test`` or
+ ``predict``, subclasses need to properly define the
+ ``datasets.<stage>`` attribute(s) for each desired stage.
+
+ Args:
+ config: See :class:`BaseDataModuleConfig`.
Attributes:
- config (``BaseDataModuleConfig``): .
- dataset (``BaseDataset``): .
- pin_memory (``bool``): Whether to copy tensors into device
- pinned memory before returning them (is set to ``True`` by
- default if using GPUs).
- per_device_batch_size (``int``): Per-device number of samples to
- load per iteration. Default value (``1``) is later
- overwritten with the use of a Lightning ``Tuner``.
- per_device_num_workers (``int``): Per-device number of CPU
- processes to use for data loading (``0`` means that the data
- will be loaded by each device's assigned CPU process).
- Default value (``0``) is later overwritten.
+ config (:class:`BaseDataModuleConfig`)
+ datasets (:class:`Datasets`)
+ pin_memory (``bool``): Whether to copy tensors into device\
+ pinned memory before returning them (is set to ``True`` by\
+ default if :paramref:`~BaseDataModuleConfig.device` is\
+ ``"gpu"``).
+ per_device_batch_size (``int``): Per-device number of samples\
+ to load per iteration. Temporary value (``1``) is\
+ overwritten in :func:`.set_batch_size_and_num_workers`.
+ per_device_num_workers (``int``): Per-device number of CPU\
+ processes to use for data loading (``0`` means that the\
+ data will be loaded by each device's assigned CPU\
+ process). Temporary value (``0``) is later overwritten\
+ in :func:`.set_batch_size_and_num_workers`.
"""
def __init__(self: "BaseDataModule", config: BaseDataModuleConfig) -> None:
- """Calls parent constructor & initializes instance attributes.
-
- Args:
- config: .
- """
super().__init__()
self.config = config
- self.dataset = BaseDataset()
+ self.datasets = Datasets()
self.pin_memory = self.config.device == "gpu"
self.per_device_batch_size = 1
self.per_device_num_workers = 0
@@ -82,21 +81,24 @@ def load_state_dict(
self: "BaseDataModule",
state_dict: dict[str, int],
) -> None:
- """Sets the instance's per-device batch_size & num_workers.
+ """Replace instance attrib vals w/ :paramref:`state_dict` vals.
Args:
- state_dict: .
+ state_dict: Dictionary containing values to override\
+ :attr:`per_device_batch_size` &\
+ :attr:`per_device_num_workers`.
"""
self.per_device_batch_size = state_dict["per_device_batch_size"]
self.per_device_num_workers = state_dict["per_device_num_workers"]
@final
def state_dict(self: "BaseDataModule") -> dict[str, int]:
- """.
+ """Returns instance attribute values.
Returns:
- This instance's per-device batch size & number of workers
- inside a new dictionary.
+ A new dictionary containing attribute values\
+ :attr:`per_device_batch_size` &\
+ :attr:`per_device_num_workers`.
"""
return {
"per_device_batch_size": self.per_device_batch_size,
@@ -110,64 +112,67 @@ def x_dataloader(
*,
shuffle: bool = True,
) -> DataLoader[Tensor]:
- """Generic ``DataLoader`` factory method.
+ """Generic :class:`~torch.utils.data.DataLoader` factory method.
+
+ Args:
+ dataset: A :mod:`torch` ``Dataset`` to wrap with a\
+ :class:`~torch.utils.data.DataLoader`.
+ shuffle: Whether to shuffle the dataset when iterating\
+ over it.
Raises:
- AttributeError: If ``dataset`` is ``None``.
+ AttributeError: If :paramref:`dataset` is ``None``.
Returns:
- A new PyTorch ``DataLoader`` instance.
+ A new :class:`~torch.utils.data.DataLoader` instance\
+ wrapping the :paramref:`dataset` argument.
"""
if dataset is None:
raise AttributeError
-
- if not hasattr(self, "collate_fn"):
- self.collate_fn = None
-
return DataLoader(
dataset=dataset,
batch_size=self.per_device_batch_size,
shuffle=shuffle,
num_workers=self.per_device_num_workers,
- collate_fn=self.collate_fn,
pin_memory=self.pin_memory,
)
@final
def train_dataloader(self: "BaseDataModule") -> DataLoader[Tensor]:
- """Calls ``x_dataloader`` with train dataset.
+ """Calls :meth:`x_dataloader` w/ :attr:`datasets` ``.train``.
Returns:
- A new training PyTorch ``DataLoader`` instance.
+ A new training :class:`torch.utils.data.DataLoader`\
+ instance.
"""
- return self.x_dataloader(dataset=self.dataset.train)
+ return self.x_dataloader(dataset=self.datasets.train)
@final
def val_dataloader(self: "BaseDataModule") -> DataLoader[Tensor]:
- """Calls ``x_dataloader`` with val dataset.
+ """Calls :meth:`x_dataloader` w/ :attr:`datasets` ``.val``.
Returns:
- A new validation PyTorch ``DataLoader`` instance.
+ A new validation :class:`~torch.utils.data.DataLoader`\
+ instance.
"""
- return self.x_dataloader(dataset=self.dataset.val)
+ return self.x_dataloader(dataset=self.datasets.val)
@final
def test_dataloader(self: "BaseDataModule") -> DataLoader[Tensor]:
- """Calls ``x_dataloader`` with test dataset.
+ """Calls :meth:`x_dataloader` w/ :attr:`datasets` ``.test``.
Returns:
- A new testing PyTorch ``DataLoader`` instance.
+ A new testing :class:`~torch.utils.data.DataLoader`\
+ instance.
"""
- return self.x_dataloader(dataset=self.dataset.test)
+ return self.x_dataloader(dataset=self.datasets.test)
@final
def predict_dataloader(self: "BaseDataModule") -> DataLoader[Tensor]:
- """Calls ``x_dataloader`` with predict dataset.
-
- The predict PyTorch ``DataLoader`` instance does not shuffle the
- dataset.
+ """Calls :meth:`x_dataloader` w/ :attr:`datasets` ``.predict``.
Returns:
- A new prediction PyTorch ``DataLoader`` instance.
+ A new prediction :class:`~torch.utils.data.DataLoader`\
+ instance that does not shuffle the dataset.
"""
- return self.x_dataloader(dataset=self.dataset.test, shuffle=False)
+ return self.x_dataloader(dataset=self.datasets.predict, shuffle=False)
diff --git a/cneuromax/fitting/deeplearning/fitter.py b/cneuromax/fitting/deeplearning/fitter.py
deleted file mode 100644
index f9108034..00000000
--- a/cneuromax/fitting/deeplearning/fitter.py
+++ /dev/null
@@ -1,245 +0,0 @@
-"""Deep Learning Fitter Config & Class."""
-
-import logging
-import os
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Any
-
-import torch
-from hydra.core.hydra_config import HydraConfig
-from hydra.utils import instantiate
-from hydra_plugins.hydra_submitit_launcher.config import (
- LocalQueueConf,
- SlurmQueueConf,
-)
-from hydra_plugins.hydra_submitit_launcher.submitit_launcher import (
- LocalLauncher,
- SlurmLauncher,
-)
-from lightning.pytorch import Trainer
-from lightning.pytorch.loggers import Logger
-from lightning.pytorch.loggers.wandb import WandbLogger
-from omegaconf import MISSING, DictConfig, OmegaConf
-from torch.distributed import ReduceOp
-
-from cneuromax.fitting.common import BaseFitterHydraConfig
-from cneuromax.fitting.deeplearning.datamodule import BaseDataModule
-from cneuromax.fitting.deeplearning.litmodule import BaseLitModule
-from cneuromax.utils.hydra import get_path
-
-
-@dataclass
-class DeepLearningFitterHydraConfig(BaseFitterHydraConfig):
- """.
-
- Attributes:
- trainer: .
- litmodule: .
- datamodule: .
- logger: .
- """
-
- trainer: Any = MISSING # Implicit TrainerHydraConfig
- litmodule: Any = MISSING # Implicit LitModuleHydraConfig
- datamodule: Any = MISSING # Implicit DataModuleHydraConfig
- logger: Any = MISSING # Implicit LoggerHydraConfig
-
-
-class DeepLearningFitter:
- """Deep Learning Fitter.
-
- This class is the main entry point of the Deep Learning module. It
- acts as an interface between Hydra (configuration + launcher +
- sweeper) and Lightning (trainer + logger + modules).
-
- Note that this class will be instantiated by ``config.num_nodes`` x
- ``config.gpus_per_node`` processes.
-
- Attributes:
- config (``DeepLearningFitterHydraConfig``): .
- logger (``Logger``): .
- trainer (``Trainer``): .
- litmodule (``BaseLitModule``): .
- datamodule (``BaseDataModule``): .
- """
-
- def __init__(
- self: "DeepLearningFitter",
- config: DeepLearningFitterHydraConfig,
- ) -> None:
- """Constructor, stores config and initializes various objects.
-
- Transforms the Hydra configuration instructions into Lightning
- objects, sets up hardware-dependent parameters and sets the
- checkpoint path to resume training from (if applicable).
-
- Args:
- config: .
- """
- self.config = config
- self.launcher_config = self.retrieve_launcher_config()
- self.instantiate_lightning_objects()
- self.set_batch_size_and_num_workers()
- self.set_checkpoint_path()
-
- def retrieve_launcher_config(
- self: "DeepLearningFitter",
- ) -> LocalQueueConf | SlurmQueueConf:
- """."""
- launcher_dict_config: DictConfig = HydraConfig.get().launcher
- launcher_container_config = OmegaConf.to_container(
- launcher_dict_config,
- )
- if not isinstance(launcher_container_config, dict):
- raise TypeError
- launcher_config_dict = dict(launcher_container_config)
- return (
- LocalQueueConf(**launcher_config_dict)
- if launcher_dict_config._target_ == get_path(LocalLauncher)
- else SlurmQueueConf(**launcher_config_dict)
- )
-
- def instantiate_lightning_objects(self: "DeepLearningFitter") -> None:
- """."""
- self.logger: Logger | None
- if self.config.logger._target_ == get_path(WandbLogger):
- wandb_key_path = Path(
- str(os.environ.get("CNEUROMAX_PATH")) + "/WANDB_KEY.txt",
- )
- if wandb_key_path.exists():
- kwargs = {}
- if self.launcher_config._target_ == get_path(SlurmLauncher):
- kwargs["offline"] = True
- self.logger = instantiate(self.config.logger, **kwargs)
- else:
- logging.info(
- "W&B key not found. Logging disabled.",
- )
- self.logger = None
- else:
- self.logger = instantiate(self.config.logger)
-
- callbacks = None
- """
- if self.launcher_config._target_ == get_path(SlurmLauncher):
- callbacks = [TriggerWandbSyncLightningCallback()]
- """
- self.trainer: Trainer = instantiate(
- config=self.config.trainer,
- devices=self.launcher_config.gpus_per_node or 1
- if self.config.device == "gpu"
- else self.launcher_config.tasks_per_node,
- logger=self.logger,
- callbacks=callbacks,
- )
-
- self.datamodule: BaseDataModule = instantiate(self.config.datamodule)
-
- self.litmodule: BaseLitModule = instantiate(self.config.litmodule)
-
- def set_batch_size_and_num_workers(self: "DeepLearningFitter") -> None:
- """.
-
- If starting a new HPO run, finds and sets "good" ``batch_size``
- and ``num_workers`` parameters.
-
- See the ``find_good_batch_size`` and ``find_good_num_workers``
- functions documentation for more details.
-
- We make the assumption that if we are resuming from a checkpoint
- created while running hyper-parameter optimization, we are
- running on the same hardware configuration as was used to create
- the checkpoint. Therefore, we do not need to once again look for
- good ``batch_size`` and ``num_workers`` parameters.
- """
- from cneuromax.fitting.deeplearning.utils.lightning import (
- find_good_num_workers, # Prevent circular import
- find_good_per_device_batch_size,
- )
-
- if not self.config.load_path_pbt:
- proposed_per_device_batch_size: int = (
- find_good_per_device_batch_size(
- self.config,
- self.launcher_config,
- )
- )
- proposed_per_device_num_workers: int = find_good_num_workers(
- self.config,
- self.launcher_config,
- proposed_per_device_batch_size,
- )
-
- per_device_batch_size: int = int(
- self.trainer.strategy.reduce(
- torch.tensor(proposed_per_device_batch_size),
- reduce_op=ReduceOp.MIN, # type: ignore [arg-type]
- ),
- )
-
- per_device_num_workers: int = int(
- self.trainer.strategy.reduce(
- torch.tensor(proposed_per_device_num_workers),
- reduce_op=ReduceOp.MAX, # type: ignore [arg-type]
- ),
- )
-
- self.datamodule.per_device_batch_size = per_device_batch_size
- self.datamodule.per_device_num_workers = per_device_num_workers
-
- def set_checkpoint_path(self: "DeepLearningFitter") -> None:
- """Sets the path to the checkpoint to resume training from.
-
- Three cases are considered:
- - if the ``config.load_path_pbt`` parameter is set, we are
- resuming from a checkpoint created while running HPO. In this
- case, we set the checkpoint path to the value of
- ``config.load_path_hpo`` and use a custom checkpoint connector
- to not override the new HPO config values.
- - if the ``config.load_path`` parameter is set (but not
- ``config.load_path_hpo``), we are resuming from a regular
- checkpoint. In this case, we set the checkpoint path to the
- value of ``config.load_path``.
- - if neither ``config.load_path_hpo`` nor ``config.load_path``
- are set, we are starting a new training run. In this case, we
- set the checkpoint path to ``None``.
- """
- self.ckpt_path: str | None
-
- if self.config.load_path_pbt:
- from cneuromax.fitting.deeplearning.utils.lightning import (
- InitOptimParamsCheckpointConnector, # Prevent circular import
- )
-
- self.ckpt_path = self.config.load_path_pbt
- self.trainer._checkpoint_connector = (
- InitOptimParamsCheckpointConnector(self.trainer)
- )
- elif self.config.load_path:
- self.ckpt_path = self.config.load_path
- else:
- self.ckpt_path = None
-
- def fit(self: "DeepLearningFitter") -> float:
- """.
-
- Trains (or resumes training) the model, saves a checkpoint and
- returns the final validation loss.
-
- Returns:
- The final validation loss.
- """
- self.trainer.fit(
- model=self.litmodule,
- datamodule=self.datamodule,
- ckpt_path=self.ckpt_path,
- )
- if self.config.save_path:
- logging.info("Saving final checkpoint...")
- self.trainer.save_checkpoint(self.config.save_path)
- logging.info("Final checkpoint saved.")
- return self.trainer.validate(
- model=self.litmodule,
- datamodule=self.datamodule,
- )[0]["val/loss"]
diff --git a/cneuromax/fitting/deeplearning/litmodule/__init__.py b/cneuromax/fitting/deeplearning/litmodule/__init__.py
index ea7edbec..cf0f2822 100644
--- a/cneuromax/fitting/deeplearning/litmodule/__init__.py
+++ b/cneuromax/fitting/deeplearning/litmodule/__init__.py
@@ -1,5 +1,6 @@
-"""Lightning Modules."""
-
-from cneuromax.fitting.deeplearning.litmodule.base import BaseLitModule
+r""":class:`lightning.pytorch.LightningModule`\s."""
+from cneuromax.fitting.deeplearning.litmodule.base import (
+ BaseLitModule,
+)
__all__ = ["BaseLitModule"]
diff --git a/cneuromax/fitting/deeplearning/litmodule/base.py b/cneuromax/fitting/deeplearning/litmodule/base.py
index 3a58e5a2..e4a16f36 100644
--- a/cneuromax/fitting/deeplearning/litmodule/base.py
+++ b/cneuromax/fitting/deeplearning/litmodule/base.py
@@ -1,5 +1,4 @@
-"""."""
-
+""":class:`BaseLitModule`."""
from abc import ABCMeta
from functools import partial
from typing import Annotated as An
@@ -11,19 +10,61 @@
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LRScheduler
-from cneuromax.utils.annotations import one_of
+from cneuromax.utils.beartype import one_of
class BaseLitModule(LightningModule, metaclass=ABCMeta):
- """Root Lightning ``Module`` class.
-
- Subclasses need to implement the ``step`` method that inputs a batch
- and returns the loss value(s).
+ """Base :mod:`lightning` ``LitModule``.
+
+ Subclasses need to implement the :meth:`step` method that inputs
+ both ``batch`` (``tuple[torch.Tensor]``) and ``stage`` (``str``)
+ arguments while returning the loss value(s) in the form of a
+ :class:`torch.Tensor`.
+
+ Example definition:
+
+ .. highlight:: python
+ .. code-block:: python
+
+ def step(
+ self: "BaseClassificationLitModule",
+ batch: tuple[
+ Float[Tensor, " batch_size *x_shape"],
+ Int[Tensor, " batch_size"],
+ ],
+ stage: An[str, one_of("train", "val", "test")],
+ ) -> Float[Tensor, " "]:
+ ...
+
+ Note:
+ ``batch`` and loss value(s) type hints in this class are not
+ rendered properly in the documentation due to an\
+ incompatibility between :mod:`sphinx` and :mod:`jaxtyping`.\
+ Refer to the source code available next to the method\
+ signatures to find the correct types.
+
+ Args:
+ nnmodule: A :mod:`torch` ``nn.Module`` to be used by this\
+ instance.
+ optimizer: A :mod:`torch` ``Optimizer`` to be used by this\
+ instance. It is partial as an argument as the\
+ :paramref:`nnmodule` parameters are required for its\
+ initialization.
+ scheduler: A :mod:`torch` ``Scheduler`` to be used by this\
+ instance. It is partial as an argument as the\
+ :paramref:`optimizer` is required for its initialization.
Attributes:
- nnmodule (PyTorch ``nn.Module``): .
- optimizer (PyTorch ``Optimizer``): .
- scheduler (PyTorch ``LRScheduler``): .
+ nnmodule (:class:`torch.nn.Module`): See\
+ :paramref:`~BaseLitModule.nnmodule`.
+ optimizer (:class:`torch.optim.Optimizer`): See\
+ :paramref:`~BaseLitModule.optimizer`.
+ scheduler (:class:`torch.optim.lr_scheduler.LRScheduler`): See\
+ :paramref:`~BaseLitModule.scheduler`.
+
+ Raises:
+ NotImplementedError: If the :meth:`step` method is not\
+ defined or not callable.
"""
def __init__(
@@ -32,18 +73,15 @@ def __init__(
optimizer: partial[Optimizer],
scheduler: partial[LRScheduler],
) -> None:
- """Calls parent constructor & initializes instance attributes.
-
- Args:
- nnmodule: .
- optimizer: .
- scheduler: .
- """
super().__init__()
-
- self.nnmodule: nn.Module = nnmodule
- self.optimizer: Optimizer = optimizer(params=self.parameters())
- self.scheduler: LRScheduler = scheduler(optimizer=self.optimizer)
+ self.nnmodule = nnmodule
+ self.optimizer = optimizer(params=self.parameters())
+ self.scheduler = scheduler(optimizer=self.optimizer)
+ if not callable(getattr(self, "step", None)):
+ error_msg = (
+ "The `BaseLitModule.step` method is not defined/not callable."
+ )
+ raise NotImplementedError(error_msg)
@final
def stage_step(
@@ -53,31 +91,23 @@ def stage_step(
| list[Num[Tensor, " ..."]],
stage: An[str, one_of("train", "val", "test", "predict")],
) -> Num[Tensor, " ..."]:
- """Generic stage wrapper around the ``step`` instance method.
+ """Generic stage wrapper around the :meth:`step` method.
- Verifies that the ``step`` instance method is callable, calls
- it and logs the loss value(s).
+ Verifies that the :meth:`step` method exists and is callable,
+ calls it and logs the loss value(s).
Args:
- batch: .
- stage: .
+ batch: The batched input data.
+ stage: The current stage (``train``, ``val``, ``test`` or\
+ ``predict``).
Returns:
The loss value(s).
-
- Raises:
- AttributeError: If the ``step`` instance method is not
- callable.
"""
- if not (hasattr(self, "step") and callable(self.step)):
- raise AttributeError
-
if isinstance(batch, list):
tupled_batch: tuple[Num[Tensor, " ..."], ...] = tuple(batch)
-
loss: Num[Tensor, " ..."] = self.step(tupled_batch, stage)
- self.log(f"{stage}/loss", loss)
-
+ self.log(name=f"{stage}/loss", value=loss)
return loss
@final
@@ -87,7 +117,10 @@ def training_step(
| tuple[Num[Tensor, " ..."], ...]
| list[Num[Tensor, " ..."]],
) -> Num[Tensor, " ..."]:
- """Calls ``stage_step`` method with argument ``stage=train``.
+ """Calls :meth:`stage_step` with argument ``stage="train"``.
+
+ Args:
+ batch: See :paramref:`~stage_step.batch`.
Returns:
The loss value(s).
@@ -100,15 +133,18 @@ def validation_step(
batch: Num[Tensor, " ..."]
| tuple[Num[Tensor, " ..."], ...]
| list[Num[Tensor, " ..."]],
+ # :paramref:`*args` & :paramref:`**kwargs` type annotations
+ # cannot be more specific because of
+ # :meth:`LightningModule.validation_step`\'s signature.
*args: Any, # noqa: ANN401, ARG002
**kwargs: Any, # noqa: ANN401, ARG002
) -> Num[Tensor, " ..."]:
- """Calls ``stage_step`` method with argument ``stage=val``.
+ """Calls :meth:`stage_step` with argument ``stage="val"``.
Args:
- batch: .
- *args: .
- **kwargs: .
+ batch: See :paramref:`~stage_step.batch`.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
Returns:
The loss value(s).
@@ -122,10 +158,10 @@ def test_step(
| tuple[Num[Tensor, " ..."], ...]
| list[Num[Tensor, " ..."]],
) -> Num[Tensor, " ..."]:
- """Calls ``stage_step`` method with argument ``stage=test``.
+ """Calls :meth:`stage_step` with argument ``stage="test"``.
Args:
- batch: .
+ batch: See :paramref:`~stage_step.batch`.
Returns:
The loss value(s).
@@ -136,12 +172,13 @@ def test_step(
def configure_optimizers(
self: "BaseLitModule",
) -> tuple[list[Optimizer], list[dict[str, LRScheduler | str | int]]]:
- """.
+ """Returns a dict with :attr:`optimizer` and :attr:`scheduler`.
Returns:
- A tuple containing the PyTorch ``Optimizer`` and
- ``LRScheduler`` instance attributes (each nested in a
- list).
+ A tuple containing this instance's\
+ :class:`~torch.optim.Optimizer` and\
+ :class:`~torch.optim.lr_scheduler.LRScheduler`\
+ attributes.
"""
return [self.optimizer], [
{"scheduler": self.scheduler, "interval": "step", "frequency": 1},
diff --git a/cneuromax/fitting/deeplearning/litmodule/classification/__init__.py b/cneuromax/fitting/deeplearning/litmodule/classification/__init__.py
index 355d3e69..5f849eee 100644
--- a/cneuromax/fitting/deeplearning/litmodule/classification/__init__.py
+++ b/cneuromax/fitting/deeplearning/litmodule/classification/__init__.py
@@ -1,7 +1,7 @@
-"""Classification Lightning Modules."""
-
+r"""Classification :class:`~lightning.pytorch.LightningModule`\s."""
from cneuromax.fitting.deeplearning.litmodule.classification.base import (
BaseClassificationLitModule,
+ BaseClassificationLitModuleConfig,
)
-__all__ = ["BaseClassificationLitModule"]
+__all__ = ["BaseClassificationLitModuleConfig", "BaseClassificationLitModule"]
diff --git a/cneuromax/fitting/deeplearning/litmodule/classification/base.py b/cneuromax/fitting/deeplearning/litmodule/classification/base.py
index 60bf3c7e..2621b216 100644
--- a/cneuromax/fitting/deeplearning/litmodule/classification/base.py
+++ b/cneuromax/fitting/deeplearning/litmodule/classification/base.py
@@ -1,6 +1,6 @@
-"""."""
-
+""":class:`BaseClassificationLitModule` & its config dataclass."""
from abc import ABCMeta
+from dataclasses import dataclass
from functools import partial
from typing import Annotated as An
@@ -13,45 +13,48 @@
from torchmetrics.classification import MulticlassAccuracy
from cneuromax.fitting.deeplearning.litmodule import BaseLitModule
-from cneuromax.utils.annotations import ge, one_of
+from cneuromax.utils.beartype import ge, one_of
+@dataclass
class BaseClassificationLitModuleConfig:
- """Base classification Lightning Module config.
+ """Holds :class:`BaseClassificationLitModule` config values.
- Attributes:
- num_classes: .
+ Args:
+ num_classes: Number of classes to classify between.
"""
num_classes: An[int, ge(2)]
class BaseClassificationLitModule(BaseLitModule, metaclass=ABCMeta):
- """Base classification Lightning Module.
+ """Base Classification :mod:`lightning` ``LitModule``.
+
+ Args:
+ config: See :class:`BaseClassificationLitModuleConfig`.
+ nnmodule: See :paramref:`~.BaseLitModule.nnmodule`.
+ optimizer: See :paramref:`~.BaseLitModule.optimizer`.
+ scheduler: See :paramref:`~.BaseLitModule.scheduler`.
Attributes:
- accuracy (``MulticlassAccuracy``): The accuracy metric.
- config (``BaseClassificationLitModuleConfig``): .
+ accuracy\
+ (:class:`~torchmetrics.classification.MulticlassAccuracy`)
"""
def __init__(
self: "BaseClassificationLitModule",
+ config: BaseClassificationLitModuleConfig,
nnmodule: nn.Module,
optimizer: partial[Optimizer],
scheduler: partial[LRScheduler],
- num_classes: An[int, ge(2)],
) -> None:
- """Calls parent constructor & initializes accuracy metric.
-
- Args:
- nnmodule: .
- optimizer: .
- scheduler: .
- num_classes: .
- """
- super().__init__(nnmodule, optimizer, scheduler)
+ super().__init__(
+ nnmodule=nnmodule,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ )
self.accuracy: MulticlassAccuracy = MulticlassAccuracy(
- num_classes=num_classes,
+ num_classes=config.num_classes,
)
def step(
@@ -62,11 +65,13 @@ def step(
],
stage: An[str, one_of("train", "val", "test")],
) -> Float[Tensor, " "]:
- """Computes accuracy and cross entropy loss.
+ """Computes the model accuracy and cross entropy loss.
Args:
- batch: .
- stage: .
+ batch: A tuple ``(X, y)`` where ``X`` is the input data and\
+ ``y`` is the target data.
+ stage: See\
+ :paramref:`~.BaseLitModule.stage_step.stage`.
Returns:
The cross entropy loss.
@@ -74,7 +79,7 @@ def step(
x: Float[Tensor, " batch_size *x_shape"] = batch[0]
y: Int[Tensor, " batch_size"] = batch[1]
logits: Float[Tensor, " batch_size num_classes"] = self.nnmodule(x)
- preds: Int[Tensor, " batch_size"] = torch.argmax(logits, dim=1)
- accuracy: Float[Tensor, " "] = self.accuracy(preds, y)
- self.log(f"{stage}/acc", accuracy)
- return f.cross_entropy(logits, y)
+ preds: Int[Tensor, " batch_size"] = torch.argmax(input=logits, dim=1)
+ accuracy: Float[Tensor, " "] = self.accuracy(preds=preds, target=y)
+ self.log(name=f"{stage}/acc", value=accuracy)
+ return f.cross_entropy(input=logits, target=y)
diff --git a/cneuromax/fitting/deeplearning/litmodule/nnmodule/__init__.py b/cneuromax/fitting/deeplearning/litmodule/nnmodule/__init__.py
new file mode 100644
index 00000000..0c8e0d70
--- /dev/null
+++ b/cneuromax/fitting/deeplearning/litmodule/nnmodule/__init__.py
@@ -0,0 +1,7 @@
+r""":class:`torch.nn.Module`\s."""
+from cneuromax.fitting.deeplearning.litmodule.nnmodule.mlp import (
+ MLP,
+ MLPConfig,
+)
+
+__all__ = ["MLPConfig", "MLP"]
diff --git a/cneuromax/fitting/deeplearning/nnmodule/mlp.py b/cneuromax/fitting/deeplearning/litmodule/nnmodule/mlp.py
similarity index 58%
rename from cneuromax/fitting/deeplearning/nnmodule/mlp.py
rename to cneuromax/fitting/deeplearning/litmodule/nnmodule/mlp.py
index 4e506b5e..7fd6e167 100644
--- a/cneuromax/fitting/deeplearning/nnmodule/mlp.py
+++ b/cneuromax/fitting/deeplearning/litmodule/nnmodule/mlp.py
@@ -1,5 +1,4 @@
-"""MLP class & config."""
-
+""":class:`MLP` + its config dataclass."""
from dataclasses import dataclass
from typing import Annotated as An
@@ -8,14 +7,14 @@
from omegaconf import MISSING
from torch import Tensor, nn
-from cneuromax.utils.annotations import ge, lt
+from cneuromax.utils.beartype import ge, lt
@dataclass
class MLPConfig:
- """Multi-layer perceptron (MLP) configuration.
+ """Holds :class:`MLP` config values.
- Attributes:
+ Args:
dims: List of dimensions for each layer.
p_dropout: Dropout probability.
"""
@@ -30,9 +29,14 @@ class MLP(nn.Module):
Allows for a variable number of layers, activation functions, and
dropout probability.
+ Args:
+ config: See :class:`MLPConfig`.
+ activation_fn: The singular activation function to use in\
+ between each layer.
+
Attributes:
- config (``MLPConfig``): .
- model (``nn.Sequential``): .
+ model (:class:`torch.nn.Sequential`): The internal\
+ :class:`~torch.nn.Module` that holds the MLP's layers.
"""
def __init__(
@@ -40,44 +44,39 @@ def __init__(
config: MLPConfig,
activation_fn: nn.Module,
) -> None:
- """Calls parent constructor & initializes model.
-
- Args:
- config: .
- activation_fn: .
- """
super().__init__()
- self.config = config
self.model = nn.Sequential()
-
for i in range(len(config.dims) - 1):
self.model.add_module(
- f"fc_{i}",
- nn.Linear(config.dims[i], config.dims[i + 1]),
+ name=f"fc_{i}",
+ module=nn.Linear(config.dims[i], config.dims[i + 1]),
)
if i < len(config.dims) - 2:
- self.model.add_module(f"act_{i}", activation_fn)
+ self.model.add_module(name=f"act_{i}", module=activation_fn)
if config.p_dropout: # > 0.0:
self.model.add_module(
- f"drop_{i}",
- nn.Dropout(config.p_dropout),
+ name=f"drop_{i}",
+ module=nn.Dropout(config.p_dropout),
)
def forward(
self: "MLP",
x: Float[Tensor, " batch_size *d_input"],
) -> Float[Tensor, " batch_size output_size"]:
- """Flattens input dimensions and pass through the model.
+ """Flattens input's dimensions and passes it through the model.
Note:
- This MLP isn't (yet?) suitable for cases where the output is
- multidimensional.
+ This MLP is currently only capable of returning 1D\
+ :class:`torch.Tensor` batches.
+
+ TODO: Add support for returning 2D+ :class:`torch.Tensor`\
+ batches.
Args:
- x: .
+ x: The input data batch.
Returns:
- The output vector batch.
+ The output batch.
"""
out: Float[Tensor, " batch_size flattened_d_input"] = rearrange(
x,
diff --git a/cneuromax/fitting/deeplearning/litmodule/store.py b/cneuromax/fitting/deeplearning/litmodule/store.py
new file mode 100644
index 00000000..90fa39ee
--- /dev/null
+++ b/cneuromax/fitting/deeplearning/litmodule/store.py
@@ -0,0 +1,64 @@
+""":class:`.BaseLitModule` :mod:`hydra-core` config store."""
+from hydra_zen import ZenStore
+from torch.optim import SGD, Adam, AdamW
+from transformers import (
+ get_constant_schedule,
+ get_constant_schedule_with_warmup,
+)
+
+from cneuromax.fitting.deeplearning.litmodule.nnmodule import (
+ MLP,
+ MLPConfig,
+)
+from cneuromax.utils.hydra_zen import (
+ fs_builds,
+ pfs_builds,
+)
+
+
+def store_mlp_config(store: ZenStore) -> None:
+ """Stores :mod:`hydra-core` ``litmodule/nnmodule`` group config.
+
+ Config name: ``mlp``.
+
+ Args:
+ store: See :paramref:`~.BaseTaskRunner.store_configs.store`.
+ """
+ store(
+ fs_builds(MLP, config=MLPConfig()),
+ name="mlp",
+ group="litmodule/nnmodule",
+ )
+
+
+def store_basic_optimizer_configs(store: ZenStore) -> None:
+ """Stores :mod:`hydra-core` ``litmodule/optimizer`` group configs.
+
+ Config names: ``adam``, ``adamw``, ``sgd``.
+
+ Args:
+ store: See :paramref:`~.BaseTaskRunner.store_configs.store`.
+ """
+ store(pfs_builds(Adam), name="adam", group="litmodule/optimizer")
+ store(pfs_builds(AdamW), name="adamw", group="litmodule/optimizer")
+ store(pfs_builds(SGD), name="sgd", group="litmodule/optimizer")
+
+
+def store_basic_scheduler_configs(store: ZenStore) -> None:
+ """Stores :mod:`hydra-core` ``litmodule/scheduler`` group configs.
+
+ Config names: ``constant``, ``linear_warmup``.
+
+ Args:
+ store: See :paramref:`~.BaseTaskRunner.store_configs.store`.
+ """
+ store(
+ pfs_builds(get_constant_schedule),
+ name="constant",
+ group="litmodule/scheduler",
+ )
+ store(
+ pfs_builds(get_constant_schedule_with_warmup),
+ name="linear_warmup",
+ group="litmodule/scheduler",
+ )
diff --git a/cneuromax/fitting/deeplearning/nnmodule/__init__.py b/cneuromax/fitting/deeplearning/nnmodule/__init__.py
deleted file mode 100644
index 5343da85..00000000
--- a/cneuromax/fitting/deeplearning/nnmodule/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""PyTorch Modules."""
-
-from hydra.core.config_store import ConfigStore
-
-from cneuromax.fitting.deeplearning.nnmodule.mlp import MLP, MLPConfig
-from cneuromax.utils.hydra import fs_builds
-
-__all__ = ["MLP", "MLPConfig", "store_configs"]
-
-
-def store_configs(cs: ConfigStore) -> None:
- """Store Hydra ``litmodule/nnmodule`` group configs.
-
- Names: ``mlp``.
-
- Args:
- cs: .
- """
- cs.store(
- group="litmodule/nnmodule",
- name="mlp",
- node=fs_builds(MLP, config=MLPConfig()),
- )
diff --git a/cneuromax/fitting/deeplearning/runner.py b/cneuromax/fitting/deeplearning/runner.py
new file mode 100644
index 00000000..c7961642
--- /dev/null
+++ b/cneuromax/fitting/deeplearning/runner.py
@@ -0,0 +1,75 @@
+""":class:`DeepLearningTaskRunner`."""
+from functools import partial
+from typing import Any
+
+from hydra_zen import ZenStore
+from lightning.pytorch import Trainer
+from lightning.pytorch.loggers.wandb import WandbLogger
+
+from cneuromax.fitting.config import (
+ FittingSubtaskConfig,
+)
+from cneuromax.fitting.deeplearning.config import DeepLearningTaskConfig
+from cneuromax.fitting.deeplearning.datamodule import (
+ BaseDataModule,
+)
+from cneuromax.fitting.deeplearning.litmodule import BaseLitModule
+from cneuromax.fitting.deeplearning.litmodule.store import (
+ store_basic_optimizer_configs,
+ store_basic_scheduler_configs,
+ store_mlp_config,
+)
+from cneuromax.fitting.deeplearning.store import (
+ store_basic_trainer_config,
+)
+from cneuromax.fitting.deeplearning.train import train
+from cneuromax.fitting.runner import FittingTaskRunner
+from cneuromax.store import store_wandb_logger_configs
+
+
+class DeepLearningTaskRunner(FittingTaskRunner):
+ """Deep Learning ``task`` runner."""
+
+ @classmethod
+ def store_configs(
+ cls: type["DeepLearningTaskRunner"],
+ store: ZenStore,
+ ) -> None:
+ """Stores structured configs.
+
+ .. warning::
+
+ Make sure to call this method if you are overriding it.
+
+ Args:
+ store:\
+ See :paramref:`~.FittingTaskRunner.store_configs.store`.
+ """
+ super().store_configs(store)
+ store_basic_optimizer_configs(store)
+ store_basic_scheduler_configs(store)
+ store_mlp_config(store)
+ store_basic_trainer_config(store)
+ store_wandb_logger_configs(
+ store,
+ clb=WandbLogger,
+ )
+ store(DeepLearningTaskConfig, name="config")
+
+ @classmethod
+ def run_subtask( # noqa: PLR0913
+ cls: type["DeepLearningTaskRunner"],
+ trainer: partial[Trainer],
+ datamodule: BaseDataModule,
+ litmodule: BaseLitModule,
+ logger: partial[WandbLogger],
+ config: FittingSubtaskConfig,
+ ) -> Any: # noqa: ANN401
+ """Runs the ``subtask``."""
+ return train(
+ trainer=trainer,
+ datamodule=datamodule,
+ litmodule=litmodule,
+ logger=logger,
+ config=config,
+ )
diff --git a/cneuromax/fitting/deeplearning/store.py b/cneuromax/fitting/deeplearning/store.py
new file mode 100644
index 00000000..63f21d66
--- /dev/null
+++ b/cneuromax/fitting/deeplearning/store.py
@@ -0,0 +1,26 @@
+"""Deep Learning :mod:`hydra-core` config store."""
+from hydra_zen import ZenStore
+from lightning.pytorch import Trainer
+
+from cneuromax.utils.hydra_zen import (
+ pfs_builds,
+)
+
+
+def store_basic_trainer_config(store: ZenStore) -> None:
+ """Stores :mod:`hydra-core` ``trainer`` group configs.
+
+ Config name: ``base``.
+
+ Args:
+ store: See :paramref:`~.BaseTaskRunner.store_configs.store`.
+ """
+ store(
+ pfs_builds(
+ Trainer,
+ accelerator="${config.device}",
+ default_root_dir="${config.output_dir}/lightning/",
+ ),
+ name="base",
+ group="trainer",
+ )
diff --git a/cneuromax/fitting/deeplearning/train.py b/cneuromax/fitting/deeplearning/train.py
new file mode 100644
index 00000000..0e90e981
--- /dev/null
+++ b/cneuromax/fitting/deeplearning/train.py
@@ -0,0 +1,70 @@
+""":func:`train`."""
+from functools import partial
+
+from lightning.pytorch import Trainer
+from lightning.pytorch.loggers.wandb import WandbLogger
+
+from cneuromax.fitting.config import (
+ FittingSubtaskConfig,
+)
+from cneuromax.fitting.deeplearning.datamodule import BaseDataModule
+from cneuromax.fitting.deeplearning.litmodule import BaseLitModule
+from cneuromax.fitting.deeplearning.utils.lightning import (
+ instantiate_trainer_and_logger,
+ set_batch_size_and_num_workers,
+ set_checkpoint_path,
+)
+
+
+def train(
+ trainer: partial[Trainer],
+ datamodule: BaseDataModule,
+ litmodule: BaseLitModule,
+ logger: partial[WandbLogger],
+ config: FittingSubtaskConfig,
+) -> float:
+ """Trains a Deep Neural Network.
+
+ Note that this function will be executed by
+ ``num_nodes * gpus_per_node`` processes/tasks. Those variables are
+ set in the Hydra launcher configuration.
+
+ Trains (or resumes training) the model, saves a checkpoint and
+ returns the final validation loss.
+
+ Args:
+ trainer: See :class:`~lightning.pytorch.Trainer`.
+ datamodule: See :class:`.BaseDataModule`.
+ litmodule: See :class:`.BaseLitModule`.
+ logger: See\
+ :class:`~lightning.pytorch.loggers.wandb.WandbLogger`.
+ config: See :paramref:`~.FittingSubtaskConfig`.
+
+ Returns:
+ The final validation loss.
+ """
+ full_trainer, full_logger = instantiate_trainer_and_logger(
+ partial_trainer=trainer,
+ partial_logger=logger,
+ device=config.device,
+ )
+ """TODO: Add logic for HPO"""
+ set_batch_size_and_num_workers(
+ trainer=full_trainer,
+ datamodule=datamodule,
+ litmodule=litmodule,
+ device=config.device,
+ output_dir=config.output_dir,
+ )
+ ckpt_path = set_checkpoint_path(trainer=full_trainer, config=config)
+ full_trainer.fit(
+ model=litmodule,
+ datamodule=datamodule,
+ ckpt_path=ckpt_path,
+ )
+ """TODO: Add logic for HPO
+ trainer.save_checkpoint(filepath=config.model_load_path)
+ """
+ return full_trainer.validate(model=litmodule, datamodule=datamodule)[0][
+ "val/loss"
+ ]
diff --git a/cneuromax/fitting/deeplearning/utils/__init__.py b/cneuromax/fitting/deeplearning/utils/__init__.py
index 603a94b3..7a0adde0 100644
--- a/cneuromax/fitting/deeplearning/utils/__init__.py
+++ b/cneuromax/fitting/deeplearning/utils/__init__.py
@@ -1 +1 @@
-"""Deep Learning utilities."""
+r""":mod:`~cneuromax.fitting.deeplearning`\-wide utilities."""
diff --git a/cneuromax/fitting/deeplearning/utils/lightning.py b/cneuromax/fitting/deeplearning/utils/lightning.py
index 77a27bc0..9b0f6801 100644
--- a/cneuromax/fitting/deeplearning/utils/lightning.py
+++ b/cneuromax/fitting/deeplearning/utils/lightning.py
@@ -1,129 +1,235 @@
-"""Lightning utilities."""
-
+""":mod:`lightning` utilities."""
import copy
import logging
+import os
import time
+from functools import partial
+from pathlib import Path
+from typing import Annotated as An
import numpy as np
-from hydra.utils import instantiate
-from hydra_plugins.hydra_submitit_launcher.config import (
- LocalQueueConf,
- SlurmQueueConf,
+import torch
+from hydra_plugins.hydra_submitit_launcher.submitit_launcher import (
+ SlurmLauncher,
)
from lightning.pytorch import Trainer
+from lightning.pytorch.loggers.wandb import WandbLogger
from lightning.pytorch.trainer.connectors.checkpoint_connector import (
_CheckpointConnector,
)
from lightning.pytorch.tuner.tuning import Tuner
+from torch.distributed import ReduceOp
+from wandb_osh.lightning_hooks import TriggerWandbSyncLightningCallback
-from cneuromax.fitting.deeplearning.datamodule import BaseDataModule
-from cneuromax.fitting.deeplearning.fitter import (
- DeepLearningFitterHydraConfig,
+from cneuromax.fitting.config import (
+ FittingSubtaskConfig,
)
+from cneuromax.fitting.deeplearning.datamodule import BaseDataModule
from cneuromax.fitting.deeplearning.litmodule import BaseLitModule
+from cneuromax.fitting.utils.hydra import get_launcher_config
+from cneuromax.utils.beartype import one_of
+from cneuromax.utils.misc import get_path
+
+
+def instantiate_trainer_and_logger(
+ partial_trainer: partial[Trainer],
+ partial_logger: partial[WandbLogger],
+ device: An[str, one_of("cpu", "gpu")],
+) -> tuple[Trainer, WandbLogger | None]:
+ """Creates :mod:`lightning` instances.
+
+ Args:
+ partial_trainer: See :class:`~lightning.pytorch.Trainer`.
+ partial_logger: See\
+ :class:`~lightning.pytorch.loggers.wandb.WandbLogger`.
+ device: See :paramref:`~.FittingSubtaskConfig.device`.
+
+ Returns:
+ * A :class:`~lightning.pytorch.Trainer` instance.
+ * A :class:`~lightning.pytorch.loggers.wandb.WandbLogger`\
+ instance or ``None``.
+ """
+ launcher_config = get_launcher_config()
+ wandb_key_path = Path(
+ str(os.environ.get("CNEUROMAX_PATH")) + "/WANDB_KEY.txt",
+ )
+ if wandb_key_path.exists():
+ offline = launcher_config._target_ == get_path( # noqa: SLF001
+ SlurmLauncher,
+ )
+ logger = partial_logger(offline=offline)
+ else:
+ logging.info("W&B key not found. Logging disabled.")
+ logger = None
+ callbacks = None
+ if launcher_config._target_ == get_path(SlurmLauncher): # noqa: SLF001
+ callbacks = [TriggerWandbSyncLightningCallback()]
+ trainer = partial_trainer(
+ devices=launcher_config.gpus_per_node or 1
+ if device == "gpu"
+ else launcher_config.tasks_per_node,
+ logger=logger,
+ callbacks=callbacks,
+ )
+ return trainer, logger
+
+
+def set_batch_size_and_num_workers(
+ trainer: Trainer,
+ datamodule: BaseDataModule,
+ litmodule: BaseLitModule,
+ device: An[str, one_of("cpu", "gpu")],
+ output_dir: str,
+) -> None:
+ """Sets attribute values for a :class:`~.BaseDataModule`.
+
+ See :func:`find_good_per_device_batch_size` and
+ :func:`find_good_per_device_num_workers` for more details on how
+ these variables' values are determined.
+
+ Args:
+ trainer: See :class:`~lightning.pytorch.Trainer`.
+ datamodule: See :class:`.BaseDataModule`.
+ litmodule: See :class:`.BaseLitModule`.
+ device: See :paramref:`~.FittingSubtaskConfig.device`.
+ output_dir: See :paramref:`~.BaseSubtaskConfig.output_dir`.
+ """
+ proposed_per_device_batch_size = find_good_per_device_batch_size(
+ litmodule=litmodule,
+ datamodule=datamodule,
+ device=device,
+ output_dir=output_dir,
+ )
+ proposed_per_device_num_workers = find_good_per_device_num_workers(
+ datamodule=datamodule,
+ per_device_batch_size=proposed_per_device_batch_size,
+ )
+ per_device_batch_size = int(
+ trainer.strategy.reduce(
+ torch.tensor(proposed_per_device_batch_size),
+ reduce_op=ReduceOp.MIN, # type: ignore [arg-type]
+ ),
+ )
+ per_device_num_workers = int(
+ trainer.strategy.reduce(
+ torch.tensor(proposed_per_device_num_workers),
+ reduce_op=ReduceOp.MAX, # type: ignore [arg-type]
+ ),
+ )
+ datamodule.per_device_batch_size = per_device_batch_size
+ datamodule.per_device_num_workers = per_device_num_workers
def find_good_per_device_batch_size(
- config: DeepLearningFitterHydraConfig,
- launcher_config: LocalQueueConf | SlurmQueueConf,
+ litmodule: BaseLitModule,
+ datamodule: BaseDataModule,
+ device: str,
+ output_dir: str,
) -> int:
- """Finds an appropriate ``per_device_batch_size`` parameter.
+ """Probes a :attr:`~.BaseDataModule.per_device_batch_size` value.
This functionality makes the following, not always correct, but
generally reasonable assumptions:
- - As long as the ``total_batch_size / dataset_size`` ratio remains
- small (e.g. ``< 0.01`` so as to benefit from the stochasticity of
- gradient updates), running the same number of gradient updates with
- a larger batch size will yield faster training than running the same
- number of gradient updates with a smaller batch size.
- - Loading data from disk to RAM is a larger bottleneck than loading
+
+ - As long as the ``total_batch_size / dataset_size`` ratio remains\
+ small (e.g. ``< 0.01`` so as to benefit from the stochasticity of\
+ gradient updates), running the same number of gradient updates with\
+ a larger batch size will yield better training performance than\
+ running the same number of gradient updates with a smaller batch\
+ size.
+
+ - Loading data from disk to RAM is a larger bottleneck than loading\
data from RAM to GPU VRAM.
- - If you are training on multiple GPUs, each GPU has roughly the
+
+ - If you are training on multiple GPUs, each GPU has roughly the\
same amount of VRAM.
Args:
- config: .
- launcher_config: The Hydra launcher configuration.
+ litmodule: See :class:`.BaseLitModule`.
+ datamodule: See :class:`.BaseDataModule`.
+ device: See :paramref:`~.FittingSubtaskConfig.device`.
+ output_dir: See :paramref:`~.BaseSubtaskConfig.output_dir`.
Returns:
- per_device_batch_size: The estimated proper batch size per
- device.
+ A roughly optimal ``per_device_batch_size`` value.
"""
- logging.info("Finding good `batch_size` parameter...")
- litmodule: BaseLitModule = instantiate(config.litmodule)
- datamodule: BaseDataModule = instantiate(config.datamodule)
- datamodule.per_device_num_workers = launcher_config.cpus_per_task or 1
+ litmodule_copy = copy.deepcopy(litmodule)
+ datamodule_copy = copy.deepcopy(datamodule)
+ launcher_config = get_launcher_config()
+ datamodule_copy.per_device_num_workers = launcher_config.cpus_per_task or 1
trainer = Trainer(
- accelerator=config.device,
+ accelerator=device,
devices=1,
max_epochs=-1,
- default_root_dir=config.data_dir + "/lightning/tuner/",
+ default_root_dir=output_dir + "/lightning/tuner/",
)
tuner = Tuner(trainer=trainer)
+ logging.info("Finding good `batch_size` parameter...")
per_device_batch_size = tuner.scale_batch_size(
- model=litmodule,
- datamodule=datamodule,
+ model=litmodule_copy,
+ datamodule=datamodule_copy,
mode="binsearch",
batch_arg_name="per_device_batch_size",
)
if per_device_batch_size is None:
- raise ValueError # Won't happen according to Lightning source code.
+ error_msg = (
+ "Lightning's `scale_batch_size` method returned `None`. "
+ "This is outside of the user's control, please try again."
+ )
+ raise ValueError(error_msg)
num_computing_devices = launcher_config.nodes * (
launcher_config.gpus_per_node or 1
- if config.device == "gpu"
+ if device == "gpu"
else launcher_config.tasks_per_node
)
per_device_batch_size: int = min(
- # Account for GPU memory discrepancies & ensure total batch_size
+ # Account for GPU memory discrepancies & ensure total batch size
# is < 1% of the train dataloader size.
int(per_device_batch_size * 0.9),
- len(datamodule.train_dataloader()) // (100 * num_computing_devices),
+ len(datamodule_copy.train_dataloader())
+ // (100 * num_computing_devices),
)
logging.info(f"Best `batch_size` parameter: {per_device_batch_size}.")
return per_device_batch_size
-def find_good_num_workers(
- config: DeepLearningFitterHydraConfig,
- launcher_config: LocalQueueConf | SlurmQueueConf,
+def find_good_per_device_num_workers(
+ datamodule: BaseDataModule,
per_device_batch_size: int,
max_num_data_passes: int = 100,
) -> int:
- """Finds an appropriate `num_workers` parameter.
+ """Probes a :attr:`~.BaseDataModule.per_device_num_workers` value.
- This function makes use of the ``per_device_batch_size`` parameter
- found by the ``find_good_per_device_batch_size`` function in order
- to find an appropriate ``num_workers`` parameter.
- It does so by iterating through a range of ``num_workers`` values
- and measuring the time it takes to iterate through a fixed number of
- data passes; picking the ``num_workers`` value that yields the
- shortest time.
+ Iterates through a range of ``num_workers`` values and measures the
+ time it takes to iterate through a fixed number of data passes;
+ returning the value that yields the shortest time.
Args:
- config: .
- launcher_config: The Hydra launcher configuration.
- per_device_batch_size: .
- max_num_data_passes: Maximum number of data passes to iterate
+ datamodule: See :class:`.BaseDataModule`.
+ per_device_batch_size: The return value of\
+ :func:`find_good_per_device_batch_size`.
+ max_num_data_passes: Maximum number of data passes to iterate\
through.
Returns:
- num_workers: An estimated proper number of workers.
+ A roughly optimal ``per_device_num_workers`` value.
"""
+ launcher_config = get_launcher_config()
logging.info("Finding good `num_workers` parameter...")
if launcher_config.cpus_per_task in [None, 1]:
logging.info("Only 1 worker available/provided. Returning 0.")
return 0
times = []
for num_workers in range(launcher_config.cpus_per_task or 1 + 1):
- datamodule: BaseDataModule = instantiate(config.datamodule)
- datamodule.per_device_batch_size = per_device_batch_size
- datamodule.per_device_num_workers = num_workers
- datamodule.prepare_data()
- datamodule.setup("fit")
+ datamodule_copy = copy.deepcopy(datamodule)
+ datamodule_copy.per_device_batch_size = per_device_batch_size
+ datamodule_copy.per_device_num_workers = num_workers
+ datamodule_copy.prepare_data()
+ datamodule_copy.setup("fit")
start_time = time.time()
num_data_passes = 0
while num_data_passes < max_num_data_passes:
- for _ in datamodule.train_dataloader():
+ for _ in datamodule_copy.train_dataloader():
num_data_passes += 1
if num_data_passes == max_num_data_passes:
break
@@ -136,17 +242,35 @@ def find_good_num_workers(
return best_time
def set_checkpoint_path(
    trainer: Trainer,  # noqa: ARG001
    config: FittingSubtaskConfig,  # noqa: ARG001
) -> str | None:
    """Sets the path to the checkpoint to resume training from.

    TODO: Implement. Currently a stub that always starts from scratch.

    Args:
        trainer: See :class:`~lightning.pytorch.Trainer`.
        config: See :paramref:`~.FittingSubtaskConfig`.

    Returns:
        The path to the checkpoint to resume training from, or
        ``None`` to train from scratch (always ``None`` until
        implemented).
    """
    return None
+
+
class InitOptimParamsCheckpointConnector(_CheckpointConnector):
- """Initialized optimizer parameters Lightning checkpoint connector.
+ """Tweaked :mod:`lightning` checkpoint connector.
- Makes use of the newly instantiated optimizers' hyper-parameters
- rather than the checkpointed hyper-parameters. For use when resuming
- training with different optimizer hyper-parameters (e.g. with the
- PBT Hydra sweeper).
+ Allows to make use of the instantiated optimizers'
+ hyper-parameters rather than the checkpointed hyper-parameters.
+ For use when resuming training with different optimizer
+ hyper-parameters (e.g. with a PBT :mod:`hydra-core` Sweeper).
"""
def restore_optimizers(self: "InitOptimParamsCheckpointConnector") -> None:
- """Preserves newly instantiated parameters."""
+ """Tweaked method to preserve newly instantiated parameters."""
new_optims = copy.deepcopy(self.trainer.strategy.optimizers)
super().restore_optimizers()
for ckpt_optim, new_optim in zip(
@@ -160,7 +284,7 @@ def restore_optimizers(self: "InitOptimParamsCheckpointConnector") -> None:
strict=True,
):
for ckpt_optim_param_group_key in ckpt_optim_param_group:
- # Skip the `params`` key as it is not a HP.
+ # Skip the `params` key as it is not a HP.
if ckpt_optim_param_group_key != "params":
# Place the new Hydra instantiated optimizers'
# HPs back into the restored optimizers.
diff --git a/cneuromax/fitting/deeplneuroevo/__init__.py b/cneuromax/fitting/deeplneuroevo/__init__.py
new file mode 100644
index 00000000..1d1be551
--- /dev/null
+++ b/cneuromax/fitting/deeplneuroevo/__init__.py
@@ -0,0 +1 @@
+"""Deep Learning + Neuroevolution."""
diff --git a/cneuromax/fitting/neuroevolution/__init__.py b/cneuromax/fitting/neuroevolution/__init__.py
index 3483e923..a9755b71 100644
--- a/cneuromax/fitting/neuroevolution/__init__.py
+++ b/cneuromax/fitting/neuroevolution/__init__.py
@@ -1 +1 @@
-"""Neuroevolution module."""
+"""Neuroevolution."""
diff --git a/cneuromax/fitting/neuroevolution/agent/__init__.py b/cneuromax/fitting/neuroevolution/agent/__init__.py
new file mode 100644
index 00000000..1e0bef86
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/agent/__init__.py
@@ -0,0 +1,7 @@
+"""Neuroevolution Agent."""
+from cneuromax.fitting.neuroevolution.agent.base import (
+ BaseAgent,
+ BaseAgentConfig,
+)
+
+__all__ = ["BaseAgent", "BaseAgentConfig"]
diff --git a/cneuromax/fitting/neuroevolution/agent/base.py b/cneuromax/fitting/neuroevolution/agent/base.py
new file mode 100644
index 00000000..dde7d169
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/agent/base.py
@@ -0,0 +1,128 @@
+""":class:`BaseAgent` & its config."""
+from abc import ABCMeta, abstractmethod
+from dataclasses import dataclass
+from typing import Annotated as An
+
+from torch import Tensor
+
+from cneuromax.utils.beartype import ge, le
+
+
@dataclass
class BaseAgentConfig:
    """Holds :class:`BaseAgent` config values.

    Args:
        env_transfer: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.env_transfer`.
        fit_transfer: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.fit_transfer`.
        mem_transfer: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.mem_transfer`.
    """

    # The string defaults are Hydra/OmegaConf interpolations that are
    # resolved to the ``subtask`` config's boolean values at runtime,
    # hence the `type: ignore` on each `bool`-annotated field.
    env_transfer: bool = "${config.env_transfer}" # type: ignore[assignment]
    fit_transfer: bool = "${config.fit_transfer}" # type: ignore[assignment]
    mem_transfer: bool = "${config.mem_transfer}" # type: ignore[assignment]
+
+
class BaseAgent(metaclass=ABCMeta):
    """Root Neuroevolution agent class.

    From an algorithmic perspective, we make use of 50% truncation
    selection, meaning that the top 50% of agents in terms of fitness
    score are selected and will produce two children agents each.

    From an implementation perspective, ``pop_size`` instances of this
    class will be created upon initialization. Whenever an
    agent is selected, a copy of this object will be created and sent
    to a MPI process in possession of a non-selected agent. Both this
    original instance and the copy sent to the other process will be
    mutated in-place (meaning no new instance will be created).

    It might therefore be useful to sometimes consider this class as
    an ``AgentContainer`` class rather than an ``Agent`` class.

    Args:
        config: See :class:`BaseAgentConfig`.
        pop_idx: The agent's population index. An index of ``0`` means\
            that the agent is in the generator population while an\
            index of ``1`` means that the agent is in the\
            discriminator population.
        pops_are_merged: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.pop_merge`.

    Attributes:
        config (:class:`BaseAgentConfig`): See :paramref:`config`.
        role (``str``): The agent's role. Can be either ``"generator"``\
            or ``"discriminator"``.
        is_other_role_in_other_pop (``bool``): Whether the agent is the\
            other role in the other population. If the two populations\
            are merged (see :paramref:`pops_are_merged`), then an\
            agent is both a generator and a discriminator. It is a\
            generator/discriminator in this population while it is a\
            discriminator/generator in the other population. Such\
            type of agent needs to accommodate this property through\
            its network architecture.
        curr_eval_score (``float``): The score obtained by the agent\
            during the current evaluation.
        curr_eval_num_steps (``int``): The number of steps taken by the\
            agent during the current evaluation.
        saved_env (``torchrl.envs.EnvBase``): The :mod:`torchrl`\
            environment instance to resume from (only set if\
            :paramref:`~.BaseAgentConfig.env_transfer` is ``True``).
        saved_env_out (``tensordict.Tensordict``): The latest output\
            from the environment to resume from (only set if\
            :paramref:`~.BaseAgentConfig.env_transfer` is ``True``).
        curr_episode_score: The current episode score (only set if\
            :paramref:`~.BaseAgentConfig.env_transfer` is ``True``).
        curr_episode_num_steps: The number of steps taken in the\
            current episode (only set if\
            :paramref:`~.BaseAgentConfig.env_transfer` is ``True``).
        continual_fitness: The agent's fitness in addition to all of\
            its predecessors' fitnesses (only set if\
            :paramref:`~.BaseAgentConfig.fit_transfer` is ``True``).
    """

    def __init__(
        self: "BaseAgent",
        config: BaseAgentConfig,
        pop_idx: An[int, ge(0), le(1)],
        *,
        pops_are_merged: bool,
    ) -> None:
        self.config = config
        # Population 0 holds generators, population 1 discriminators.
        self.role = "generator" if pop_idx == 0 else "discriminator"
        self.is_other_role_in_other_pop = pops_are_merged
        self.initialize_eval_attributes()

    def initialize_eval_attributes(self: "BaseAgent") -> None:
        """Initializes attributes used during evaluation."""
        # Initialized to int 0; accumulated rewards presumably promote
        # these to float during evaluation — TODO confirm.
        self.curr_eval_score = 0
        self.curr_eval_num_steps = 0
        # Transfer-specific state is only created when the matching
        # transfer flag is enabled (see the class docstring).
        if self.config.env_transfer:
            self.saved_env = None
            self.saved_env_out = None
            self.curr_episode_num_steps = 0
            self.curr_episode_score = 0
        if self.config.fit_transfer:
            self.continual_fitness = 0

    @abstractmethod
    def mutate(self: "BaseAgent") -> None:
        """Applies random mutation(s) to the agent."""

    @abstractmethod
    def reset(self: "BaseAgent") -> None:
        """Resets the agent's memory state."""

    @abstractmethod
    def __call__(self: "BaseAgent", x: Tensor) -> Tensor:
        """Runs the agent for one timestep given :paramref:`x`.

        Args:
            x: An input observation.

        Returns:
            The agent's output.
        """
diff --git a/cneuromax/fitting/neuroevolution/config.py b/cneuromax/fitting/neuroevolution/config.py
new file mode 100644
index 00000000..128fae6c
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/config.py
@@ -0,0 +1,101 @@
+"""Neuroevolution ``subtask`` and ``task`` configs."""
+from dataclasses import dataclass, field
+from typing import Annotated as An
+from typing import Any
+
+import wandb
+from hydra_zen import make_config
+
+from cneuromax.fitting.config import (
+ FittingSubtaskConfig,
+)
+from cneuromax.fitting.neuroevolution.agent import BaseAgent
+from cneuromax.fitting.neuroevolution.space import BaseSpace
+from cneuromax.utils.beartype import ge
+from cneuromax.utils.hydra_zen import (
+ builds,
+ fs_builds,
+ p_builds,
+ pfs_builds,
+)
+
+
@dataclass
class NeuroevolutionSubtaskConfig(FittingSubtaskConfig):
    """Neuroevolution ``subtask`` config.

    Args:
        agents_per_task: Number of agents per task (``num_tasks`` =\
            ``num_nodes`` x ``tasks_per_node``).
        prev_num_gens: Number of generations from a previous experiment\
            to load.
        total_num_gens: Number of generations to run the experiment for\
            (including the previous number of generations).
        save_interval: Number of generations between each save point.\
            ``0`` means no save point except for the last generation.
        save_first_gen: Whether to save the state of the experiment\
            after the first generation (usually for plotting purposes).
        pop_merge: Whether to merge both generator and discriminator\
            populations into a single population. This means that each\
            agent will be evaluated on both its generative and\
            discriminative abilities.
        env_transfer: Whether an agent's environment state\
            (position, velocity, ...) is transferred to its children\
            if it passes through the selection process.
        fit_transfer: Whether an agent's fitness is transferred to\
            its children if it passes through the selection process.
        mem_transfer: Whether an agent's memory (hidden state) is\
            transferred to its children if it passes through the\
            selection process.
        eval_num_steps: Number of environment steps to run each agent\
            for during evaluation. ``0`` means that the agent will run\
            until the environment terminates (``eval_num_steps = 0`` is\
            not supported for ``env_transfer = True``).
    """

    agents_per_task: An[int, ge(1)] = 1
    prev_num_gens: An[int, ge(0)] = 0
    total_num_gens: An[int, ge(1)] = 10
    save_interval: An[int, ge(0)] = 0
    save_first_gen: bool = False
    pop_merge: bool = False
    env_transfer: bool = False
    fit_transfer: bool = False
    mem_transfer: bool = False
    eval_num_steps: An[int, ge(0)] = 0

    def __post_init__(self: "NeuroevolutionSubtaskConfig") -> None:
        """Post-initialization updates."""
        # A ``save_interval`` of 0 means "save only at the very last
        # generation": replace it with the total number of generations
        # remaining so a single save point falls on the final one.
        if self.save_interval == 0:
            self.save_interval = self.total_num_gens - self.prev_num_gens
+
+
@dataclass
class NeuroevolutionTaskConfig(
    # Dynamically generated hydra-zen base config holding the four
    # structured fields below.
    make_config(  # type: ignore[misc]
        space=builds(BaseSpace),
        agent=p_builds(BaseAgent),
        logger=pfs_builds(wandb.init),
        config=fs_builds(NeuroevolutionSubtaskConfig),
    ),
):
    """Neuroevolution ``task`` config.

    Args:
        defaults: Hydra defaults.
        space: See :class:`~neuroevolution.space.BaseSpace`.
        agent: See :class:`~neuroevolution.agent.BaseAgent`.
        logger: See :func:`wandb.init`.
        config: See :class:`.NeuroevolutionSubtaskConfig`.
    """

    # Hydra defaults list: ``_self_`` first so later entries override
    # the values defined in this config.
    defaults: list[Any] = field(
        default_factory=lambda: [
            "_self_",
            {"logger": "wandb_simexp"},
            "project",
            "task",
            {"task": None},
            {"override hydra/launcher": "submitit_local"},
        ],
    )
diff --git a/cneuromax/fitting/neuroevolution/evolve.py b/cneuromax/fitting/neuroevolution/evolve.py
new file mode 100644
index 00000000..5150a86c
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/evolve.py
@@ -0,0 +1,194 @@
+""":func:`evolve`."""
+from collections.abc import Callable
+from functools import partial
+from typing import Any
+
+import wandb
+
+from cneuromax.fitting.neuroevolution.agent import BaseAgent
+from cneuromax.fitting.neuroevolution.config import (
+ NeuroevolutionSubtaskConfig,
+)
+from cneuromax.fitting.neuroevolution.space import BaseSpace
+from cneuromax.fitting.neuroevolution.utils.compute import (
+ compute_generation_results,
+ compute_save_points,
+ compute_start_time_and_seeds,
+ compute_total_num_env_steps_and_process_fitnesses,
+)
+from cneuromax.fitting.neuroevolution.utils.evolve import (
+ evaluate_on_cpu,
+ evaluate_on_gpu,
+ mutate,
+)
+from cneuromax.fitting.neuroevolution.utils.exchange import (
+ exchange_agents,
+ update_exchange_and_mutate_info,
+)
+from cneuromax.fitting.neuroevolution.utils.initialize import (
+ initialize_agents,
+ initialize_common_variables,
+ initialize_gpu_comm,
+)
+from cneuromax.fitting.neuroevolution.utils.readwrite import (
+ load_state,
+ save_state,
+)
+from cneuromax.fitting.neuroevolution.utils.validate import validate_space
+from cneuromax.fitting.neuroevolution.utils.wandb import setup_wandb
+from cneuromax.utils.mpi4py import get_mpi_variables
+
+
def evolve(
    space: BaseSpace,
    agent: partial[BaseAgent],
    logger: Callable[..., Any],
    config: NeuroevolutionSubtaskConfig,
) -> None:
    """Neuroevolution.

    Runs the genetic algorithm: mutation, evaluation, selection and
    agent exchange, for ``total_num_gens - prev_num_gens`` generations.

    Note that this function and all of its sub-functions will be called
    by ``num_nodes * tasks_per_node`` MPI processes/tasks. These two
    variables are set in the Hydra launcher configuration.

    Args:
        space: See :class:`~.space.BaseSpace`.
        agent: See :class:`~.agent.BaseAgent`.
        logger: See :func:`~.utils.wandb.setup_wandb`.
        config: See :class:`~.NeuroevolutionSubtaskConfig`.
    """
    comm, _, _ = get_mpi_variables()
    validate_space(space=space, pop_merge=config.pop_merge)
    save_points = compute_save_points(
        prev_num_gens=config.prev_num_gens,
        total_num_gens=config.total_num_gens,
        save_interval=config.save_interval,
        save_first_gen=config.save_first_gen,
    )
    (
        pop_size,
        len_agents_batch,
        exchange_and_mutate_info,
        exchange_and_mutate_info_batch,
        seeds_batch,
        generation_results,
        generation_results_batch,
        total_num_env_steps,
    ) = initialize_common_variables(
        agents_per_task=config.agents_per_task,
        num_pops=space.num_pops,
    )
    # Only created here and only read below when evaluating on GPU.
    if space.evaluates_on_gpu:
        ith_gpu_comm = initialize_gpu_comm()
    # Either resume from a saved state or start from fresh agents.
    if config.prev_num_gens > 0:
        (
            agents_batch,
            generation_results,
            total_num_env_steps,
        ) = load_state(
            prev_num_gens=config.prev_num_gens,
            len_agents_batch=len_agents_batch,
            output_dir=config.output_dir,
        )
    else:
        agents_batch = initialize_agents(
            agent=agent,
            len_agents_batch=len_agents_batch,
            num_pops=space.num_pops,
            pop_merge=config.pop_merge,
        )
    setup_wandb(logger=logger)
    for curr_gen in range(config.prev_num_gens + 1, config.total_num_gens + 1):
        start_time, seeds = compute_start_time_and_seeds(
            generation_results=generation_results,
            curr_gen=curr_gen,
            num_pops=space.num_pops,
            pop_size=pop_size,
            pop_merge=config.pop_merge,
        )
        # NOTE: when resuming (``prev_num_gens > 0``) the loop starts
        # past generation 1, so only a fresh run takes this branch.
        if curr_gen == 1:
            # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
            # for a full example execution of the genetic algorithm.
            # The following block is exemplified in section 3.
            comm.Scatter(
                sendbuf=seeds,
                recvbuf=seeds_batch,
            )
            # Index 3 of the info array holds the mutation seeds.
            exchange_and_mutate_info_batch[:, :, 3] = seeds_batch
        else:
            update_exchange_and_mutate_info(
                num_pops=space.num_pops,
                pop_size=pop_size,
                exchange_and_mutate_info=exchange_and_mutate_info,
                generation_results=generation_results,
                seeds=seeds,
            )
            # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
            # for a full example execution of the genetic algorithm.
            # The following block is exemplified in section 13.
            comm.Scatter(
                sendbuf=exchange_and_mutate_info,
                recvbuf=exchange_and_mutate_info_batch,
            )
            exchange_agents(
                num_pops=space.num_pops,
                pop_size=pop_size,
                agents_batch=agents_batch,
                exchange_and_mutate_info_batch=exchange_and_mutate_info_batch,
            )
        mutate(
            agents_batch=agents_batch,
            exchange_and_mutate_info_batch=exchange_and_mutate_info_batch,
            num_pops=space.num_pops,
        )
        fitnesses_and_num_env_steps_batch = (
            (
                evaluate_on_gpu(
                    ith_gpu_comm=ith_gpu_comm,
                    agents_batch=agents_batch,
                    space=space,
                    curr_gen=curr_gen,
                    transfer=config.env_transfer
                    or config.fit_transfer
                    or config.mem_transfer,
                )
            )
            if space.evaluates_on_gpu
            else evaluate_on_cpu(
                agents_batch=agents_batch,
                space=space,
                curr_gen=curr_gen,
            )
        )
        compute_generation_results(
            generation_results=generation_results,
            generation_results_batch=generation_results_batch,
            fitnesses_and_num_env_steps_batch=fitnesses_and_num_env_steps_batch,
            agents_batch=agents_batch,
            num_pops=space.num_pops,
        )
        # Primary process gathers fitnesses, number of environment steps
        # and pickled agent sizes
        comm.Gather(
            sendbuf=generation_results_batch,
            recvbuf=generation_results,
        )
        total_num_env_steps = (
            compute_total_num_env_steps_and_process_fitnesses(
                generation_results=generation_results,
                total_num_env_steps=total_num_env_steps,
                curr_gen=curr_gen,
                start_time=start_time,
                pop_merge=config.pop_merge,
            )
        )
        # State saving.
        if curr_gen in save_points:
            save_state(
                agents_batch=agents_batch,
                generation_results=generation_results,
                total_num_env_steps=total_num_env_steps,
                curr_gen=curr_gen,
                output_dir=config.output_dir,
            )
    wandb.finish()
diff --git a/cneuromax/fitting/neuroevolution/net/__init__.py b/cneuromax/fitting/neuroevolution/net/__init__.py
new file mode 100644
index 00000000..fe2f0d46
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/net/__init__.py
@@ -0,0 +1 @@
+"""Neural nets for neuroevolution."""
diff --git a/cneuromax/fitting/neuroevolution/net/cpu/__init__.py b/cneuromax/fitting/neuroevolution/net/cpu/__init__.py
new file mode 100644
index 00000000..499ce44f
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/net/cpu/__init__.py
@@ -0,0 +1 @@
+"""CPU-based Neural nets for neuroevolution."""
diff --git a/cneuromax/fitting/neuroevolution/net/cpu/static/__init__.py b/cneuromax/fitting/neuroevolution/net/cpu/static/__init__.py
new file mode 100644
index 00000000..5460f3e2
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/net/cpu/static/__init__.py
@@ -0,0 +1,4 @@
+"""Static architecture CPU-based Neural nets for neuroevolution."""
+from .rnnfc import CPUStaticRNNFC, CPUStaticRNNFCConfig
+
+__all__ = ["CPUStaticRNNFC", "CPUStaticRNNFCConfig"]
diff --git a/cneuromax/fitting/neuroevolution/net/cpu/static/rnnfc.py b/cneuromax/fitting/neuroevolution/net/cpu/static/rnnfc.py
new file mode 100644
index 00000000..1e8368cd
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/net/cpu/static/rnnfc.py
@@ -0,0 +1,64 @@
+""":class:`CPUStaticRNNFC` & :class:`CPUStaticRNNFCConfig`."""
+from dataclasses import dataclass
+
+import torch
+from jaxtyping import Float32
+from torch import Tensor, nn
+
+
@dataclass
class CPUStaticRNNFCConfig:
    """Config values for :class:`CPUStaticRNNFC`.

    Args:
        input_size: Size of the input tensor.
        hidden_size: Size of the RNN hidden state.
        output_size: Size of the output tensor.
    """

    input_size: int
    hidden_size: int
    output_size: int
+
+
class CPUStaticRNNFC(nn.Module):
    """CPU-running static architecture RNN w/ a final FC layer.

    The hidden state persists across calls to :meth:`forward`; call
    :meth:`reset` to clear it (e.g. between episodes).

    Args:
        config: See :class:`CPUStaticRNNFCConfig`.
    """

    def __init__(
        self: "CPUStaticRNNFC",
        config: "CPUStaticRNNFCConfig",
    ) -> None:
        super().__init__()
        self.rnn = nn.RNNCell(
            input_size=config.input_size,
            hidden_size=config.hidden_size,
        )
        self.fc = nn.Linear(
            in_features=config.hidden_size,
            out_features=config.output_size,
        )
        # Hidden state, shape ``(hidden_size,)``.
        self.h: Tensor = torch.zeros(
            size=(config.hidden_size,),
        )

    def reset(self: "CPUStaticRNNFC") -> None:
        """Resets the hidden state of the RNN to zeros."""
        # Rebind to a fresh zero tensor instead of the original
        # ``self.h *= torch.zeros_like(self.h)``: after a forward pass
        # ``self.h`` is a non-leaf autograd output of ``rnn`` and should
        # not be mutated in-place (and the multiply allocated a zeros
        # tensor anyway without being any cheaper).
        self.h = torch.zeros_like(self.h)

    def forward(
        self: "CPUStaticRNNFC",
        x: Tensor,
    ) -> Tensor:
        """Forward pass.

        Args:
            x: Input tensor of shape ``(input_size,)``.

        Returns:
            Output tensor of shape ``(output_size,)``.
        """
        # New names per stage (the original re-annotated the parameter
        # ``x`` twice, which type checkers reject as a redefinition).
        hidden: Tensor = self.rnn(input=x, hx=self.h)  # (hidden_size,)
        self.h = hidden
        out: Tensor = self.fc(input=hidden)  # (output_size,)
        return out
diff --git a/cneuromax/fitting/neuroevolution/runner.py b/cneuromax/fitting/neuroevolution/runner.py
new file mode 100644
index 00000000..ebc211e4
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/runner.py
@@ -0,0 +1,75 @@
+""":class:`NeuroevolutionTaskRunner`."""
+from collections.abc import Callable
+from functools import partial
+from typing import Any
+
+import wandb
+from hydra_zen import ZenStore
+
+from cneuromax.fitting.neuroevolution.agent import BaseAgent
+from cneuromax.fitting.neuroevolution.config import (
+ NeuroevolutionSubtaskConfig,
+ NeuroevolutionTaskConfig,
+)
+from cneuromax.fitting.neuroevolution.evolve import evolve
+from cneuromax.fitting.neuroevolution.space import BaseSpace
+from cneuromax.fitting.runner import FittingTaskRunner
+from cneuromax.store import store_wandb_logger_configs
+
+
class NeuroevolutionTaskRunner(FittingTaskRunner):
    """Neuroevolution ``task`` runner."""

    @classmethod
    def store_configs(
        cls: type["NeuroevolutionTaskRunner"],
        store: ZenStore,
    ) -> None:
        """Stores structured configs.

        .. warning::

            Make sure to call this method if you are overriding it.

        Args:
            store:\
                See :paramref:`~.FittingTaskRunner.store_configs.store`.
        """
        super().store_configs(store=store)
        store_wandb_logger_configs(store, clb=wandb.init)
        store(NeuroevolutionTaskConfig, name="config")

    @staticmethod
    def validate_subtask_config(config: NeuroevolutionSubtaskConfig) -> None:
        """Validates the ``subtask`` config.

        Args:
            config: See :class:`~.NeuroevolutionSubtaskConfig`.

        Raises:
            ValueError: If ``env_transfer`` is enabled without a\
                positive ``eval_num_steps``, or if the number of\
                generations to run is not a multiple of\
                ``save_interval``.
        """
        if config.eval_num_steps == 0 and config.env_transfer:
            error_msg = "`env_transfer = True` requires `eval_num_steps > 0`."
            raise ValueError(error_msg)
        # ``save_interval`` is expected to be non-zero here:
        # :meth:`NeuroevolutionSubtaskConfig.__post_init__` replaces a
        # ``0`` value with ``total_num_gens - prev_num_gens``.
        if (
            config.total_num_gens - config.prev_num_gens
        ) % config.save_interval != 0:
            # Fixed: the original message stated the relationship
            # backwards ("`save_interval` must be a multiple of
            # `total_num_gens - prev_num_gens`") while the check above
            # requires the opposite.
            error_msg = (
                "`total_num_gens - prev_num_gens` must be a multiple of "
                "`save_interval`."
            )
            raise ValueError(error_msg)

    @classmethod
    def run_subtask(
        cls: type["NeuroevolutionTaskRunner"],
        space: BaseSpace,
        agent: partial[BaseAgent],
        logger: Callable[..., Any],
        config: NeuroevolutionSubtaskConfig,
    ) -> Any:  # noqa: ANN401
        """Runs the ``subtask``.

        Args:
            space: See :class:`~.space.BaseSpace`.
            agent: See :class:`~.agent.BaseAgent`.
            logger: See :func:`wandb.init`.
            config: See :class:`~.NeuroevolutionSubtaskConfig`.

        Returns:
            The result of :func:`~.evolve` (currently ``None``).
        """
        return evolve(space=space, agent=agent, logger=logger, config=config)
diff --git a/cneuromax/fitting/neuroevolution/space/__init__.py b/cneuromax/fitting/neuroevolution/space/__init__.py
new file mode 100644
index 00000000..f965e5f6
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/space/__init__.py
@@ -0,0 +1,14 @@
+"""Neuroevolution Spaces."""
+from cneuromax.fitting.neuroevolution.space.base import (
+ BaseSpace,
+ BaseSpaceConfig,
+)
+from cneuromax.fitting.neuroevolution.space.reinforcement import (
+ BaseReinforcementSpace,
+)
+
+__all__ = [
+ "BaseSpace",
+ "BaseSpaceConfig",
+ "BaseReinforcementSpace",
+]
diff --git a/cneuromax/fitting/neuroevolution/space/base.py b/cneuromax/fitting/neuroevolution/space/base.py
new file mode 100644
index 00000000..11012e00
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/space/base.py
@@ -0,0 +1,66 @@
+""":class:`BaseSpace` and its config."""
+from abc import ABCMeta, abstractmethod
+from dataclasses import dataclass
+from typing import Annotated as An
+from typing import Any
+
+import numpy as np
+
+from cneuromax.fitting.neuroevolution.agent import BaseAgent
+from cneuromax.utils.beartype import ge
+
+
@dataclass
class BaseSpaceConfig:
    """Holds :class:`BaseSpace` config values.

    Args:
        eval_num_steps: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.eval_num_steps`.
    """

    eval_num_steps: An[int, ge(0)] = 0
+
+
class BaseSpace(metaclass=ABCMeta):
    """Space Base class.

    A ``Space`` wraps a :mod:`torchrl` environment inside which agents
    produce behaviour and from which they receive fitness scores.

    Args:
        config: See :class:`~.BaseSpaceConfig`.
        num_pops: Number of agents interacting with each other in a\
            given space.
        evaluates_on_gpu: Whether GPU devices are used to evaluate\
            agents.
    """

    def __init__(
        self: "BaseSpace",
        config: BaseSpaceConfig,
        num_pops: int,
        *,
        evaluates_on_gpu: bool,
    ) -> None:
        # Plain attribute storage; all behaviour lives in subclasses.
        self.evaluates_on_gpu = evaluates_on_gpu
        self.num_pops = num_pops
        self.config = config

    @abstractmethod
    def evaluate(
        self: "BaseSpace",
        agents: list[list[BaseAgent]],
        curr_gen: An[int, ge(1)],
    ) -> np.ndarray[np.float32, Any]:
        """Evaluates agents and attributes fitnesses to them.

        Called once per iteration (every generation).

        Args:
            agents: Agent(s) to evaluate.
            curr_gen: The current generation number/index.

        Returns:
            The fitnesses and number of steps ran.
        """
diff --git a/cneuromax/fitting/neuroevolution/space/reinforcement.py b/cneuromax/fitting/neuroevolution/space/reinforcement.py
new file mode 100644
index 00000000..9aeaa0ec
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/space/reinforcement.py
@@ -0,0 +1,156 @@
+""":class:`BaseReinforcementSpace`."""
+import copy
+from abc import ABCMeta
+from typing import Annotated as An
+from typing import Any, final
+
+import numpy as np
+import wandb
+from tensordict import TensorDict
+from torchrl.envs import EnvBase
+
+from cneuromax.fitting.neuroevolution.agent import BaseAgent
+from cneuromax.fitting.neuroevolution.space.base import (
+ BaseSpace,
+ BaseSpaceConfig,
+)
+from cneuromax.utils.beartype import ge
+
+
class BaseReinforcementSpace(BaseSpace, metaclass=ABCMeta):
    """Reinforcement Base Space class.

    Single-population (``num_pops=1``), CPU-evaluated space that runs
    one agent per generation through a :mod:`torchrl` environment.

    Args:
        env: The :mod:`torchrl` environment to run the evaluation on.
        config: See :paramref:`~.BaseSpace.config`.
    """

    def __init__(
        self: "BaseReinforcementSpace",
        config: BaseSpaceConfig,
        env: EnvBase,
    ) -> None:
        super().__init__(config=config, num_pops=1, evaluates_on_gpu=False)
        self.env = env

    @final
    def run_pre_eval(
        self: "BaseReinforcementSpace",
        agent: BaseAgent,
        curr_gen: int,
    ) -> TensorDict:
        """Resets/loads the environment before evaluation begins.

        Args:
            agent: The agent being evaluated.
            curr_gen: See :paramref:`~.BaseSpace.evaluate.curr_gen`.

        Returns:
            See :paramref:`run_post_eval.out`.
        """
        # With env transfer past the first generation, resume from the
        # environment state the agent's lineage saved previously.
        if curr_gen > 1 and agent.config.env_transfer:
            self.env = copy.deepcopy(agent.saved_env)
            return copy.deepcopy(agent.saved_env_out)
        # Seeding with the generation index makes every agent face the
        # same environment within a given generation.
        self.env.set_seed(seed=curr_gen)
        return self.env.reset()

    @final
    def env_done_reset(
        self: "BaseReinforcementSpace",
        agent: BaseAgent,
        out: TensorDict,
        curr_gen: int,
    ) -> TensorDict | dict[str, bool]:
        """Resets the agent/environment when the environment terminates.

        Args:
            agent: See :paramref:`run_pre_eval.agent`.
            out: The latest environment output.
            curr_gen: See :paramref:`~.BaseSpace.evaluate.curr_gen`.

        Returns:
            See :paramref:`run_post_eval.out`.
        """
        # env,fit,env+fit,env+fit+mem: reset, mem,mem+fit: no reset
        # NOTE(review): `A or (A and B)` reduces to `A`, so this
        # condition is simply `not agent.config.mem_transfer`; the
        # second term looks redundant — confirm intent.
        if not (
            agent.config.mem_transfer
            or (agent.config.mem_transfer and agent.config.fit_transfer)
        ):
            agent.reset()
        if agent.config.env_transfer:
            # Episode boundary: log the finished episode's score and
            # start a fresh episode in a re-seeded environment.
            wandb.log(
                {"score": agent.curr_episode_score, "gen": curr_gen},
            )
            agent.curr_episode_score = 0
            agent.curr_episode_num_steps = 0
            self.env.set_seed(seed=curr_gen)
            return self.env.reset()
        return out

    @final
    def run_post_eval(
        self: "BaseReinforcementSpace",
        agent: BaseAgent,
        out: TensorDict,
        curr_gen: int,
    ) -> None:
        """Resets the agent & saves the environment post-evaluation.

        Args:
            agent: See :paramref:`run_pre_eval.agent`.
            out: The latest environment output.
            curr_gen: See :paramref:`~.BaseSpace.evaluate.curr_gen`.
        """
        if not agent.config.mem_transfer:
            agent.reset()
        if agent.config.env_transfer:
            # Snapshot the environment & its latest output so the
            # agent's lineage can resume from here next generation.
            agent.saved_env = copy.deepcopy(self.env)
            agent.saved_env_out = copy.deepcopy(out)
        if not agent.config.env_transfer:
            # Without env transfer the evaluation score is the episode
            # score, so it is logged here instead of at episode end.
            wandb.log(
                {"score": agent.curr_eval_score, "gen": curr_gen},
            )

    @final
    def evaluate(
        self: "BaseReinforcementSpace",
        agents: list[list[BaseAgent]],
        curr_gen: An[int, ge(1)],
    ) -> np.ndarray[np.float32, Any]:
        """Evaluation function called once per generation.

        Args:
            agents: A 2D list containing the agent to evaluate.
            curr_gen: See :paramref:`~.BaseSpace.evaluate.curr_gen`.

        Returns:
            The agent's fitness (continual fitness if\
            :paramref:`~.BaseAgentConfig.fit_transfer` is enabled)\
            and its number of environment steps.
        """
        # Single population, single agent per batch in this space.
        agent = agents[0][0]
        agent.curr_eval_score = 0
        agent.curr_eval_num_steps = 0
        out = self.run_pre_eval(agent=agent, curr_gen=curr_gen)
        while not out["done"]:
            out = out.set(key="action", item=agent(x=out["observation"]))
            out = self.env.step(tensordict=out)["next"]
            agent.curr_eval_score += out["reward"]
            agent.curr_eval_num_steps += 1
            if agent.config.env_transfer:
                agent.curr_episode_score += out["reward"]
                agent.curr_episode_num_steps += 1
            if agent.config.fit_transfer:
                agent.continual_fitness += out["reward"]
            if out["done"]:
                out = self.env_done_reset(
                    agent=agent,
                    out=out,
                    curr_gen=curr_gen,
                )
            # Enforce the per-evaluation step budget (0 = unlimited,
            # i.e. run until the environment terminates).
            if agent.curr_eval_num_steps == self.config.eval_num_steps:
                out["done"] = True
        self.run_post_eval(agent=agent, out=out, curr_gen=curr_gen)
        return np.array(
            (
                agent.continual_fitness
                if agent.config.fit_transfer
                else agent.curr_eval_score,
                agent.curr_eval_num_steps,
            ),
        )
diff --git a/cneuromax/fitting/neuroevolution/utils/__init__.py b/cneuromax/fitting/neuroevolution/utils/__init__.py
new file mode 100644
index 00000000..09995909
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/__init__.py
@@ -0,0 +1 @@
+r""":mod:`cneuromax.fitting.neuroevolution`\-wide utilities."""
diff --git a/cneuromax/fitting/neuroevolution/utils/compute.py b/cneuromax/fitting/neuroevolution/utils/compute.py
new file mode 100644
index 00000000..3444683d
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/compute.py
@@ -0,0 +1,223 @@
+"""Not agent-based computation functions for Neuroevolution fitting."""
+import logging
+import pickle
+import time
+from typing import Annotated as An
+
+import numpy as np
+
+from cneuromax.fitting.neuroevolution.agent import BaseAgent
+from cneuromax.fitting.neuroevolution.utils.type import (
+ Fitnesses_and_num_env_steps_batch_type,
+ Generation_results_batch_type,
+ Generation_results_type,
+ Seeds_type,
+)
+from cneuromax.utils.beartype import ge
+from cneuromax.utils.mpi4py import get_mpi_variables
+
+
def compute_generation_results(
    generation_results: Generation_results_type | None,
    generation_results_batch: Generation_results_batch_type,
    fitnesses_and_num_env_steps_batch: Fitnesses_and_num_env_steps_batch_type,
    agents_batch: list[list[BaseAgent]],
    num_pops: An[int, ge(1)],
) -> None:
    """Fills the :paramref:`generation_results` array with results.

    Copies the fitnesses & number of environment steps from
    :paramref:`fitnesses_and_num_env_steps_batch` into this process'
    sub-array, records each agent's pickled size, then gathers all
    sub-arrays onto the primary process.

    Args:
        generation_results: An array maintained solely by the\
            primary process (secondary processes have this variable\
            set to ``None``) containing several pieces of information\
            about the results of a given generation. The 3rd\
            dimension contains the following information at the\
            following indices: 0) Agent fitness, 1) Number of\
            environment steps taken by the agent during the\
            evaluation, 2) Size of the agent when serialized.
        generation_results_batch: A sub-array of\
            :paramref:`generation_results` maintained by the process\
            calling this function.
        fitnesses_and_num_env_steps_batch: The output values of\
            the evaluation performed in :func:`.evaluate_on_cpu`\
            or :func:`.evaluate_on_gpu` on the agents maintained\
            by the process calling this function.
        agents_batch: A 2D list of agents maintained by the process\
            calling this function.
        num_pops: See :meth:`~.BaseSpace.num_pops`.
    """
    comm, _, _ = get_mpi_variables()
    # Slots 0 & 1 of the 3rd dimension: fitness & environment steps.
    generation_results_batch[:, :, 0:2] = fitnesses_and_num_env_steps_batch
    # Slot 2: serialized agent size (used later to size MPI buffers).
    for row_idx, agent_row in enumerate(agents_batch):
        for pop_idx in range(num_pops):
            serialized_agent = pickle.dumps(obj=agent_row[pop_idx])
            generation_results_batch[row_idx, pop_idx, 2] = len(
                serialized_agent,
            )
    # Aggregate every process' sub-array onto the primary process.
    # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
    # (section 6) for a full example execution of the genetic algorithm.
    comm.Gather(
        sendbuf=generation_results_batch,
        recvbuf=generation_results,
    )
+
+
def compute_save_points(
    prev_num_gens: An[int, ge(0)],
    total_num_gens: An[int, ge(0)],
    save_interval: An[int, ge(0)],
    *,
    save_first_gen: bool,
) -> list[int]:  # save_points
    """Compute generations at which to save the state.

    Regular save points start at
    ``prev_num_gens + save_interval`` and repeat every
    :paramref:`save_interval` generations up to
    :paramref:`total_num_gens`. When :paramref:`save_first_gen` is
    ``True``, the first generation of this run is saved as well (it
    already is when ``save_interval == 1``).

    Args:
        prev_num_gens: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.prev_num_gens`.
        total_num_gens: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.total_num_gens`.
        save_interval: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.save_interval`.
        save_first_gen: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.save_first_gen`.

    Returns:
        A list of generations at which to save the state.
    """
    regular_points = range(
        prev_num_gens + save_interval,
        total_num_gens + 1,
        save_interval,
    )
    save_points: list[int] = list(regular_points)
    # With `save_interval == 1` the first generation is already a
    # regular save point, so only prepend it otherwise.
    if save_first_gen and save_interval != 1:
        save_points.insert(0, prev_num_gens + 1)
    return save_points
+
+
def compute_start_time_and_seeds(
    generation_results: Generation_results_type | None,
    curr_gen: An[int, ge(1)],
    num_pops: An[int, ge(1)],
    pop_size: An[int, ge(1)],
    *,
    pop_merge: bool,
) -> tuple[float | None, Seeds_type | None]: # start_time, seeds
    """Compute the start time and seeds for the current generation.

    Fetches the start time and generates the seeds for the current\
    generation. If :paramref:`pop_merge` is ``True``, the seeds are\
    shared between the populations.

    Args:
        generation_results: See\
            :paramref:`~compute_generation_results.generation_results`.
        curr_gen: See :paramref:`~.BaseSpace.curr_gen`.
        num_pops: See :meth:`~.BaseSpace.num_pops`.
        pop_size: Total number of agent per population.
        pop_merge: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.pop_merge`.

    Returns:
        * The start time for the current generation.
        * See\
            :paramref:`~.update_exchange_and_mutate_info.seeds`.
    """
    comm, rank, size = get_mpi_variables()
    # Seed the global NumPy RNG on *every* process so that any
    # `np.random` usage this generation is reproducible; only the
    # primary process proceeds to draw the per-agent seeds below.
    np.random.seed(seed=curr_gen)
    if rank != 0:
        return None, None
    start_time = time.time()
    # One seed per (agent, population); with `pop_merge` a single
    # column is drawn and duplicated across both populations below.
    # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
    # for a full example execution of the genetic algorithm.
    # The following block is exemplified in section 1 & 8.
    seeds = np.random.randint(
        low=0,
        high=2**32,
        size=(
            pop_size,
            1 if pop_merge else num_pops,
        ),
        dtype=np.uint32,
    )
    if pop_merge:
        # Duplicate the single seed column so both populations share
        # seeds; on the first generation the second column is reversed
        # to pair agents across populations.
        # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
        # for a full example execution of the genetic algorithm.
        # The following block is exemplified in section 2 & 9.
        seeds = np.repeat(a=seeds, repeats=2, axis=1)
        if curr_gen == 1:
            seeds[:, 1] = seeds[:, 1][::-1]
    if curr_gen > 1:
        # `generation_results` is only `None` when `rank != 0`. The
        # following `assert` statement is for static type checking
        # reasons and has no execution purposes.
        assert generation_results is not None  # noqa: S101
        fitnesses = generation_results[:, :, 0]
        # `argsort` of `argsort` yields each agent's rank in the
        # fitness leaderboard of its population.
        # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
        # for a full example execution of the genetic algorithm.
        # The following block is exemplified in section 10.
        fitnesses_sorting_indices = fitnesses.argsort(axis=0)
        fitnesses_index_ranking = fitnesses_sorting_indices.argsort(axis=0)
        # Permute the seeds by fitness ranking so that a seed's row
        # position matches the agent ranking used downstream.
        # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
        # for a full example execution of the genetic algorithm.
        # The following block is exemplified in section 11.
        for j in range(num_pops):
            seeds[:, j] = seeds[:, j][fitnesses_index_ranking[:, j]]
    return start_time, seeds
+
+
def compute_total_num_env_steps_and_process_fitnesses(
    generation_results: Generation_results_type | None,
    total_num_env_steps: An[int, ge(0)] | None,
    curr_gen: An[int, ge(1)],
    start_time: float | None,
    *,
    pop_merge: bool,
) -> An[int, ge(0)] | None:  # total_num_env_steps
    """Processes the generation results.

    On the primary process: merges fitnesses across populations when
    :paramref:`pop_merge` is ``True`` (in-place, so the update is
    visible through :paramref:`generation_results`), accumulates the
    environment step count and logs generation timing & fitness
    statistics.

    Args:
        generation_results: See\
            :paramref:`~.compute_generation_results.generation_results`.
        total_num_env_steps: The total number of environment\
            steps taken by all agents during the entire experiment.\
            This variable is maintained solely by the primary process\
            (secondary processes set this to ``None``).
        curr_gen: See :paramref:`~.BaseSpace.curr_gen`.
        start_time: Generation start time.
        pop_merge: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.pop_merge`.

    Returns:
        The updated total number of environment steps (``None`` on\
        secondary processes).
    """
    _, rank, _ = get_mpi_variables()
    if rank != 0:
        return None
    # `generation_results`, `total_num_env_steps` & `start_time` are
    # only `None` when `rank != 0`. The following `assert` statements
    # are for static type checking reasons and have no execution
    # purposes.
    assert generation_results is not None  # noqa: S101
    assert total_num_env_steps is not None  # noqa: S101
    assert start_time is not None  # noqa: S101
    # `fitnesses` is a view: the `pop_merge` update below intentionally
    # writes through to `generation_results`.
    fitnesses = generation_results[:, :, 0]
    if pop_merge:
        # Each agent's fitness is summed with its paired (reversed
        # position) agent's fitness in the other population.
        # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
        # for a full example execution of the genetic algorithm.
        # The following block is exemplified in section 7.
        fitnesses[:, 0] += fitnesses[:, 1][::-1]
        fitnesses[:, 1] = fitnesses[:, 0][::-1]
    num_env_steps = generation_results[:, :, 1]
    total_num_env_steps += int(num_env_steps.sum())
    # Lazy %-style args (rather than f-strings) so formatting only
    # happens when the log level is enabled.
    logging.info("%d: %d", curr_gen, int(time.time() - start_time))
    logging.info(
        "%s\n%s\n",
        fitnesses.mean(axis=0),
        fitnesses.max(axis=0),
    )
    return total_num_env_steps
diff --git a/cneuromax/fitting/neuroevolution/utils/compute_test.py b/cneuromax/fitting/neuroevolution/utils/compute_test.py
new file mode 100644
index 00000000..c1e240a9
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/compute_test.py
@@ -0,0 +1,42 @@
+"""Tests for :mod:`~.neuroevolution.utils.compute`."""
+from cneuromax.fitting.neuroevolution.utils.compute import compute_save_points
+
+
def test_compute_save_points() -> None:
    """:func:`~.compute_save_points` tests."""
    # (prev_num_gens, total_num_gens, save_interval, save_first_gen,
    #  expected save points)
    cases = [
        (0, 10, 1, False, list(range(1, 11))),
        (0, 10, 1, True, list(range(1, 11))),
        (0, 10, 2, False, list(range(2, 11, 2))),
        (0, 10, 2, True, [1, *range(2, 11, 2)]),
        (20, 30, 2, False, list(range(22, 31, 2))),
        (20, 30, 2, True, [21, *range(22, 31, 2)]),
    ]
    for prev_num_gens, total_num_gens, save_interval, first, expected in cases:
        actual = compute_save_points(
            prev_num_gens=prev_num_gens,
            total_num_gens=total_num_gens,
            save_interval=save_interval,
            save_first_gen=first,
        )
        assert actual == expected
diff --git a/cneuromax/fitting/neuroevolution/utils/evolve.py b/cneuromax/fitting/neuroevolution/utils/evolve.py
new file mode 100644
index 00000000..e1773301
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/evolve.py
@@ -0,0 +1,161 @@
+"""Evolutionary operations for Neuroevolution fitting.
+
+The selection operation is implicit in :mod:`cneuromax`, see
+:func:`.update_exchange_and_mutate_info` for more details.
+"""
+from typing import Annotated as An
+
+import numpy as np
+from mpi4py import MPI
+
+from cneuromax.fitting.neuroevolution.agent import (
+ BaseAgent,
+)
+from cneuromax.fitting.neuroevolution.space.base import BaseSpace
+from cneuromax.fitting.neuroevolution.utils.type import (
+ Exchange_and_mutate_info_batch_type,
+ Fitnesses_and_num_env_steps_batch_type,
+)
+from cneuromax.utils.beartype import ge
+from cneuromax.utils.misc import seed_all
+from cneuromax.utils.mpi4py import get_mpi_variables
+
+
def mutate(
    agents_batch: list[list[BaseAgent]],
    exchange_and_mutate_info_batch: Exchange_and_mutate_info_batch_type,
    num_pops: int,
) -> None:
    """Mutate :paramref:`agents_batch` in place.

    Args:
        agents_batch: See\
            :paramref:`~.compute_generation_results.agents_batch`.
        exchange_and_mutate_info_batch: A sub-array of\
            :paramref:`~.update_exchange_and_mutate_info.exchange_and_mutate_info`
            maintained by this process.
        num_pops: See :meth:`~.BaseSpace.num_pops`.
    """
    # Slot 3 of the 3rd dimension holds the mutation seeds.
    seeds = exchange_and_mutate_info_batch[:, :, 3]
    for row_idx, agent_row in enumerate(agents_batch):
        for pop_idx in range(num_pops):
            # Seed the global RNGs so each mutation is deterministic.
            seed_all(seed=seeds[row_idx, pop_idx])
            # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
            # for a full example execution of the genetic algorithm.
            # The following call is exemplified in section 4 & 16.
            agent_row[pop_idx].mutate()
+
+
def evaluate_on_cpu(
    agents_batch: list[list[BaseAgent]],
    space: BaseSpace,
    curr_gen: An[int, ge(1)],
) -> (
    Fitnesses_and_num_env_steps_batch_type  # fitnesses_and_num_env_steps_batch
):
    """Evaluate :paramref:`agents_batch`.

    Args:
        agents_batch: See\
            :paramref:`~.compute_generation_results.agents_batch`.
        space: The :class:`~.BaseSpace` instance used throughout the\
            execution.
        curr_gen: See :paramref:`~.BaseSpace.curr_gen`.

    Returns:
        The output of agent evaluation performed by the process calling\
        this function on the agents it maintains\
        (:paramref:`agents_batch`). See\
        :meth:`~.BaseSpace.evaluate`.
    """
    results = np.zeros(
        shape=(len(agents_batch), space.num_pops, 2),
        dtype=np.float32,
    )
    # Seed the global RNGs so evaluation is deterministic per
    # generation.
    seed_all(seed=curr_gen)
    # Each row of agents is evaluated independently by this process.
    # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
    # for a full example execution of the genetic algorithm.
    # The following block is exemplified in section 5.
    for row_idx, agent_row in enumerate(agents_batch):
        results[row_idx] = space.evaluate(
            agents=[agent_row],
            curr_gen=curr_gen,
        )
    return results
+
+
def evaluate_on_gpu(
    ith_gpu_comm: MPI.Comm,
    agents_batch: list[list[BaseAgent]],
    space: BaseSpace,
    curr_gen: An[int, ge(1)],
    *,
    transfer: bool,
) -> (
    Fitnesses_and_num_env_steps_batch_type  # fitnesses_and_num_env_steps_batch
):
    """Gather :paramref:`agents_batch` on process subset & evaluate.

    Args:
        ith_gpu_comm: A :mod:`mpi4py` communicator used by existing CPU\
            processes to exchange agents for GPU work queueing.
        agents_batch: See\
            :paramref:`~.compute_generation_results.agents_batch`.
        space: See :paramref:`~.evaluate_on_cpu.space`.
        curr_gen: See :paramref:`~.BaseSpace.curr_gen`.
        transfer: Whether any of\
            :paramref:`~.NeuroevolutionSubtaskConfig.env_transfer`,\
            :paramref:`~.NeuroevolutionSubtaskConfig.fit_transfer`\
            or\
            :paramref:`~.NeuroevolutionSubtaskConfig.mem_transfer`\
            is ``True``.

    Returns:
        The output of agent evaluation by this process. See\
        :meth:`~.BaseSpace.evaluate`.
    """
    comm, rank, size = get_mpi_variables()
    ith_gpu_comm_rank = ith_gpu_comm.Get_rank()
    # As opposed to the CPU evaluation, agents are not evaluated on the
    # process that mutates them but instead gathered on a single process
    # that evaluates them on the GPU, before sending back their
    # fitnesses to the process that mutated them.
    # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
    # for a full example execution of the genetic algorithm.
    # The following block is exemplified in section 5.
    ith_gpu_batched_agents: list[
        list[list[BaseAgent]]
    ] | None = ith_gpu_comm.gather(sendobj=agents_batch)
    # Fix: bound on every rank. The original only assigned this name on
    # the gathering process, so the `scatter` call below raised
    # `NameError` on all other ranks whenever `transfer` was `True`.
    ith_gpu_agents_batch: list[list[BaseAgent]] = []
    if ith_gpu_comm_rank == 0:
        # `ith_gpu_batched_agents` is only `None` when
        # `ith_gpu_comm_rank != 0`. The following `assert` statement
        # is for static type checking reasons and has no execution
        # purposes.
        assert ith_gpu_batched_agents is not None  # noqa: S101
        # Flatten the gathered per-process batches into one batch.
        for gathered_agents_batch in ith_gpu_batched_agents:
            ith_gpu_agents_batch = ith_gpu_agents_batch + (
                gathered_agents_batch
            )
        seed_all(seed=curr_gen)
        ith_gpu_fitnesses_and_num_env_steps_batch = space.evaluate(
            ith_gpu_agents_batch,
            curr_gen,
        )
    # Fix: sized with this process' own batch length. The original used
    # `len(agent_batch)`, a loop variable bound only on the gathering
    # process (`NameError` elsewhere).
    fitnesses_and_num_env_steps_batch = np.empty(
        shape=(len(agents_batch), space.num_pops, 2),
        dtype=np.float32,
    )
    ith_gpu_comm.Scatter(
        sendbuf=None
        if ith_gpu_comm_rank != 0
        else ith_gpu_fitnesses_and_num_env_steps_batch,
        recvbuf=fitnesses_and_num_env_steps_batch,
    )
    # Send back the agents to their corresponding processes if
    # `transfer == True` as the agents have been modified by the
    # evaluation process.
    if transfer:
        # Prevents `agents_batch` from being overwritten.
        temp_agents_batch = ith_gpu_comm.scatter(sendobj=ith_gpu_agents_batch)
        for i in range(len(agents_batch)):
            agents_batch[i] = temp_agents_batch[i]
    return fitnesses_and_num_env_steps_batch
diff --git a/cneuromax/fitting/neuroevolution/utils/exchange.py b/cneuromax/fitting/neuroevolution/utils/exchange.py
new file mode 100644
index 00000000..468002d0
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/exchange.py
@@ -0,0 +1,177 @@
+"""Process agent exchange for Neuroevolution fitting."""
+from typing import Annotated as An
+
+import numpy as np
+from mpi4py import MPI
+
+from cneuromax.fitting.neuroevolution.agent import (
+ BaseAgent,
+)
+from cneuromax.fitting.neuroevolution.utils.type import (
+ Exchange_and_mutate_info_batch_type,
+ Exchange_and_mutate_info_type,
+ Generation_results_type,
+ Seeds_type,
+)
+from cneuromax.utils.beartype import ge, le
+from cneuromax.utils.mpi4py import get_mpi_variables
+
+
def update_exchange_and_mutate_info(
    num_pops: An[int, ge(1), le(2)],
    pop_size: An[int, ge(1)],
    exchange_and_mutate_info: Exchange_and_mutate_info_type | None,
    generation_results: Generation_results_type | None,
    seeds: Seeds_type | None,
) -> None:
    """Update the exchange and mutate information.

    The selection process of the algorithm is in some sense implicit in
    :mod:`cneuromax`. We make use of 50% truncation selection, which is
    reflected in the information stored inside
    :paramref:`exchange_and_mutate_info`.

    In some sense, the selection process of the algorithm is performed
    in this function.

    Args:
        num_pops: See :meth:`~.BaseSpace.num_pops`.
        pop_size: See\
            :paramref:`~.compute_start_time_and_seeds.pop_size`.
        exchange_and_mutate_info: An array maintained only by\
            the primary process (secondary processes set this to\
            ``None``) containing information for all processes on\
            how to exchange and mutate agents. Precisions on the 3rd\
            dimension: 0) The size of the agent when serialized, 1)\
            The position of the agent paired for with the current\
            agent, 2) Whether to send or receive the agent, 3) The\
            seed to randomize the mutation and evaluation of the\
            agent.
        generation_results: See\
            :paramref:`~.compute_generation_results.generation_results`.
        seeds: The seeds to set the mutation and evaluation randomness\
            for the current generation.
    """
    _, rank, _ = get_mpi_variables()
    # Only the primary process holds the full arrays below.
    if rank != 0:
        return
    # `exchange_and_mutate_info`, `generation_results`, and `seeds` are
    # only `None` when `rank != 0`. The following `assert` statements
    # are for static type checking reasons and have no execution
    # purposes.
    assert exchange_and_mutate_info is not None  # noqa: S101
    assert generation_results is not None  # noqa: S101
    assert seeds is not None  # noqa: S101
    serialized_agent_sizes = generation_results[:, :, 2]
    fitnesses = generation_results[:, :, 0]
    # `argsort` of `argsort` yields each agent's rank in the fitness
    # leaderboard of its population.
    # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
    # for a full example execution of the genetic algorithm.
    # The following block is exemplified in section 10.
    fitnesses_sorting_indices = fitnesses.argsort(axis=0)
    fitnesses_index_ranking = fitnesses_sorting_indices.argsort(axis=0)
    # 0) MPI buffer size: a single, largest-agent size is used for all
    # exchanges so every receive buffer is large enough.
    exchange_and_mutate_info[:, :, 0] = np.max(serialized_agent_sizes)
    for j in range(num_pops):
        # Each selected/non-selected agent is paired with a
        # corresponding non-selected/selected agent. Both agents are
        # placed in the same position in the ranking sub-leaderboard of
        # selected and non-selected agents.
        # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
        # for a full example execution of the genetic algorithm.
        # The following block is exemplified in section 12.
        # (Section 11 is performed in
        # :func:`.compute_start_time_and_seeds`)
        paired_agent_ranking = (
            fitnesses_index_ranking[:, j] + pop_size // 2
        ) % pop_size
        paired_agent_position = fitnesses_sorting_indices[
            :,
            j,
        ][paired_agent_ranking]
        # 1) Agent pair position
        exchange_and_mutate_info[:, j, 1] = paired_agent_position
    # 2) Sending (1 means sending, 0 means receiving): the top half of
    # the fitness ranking sends (is selected), the bottom half receives.
    exchange_and_mutate_info[:, :, 2] = np.greater_equal(
        fitnesses_index_ranking,
        pop_size // 2,
    )  # Also section 12 (send)
    # 3) Seeds to set randomness for mutation & evaluation.
    exchange_and_mutate_info[:, :, 3] = seeds
+
+
def exchange_agents(
    num_pops: An[int, ge(1), le(2)],
    pop_size: An[int, ge(1)],
    agents_batch: list[list[BaseAgent]],
    exchange_and_mutate_info_batch: Exchange_and_mutate_info_batch_type,
) -> None:
    """Exchange agents between processes.

    Agents flagged as "sending" are shipped (non-blocking) to the
    process holding their paired agent; agents flagged as "receiving"
    are overwritten in place in :paramref:`agents_batch` by the
    received copies.

    Args:
        num_pops: See :meth:`~.BaseSpace.num_pops`.
        pop_size: See\
            :paramref:`~.compute_start_time_and_seeds.pop_size`.
        agents_batch: See\
            :paramref:`~.compute_generation_results.agents_batch`.
        exchange_and_mutate_info_batch: See\
            :paramref:`~.mutate.exchange_and_mutate_info_batch`.
    """
    comm, rank, _ = get_mpi_variables()
    # 3rd-dimension slots of the info array: 0) MPI buffer size,
    # 1) paired agent position, 2) send (1) / receive (0) flag.
    mpi_buffer_size = exchange_and_mutate_info_batch[:, :, 0]
    paired_agent_position = exchange_and_mutate_info_batch[:, :, 1]
    sending = exchange_and_mutate_info_batch[:, :, 2]
    len_agents_batch = len(agents_batch)
    # List to contain the `len_agents_batch` * `num_pops` number of
    # MPI requests created with the `isend` and `irecv` methods.
    req: list[MPI.Request] = []
    # Iterate over all agents in the batch.
    for i in range(len_agents_batch):
        for j in range(num_pops):
            # Can determine the rank of the paired process rank from the
            # `paired_agent_position` and `len_agents_batch` variables
            # (every process maintains `len_agents_batch` agents).
            # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
            # for a full example execution of the genetic algorithm.
            # The following block is exemplified in section 14.
            paired_process_rank = int(
                paired_agent_position[i, j] // len_agents_batch,
            )
            if sending[i, j] == 1:  # 1 means sending
                # Give a unique tag for this agent that the receiving
                # process will be able to match: tags encode
                # (population, global agent position).
                tag = int(pop_size * j + len_agents_batch * rank + i)
                # Send (non-blocking) the agent and append the MPI
                # request.
                # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
                # for a full example execution of the genetic algorithm.
                # The following block is exemplified in section 15.
                req.append(
                    comm.isend(
                        obj=agents_batch[i][j],
                        dest=paired_process_rank,
                        tag=tag,
                    ),
                )
            else:  # not 1 (0) means receiving
                # Give a unique tag for this agent that the sending
                # process will be able to match.
                tag = int(pop_size * j + paired_agent_position[i, j])
                # Receive (non-blocking) the agent and append the MPI
                # request.
                # NOTE(review): `buf` is passed a NumPy uint32 scalar
                # (the max serialized agent size) as the receive buffer
                # allocation size — confirm mpi4py accepts a NumPy
                # integer here.
                # See https://github.com/courtois-neuromod/cneuromax/blob/main/docs/genetic.pdf
                # for a full example execution of the genetic algorithm.
                # The following block is exemplified in section 15.
                req.append(
                    comm.irecv(
                        buf=mpi_buffer_size[i, j],
                        source=paired_process_rank,
                        tag=tag,
                    ),
                )
    # Wait for all MPI requests and retrieve a list composed of the
    # agents received from the other processes and `None` for the
    # agents that were sent.
    agent_or_none_list: list[BaseAgent | None] = MPI.Request.waitall(req)
    # Replacing existing agents with the received agents. The flat
    # request index maps back to (batch row, population) coordinates.
    for i, agent_or_none in enumerate(agent_or_none_list):
        if agent_or_none:
            agents_batch[i // num_pops][i % num_pops] = agent_or_none
diff --git a/cneuromax/fitting/neuroevolution/utils/initialize.py b/cneuromax/fitting/neuroevolution/utils/initialize.py
new file mode 100644
index 00000000..b806cc23
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/initialize.py
@@ -0,0 +1,170 @@
+"""Variable initialization for Neuroevolution fitting."""
+from functools import partial
+from typing import Annotated as An
+
+import numpy as np
+from mpi4py import MPI
+
+from cneuromax.fitting.neuroevolution.agent import (
+ BaseAgent,
+)
+from cneuromax.fitting.neuroevolution.utils.type import (
+ Exchange_and_mutate_info_batch_type,
+ Exchange_and_mutate_info_type,
+ Generation_results_batch_type,
+ Generation_results_type,
+ Seeds_batch_type,
+)
+from cneuromax.fitting.utils.hydra import get_launcher_config
+from cneuromax.utils.beartype import ge, le
+from cneuromax.utils.misc import seed_all
+from cneuromax.utils.mpi4py import get_mpi_variables
+
+
def initialize_common_variables(
    agents_per_task: An[int, ge(1)],
    num_pops: An[int, ge(1), le(2)],
) -> tuple[
    An[int, ge(1)],  # pop_size
    An[int, ge(1)],  # len_agents_batch
    Exchange_and_mutate_info_type | None,  # exchange_and_mutate_info
    Exchange_and_mutate_info_batch_type,  # exchange_and_mutate_info_batch
    Seeds_batch_type,  # seeds_batch
    Generation_results_type | None,  # generation_results
    Generation_results_batch_type,  # generation_results_batch
    An[int, ge(0)] | None,  # total_num_env_steps
]:
    """Initializes variables common to all execution modes.

    Args:
        agents_per_task: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.agents_per_task`.
        num_pops: See :meth:`~.BaseSpace.num_pops`.

    Returns:
        * See :paramref:`~.compute_start_time_and_seeds.pop_size`.
        * See :paramref:`~initialize_agents.len_agents_batch`.
        * See\
            :paramref:`~.update_exchange_and_mutate_info.exchange_and_mutate_info`.
        * See :paramref:`~.mutate.exchange_and_mutate_info_batch`.
        * An array used as a buffer by all processes to receive the\
            seeds from the primary process during the first generation\
            only.
        * See\
            :paramref:`~.compute_generation_results.generation_results`.
        * See\
            :paramref:`~.compute_generation_results.generation_results_batch`.
        * See\
            :paramref:`~.compute_total_num_env_steps_and_process_fitnesses.total_num_env_steps`.
    """
    comm, rank, size = get_mpi_variables()
    launcher_config = get_launcher_config()
    is_primary = rank == 0
    # One agent slot per (node, task, agents_per_task) triple.
    num_tasks = launcher_config.nodes * launcher_config.tasks_per_node
    pop_size = num_tasks * agents_per_task
    # Each MPI process maintains an equal share of the population.
    len_agents_batch = pop_size // size
    # Full-population arrays exist only on the primary process.
    exchange_and_mutate_info = (
        np.empty(shape=(pop_size, num_pops, 4), dtype=np.uint32)
        if is_primary
        else None
    )
    exchange_and_mutate_info_batch = np.empty(
        shape=(len_agents_batch, num_pops, 4),
        dtype=np.uint32,
    )
    seeds_batch = np.empty(
        shape=(len_agents_batch, num_pops),
        dtype=np.uint32,
    )
    generation_results_batch = np.empty(
        shape=(len_agents_batch, num_pops, 3),
        dtype=np.float32,
    )
    generation_results = (
        np.empty(shape=(pop_size, num_pops, 3), dtype=np.float32)
        if is_primary
        else None
    )
    total_num_env_steps = 0 if is_primary else None
    return (
        pop_size,
        len_agents_batch,
        exchange_and_mutate_info,
        exchange_and_mutate_info_batch,
        seeds_batch,
        generation_results,
        generation_results_batch,
        total_num_env_steps,
    )
+
+
def initialize_gpu_comm() -> MPI.Comm:
    """Initializes a communicator for GPU work queueing.

    Assuming the experiment is ran with ``N`` MPI processes &
    ``M`` GPUs, this function will create ``M`` communicators, each
    containing ``N/M`` processes. Each communicator will be used to
    gather mutated agents onto one process, which will then
    evaluate them on the GPU.

    Returns:
        See :paramref:`~.evaluate_on_gpu.ith_gpu_comm`.

    Raises:
        ValueError: If the launcher config does not specify a positive\
            number of GPUs per node.
    """
    comm, rank, size = get_mpi_variables()
    launcher_config = get_launcher_config()
    if not launcher_config.gpus_per_node:
        error_msg = (
            "The number of GPUs per node must be a positive integer "
            "in order to setup GPU work queueing."
        )
        raise ValueError(error_msg)
    # Consecutive ranks share a GPU: ranks [k*T, (k+1)*T) map to GPU k,
    # where T is the number of tasks (processes) per GPU.
    tasks_per_gpu = size // launcher_config.gpus_per_node
    gpu_idx = rank // tasks_per_gpu
    first_member_rank = gpu_idx * tasks_per_gpu
    member_ranks = list(
        range(first_member_rank, first_member_rank + tasks_per_gpu),
    )
    ith_gpu_group = comm.group.Incl(member_ranks)
    return comm.Create_group(ith_gpu_group)
+
+
def initialize_agents(
    agent: partial[BaseAgent],
    len_agents_batch: An[int, ge(1)],
    num_pops: An[int, ge(1), le(2)],
    *,
    pop_merge: bool,
) -> list[list[BaseAgent]]:  # agents_batch
    """Initializes a batch of agents.

    Args:
        agent: See :class:`~.BaseAgent`.
        len_agents_batch: The number of agents per population\
            maintained in\
            :paramref:`~.compute_generation_results.agents_batch`\
            by the process calling this function during a\
            given generation.
        num_pops: See :meth:`~.BaseSpace.num_pops`.
        pop_merge: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.pop_merge`.

    Returns:
        A 2D list of agents maintained by this process.
    """
    agents_batch: list[list[BaseAgent]] = []
    for batch_idx in range(len_agents_batch):
        agent_row: list[BaseAgent] = []
        for pop_idx in range(num_pops):
            # Seed before instantiation so each agent's initial state
            # is deterministic. NOTE(review): this seed does not
            # involve the process rank — presumably identical initial
            # agents across processes is intended; confirm.
            seed_all(len_agents_batch * batch_idx + pop_idx)
            agent_row.append(agent(pop_idx=pop_idx, pops_are_merged=pop_merge))
        agents_batch.append(agent_row)
    return agents_batch
diff --git a/cneuromax/fitting/neuroevolution/utils/readwrite.py b/cneuromax/fitting/neuroevolution/utils/readwrite.py
new file mode 100644
index 00000000..4b7bbf92
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/readwrite.py
@@ -0,0 +1,112 @@
+"""File reading and writing utilities for Neuroevolution fitting."""
+import pickle
+from pathlib import Path
+from typing import Annotated as An
+
+from cneuromax.fitting.neuroevolution.agent import (
+ BaseAgent,
+)
+from cneuromax.fitting.neuroevolution.utils.type import (
+ Generation_results_batch_type,
+ Generation_results_type,
+)
+from cneuromax.utils.beartype import ge
+from cneuromax.utils.mpi4py import get_mpi_variables
+
+
def load_state(
    prev_num_gens: An[int, ge(0)],
    len_agents_batch: An[int, ge(1)],
    output_dir: str,
) -> tuple[
    list[list[BaseAgent]],  # agents_batch
    Generation_results_type | None,  # generation_results
    An[int, ge(0)] | None,  # total_num_env_steps
]:
    """Load a previous experiment state from disk.

    The primary process unpickles the saved state and scatters each
    process' share of the agents; secondary processes only receive
    their batch.

    Args:
        prev_num_gens: See\
            :paramref:`~.NeuroevolutionSubtaskConfig.prev_num_gens`.
        len_agents_batch: See\
            :paramref:`~.initialize_agents.len_agents_batch`.
        output_dir: See\
            :paramref:`~.BaseSubtaskConfig.output_dir`.

    Returns:
        * See :paramref:`~.compute_generation_results.agents_batch`.
        * See\
            :paramref:`~.compute_generation_results.generation_results`.
        * See\
            :paramref:`~.compute_total_num_env_steps_and_process_fitnesses.total_num_env_steps`.

    Raises:
        FileNotFoundError: If no saved state exists at the expected\
            path.
    """
    comm, rank, size = get_mpi_variables()
    if rank == 0:
        path = Path(f"{output_dir}/{prev_num_gens}/state.pkl")
        if not path.exists():
            error_msg = f"No saved state found at {path}."
            raise FileNotFoundError(error_msg)
        with path.open(mode="rb") as f:
            # NOTE: `pickle.load` executes arbitrary code from the
            # file; only load state files produced by `save_state`
            # from a trusted `output_dir`.
            state = pickle.load(file=f)
        agents: list[list[BaseAgent]] = state[0]
        generation_results: Generation_results_type = state[1]
        total_num_env_steps: int = state[2]
        # Split the full agent list into one equally-sized chunk per
        # process for the scatter below.
        batched_agents: list[list[list[BaseAgent]]] = [
            agents[i * len_agents_batch : (i + 1) * len_agents_batch]
            for i in range(size)
        ]
    # `comm.scatter` argument `sendobj` is wrongly typed. `[]` is the
    # workaround for not being able to set it to `None`.
    # See https://github.com/mpi4py/mpi4py/issues/434
    agents_batch = comm.scatter(sendobj=[] if rank != 0 else batched_agents)
    # The conditional expressions below never evaluate the rank-0-only
    # names on secondary processes.
    return (
        agents_batch,
        None if rank != 0 else generation_results,
        None if rank != 0 else total_num_env_steps,
    )
+
+
def save_state(
    agents_batch: list[list[BaseAgent]],
    generation_results: Generation_results_batch_type | None,
    total_num_env_steps: An[int, ge(0)] | None,
    curr_gen: An[int, ge(1)],
    output_dir: str,
) -> None:
    """Dump the current experiment state to disk.

    Gathers every process' agents onto the primary process, which
    pickles ``[agents, generation_results, total_num_env_steps]`` to
    ``<output_dir>/<curr_gen>/state.pkl``. Secondary processes return
    after the gather.

    Args:
        agents_batch: See\
            :paramref:`~.compute_generation_results.agents_batch`.
        generation_results: See\
            :paramref:`~.compute_generation_results.generation_results`.
        total_num_env_steps: See\
            :paramref:`~.compute_total_num_env_steps_and_process_fitnesses.total_num_env_steps`.
        curr_gen: See :paramref:`~.BaseSpace.curr_gen`.
        output_dir: See\
            :paramref:`~.BaseSubtaskConfig.output_dir`.
    """
    comm, rank, _ = get_mpi_variables()
    batched_agents: list[list[list[BaseAgent]]] | None = comm.gather(
        sendobj=agents_batch,
    )
    if rank != 0:
        return
    # `batched_agents`, `generation_results`, and `total_num_env_steps`
    # are only `None` when `rank != 0`. The following `assert`
    # statements are for static type checking reasons and have no
    # execution purposes.
    assert batched_agents is not None  # noqa: S101
    assert generation_results is not None  # noqa: S101
    assert total_num_env_steps is not None  # noqa: S101
    # Flatten the gathered per-process batches into one agent list.
    agents: list[list[BaseAgent]] = []
    for agent_batch in batched_agents:
        agents = agents + agent_batch
    path = Path(f"{output_dir}/{curr_gen}/state.pkl")
    # `exist_ok=True` replaces the original `exists()`-then-`mkdir`
    # sequence, which was vulnerable to a check-then-create race.
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open(mode="wb") as f:
        pickle.dump(
            obj=[agents, generation_results, total_num_env_steps],
            file=f,
        )
diff --git a/cneuromax/fitting/neuroevolution/utils/type.py b/cneuromax/fitting/neuroevolution/utils/type.py
new file mode 100644
index 00000000..70b107af
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/type.py
@@ -0,0 +1,37 @@
+"""Typing for various Neuroevolution fitting variables."""
+import numpy as np
+from nptyping import Float32, Shape, UInt32
+
+Exchange_and_mutate_info_batch_type = np.ndarray[
+ Shape[
+ "Len_agents_batch, Num_pops, "
+ "[mpi_buffer_size, agent_pair_position, sending, seeds]"
+ ],
+ UInt32,
+]
+Exchange_and_mutate_info_type = np.ndarray[
+ Shape[
+ "Pop_size, Num_pops, "
+ "[mpi_buffer_size, agent_pair_position, sending, seeds]"
+ ],
+ UInt32,
+]
+Fitnesses_and_num_env_steps_batch_type = np.ndarray[
+ Shape["Len_agents_batch, Num_pops, [fitness, num_env_steps]"],
+ Float32,
+]
+Generation_results_batch_type = np.ndarray[
+ Shape[
+ "'Len_agents_batch', 'Num_pops', "
+ "[fitness, num_env_steps, serialized_agent_size]'"
+ ],
+ Float32,
+]
+Generation_results_type = np.ndarray[
+ Shape[
+ "Pop_size, Num_pops, [fitness, num_env_steps, serialized_agent_size]"
+ ],
+ Float32,
+]
+Seeds_type = np.ndarray[Shape["Pop_size, Num_pops"], UInt32]
+Seeds_batch_type = np.ndarray[Shape["Len_agents_batch, Num_pops"], UInt32]
diff --git a/cneuromax/fitting/neuroevolution/utils/validate.py b/cneuromax/fitting/neuroevolution/utils/validate.py
new file mode 100644
index 00000000..237b836c
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/validate.py
@@ -0,0 +1,23 @@
+"""Run validation for Neuroevolution fitting."""
+from cneuromax.fitting.neuroevolution.space.base import BaseSpace
+from cneuromax.fitting.utils.hydra import get_launcher_config
+
+
+def validate_space(space: BaseSpace, *, pop_merge: bool) -> None:
+ """Makes sure that the Space is valid given config values.
+
+ Args:
+ space: See :paramref:`~.evaluate_on_cpu.space`.
+ pop_merge: See\
+ :paramref:`~.NeuroevolutionSubtaskConfig.pop_merge`.
+ """
+ launcher_config = get_launcher_config()
+ if pop_merge and space.num_pops != 2: # noqa: PLR2004
+ error_msg = "`pop_merge = True` requires `num_pops = 2`."
+ raise ValueError(error_msg)
+ if not launcher_config.gpus_per_node and space.evaluates_on_gpu:
+ error_msg = (
+ "GPU evaluation is not supported when `gpus_per_node` is not "
+ "specified in the launcher config or set to 0."
+ )
+ raise ValueError(error_msg)
diff --git a/cneuromax/fitting/neuroevolution/utils/wandb.py b/cneuromax/fitting/neuroevolution/utils/wandb.py
new file mode 100644
index 00000000..656f64a6
--- /dev/null
+++ b/cneuromax/fitting/neuroevolution/utils/wandb.py
@@ -0,0 +1,19 @@
+""":mod:`wandb` utilities for Neuroevolution fitting."""
+from collections.abc import Callable
+from typing import Any
+
+from wandb.util import generate_id
+
+from cneuromax.utils.mpi4py import get_mpi_variables
+
+
+def setup_wandb(logger: Callable[..., Any]) -> None:
+ """Sets up :mod:`wandb` logging for all MPI processes.
+
+ Args:
+ logger: See :func:`~.wandb.init`.
+ """
+ comm, rank, _ = get_mpi_variables()
+ wandb_group_id = generate_id() if rank == 0 else None
+ wandb_group_id = comm.bcast(wandb_group_id)
+ logger(group=wandb_group_id)
diff --git a/cneuromax/fitting/runner.py b/cneuromax/fitting/runner.py
new file mode 100644
index 00000000..b8703504
--- /dev/null
+++ b/cneuromax/fitting/runner.py
@@ -0,0 +1,23 @@
+""":class:`FittingTaskRunner`."""
+from hydra_zen import ZenStore
+
+from cneuromax.fitting.store import store_launcher_configs
+from cneuromax.runner import BaseTaskRunner
+
+
+class FittingTaskRunner(BaseTaskRunner):
+ """Fitting ``task`` runner."""
+
+ @classmethod
+ def store_configs(cls: type["FittingTaskRunner"], store: ZenStore) -> None:
+ """Stores structured configs.
+
+ .. warning::
+
+ Make sure to call this method if you are overriding it.
+
+ Args:
+ store: See :meth:`~.BaseTaskRunner.store_configs`.
+ """
+ super().store_configs(store)
+ store_launcher_configs(store)
diff --git a/cneuromax/fitting/store.py b/cneuromax/fitting/store.py
new file mode 100644
index 00000000..ed49a9c3
--- /dev/null
+++ b/cneuromax/fitting/store.py
@@ -0,0 +1,29 @@
+r"""Fitting :mod:`hydra-core` config storing."""
+from typing import Any
+
+from hydra_plugins.hydra_submitit_launcher.config import SlurmQueueConf
+from hydra_zen import ZenStore
+
+
+def store_launcher_configs(store: ZenStore) -> None:
+ """Stores Hydra ``hydra/launcher`` group configs.
+
+    Names: ``submitit_slurm_acan``,
+ ``submitit_slurm_acan_simexp``.
+
+ Args:
+ store: See :meth:`~.BaseTaskRunner.store_configs`.
+ """
+ store(["module load apptainer"], name="setup_apptainer_acan")
+ setup: Any = "${merge:${setup_apptainer_acan},${copy_data_commands}}"
+ python = "apptainer --nv exec ${oc.env:SCRATCH}/cneuromax.sif python"
+ store(
+ SlurmQueueConf(python=python, setup=setup),
+ group="hydra/launcher",
+ name="submitit_slurm_acan",
+ )
+ store(
+ SlurmQueueConf(account="rrg-pbellec", setup=setup),
+ group="hydra/launcher",
+ name="submitit_slurm_acan_simexp",
+ )
diff --git a/cneuromax/fitting/utils/__init__.py b/cneuromax/fitting/utils/__init__.py
new file mode 100644
index 00000000..b6df1a76
--- /dev/null
+++ b/cneuromax/fitting/utils/__init__.py
@@ -0,0 +1 @@
+"""Fitting utilities."""
diff --git a/cneuromax/fitting/utils/hydra.py b/cneuromax/fitting/utils/hydra.py
new file mode 100644
index 00000000..a59e2e0d
--- /dev/null
+++ b/cneuromax/fitting/utils/hydra.py
@@ -0,0 +1,57 @@
+""":mod:`hydra-core` utilities."""
+from hydra._internal.core_plugins.basic_launcher import (
+ BasicLauncher,
+)
+from hydra.core.hydra_config import HydraConfig
+from hydra_plugins.hydra_submitit_launcher.config import (
+ LocalQueueConf,
+ SlurmQueueConf,
+)
+from hydra_plugins.hydra_submitit_launcher.submitit_launcher import (
+ LocalLauncher,
+ SlurmLauncher,
+)
+from omegaconf import DictConfig, OmegaConf
+
+from cneuromax.utils.misc import get_path
+
+
+def get_launcher_config() -> LocalQueueConf | SlurmQueueConf:
+ """Retrieves/validates this job's :mod:`hydra-core` launcher config.
+
+ Returns:
+ The :mod:`hydra-core` launcher config.
+
+ Raises:
+ TypeError: If the launcher config is not a dict or if the\
+ launcher is not supported.
+ """
+ launcher_dict_config: DictConfig = HydraConfig.get().launcher
+ launcher_container_config = OmegaConf.to_container(
+ cfg=launcher_dict_config,
+ )
+ if not isinstance(launcher_container_config, dict):
+ raise TypeError
+ launcher_config_dict = dict(launcher_container_config)
+ if launcher_dict_config._target_ == get_path( # noqa: SLF001
+ LocalLauncher,
+ ):
+ return LocalQueueConf(**launcher_config_dict)
+ if launcher_dict_config._target_ == get_path( # noqa: SLF001
+ SlurmLauncher,
+ ):
+ return SlurmQueueConf(**launcher_config_dict)
+ if launcher_dict_config._target_ == get_path( # noqa: SLF001
+ BasicLauncher,
+ ):
+ error_msg = (
+ "`hydra/launcher: basic` (the default launcher) is not supported. "
+ "Use `override hydra/launcher: submitit_local` or "
+ "`override hydra/launcher: submitit_slurm`."
+ )
+ raise TypeError(error_msg)
+ error_msg = (
+ "Unsupported launcher: "
+ f"{launcher_dict_config._target_}" # noqa: SLF001
+ )
+ raise TypeError(error_msg)
diff --git a/cneuromax/projects/__init__.py b/cneuromax/projects/__init__.py
new file mode 100644
index 00000000..8b0c76fc
--- /dev/null
+++ b/cneuromax/projects/__init__.py
@@ -0,0 +1 @@
+"""Project repository."""
diff --git a/cneuromax/projects/classify_mnist/__init__.py b/cneuromax/projects/classify_mnist/__init__.py
new file mode 100644
index 00000000..e5039b3f
--- /dev/null
+++ b/cneuromax/projects/classify_mnist/__init__.py
@@ -0,0 +1,44 @@
+"""MNIST classification ``project``."""
+from hydra_zen import ZenStore
+
+from cneuromax.fitting.deeplearning.runner import DeepLearningTaskRunner
+from cneuromax.utils.hydra_zen import fs_builds
+
+from .datamodule import (
+ MNISTClassificationDataModule,
+ MNISTClassificationDataModuleConfig,
+)
+from .litmodule import MNISTClassificationLitModule
+
+__all__ = [
+ "TaskRunner",
+ "MNISTClassificationDataModuleConfig",
+ "MNISTClassificationDataModule",
+ "MNISTClassificationLitModule",
+]
+
+
+class TaskRunner(DeepLearningTaskRunner):
+ """MNIST classification ``task`` runner."""
+
+ @classmethod
+ def store_configs(cls: type["TaskRunner"], store: ZenStore) -> None:
+ """Stores :mod:`hydra-core` MNIST classification configs.
+
+ Args:
+ store: See :paramref:`~.BaseTaskRunner.store_configs.store`.
+ """
+ super().store_configs(store=store)
+ store(
+ fs_builds(
+ MNISTClassificationDataModule,
+ config=MNISTClassificationDataModuleConfig(),
+ ),
+ name="classify_mnist",
+ group="datamodule",
+ )
+ store(
+ fs_builds(MNISTClassificationLitModule),
+ name="classify_mnist",
+ group="litmodule",
+ )
diff --git a/cneuromax/task/classify_mnist/datamodule.py b/cneuromax/projects/classify_mnist/datamodule.py
similarity index 71%
rename from cneuromax/task/classify_mnist/datamodule.py
rename to cneuromax/projects/classify_mnist/datamodule.py
index afc50ec2..fd90522b 100644
--- a/cneuromax/task/classify_mnist/datamodule.py
+++ b/cneuromax/projects/classify_mnist/datamodule.py
@@ -1,5 +1,4 @@
-"""Datamodule & config for MNIST classification task."""
-
+""":mod:`lightning` DataModule + conf for MNIST classification task."""
from dataclasses import dataclass
from typing import Annotated as An
@@ -11,15 +10,15 @@
BaseDataModule,
BaseDataModuleConfig,
)
-from cneuromax.utils.annotations import ge, lt, one_of
+from cneuromax.utils.beartype import ge, lt, one_of
@dataclass
class MNISTClassificationDataModuleConfig(BaseDataModuleConfig):
- """.
+ """Configuration for :class:`MNISTClassificationDataModule`.
- Attributes:
- val_percentage: Percentage of the training dataset to use for
+ Args:
+ val_percentage: Percentage of the training dataset to use for\
validation.
"""
@@ -27,25 +26,23 @@ class MNISTClassificationDataModuleConfig(BaseDataModuleConfig):
class MNISTClassificationDataModule(BaseDataModule):
- """.
+ """MNIST Classification :mod:`lightning` DataModule.
+
+ Args:
+ config: The instance's configuration.
Attributes:
- train_val_split (``tuple[float, float]``): The train/validation
- split (sums to ``1``).
- transform (``transforms.Compose``): The Torchvision dataset
- transformations.
+ train_val_split (`tuple[float, float]`): The train/validation\
+ split (sums to `1`).
+ transform (:class:`~transforms.Compose`): The\
+ :mod:`torchvision` dataset transformations.
"""
def __init__(
self: "MNISTClassificationDataModule",
config: MNISTClassificationDataModuleConfig,
) -> None:
- """Calls parent constructor & initializes instance attributes.
-
- Args:
- config: .
- """
- super().__init__(config)
+ super().__init__(config=config)
self.train_val_split = (
1 - config.val_percentage,
config.val_percentage,
@@ -63,7 +60,7 @@ def prepare_data(self: "MNISTClassificationDataModule") -> None:
def setup(
self: "MNISTClassificationDataModule",
- stage: An[str, one_of("fit", "test")],
+ stage: An[str, one_of("fit", "validate", "test")],
) -> None:
"""Creates the train/val/test datasets.
@@ -76,13 +73,13 @@ def setup(
train=True,
transform=self.transform,
)
- self.dataset.train, self.dataset.val = random_split(
+ self.datasets.train, self.datasets.val = random_split(
dataset=mnist_full,
lengths=self.train_val_split,
)
else: # stage == "test":
- self.dataset.test = MNIST(
+ self.datasets.test = MNIST(
root=self.config.data_dir,
train=False,
transform=self.transform,
diff --git a/cneuromax/projects/classify_mnist/datamodule_test.py b/cneuromax/projects/classify_mnist/datamodule_test.py
new file mode 100644
index 00000000..2673a929
--- /dev/null
+++ b/cneuromax/projects/classify_mnist/datamodule_test.py
@@ -0,0 +1,72 @@
+"""Tests for :mod:`~.classify_mnist.datamodule`."""
+from pathlib import Path
+
+import pytest
+from torch.utils.data import Subset
+from torchvision.datasets import MNIST
+
+from . import (
+ MNISTClassificationDataModule,
+ MNISTClassificationDataModuleConfig,
+)
+
+
+@pytest.fixture()
+def datamodule(tmp_path: Path) -> MNISTClassificationDataModule:
+ """:class:`~.MNISTClassificationDataModule` fixture.
+
+ Args:
+ tmp_path: The temporary path for the\
+ :class:`~.MNISTClassificationDataModule`.
+
+ Returns:
+ A generic :class:`~.MNISTClassificationDataModule` instance.
+ """
+ return MNISTClassificationDataModule(
+ MNISTClassificationDataModuleConfig(
+ data_dir=str(tmp_path) + "/",
+ device="cpu",
+ val_percentage=0.1,
+ ),
+ )
+
+
+def test_setup_fit(datamodule: MNISTClassificationDataModule) -> None:
+ """Tests :meth:`~.MNISTClassificationDataModule.setup` #1.
+
+ Verifies that :func:`~.MNISTClassificationDataModule.setup` behaves
+ correctly when
+ :paramref:`~.MNISTClassificationDataModule.setup.stage` is
+ ``"fit"``.
+
+ Args:
+ datamodule: A generic :class:`~.MNISTClassificationDataModule`\
+ instance, see :func:`datamodule`.
+ """
+ datamodule.prepare_data()
+ datamodule.setup(stage="fit")
+
+ assert isinstance(datamodule.datasets.train, Subset)
+ assert isinstance(datamodule.datasets.val, Subset)
+
+ assert len(datamodule.datasets.train) == 54000
+ assert len(datamodule.datasets.val) == 6000
+
+
+def test_setup_test(datamodule: MNISTClassificationDataModule) -> None:
+ """Tests :meth:`~.MNISTClassificationDataModule.setup` #2.
+
+ Verifies that :func:`~.MNISTClassificationDataModule.setup` behaves
+ correctly when
+ :paramref:`~.MNISTClassificationDataModule.setup.stage` is
+ ``"test"``.
+
+ Args:
+ datamodule: A generic :class:`~.MNISTClassificationDataModule`\
+ instance, see :func:`datamodule`.
+ """
+ datamodule.prepare_data()
+ datamodule.setup(stage="test")
+
+ assert isinstance(datamodule.datasets.test, MNIST)
+ assert len(datamodule.datasets.test) == 10000
diff --git a/cneuromax/projects/classify_mnist/litmodule.py b/cneuromax/projects/classify_mnist/litmodule.py
new file mode 100644
index 00000000..5eea736f
--- /dev/null
+++ b/cneuromax/projects/classify_mnist/litmodule.py
@@ -0,0 +1,34 @@
+""":class:`MNISTClassificationLitModule`."""
+from functools import partial
+
+from torch import nn
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LRScheduler
+
+from cneuromax.fitting.deeplearning.litmodule.classification import (
+ BaseClassificationLitModule,
+ BaseClassificationLitModuleConfig,
+)
+
+
+class MNISTClassificationLitModule(BaseClassificationLitModule):
+ """MNIST Classification :mod:`lightning` Module.
+
+ Args:
+ nnmodule: See :paramref:`~.BaseLitModule.nnmodule`.
+ optimizer: See :paramref:`~.BaseLitModule.optimizer`.
+ scheduler: See :paramref:`~.BaseLitModule.scheduler`.
+ """
+
+ def __init__(
+ self: "MNISTClassificationLitModule",
+ nnmodule: nn.Module,
+ optimizer: partial[Optimizer],
+ scheduler: partial[LRScheduler],
+ ) -> None:
+ super().__init__(
+ config=BaseClassificationLitModuleConfig(num_classes=10),
+ nnmodule=nnmodule,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ )
diff --git a/cneuromax/task/classify_mnist/mlp.yaml b/cneuromax/projects/classify_mnist/task/mlp.yaml
similarity index 66%
rename from cneuromax/task/classify_mnist/mlp.yaml
rename to cneuromax/projects/classify_mnist/task/mlp.yaml
index b3ee81bb..c7145d90 100644
--- a/cneuromax/task/classify_mnist/mlp.yaml
+++ b/cneuromax/projects/classify_mnist/task/mlp.yaml
@@ -2,10 +2,7 @@
defaults:
- /datamodule: classify_mnist
- /litmodule: classify_mnist
- - /litmodule/nnmodule: mlp
- - override /hydra/launcher: submitit_local
- _self_
-
litmodule:
optimizer:
lr: 0.002
@@ -14,13 +11,6 @@ litmodule:
dims: [784, 128, 10]
activation_fn:
_target_: torch.nn.ReLU
-
-logger:
- name: mlp
- project: classify_mnist
- entity: cneuroml
-
trainer:
max_epochs: 3
-
device: gpu
diff --git a/cneuromax/projects/classify_mnist/task/mlp_beluga.yaml b/cneuromax/projects/classify_mnist/task/mlp_beluga.yaml
new file mode 100644
index 00000000..e615d14f
--- /dev/null
+++ b/cneuromax/projects/classify_mnist/task/mlp_beluga.yaml
@@ -0,0 +1,18 @@
+# @package _global_
+# Run the ``mlp`` version locally first, then copy the data over to your
+# SLURM machine. Command template:
+# scp -r MY_DATA_PATH/MNIST/ MY_USER@MY_MACHINE_IP:MY_DATA_PATH/.
+defaults:
+ - mlp
+ - override /hydra/launcher: submitit_slurm
+ - _self_
+hydra:
+ launcher:
+ gpus_per_node: 1
+ cpus_per_task: 10
+ mem_gb: 46
+ timeout_min: 15
+ account: rrg-pbellec
+ python: "/cvmfs/soft.computecanada.ca/easybuild/software/2020/Core/\
+ apptainer/1.1.8/bin/apptainer \
+ exec --nv ${oc.env:SCRATCH}/cneuromax.sif python3"
diff --git a/cneuromax/projects/neuroevorl_control/__init__.py b/cneuromax/projects/neuroevorl_control/__init__.py
new file mode 100644
index 00000000..a38c9f74
--- /dev/null
+++ b/cneuromax/projects/neuroevorl_control/__init__.py
@@ -0,0 +1,41 @@
+"""Control task neuroevolution ``project``."""
+from hydra_zen import ZenStore
+
+from cneuromax.fitting.neuroevolution.runner import NeuroevolutionTaskRunner
+from cneuromax.utils.hydra_zen import builds, fs_builds
+
+from .agent import GymAgent, GymAgentConfig
+from .space import GymReinforcementSpace, GymReinforcementSpaceConfig
+
+__all__ = [
+ "TaskRunner",
+]
+
+
+class TaskRunner(NeuroevolutionTaskRunner):
+ """MNIST classification ``task`` runner."""
+
+ @classmethod
+ def store_configs(cls: type["TaskRunner"], store: ZenStore) -> None:
+ """Stores :mod:`hydra-core` MNIST classification configs.
+
+ Args:
+ store: See :paramref:`~.BaseTaskRunner.store_configs.store`.
+ """
+ super().store_configs(store)
+ store(
+ fs_builds(
+ GymReinforcementSpace,
+ config=fs_builds(GymReinforcementSpaceConfig),
+ ),
+ name="rl_control_nevo",
+ group="space",
+ )
+ store(
+ builds(
+ GymAgent,
+ config=fs_builds(GymAgentConfig),
+ ),
+ name="rl_control_nevo",
+ group="agent",
+ )
diff --git a/cneuromax/projects/neuroevorl_control/agent.py b/cneuromax/projects/neuroevorl_control/agent.py
new file mode 100644
index 00000000..aa14c94f
--- /dev/null
+++ b/cneuromax/projects/neuroevorl_control/agent.py
@@ -0,0 +1,159 @@
+""":class:`GymAgent` & :class:`GymAgentConfig`."""
+from dataclasses import dataclass
+from typing import Annotated as An
+
+import torch
+import torch.nn.functional as f
+from jaxtyping import Float32, Int64
+from torch import Tensor
+from torchrl.data.tensor_specs import ContinuousBox
+from torchrl.envs.libs.gym import GymEnv
+
+from cneuromax.fitting.neuroevolution.agent import BaseAgent, BaseAgentConfig
+from cneuromax.fitting.neuroevolution.net.cpu.static import (
+ CPUStaticRNNFC,
+ CPUStaticRNNFCConfig,
+)
+from cneuromax.utils.beartype import ge, le, one_of
+from cneuromax.utils.torch import RunningStandardization
+
+
+@dataclass
+class GymAgentConfig(BaseAgentConfig):
+ """:class:`CPUStaticRNNFC` config values.
+
+ Args:
+ env_name: See\
+ :paramref:`~.NeuroevolutionSubtaskConfig.env_name`.
+ hidden_size: Size of the RNN hidden state.
+ mutation_std: Standard deviation of the mutation noise.
+ """
+
+ env_name: str = "${space.config.env_name}"
+ hidden_size: int = 50
+ mutation_std: float = 0.01
+
+
+class GymAgent(BaseAgent):
+ """Gym Feature-Based Control Static Agent.
+
+ Args:
+ config: See :paramref:`~BaseAgent.config`.
+ pop_idx: See :paramref:`~BaseAgent.pop_idx`.
+ pops_are_merged: See :paramref:`~BaseAgent.pops_are_merged`.
+ """
+
+ def __init__(
+ self: "GymAgent",
+ config: GymAgentConfig,
+ pop_idx: An[int, ge(0), le(1)],
+ *,
+ pops_are_merged: bool,
+ ) -> None:
+ super().__init__(
+ config=config,
+ pop_idx=pop_idx,
+ pops_are_merged=pops_are_merged,
+ )
+ self.config: GymAgentConfig
+ temp_env = GymEnv(env_name=config.env_name)
+ self.num_actions = temp_env.action_spec.shape.numel()
+ self.net = CPUStaticRNNFC(
+ config=CPUStaticRNNFCConfig(
+ input_size=temp_env.observation_spec[
+ "observation"
+ ].shape.numel(),
+ hidden_size=config.hidden_size,
+ output_size=self.num_actions,
+ ),
+ )
+ self.output_mode: An[
+ str,
+ one_of("continuous", "discrete"),
+ ] = temp_env.action_spec.domain
+ if self.output_mode == "continuous":
+ action_space: ContinuousBox = temp_env.action_spec.space
+ self.output_low = action_space.low
+ self.output_high = action_space.high
+ self.standardizer = RunningStandardization(self.net.rnn.input_size)
+
+ def mutate(self: "GymAgent") -> None:
+ """Mutates the agent."""
+ for param in self.net.parameters():
+ param.data += self.config.mutation_std * torch.randn_like(
+ input=param.data,
+ )
+
+ def reset(self: "GymAgent") -> None:
+ """Resets the agent's memory state."""
+ self.net.reset()
+
+ def __call__(
+ self: "GymAgent",
+ x: Float32[Tensor, " obs_size"],
+ ) -> Float32[Tensor, " act_size"] | Int64[Tensor, " act_size"]:
+ """Forward pass.
+
+ Args:
+ x: The input observation.
+
+ Returns:
+ The output action.
+ """
+ x: Float32[Tensor, " obs_size"] = self.env_to_net(x=x)
+ x: Float32[Tensor, " act_size"] = self.net(x=x)
+ x: Float32[
+ Tensor,
+ " act_size",
+ ] | Int64[
+ Tensor,
+ " act_size",
+ ] = self.net_to_env(x=x)
+ return x
+
+ def env_to_net(
+ self: "GymAgent",
+ x: Float32[Tensor, " obs_size"],
+ ) -> Float32[Tensor, " out_size"]:
+ """Processes the observation before feeding it to the network.
+
+ Args:
+ x: The input observation.
+
+ Returns:
+ The processed observation.
+ """
+ x: Float32[Tensor, " obs_size"] = self.standardizer(x=x)
+ return x
+
+ def net_to_env(
+ self: "GymAgent",
+ x: Float32[Tensor, " act_size"],
+ ) -> Float32[Tensor, " act_size"] | Int64[Tensor, " act_size"]:
+ """Processes the network output before feeding it to the env.
+
+ Args:
+ x: The network output.
+
+ Returns:
+ The processed network output.
+ """
+ if self.output_mode == "discrete":
+ x_d: Float32[Tensor, " act_size"] = torch.softmax(input=x, dim=0)
+ x_d: Int64[Tensor, " "] = torch.multinomial(
+ input=x_d,
+ num_samples=1,
+ ).squeeze()
+ # Turn the integer into a one-hot vector.
+ x_d: Int64[Tensor, " act_size"] = f.one_hot(
+ x_d,
+ num_classes=self.num_actions,
+ )
+ return x_d
+ else: # self.output_mode == "continuous" # noqa: RET505
+ x_c: Float32[Tensor, " act_size"] = torch.tanh(input=x)
+ x_c: Float32[Tensor, " act_size"] = (
+ x_c * (self.output_high - self.output_low) / 2
+ + (self.output_high + self.output_low) / 2
+ )
+ return x_c
diff --git a/cneuromax/projects/neuroevorl_control/space.py b/cneuromax/projects/neuroevorl_control/space.py
new file mode 100644
index 00000000..f1784879
--- /dev/null
+++ b/cneuromax/projects/neuroevorl_control/space.py
@@ -0,0 +1,38 @@
+""":class:`GymReinforcementSpace` & its config."""
+from dataclasses import dataclass
+
+from omegaconf import MISSING
+from torchrl.envs.libs.gym import GymEnv
+
+from cneuromax.fitting.neuroevolution.space import (
+ BaseReinforcementSpace,
+ BaseSpaceConfig,
+)
+
+
+@dataclass
+class GymReinforcementSpaceConfig(BaseSpaceConfig):
+ """Holds :class:`GymReinforcementSpace` config values.
+
+ Args:
+ env_name: The name of the :mod:`gymnasium` environment.
+ """
+
+ env_name: str = MISSING
+
+
+class GymReinforcementSpace(BaseReinforcementSpace):
+ """:class:`.BaseSpace` for reinforcement on :mod:`gymnasium`.
+
+ Args:
+ config: See :class:`GymReinforcementSpaceConfig`.
+ """
+
+ def __init__(
+ self: "GymReinforcementSpace",
+ config: GymReinforcementSpaceConfig,
+ ) -> None:
+ super().__init__(
+ config=config,
+ env=GymEnv(env_name=config.env_name),
+ )
diff --git a/cneuromax/projects/neuroevorl_control/task/acrobot.yaml b/cneuromax/projects/neuroevorl_control/task/acrobot.yaml
new file mode 100644
index 00000000..95f257bc
--- /dev/null
+++ b/cneuromax/projects/neuroevorl_control/task/acrobot.yaml
@@ -0,0 +1,17 @@
+# @package _global_
+defaults:
+ - /space: rl_control_nevo
+ - /agent: rl_control_nevo
+ - _self_
+
+hydra:
+ launcher:
+ tasks_per_node: 3
+
+config:
+ agents_per_task: 20
+ total_num_gens: 50
+
+space:
+ config:
+ env_name: "Acrobot-v1"
diff --git a/cneuromax/runner.py b/cneuromax/runner.py
new file mode 100644
index 00000000..e7e80425
--- /dev/null
+++ b/cneuromax/runner.py
@@ -0,0 +1,70 @@
+""":class:`BaseTaskRunner`."""
+from abc import ABC, abstractmethod
+from typing import Any, final
+
+from hydra_zen import ZenStore, zen
+
+from cneuromax.config import BaseHydraConfig
+from cneuromax.utils.hydra_zen import destructure
+from cneuromax.utils.runner import (
+ get_absolute_project_path,
+ get_project_and_task_names,
+)
+
+
+class BaseTaskRunner(ABC):
+ """``task`` runner.
+
+ Stores configs and runs the ``task``.
+
+ Attributes:
+ hydra_config: The structured :class:`hydra.HydraConf` config\
+ used during the ``task`` execution.
+ """
+
+ hydra_config = BaseHydraConfig
+
+ @final
+ @classmethod
+ def store_configs_and_run_task(cls: type["BaseTaskRunner"]) -> None:
+ """Stores various configs and runs the ``task``.
+
+ Args:
+ cls: The :class:`BaseTaskRunner` subclass calling this\
+ method.
+ """
+ store = ZenStore()
+ store(cls.hydra_config, name="config", group="hydra")
+ project_name, task_name = get_project_and_task_names()
+ store({"project": project_name}, name="project")
+ store({"task": task_name}, name="task")
+ # See https://github.com/mit-ll-responsible-ai/hydra-zen/discussions/621
+ store = store(to_config=destructure)
+ cls.store_configs(store=store)
+ store.add_to_hydra_store(overwrite_ok=True)
+ zen(cls.run_subtask).hydra_main(
+ config_path=get_absolute_project_path(),
+ config_name="config",
+ version_base=None,
+ )
+
+ @classmethod
+ @abstractmethod
+ def store_configs(cls: type["BaseTaskRunner"], store: ZenStore) -> None:
+ """Stores structured configs.
+
+ Stores the :class:`hydra.HydraConf` config.
+
+ Args:
+ cls: See :paramref:`~store_configs_and_run_task.cls`.
+ store: A :class:`hydra_zen.ZenStore` instance that manages\
+ the :mod:`hydra-core` configuration store.
+ """
+
+ @staticmethod
+ @abstractmethod
+ def run_subtask(*args: Any, **kwargs: Any) -> Any: # noqa: ANN401
+ """Runs the ``subtask`` given :paramref:`config`.
+
+ This method is meant to hold the ``subtask`` execution logic.
+ """
diff --git a/cneuromax/serving/__init__.py b/cneuromax/serving/__init__.py
index 4b0b121f..981e7372 100644
--- a/cneuromax/serving/__init__.py
+++ b/cneuromax/serving/__init__.py
@@ -1 +1 @@
-"""Serving module."""
+"""Model serving."""
diff --git a/cneuromax/store.py b/cneuromax/store.py
new file mode 100644
index 00000000..e5a7ad65
--- /dev/null
+++ b/cneuromax/store.py
@@ -0,0 +1,38 @@
+r""":mod:`cneuromax`\-wide :mod:`hydra-core` config storing."""
+from collections.abc import Callable
+from typing import Any
+
+from hydra_zen import ZenStore
+from lightning.pytorch.loggers.wandb import WandbLogger
+
+from cneuromax.utils.hydra_zen import pfs_builds
+
+
+def store_wandb_logger_configs(
+ store: ZenStore,
+ clb: Callable[..., Any],
+) -> None:
+ """Stores :mod:`hydra-core` ``logger`` group configs.
+
+ Config names: ``wandb``, ``wandb_simexp``.
+
+ Args:
+ store: See :paramref:`~.BaseTaskRunner.store_configs.store`.
+ clb: :mod:`wandb` initialization callable.
+ """
+ dir_key = "save_dir" if clb == WandbLogger else "dir"
+ base_args: dict[str, Any] = { # `fs_builds`` does not like dict[str, str]
+ "name": "${task}",
+ dir_key: "${config.output_dir}",
+ "project": "${project}",
+ }
+ store(
+ pfs_builds(clb, **base_args),
+ group="logger",
+ name="wandb",
+ )
+ store(
+ pfs_builds(clb, **base_args, entity="cneuroml"),
+ group="logger",
+ name="wandb_simexp",
+ )
diff --git a/cneuromax/task/__init__.py b/cneuromax/task/__init__.py
deleted file mode 100644
index b46429a6..00000000
--- a/cneuromax/task/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Repository of tasks."""
diff --git a/cneuromax/task/classify_mnist/__init__.py b/cneuromax/task/classify_mnist/__init__.py
deleted file mode 100644
index add1d13e..00000000
--- a/cneuromax/task/classify_mnist/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""MNIST classification task."""
-
-from hydra.core.config_store import ConfigStore
-
-from cneuromax.task.classify_mnist.datamodule import (
- MNISTClassificationDataModule,
- MNISTClassificationDataModuleConfig,
-)
-from cneuromax.task.classify_mnist.litmodule import (
- MNISTClassificationLitModule,
-)
-from cneuromax.utils.hydra import fs_builds
-
-__all__ = [
- "MNISTClassificationDataModule",
- "MNISTClassificationDataModuleConfig",
- "MNISTClassificationLitModule",
-]
-
-
-def store_configs(cs: ConfigStore) -> None:
- """Stores the MNIST classification configs.
-
- Args:
- cs: .
- """
- cs.store(
- group="datamodule",
- name="classify_mnist",
- node=fs_builds(
- MNISTClassificationDataModule,
- config=MNISTClassificationDataModuleConfig(),
- ),
- )
- cs.store(
- group="litmodule",
- name="classify_mnist",
- node=fs_builds(MNISTClassificationLitModule),
- )
diff --git a/cneuromax/task/classify_mnist/datamodule_test.py b/cneuromax/task/classify_mnist/datamodule_test.py
deleted file mode 100644
index 01b5d995..00000000
--- a/cneuromax/task/classify_mnist/datamodule_test.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""."""
-
-from pathlib import Path
-
-import pytest
-from torch.utils.data import Subset
-from torchvision.datasets import MNIST
-
-from cneuromax.task.classify_mnist import (
- MNISTClassificationDataModule,
- MNISTClassificationDataModuleConfig,
-)
-
-
-@pytest.fixture()
-def datamodule(tmp_path: Path) -> MNISTClassificationDataModule:
- """.
-
- Args:
- tmp_path: .
-
- Returns:
- A generic ``MNISTDataModule`` instance.
- """
- return MNISTClassificationDataModule(
- MNISTClassificationDataModuleConfig(
- data_dir=str(tmp_path) + "/",
- device="cpu",
- val_percentage=0.1,
- ),
- )
-
-
-def test_setup_fit(datamodule: MNISTClassificationDataModule) -> None:
- """.
-
- Args:
- datamodule: .
- """
- datamodule.prepare_data()
- datamodule.setup("fit")
-
- assert isinstance(datamodule.dataset.train, Subset)
- assert isinstance(datamodule.dataset.val, Subset)
-
- assert len(datamodule.dataset.train) == 54000
- assert len(datamodule.dataset.val) == 6000
-
-
-def test_setup_test(datamodule: MNISTClassificationDataModule) -> None:
- """.
-
- Args:
- datamodule: .
- """
- datamodule.prepare_data()
- datamodule.setup("test")
-
- assert isinstance(datamodule.dataset.test, MNIST)
- assert len(datamodule.dataset.test) == 10000
diff --git a/cneuromax/task/classify_mnist/litmodule.py b/cneuromax/task/classify_mnist/litmodule.py
deleted file mode 100644
index a3801c8f..00000000
--- a/cneuromax/task/classify_mnist/litmodule.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Lightning Module for MNIST classification."""
-
-
-from functools import partial
-
-from torch import nn
-from torch.optim import Optimizer
-from torch.optim.lr_scheduler import LRScheduler
-
-from cneuromax.fitting.deeplearning.litmodule.classification import (
- BaseClassificationLitModule,
-)
-
-
-class MNISTClassificationLitModule(BaseClassificationLitModule):
- """MNIST classification Lightning Module."""
-
- def __init__(
- self: "MNISTClassificationLitModule",
- nnmodule: nn.Module,
- optimizer: partial[Optimizer],
- scheduler: partial[LRScheduler],
- ) -> None:
- """Calls parent constructor.
-
- Args:
- nnmodule: .
- optimizer: .
- scheduler: .
- """
- super().__init__(nnmodule, optimizer, scheduler, num_classes=10)
diff --git a/cneuromax/task/classify_mnist/mlp_beluga.yaml b/cneuromax/task/classify_mnist/mlp_beluga.yaml
deleted file mode 100644
index ba2e007c..00000000
--- a/cneuromax/task/classify_mnist/mlp_beluga.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-# @package _global_
-defaults:
- - /datamodule: classify_mnist
- - /litmodule: classify_mnist
- - /litmodule/nnmodule: mlp
- - override /hydra/launcher: submitit_slurm
- - _self_
-
-hydra:
- callbacks:
- log_job_return:
- _target_: hydra.experimental.callbacks.LogJobReturnCallback
- launcher:
- gpus_per_node: 1
- cpus_per_task: 10
- mem_gb: 46
- timeout_min: 15
- account: rrg-pbellec
- python: "/cvmfs/soft.computecanada.ca/easybuild/software/2020/Core/\
- apptainer/1.1.8/bin/apptainer \
- exec --nv ${oc.env:SCRATCH}/cneuromax.sif python3"
-
-# Run the MLP.yaml version first locally, then copy the data over to Béluga
-# Example: scp -r data/example_run/MNIST/ \
-# mleclei@beluga.computecanada.ca:/scratch/mleclei/Dropbox/cneuromax/data/example_run/.
-data_dir: ${oc.env:CNEUROMAX_PATH}/data/example_run/
-
-litmodule:
- nnmodule:
- config:
- dims: [784, 128, 10]
- activation_fn:
- _target_: torch.nn.ReLU
-
-logger:
- name: mlp_beluga
- project: classify_mnist
- entity: cneuroml
-
-device: gpu
-
-trainer:
- max_epochs: 3
diff --git a/cneuromax/testing/__init__.py b/cneuromax/testing/__init__.py
new file mode 100644
index 00000000..8649b29c
--- /dev/null
+++ b/cneuromax/testing/__init__.py
@@ -0,0 +1 @@
+"""Model testing/analytics."""
diff --git a/cneuromax/utils/__init__.py b/cneuromax/utils/__init__.py
index fb76eced..f8b606ba 100644
--- a/cneuromax/utils/__init__.py
+++ b/cneuromax/utils/__init__.py
@@ -1 +1 @@
-"""Utility functions for cneuromax."""
+r""":mod:`cneuromax`\-wide utilities."""
diff --git a/cneuromax/utils/annotations.py b/cneuromax/utils/beartype.py
similarity index 52%
rename from cneuromax/utils/annotations.py
rename to cneuromax/utils/beartype.py
index 70c02f3d..d3ac1599 100644
--- a/cneuromax/utils/annotations.py
+++ b/cneuromax/utils/beartype.py
@@ -1,5 +1,4 @@
-"""Type annotations validated through Beartype."""
-
+"""Type annotations validator using :mod:`beartype`."""
from beartype.vale import Is
from beartype.vale._core._valecore import BeartypeValidator
@@ -8,7 +7,8 @@ def not_empty() -> BeartypeValidator:
"""Makes sure the string is not empty.
Returns:
- .
+ A :mod:`beartype` object that raises an exception if the\
+ annotated value does not satisfy the condition.
"""
def _not_empty(x: object) -> bool:
@@ -20,13 +20,13 @@ def _not_empty(x: object) -> bool:
def equal(element: object) -> BeartypeValidator:
- """Makes sure the value is equal to the input argument.
+ """Verifies that the annotated value is equal to the input argument.
Args:
- element: The object to compare against.
+ element: The object to compare the annotated value against.
Returns:
- .
+ See return description of :func:`not_empty`.
"""
def _equal(x: object, element: object) -> bool:
@@ -38,15 +38,16 @@ def _equal(x: object, element: object) -> bool:
def one_of(*elements: object) -> BeartypeValidator:
- """Makes sure the value is one of the input arguments.
+ """Verifies that the annotated value is one of the input arguments.
- Used to replace Typing ``Literal`` which is not supported by Hydra.
+ Used to replace :class:`typing.Literal` which is not supported by
+ :mod:`omegaconf`-based configs.
Args:
- elements: The objects to compare against.
+ elements: The objects to compare the annotated value against.
Returns:
- .
+ See return description of :func:`not_empty`.
"""
def _one_of(x: object, elements: tuple[object, ...]) -> bool:
@@ -57,36 +58,18 @@ def _one_of(x: object, elements: tuple[object, ...]) -> bool:
return Is[lambda x: _one_of(x, elements)]
-def has_keys(keys: list[str]) -> BeartypeValidator:
- """Makes sure the dictionary has the given keys.
-
- Args:
- keys: The keys to check for.
-
- Returns:
- .
- """
-
- def _has_keys(x: object, keys: list[str]) -> bool:
- if isinstance(x, dict):
- return all(key in x for key in keys)
- return False
-
- return Is[lambda x: _has_keys(x, keys)]
-
-
def ge(val: float) -> BeartypeValidator:
- """Validates greater than or equal to input argument.
+ """Verifies that the annotated value is ``> or =`` :paramref:`val`.
Args:
- val: The value to compare against.
+ val: The value to compare the annotated value against.
Returns:
- .
+ See return description of :func:`not_empty`.
"""
def _ge(x: object, val: float) -> bool:
- if isinstance(x, float) and x >= val:
+ if isinstance(x, int | float) and x >= val:
return True
return False
@@ -94,17 +77,17 @@ def _ge(x: object, val: float) -> bool:
def gt(val: float) -> BeartypeValidator:
- """Validates greater than input argument.
+ """Verifies that the annotated value is ``>`` :paramref:`val`.
Args:
- val: The value to compare against.
+ val: See :paramref:`~ge.val`.
Returns:
- .
+ See return description of :func:`not_empty`.
"""
def _gt(x: object, val: float) -> bool:
- if isinstance(x, float) and x > val:
+ if isinstance(x, int | float) and x > val:
return True
return False
@@ -112,17 +95,17 @@ def _gt(x: object, val: float) -> bool:
def le(val: float) -> BeartypeValidator:
- """Validate less than or equal to input argument.
+ """Verifies that the annotated value is ``< or =`` :paramref:`val`.
Args:
- val: The value to compare against.
+ val: See :paramref:`~ge.val`.
Returns:
- .
+ See return description of :func:`not_empty`.
"""
def _le(x: object, val: float) -> bool:
- if isinstance(x, float) and x <= val:
+ if isinstance(x, int | float) and x <= val:
return True
return False
@@ -130,17 +113,17 @@ def _le(x: object, val: float) -> bool:
def lt(val: float) -> BeartypeValidator:
- """Validate less than input argument.
+ """Verifies that the annotated value is ``<`` :paramref:`val`.
Args:
- val: The value to compare against.
+ val: See :paramref:`~ge.val`.
Returns:
- .
+ See return description of :func:`not_empty`.
"""
def _lt(x: object, val: float) -> bool:
- if isinstance(x, float) and x < val:
+ if isinstance(x, int | float) and x < val:
return True
return False
diff --git a/cneuromax/utils/gymnasium_test.py b/cneuromax/utils/gymnasium_test.py
new file mode 100644
index 00000000..a9a5936c
--- /dev/null
+++ b/cneuromax/utils/gymnasium_test.py
@@ -0,0 +1,12 @@
+""":mod:`gymnasium` tests."""
+from torchrl.envs.libs.gym import GymEnv
+
+
+def test_init_env() -> None:
+ """Test :class:`GymEnv` initialization.
+
+ Current latest version of :mod:`opencv-python` raises an error when
+ instantiating a :class:`GymEnv`. Remove this test when the issue is
+ resolved.
+ """
+ GymEnv(env_name="CartPole-v0")
diff --git a/cneuromax/utils/hydra.py b/cneuromax/utils/hydra.py
deleted file mode 100644
index c82f18da..00000000
--- a/cneuromax/utils/hydra.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Hydra-related utilities."""
-
-from collections.abc import Callable
-from typing import Any
-
-from hydra_zen import make_custom_builds_fn
-
-
-def get_path(clb: Callable[..., Any]) -> str:
- """Returns path to input class/function.
-
- Used to fill in ``_target_`` in Hydra configuration.
-
- Args:
- clb: Class or function.
-
- Returns:
- Path to class or function.
- """
- return f"{clb.__module__}.{clb.__name__}"
-
-
-fs_builds = make_custom_builds_fn( # type: ignore[var-annotated]
- populate_full_signature=True,
- hydra_convert="partial",
-)
-pfs_builds = make_custom_builds_fn( # type: ignore[var-annotated]
- zen_partial=True,
- populate_full_signature=True,
- hydra_convert="partial",
-)
diff --git a/cneuromax/utils/hydra_zen.py b/cneuromax/utils/hydra_zen.py
new file mode 100644
index 00000000..475ea176
--- /dev/null
+++ b/cneuromax/utils/hydra_zen.py
@@ -0,0 +1,74 @@
+""":mod:`hydra-zen` utilities."""
+from dataclasses import is_dataclass
+from typing import Any
+
+from hydra_zen import make_custom_builds_fn
+from hydra_zen.wrapper import default_to_config
+from omegaconf import OmegaConf
+
+builds = make_custom_builds_fn( # type: ignore[var-annotated]
+ populate_full_signature=False,
+ hydra_convert="partial",
+)
+""":mod:`hydra-zen` custom build function.
+
+Args:
+ populate_full_signature: Set to ``False``.
+ hydra_convert: Set to ``"partial"``.
+"""
+
+p_builds = make_custom_builds_fn( # type: ignore[var-annotated]
+ zen_partial=True,
+ populate_full_signature=False,
+ hydra_convert="partial",
+)
+""":mod:`hydra-zen` custom build function.
+
+Args:
+ zen_partial: Set to ``True``.
+ populate_full_signature: Set to ``False``.
+ hydra_convert: Set to ``"partial"``.
+"""
+
+fs_builds = make_custom_builds_fn( # type: ignore[var-annotated]
+ populate_full_signature=True,
+ hydra_convert="partial",
+)
+""":mod:`hydra-zen` custom build function.
+
+Args:
+ populate_full_signature: Set to ``True``.
+ hydra_convert: Set to ``"partial"``.
+"""
+
+pfs_builds = make_custom_builds_fn( # type: ignore[var-annotated]
+ zen_partial=True,
+ populate_full_signature=True,
+ hydra_convert="partial",
+)
+""":mod:`hydra-zen` custom build function.
+
+Args:
+ zen_partial: Set to ``True``.
+ populate_full_signature: Set to ``True``.
+ hydra_convert: Set to ``"partial"``.
+"""
+
+
+def destructure(x: Any) -> Any: # noqa: ANN401
+ """Disables :mod:`hydra` config type checking.
+
+    See the related :mod:`hydra-zen` GitHub discussion on disabling config type checking.
+ """
+ # apply the default auto-config logic of `store`
+ x = default_to_config(target=x)
+ if is_dataclass(obj=x):
+ # Recursively converts:
+ # dataclass -> omegaconf-dict (backed by dataclass types)
+ return OmegaConf.create(
+ obj=OmegaConf.to_container(
+ cfg=OmegaConf.create(obj=x), # type: ignore[call-overload]
+ ),
+ )
+ return x
diff --git a/cneuromax/utils/misc.py b/cneuromax/utils/misc.py
new file mode 100644
index 00000000..55ffde96
--- /dev/null
+++ b/cneuromax/utils/misc.py
@@ -0,0 +1,32 @@
+"""Miscellaneous utilities."""
+import random
+from collections.abc import Callable
+from typing import Any
+
+import numpy as np
+import torch
+
+
+def get_path(clb: Callable[..., Any]) -> str:
+ """Returns the path to the input callable.
+
+ Args:
+ clb: The callable to retrieve the path for.
+
+ Returns:
+ The full module path to :paramref:`clb`.
+ """
+ return f"{clb.__module__}.{clb.__name__}"
+
+
+def seed_all(seed: int | np.uint32) -> None:
+ """Sets the random seed for all relevant libraries.
+
+ Args:
+ seed: The random seed.
+ """
+ random.seed(a=int(seed))
+ np.random.seed(seed=seed)
+ torch.manual_seed(seed=seed)
+ torch.cuda.manual_seed(seed=int(seed))
+ torch.cuda.manual_seed_all(seed=int(seed))
diff --git a/cneuromax/utils/mpi4py.py b/cneuromax/utils/mpi4py.py
new file mode 100644
index 00000000..68800a24
--- /dev/null
+++ b/cneuromax/utils/mpi4py.py
@@ -0,0 +1,20 @@
+""":mod:`mpi4py` utilities."""
+from typing import Annotated as An
+
+from mpi4py import MPI
+
+from cneuromax.utils.beartype import ge
+
+
+def get_mpi_variables() -> tuple[MPI.Comm, An[int, ge(0)], An[int, ge(1)]]:
+ """Retrieves MPI variables from the MPI runtime.
+
+ Returns:
+ * The MPI communicator.
+ * The rank of the current process.
+ * The total number of processes.
+ """
+ comm = MPI.COMM_WORLD
+ rank = comm.Get_rank()
+ size = comm.Get_size()
+ return comm, rank, size
diff --git a/cneuromax/utils/runner.py b/cneuromax/utils/runner.py
new file mode 100644
index 00000000..afa3fe8e
--- /dev/null
+++ b/cneuromax/utils/runner.py
@@ -0,0 +1,95 @@
+""":mod:`~cneuromax.runner.BaseTaskRunner` utilities."""
+import os
+import sys
+from importlib import import_module
+from types import ModuleType
+from typing import Any
+
+
+def get_project_and_task_names() -> tuple[str, str]:
+ """Retrieves ``project`` and ``task`` from script arguments.
+
+ Raises:
+ RuntimeError: If ``project`` or ``task`` arguments are\
+ missing.
+
+ Returns:
+ The ``project`` and ``task`` names.
+ """
+ has_project_arg, has_task_arg = False, False
+ for arg in sys.argv:
+ if arg.startswith("project="):
+ has_project_arg = True
+ project_name = arg.split("=", maxsplit=1)[-1]
+ if arg.startswith("task="):
+ has_task_arg = True
+ task_name = arg.split("=", maxsplit=1)[-1]
+ if not has_project_arg:
+ error_msg = (
+ "Invalid script arguments. You must specify the "
+ "``project`` argument in the form ``project=foo``."
+ )
+ raise RuntimeError(error_msg)
+ if not has_task_arg:
+ error_msg = (
+ "Invalid script arguments. You must specify the "
+ "``task`` argument in the form ``task=bar``."
+ )
+ raise RuntimeError(error_msg)
+ return project_name, task_name
+
+
+def get_absolute_project_path() -> str:
+ """.
+
+ Returns:
+ The absolute path to the ``project`` module.
+ """
+ project_name, _ = get_project_and_task_names()
+ return f"{os.environ['CNEUROMAX_PATH']}/cneuromax/projects/{project_name}/"
+
+
+def get_project_module() -> ModuleType:
+ """Retrieves the ``project`` module.
+
+ Raises:
+ RuntimeError: If the ``project`` argument is invalid or\
+ the ``project`` module does not exist.
+
+ Returns:
+ The ``project`` module.
+ """
+ project_name, _ = get_project_and_task_names()
+ try:
+ project_module = import_module(
+ name=f"cneuromax.projects.{project_name}",
+ )
+ except ModuleNotFoundError as error:
+ error_msg = (
+ "Invalid project name. Make sure that "
+ f"`cneuromax/projects/{project_name}/__init__.py` exists."
+ )
+ raise RuntimeError(error_msg) from error
+ return project_module
+
+
+def get_task_runner_class() -> Any: # noqa: ANN401
+ """.
+
+ Raises:
+ RuntimeError: If the ``project`` module does not define a\
+ :mod:`~cneuromax.runner.BaseTaskRunner` class.
+
+ Returns:
+ The :mod:`~cneuromax.runner.BaseTaskRunner` class.
+ """
+ project_module = get_project_module()
+ try:
+ task_runner = project_module.TaskRunner
+ except AttributeError as error:
+ error_msg = (
+ "Invalid project module. The ``project`` module must "
+ "define a ``TaskRunner`` class."
+ )
+ raise RuntimeError(error_msg) from error
+ return task_runner
diff --git a/cneuromax/utils/torch.py b/cneuromax/utils/torch.py
new file mode 100644
index 00000000..4f977b1c
--- /dev/null
+++ b/cneuromax/utils/torch.py
@@ -0,0 +1,44 @@
+""":mod:`torch` utilities."""
+import torch
+from jaxtyping import Float32
+from torch import Tensor
+
+
+class RunningStandardization:
+ """Standardizes the running data.
+
+ Args:
+ x_size: Size of the input tensor.
+ """
+
+ def __init__(self: "RunningStandardization", x_size: int) -> None:
+ self.mean: Float32[Tensor, " x_size"] = torch.zeros(size=(x_size,))
+ self.var: Float32[Tensor, " x_size"] = torch.zeros(size=(x_size,))
+ self.std: Float32[Tensor, " x_size"] = torch.zeros(size=(x_size,))
+ self.n: Float32[Tensor, " 1"] = torch.zeros(size=(1,))
+
+ def __call__(
+ self: "RunningStandardization",
+ x: Float32[Tensor, " x_size"],
+ ) -> Float32[Tensor, " x_size"]:
+ """Inputs ``x``, updates attrs and returns standardized ``x``.
+
+ Args:
+ x: Input tensor.
+
+ Returns:
+ Standardized tensor.
+ """
+ self.n += torch.ones(size=(1,))
+ new_mean: Float32[Tensor, " x_size"] = (
+ self.mean + (x - self.mean) / self.n
+ )
+ new_var: Float32[Tensor, " x_size"] = self.var + (x - self.mean) * (
+ x - new_mean
+ )
+ new_std: Float32[Tensor, " x_size"] = torch.sqrt(new_var / self.n)
+ self.mean, self.var, self.std = new_mean, new_var, new_std
+ standardized_x: Float32[Tensor, " x_size"] = (x - self.mean) / (
+ self.std + self.std.eq(0)
+ )
+ return standardized_x
diff --git a/cneuromax/utils/wandb.py b/cneuromax/utils/wandb.py
new file mode 100644
index 00000000..8cfc9240
--- /dev/null
+++ b/cneuromax/utils/wandb.py
@@ -0,0 +1,24 @@
+""":mod:`wandb` utilities."""
+import logging
+import os
+from pathlib import Path
+
+import wandb
+
+
+def login_wandb() -> None:
+ """Logs in to W&B using the key stored in ``WANDB_KEY.txt``."""
+ wandb_key_path = Path(
+ str(os.environ.get("CNEUROMAX_PATH")) + "/WANDB_KEY.txt",
+ )
+ if wandb_key_path.exists():
+ with wandb_key_path.open(mode="r") as f:
+ key = f.read().strip()
+ wandb.login(key=key)
+ else:
+ logging.info(
+ "W&B key not found, proceeding without. You can retrieve your key "
+ "from `https://wandb.ai/settings` and store it in a file named "
+ "`WANDB_KEY.txt` in the root directory of the project. Discard "
+ "this message if you meant not to use W&B.",
+ )
diff --git a/docs/Contribution.rst b/docs/Contribution.rst
index 45592031..a197f2cc 100644
--- a/docs/Contribution.rst
+++ b/docs/Contribution.rst
@@ -16,14 +16,19 @@ collaborativity. The following instructions are meant for people who wish to
contribute to the code base, either by fixing bugs, adding new features or
improving the documentation.
+Regardless of whether you wish to contribute to the ``main`` branch or solely
+to your own branch, check out the ``classify_mnist`` `code folder
+`_
+for a template of how to structure your code.
+
Making sure the code doesn't break
----------------------------------
-The main branch is protected meaning that contributions happen through
+The ``main`` branch is protected meaning that contributions happen through
pull requests rather than direct pushes.
In order for any pull request to go through, it will need to pass a number of
-common and standard checks (using GitHub actions) that ensure that the code is
+common and standard checks (using GitHub Actions) that ensure that the code is
of high quality and does not break any portion of the existing code base.
.. note::
@@ -128,15 +133,16 @@ There are so far two small pain points:
- The esbonio server will sometimes announce a build error (bottom right),
which will prevent further documentation visualization. To fix this, you
- should delete the contents of the ``docs/_build`` and ``docs/autoapi``
- folders (do not delete the folders themselves if you use Dropbox/Maestral)
- and restart the esbonio server (by its icon).
+ should delete the contents of the ``docs/_build`` (do not delete the folder
+ itself if you use Dropbox/Maestral) and ``docs/_autosummary`` folders and
+ restart the esbonio server (by pressing its icon).
GitHub Copilot is installed in the DevContainer. Simply discard the log-in
notifications if you do not want to make use of it.
You can run ``git``, ``pytest`` & ``mypy`` commands from the integrated
-terminal. However running experiments requires special docker flags, so you
-should run them from the terminal outside of the DevContainer.
+terminal. However running the library itself requires special Docker flags and
+should thus be run from the terminal outside of VSCode (refer to the
+``Execution`` section).
Git/GitHub workflow for contributing
------------------------------------
@@ -194,6 +200,39 @@ your local repository.
git pull
git branch -d
+Documenting your contribution
+-----------------------------
+
+.. note::
+
+ Make sure to not leave any of your ``__init__.py`` files empty else the
+ specific subpackage will not be documented.
+
+We use `sphinx.ext.autosummary
+`_ to
+automatically generate documentation from `Google-style Python docstrings
+`_.
+This webpage holds the API reference documentation for the ``main`` branch of
+the repository and is automatically updated upon each push.
+Take a look at `this Python file
+`_
+and its `corresponding documentation webpage
+`_
+that showcase most of the available docstring commands available and their
+effects on the documentation page.
+
+.. note::
+
+ Document your ``__init__`` method arguments in the class docstring rather
+ than in the ``__init__`` docstring.
+
+Assuming that you are using the library's development Docker image in your
+editor, you can preview your changes to ``.rst`` by clicking the preview button
+on the top right of the editor. In general, you can preview your changes to all
+``.rst``, ``.py`` and ``README.md`` files after re-building the documentation
+by pressing the ``esbonio`` button on the bottom right of the editor and then
+opening the locally created ``.html`` files.
+
Setting up Maestral/Dropbox to move code across machines
-----------------------------------------------------------
@@ -245,11 +284,10 @@ all machines. On a machine with Dropbox, run:
.. code-block:: bash
- mkdir -p data/ docs/_build/ docs/autoapi/ .vscode/ .coverage
+ mkdir -p data/ docs/_build/ .vscode/ .coverage
mkdir -p .mypy_cache/ .pytest_cache/ .ruff_cache/
sudo attr -s com.dropbox.ignored -V 1 data/
sudo attr -s com.dropbox.ignored -V 1 docs/_build/
- sudo attr -s com.dropbox.ignored -V 1 docs/autoapi/
sudo attr -s com.dropbox.ignored -V 1 .vscode/
sudo attr -s com.dropbox.ignored -V 1 .coverage
sudo attr -s com.dropbox.ignored -V 1 .mypy_cache/
@@ -282,7 +320,7 @@ possible, you can delete the following:
- The ``cneuromax/serving/`` folder
- Any non-relevant folder inside ``cneuromax/task/``
- The ``docs/`` folder
-- The ``LICENSE`` file
-- The ``Containerfile`` file
+- The ``Dockerfile`` file
- Most of the contents of the ``README.md`` file
- The ``renovate.json`` file
+- The irrelevant dependencies in the ``pyproject.toml`` file
diff --git a/docs/Execution_On_a_Slurm_cluster.rst b/docs/Execution_On_a_Slurm_cluster.rst
index f6b6706b..42f35d21 100644
--- a/docs/Execution_On_a_Slurm_cluster.rst
+++ b/docs/Execution_On_a_Slurm_cluster.rst
@@ -21,7 +21,7 @@ Run a python script
apptainer exec -B /etc/passwd -B /etc/slurm/ -B /opt/software/slurm -B /usr/lib64/libmunge.so.2 \
-B /cvmfs/soft.computecanada.ca/easybuild/software/2020/Core/apptainer/1.1.8/bin/apptainer \
-B /var/run/munge/ --env LD_LIBRARY_PATH=/opt/software/slurm/lib64/slurm -B $CNEUROMAX_PATH $SCRATCH/cneuromax.sif \
- python3 -m cneuromax.fitting.deeplearning -m task=classify_mnist/mlp_beluga
+ python -m cneuromax project=classify_mnist task=mlp_beluga
Run Jupyter-lab
---------------
diff --git a/docs/Execution_On_an_Ubuntu_machine.rst b/docs/Execution_On_an_Ubuntu_machine.rst
index d29d37e8..741d2194 100644
--- a/docs/Execution_On_an_Ubuntu_machine.rst
+++ b/docs/Execution_On_an_Ubuntu_machine.rst
@@ -22,6 +22,12 @@ On an Ubuntu machine
Run a python script
-------------------
+
+.. note::
+
+ Run ``cd ${CNEUROMAX_PATH}/cneuromax`` before the following command to get
+ tab completion for the ``task`` argument.
+
.. code-block:: bash
# Example of a simple MNIST training run
@@ -29,7 +35,8 @@ Run a python script
-e PYTHONPATH=${PYTHONPATH}:${CNEUROMAX_PATH} \
-v ${CNEUROMAX_PATH}:${CNEUROMAX_PATH} -v /dev/shm:/dev/shm \
-w ${CNEUROMAX_PATH} cneuromod/cneuromax:latest \
- python3 -m cneuromax.fitting.deeplearning task=classify_mnist/mlp
+ python -m cneuromax project=classify_mnist task=mlp
+
Run a notebook
--------------
@@ -39,7 +46,7 @@ From your own machine create a SSH tunnel to the running machine.
.. code-block:: bash
# Example
- ssh mleclei@123.456.7.8 -NL 8888:localhost:8888
+ ssh MY_USER@123.456.7.8 -NL 8888:localhost:8888
Run the lab.
diff --git a/docs/Installation_On_a_Slurm_cluster.rst b/docs/Installation_On_a_Slurm_cluster.rst
index fc4930c9..e219e2a4 100644
--- a/docs/Installation_On_a_Slurm_cluster.rst
+++ b/docs/Installation_On_a_Slurm_cluster.rst
@@ -27,5 +27,5 @@ On a Slurm cluster
module load apptainer && apptainer build ${SCRATCH}/cneuromax.sif \
docker://cneuromod/cneuromax:latest
-Make sure to re-run this command whenever you modify the Containerfile
+Make sure to re-run this command whenever you modify the Dockerfile
and want to make use of the latest changes.
diff --git a/docs/__init__.py b/docs/__init__.py
index a254e1ab..535ceb2e 100644
--- a/docs/__init__.py
+++ b/docs/__init__.py
@@ -1 +1 @@
-"""Documentation for the docs package."""
+"""Documentation."""
diff --git a/docs/_static/paramlink_target_color.css b/docs/_static/paramlink_target_color.css
new file mode 100644
index 00000000..b30457f7
--- /dev/null
+++ b/docs/_static/paramlink_target_color.css
@@ -0,0 +1,3 @@
+span:target {
+ border: 3px solid olive;
+}
diff --git a/docs/_templates/module.rst b/docs/_templates/module.rst
new file mode 100644
index 00000000..cfd3a449
--- /dev/null
+++ b/docs/_templates/module.rst
@@ -0,0 +1,20 @@
+{{ name | escape | underline}}
+
+.. automodule:: {{ fullname }}
+ :members:
+
+{% block modules %}
+{% if modules %}
+
+.. rubric:: Submodules
+
+.. autosummary::
+ :toctree:
+ :template: module.rst
+ :recursive:
+ {% for item in modules %}
+ {{ item }}
+ {%- endfor %}
+
+{% endif %}
+{% endblock %}
diff --git a/docs/conf.py b/docs/conf.py
index a6c9a914..9b14789f 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,43 +1,39 @@
"""Configuration file for the Sphinx documentation builder."""
-
import sys
from pathlib import Path
-sys.path.insert(0, str(Path("..").resolve()))
-
-# -- Project information
+from sphinx.application import Sphinx
-project = "cneuromax"
-copyright = "2023, The cneuromax Authors" # noqa: A001
-author = "The cneuromax Authors"
+sys.path.insert(0, str(Path("..").resolve()))
+project = "CNeuroMax"
+copyright = "2023, The CNeuroMax Authors" # noqa: A001
+author = "The CNeuroMax Authors"
version = "0.0.1"
-
-# -- General configuration
-
extensions = [
- "autoapi.extension",
- "sphinx.ext.duration",
- "sphinx.ext.doctest",
+ "myst_parser",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
+ "sphinx.ext.viewcode",
+ "sphinx_autodoc_typehints",
"sphinx_copybutton",
+ "sphinx_paramlinks",
]
-
-intersphinx_mapping = {
- "python": ("https://docs.python.org/3/", None),
- "sphinx": ("https://www.sphinx-doc.org/en/master/", None),
+autodoc_default_options = {
+ "private-members": True,
+ "show-inheritance": True,
+ "special-members": "__main__",
}
-intersphinx_disabled_domains = ["std"]
-
+autodoc_member_order = "bysource"
+autosummary_generate = True
+html_static_path = ["_static"]
+html_theme = "furo"
+html_title = "CNeuroMax"
+paramlinks_hyperlink_param = "name"
templates_path = ["_templates"]
+typehints_defaults = "comma"
-# -- Options for HTML output
-
-html_theme = "furo"
-autoapi_type = "python"
-autoapi_dirs = ["../cneuromax"]
-autoapi_python_class_content = "both"
-autoapi_keep_files = True
+def setup(app: Sphinx) -> None: # noqa: D103
+ app.add_css_file("paramlink_target_color.css")
diff --git a/docs/genetic.pdf b/docs/genetic.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..dbb2a976a8f209139dd35af9b1b4a278cc5c9975
GIT binary patch
literal 67502
zcmbSx1z1&0*EZcHEseCabc1x4;vq!3yFogILwAR?bccX6!l7G2x>HI((0?C%;(Oos
zdB6X^{)>Ix%*3Q;Lp+PQjAdGPbT;pNt2qLOfMurYmQYNTlC!p_6R
z$Hv7A2rA0GP*!50vaz%_rIIx@wzdZZB`uunZB5xZpK-7~{84Edni)D-{_zkK8oQd4
zy@{)_DbTB?wWT3DFgPk(KpMF7FdR#JJ1IjKQz}L&!Dk%LxHzHLvuB(<0?Zs748R4*
zDchU;?;@h2Xr^{1P$RkiHrCjXiW80fxiS^IioKJqq0Qey+H(Jatolb1Jb_>
zN|IC@XzWS~5BLhILRpE56KK+upmK3?HT`FmJpZB+c<6WU|9{H(|5herY2yO8j$OtE
z@R5|MvAqc}03}m9a~BILZf+hfPEk=RXBQx>wrC!ihdy5_XvZTtiIU009__}w3`rE&ULovaMR}?B
z2%A(`fuV=q1!ez=@{{+EfhLgnV|G4M`)HT>Hq5oFv0ryrC%>G2oY?=mE^5SybQvlv
zI?nmIkz#*F`0MU+M_1+U{AA_lbzR5({jWl|b~HX#BjaC}%G!Q3Z#93?+m6=sy9m@M
z;rOKe$OOqmA`lB4Akd{{gG}gjb0{Fg$m1ObESl8D=5t8>J
zB}rj#jp$?fi=jx{v>H~v#;yKF8TAA8SugxXib7i%fpqm~8SGEofa)kGmwYnIdL5FAWEsmT
zMq83X<@|cfMBb>$BJ~Asg^b$qK!1(z+AZG|MywfUfIjj?LMJGCEMg4n2RIgtiH|rV
zuZZb=P72PmE<5Uu-6wxV$Ro4Ri&PVbeA!9T_nZ96sLFIl6)ei0l;C7j1%YhIY#g3D
zwM+>AXw$Rf9v5X#Ayuv)gKYKZy&q%VShEo)Th!DhB0qZ&b=^DH5D=Ll`sd3H#z&ux
z&+YmFZ+!Hhn$zF@Z`XiI6|^I}2aJBe$_E^UBK@rm!|fscf4lB;GA{hMC&bcsDU~l`
z=x=55?(@TcPDuT-c>ibftyubnpYPP(>qWn
zUbX=J$@m-tzExL+`gVHip}BrcacjDy7^-96GA9KRb#Ir$ppm&zCAQ+Up5yoH3fWV0
zt;nX?G7VzW^}(UX@7L|Q?eN{s$|okp(r0tzi>e8CvVNT;_j}l&r})>%zN^@0W<9KK
zPLoNkKa+p;Kjq0%jF&f=j*qKlV>Xi4&R(QBnXx^e?ahhmQL^#)Jp5_3=M??5)*T|{
zu^%;v2!>mDTdR;b;Jatda;E#-sUxp~w*OO41g&nW6HB2yq8EO#6EdcWf}^)HB8Bw7
z(0rb(P8aRr?^T~_*b365zMx4(Cg4lUZY{oopLyvuT`tQ0I36p;x~eum`_gky_Au9+
zlDXOm@4_zdD<&L&W#>>vN6M{~pLLWF%1?FDp)5=Or|PV-X5T#@sg1r>d#@7}d2&7(
zQOSX!Bn#^|~U$j5kZR2^JV+L1A}oHyH=&O8b)YO%jJ8s>lewD
z)#yt<(IFGKD!9;=)KK{<@n&v^0ERm+p8iF2&l2iW$vFr`Z%St{YWo`VeFZ_-58X1E
zt^M;SxtZT{kroFzmUjv%_rmrzG#V2mTE`T{II$)fy!Qq9%aD6*7-&BFc$lafUFGW%
zzY2+I4QV;dx63;~9tdiaa~*j_CM6ptWWkr_b>WGsCT5hWZlA6sb=$@rvd{2jU?zhe
zUO6Yl_dTV>XbGQd(9q+f;{$q*9{-E*A5sahrDG81i7GYiFe0CMzxRe1LR|Uzc83v9
zmGF_K3ZG#i+_dazh>2!Cd(BS4_qqyc>fVHR_vlsR@{gYWcPGRX2CuS35P8ut_06+lrU`V+yZ_AT
z*WNOT<}lfA{Gr{=B%$cj=uGm#1pl6UJy-)WC~O4GFs79I;_=F|lQZilBEe@`|}r&_-e*x)oDe);inR)Vd&dN0ZYx<*@L`
z38gJ#rL`PY4cixz(>Ld^@?pGu7GdalSZ6q=V%8{WX7_{HS=aK%;L%Qx-5a|6U2-s9
zR#HX_v5(m;!FM{Mo=dj=w9yY$ncpx*`@fA2jbuiNa{U@g$;9MS@=$frvfH+4u2S|`
z4o9uZQ18r_N%c@ib3FB%
zm7cO;Ip%U+d_w}ZOuhA8kp2#g%@N^e0l4T(LDSf-T%Uixr&%#g=mU)5MoaLFi8U*`v
z_&K}})T4Q@blZ6W*hnRL3JY9JNOKzw-z$1)$$;7W=0<^EJe+#lx9tbTQ*+@S`fNP%
zG~Bw8M(D7r_^A~xR6dtR+mvKK!o(U3k5&o~cYbEFy>b)b{JwWJGjs2A=>Fpg;=Erb
z2X&dv43cJ6y1soP>Q2H1-@aU#3pgq$Z1<4n77BC}zh0<8Cx6CO^3|Nbq2jfQcPP~9
zt6bibFPR0jSBRfEFJhm}M19I{4frYkYWbBs?#iv1IY4!<8gX7=k>inpJ*=_D88^PdlgF9s{J_B6Ou
zm|5{?mVHrk(qoluz?gZ~k&Jq@BmKiiU9pdbi%zw{sWoY;B9lq3&>BIopX4Ng2{DTa
zk!d=R=_Ok0?2{QnNo!7Va7P2N0$5?1(D?&CMpWAS$@eA|WZ1ol!ZSmATBG$&jY)MC
znbun+N#_-r=G39|wK^(%+6VMD6*?{Sy34_387{7PRd#1H%AKw9AihTj^u|*rHtCVd
z2{o3_c$NE$2Io_?D#NFmwJPOD)tKjkO`bE)Z5n0>+a!e9o>ePTG!FLgdd|~hFaF(_
zU6F5*Y~8=xN%9&mK0&%J^Fw-)W}!hsW|`Z5dY)kQi@DxCCEJNcLe~0ulf(JojL6ps
zHP+AEl=~|OQ_~|${}=c`eQWlP0wXScwVm_aVcv6I9~eOKASdplMuy9qvmZ>^0M
zx?n;b8?Oyp+ldB3^7{G2K|a$ng317DM>yePKm>dD`9>ZOL6D}rY&=X6^n#R_UMPr
zh~JlnkZ`MhBz%(4<#dgqQb5J*R&q4R<8%jkw
z&Xik{QmxhYoA`$6ZF(zgs+R|F3blm1OSQJFcuh+4wHwLdU61(I(r{)`M+Eb<8MqIE
zfvsxj+rO)3S>wH?+SlnJRod5~A@-pE4n+&~^@o+(o;#!U^ei5><#>(SZmlbY+azod
zjmBh1Ek7%G>JK0XPu1}M3j|d4ue-3Q%qcC^gbQ&y4rsWsm=0*rCqJu63@=n-F*Rbs
zQ%nO5_5RN$yzeY3W9t|H9Y#CXF18c|%*#ws?8iDgFCJScj|>lJtxQv`fVNprg^h|Yd$FNz%XFa^M7VC4>j8|kaUg*p!roe{qNr3}qlI^tG(e%TvlfK~{GU)_USg4J3HL%-
zjPDX)gJO@R=j{Hgz@yk>%Z-5Y%i
zbqFsJxdjvphHv5jcR+^~L%o*oRE6`!Jx%b>LAaw`Wq4@HZ9?!;gu($`%J+E4+gLDi
zi^o3zLaZW8%WoJzhw-8iAA>*eyiizxp$D-|u&;R|Zb2=jKvIUyFY-6k>}+N-SjDFp
zLS`Iu3Kvy?q_Cvy7CLkUo*@9ke9JiI9sN@l@@jZzH^4#k8bv|!=KZfB4NQR@ld}k?
zXE$O3djfm|U`0Vs9y(hAG1)hY3*sFi-eV&>a#meWfCd%C1xF!)r#h1X=g#=`1+c}i
znF86`lf6ef*QSPnt9N8oES8uz2ukNmzF1tz+^a7B2{7i-6G`>3$M7m}nF#zMcXs|O
zn4L{9-st`eFxj|@AFvq>1D+5PuJC(rtVJPQC7mk!z#uYqrQgC7B#tZj#9qs6kiJm#
zDNcE(Lt_O=QO=S@9?jYMcFR9R&rSxiRW0
zo--Yq-_Xz_80@NsCTKuDK3|KgL8&H+v}ODs(7)n(b=n{JB@r?D)}R^-q@ulDf{k^6
zGfQ0_^sIqVGwhk`KZ3yX8=RpqHPPTR>O^cH(p!>@lz%o|Nm__e(Ctk43P%>0M&*m<
zDv1nD=l?sv4{_%z{RNwF3wq*%i1jTnysrqK?Zbbr_j-2;dtgx>)Da`TBb)o3X8iuq
z?k^M(En7Lu(3M_`xK)=qngke7Q;%muciE
zSMJ%;+4)%^lM-}_t?jhr*D1n71VouJ0a}ceEgrfU@);=9D~?>QA_aP1AvZX4sA|Fv
z-x@WZNa{XiN||-)Sj(|3qvKq8?g?GW0Rqh?aKoIPWbk?%ogs~>xaXL^Y;;w^Z?FI=
zVqm1q6)rDDVY5RdvbSdC1ZE6MU!WQb3L+seZF;@fR5}si*l;%0J_9g!v1=x={PGA&iH>wBm>p!RzR)tlupbogp#i?iuZivxBs}0R->8lr0Okrpc@VQO?gvKHm-rm5
ztUsr94RTW?RIE4aWmFD6W}^yJ#7!+2kef8fXjyjylLc-TRc#>G10&);!9o3Vt_z03
zK;TII<$JFt*tUnc3U);SW{O0d4t}X*oZ!F0`fZ$KxL_hO?G{DJo1&IC!0!nIV#L=#n7Fgr7hd_*(fxuX2f64p<6)0>dr>Zo5L!9Ajma-e5Z)!Gl&q7YKG~A~
zaDnuy6hn&>6mp8WMj8_pu;hWs+@Vgzr{AYwPdhsS3L|pL_d{impwr2>Cgx$7nx|E>0?KiT<(xtIsg@K2-R5-Az%I!-Q#hE0iDwy3A
zm?Wzzc#UG}@rJcn1BAxmF3${2up!=oXB1}u4K4YKI%!PCLx@}lfAd@YMlLyU4@fSQ
z&ED!CGqqx|+lJT9(nHTWH_6!8E%8wu-oXFhIbDJIg?ah~CdawCtpKyh*#DP+p8k{?
z#io%s8xj-#LbaF;kNRVtiJVT
zgUlZg=IW3-&WIQVAA+RFwD$AX|BRE7GUy)wtWj;iO9$w~n1zKvC0DDlpsdTN3H~oM
zs2LVd^B?^87bRMV1z&BbJ5d}B1Tm;V4KdwUFQWK&AW}>
z#@19VxX1R`YO(>F3V;lh9aI|nNseRs_38b#RBn=A_uOxtS&gQsWmDx)iHQpZESB!{
zPx(YttG;k_EX-{V@kYPFN_V`gbNxcKm9pp7`i@KdrgQ6{Rbsg60BNhoEz|zm9jFa8
z0h>1iIjhw{t%IQ-pvNE(#=Aa$(0;c>udxJ+>sGG!
zV8r=flWu=V!isWJBL!<{AVsGG{-@CDrw7w8sZHLDrZY4CG%&X#S0H6`n-}
zF~EqpD$}-?T0glY@$vXO|2`z)2e7TidPu|3h$P7HQTYL(Yq=HS1$_UMf04Y38X4nP
za=lqnm38q;-GzEcf2aE}=+sLrl}-~(ES3B>_~8i6$#nlb@$P@;jNmCSbo{Nr~`d4~0YsfQyM$RO3
z*d~pB3XU6CiiXd`auDp?^S{Ts&Ve71ehycF2pk2>J06!50HjK#-?h~)TjT5ta|e5h
z+(`-J*AiVMiqiorw{?{UY$6K8D}r*E+}o#-$1j{}6yJ6}WLHTL5S;z?Z|-Qm#cSi~
z?R830wvQSlkNG#F{{S2(hTLz6^&S4(MB_i6c)4HhKLBY`ZsvY)L8<
zc3+0Jr=o^2NkU#!=$6gx_vwkQShX;YZNF2*bx(CQH@s!#XR?74U(G3V-zfC{%3Z|m
zzu{a=f-<2L9;df(%8OXmT>nxWL!(EHjQhCRai^s@XkL7IE_rt+zX{);wQXmtC&ue~
z0!EBv6{gmI79wew>QA|>FmhoivuHOil;y@-xJ5z~Q>wBNBP1R4$
zi52cB7YT0!x34H3f?C1}`$7heb!+WM)35uZ7_miv+B0^h+>eh1&NL4Mz)rDlCS
zrwzuJP8(0{M+&ugE4PxD_@4vKaJtT=4LEb!igsjK3U+$rtSyd*j4h5MHQVW0$<521
zpW$fGBUw0V>pyGB6QZ5*02wxB*?T0WOqVq$Em5mlr-QFkWAs(073Qb1HFusnJ3W4@
z8z3X@3=rg|Qxf^>h^}(`Pr;}_loXlSS#!Q5YrV4SfbS6KJc0QHe}~`=J|JfWc4Dj4
zs~N~0eSP7{Rn#luqO0HiS047}tJvjY0#H4OIF;Z7l!LD&Sak?mJBso2yw4rA4Q!-Xfd8v
zSKFwq_XxL%lvvxqBp?n^MvJAPfMC=5b5kZe!H&5it0npk!VQJ?&<%<2tDY09>(`i{
zGFTf5L^hE}$C7m>p+>>;W3JW@*Bg~qvc@%fuV%pCVVw#AmiaZlnA4)y1+40n?Bv7t
zh9vJV=omtf?zYl7azNyA}%VA&o-e
zelq?}oF>p_^s;Ls1IE5qdJb927NU<1i-ryZ52l5$*
zM$701VWRrh;2W^T_Zx8V$bS?dgFy1gLQ0J#hl)*$5^9_(4oMmIy9Fw%pc2;VIvr67
z|8miPF-}w>6KLUmZWBy)`FR4WvG&&g-vb6bH(xGF>}?F~Dz_M@o|XfF-RRN-Z{Z<~
z?@QK_(eVdsO@^mujvD*Qzh%oAxQlnw6V>&V2Ua-HB4o$#P;NT?_~p!55Gcd{w7H?f
zJTix{TSBdZ$?Q8j25_heZ?Z-B|Nzi2Mt&Oga0!kK&Bo*&sx;)VC6oN
zsPRU0{CJDAstgqJKK^MNoKV>oGs@}=@@C0jhT%hA;S2x)jrwaY23qL*h(B0G5d*b~
zN(9#9HR~G*P}QG41me{ZxT+7GGC?hbfP&RBY}5AUZ2a&yuUQuH7k1d4>_iwRJN(pZ
z4-jE5Tdg{1L7S789k`=c0{hd4rdj#Rc!}EjJ|m-<@;FVn`DHx
zQC!J=!P;YI)BLFTL&z^MAD_XFPMmqIlECQsL4Tkd94%oLgZRRnfC8`LXyE4l4R%M-
z$JZmTXWuT~M1nFw;V6kE6k`@|jv2qWyBF@f#`1Rm?*6ktmT6RvcJ;}!V+{uOQPIiC
zT}_Oha5W*~`1?*Bd~tAh7PQ#<@jZEoT%R@=&O*I;>OG9k3wPsFPOuHwAQG-ZeJRbD
zyyx2M+sWy-SRH9eU8Kcl-)${zAhV78Yv85FD91wkU%+?IB~J2cLOXuMwFM#Aas0x1
zLtgrlrGBfL)qRD~h1hlxHD~7YOW`Xk_vHm2U5bSdx-YK_WInvl-I!)%#hjdEWc6WZ
z4OuR>gBJg+M@>#D>ns@RVl6ze0Us|q%_rO2BHA!J=pwy*zZ}q)(tiH>sJ78mO&fy|
zEMdy|eMsYuu!G{Kkhfr=7OW{}3Cp&=HpWx1<&aUHxy69w^~a}|i0)WN4syPZb0X7w
zxGUnn$U9mz%5dsK_D%17bjKCzM`XRzY5hbWy)z^1vb(3s@I;A?#iS
zE%NlyOrr+c1*)ALt+RHp=dgFbszBREu(Uv@Q>F5JJ~-9BDZdfJJ&5Oqhp>!b$S3g~
zifg)j>MfOsJYt`yh*D1{tJsTy7Z;^H{qw*pI)`CFP|a9cJ#=aS`nxT
z$3*O$5RB!o-xPVk60rNPBfe;JsDc)``_ShfBf42Hc&6M1e#~@RqHBp-?|`7Lp4Ho0
zdt=eRQXfz5Z#-_z-0wJd$L#2NWn)F);DqSa(!Jaif@Ql*F|cFiM1T!-(fLf(&ywWuv1#=62Njdk36(jSc@#m2^c3y$|TBzapR
zf*ZLTU3Z9QA7lEB1N%bZ+HKncCMN2ShhLN}vpkcgA|Rw)TM(?}#Kw3?%5t;7QH%GN
zn2f1x2(pD(R%r?Qe*c#{oH5yYJ~T^?QyN!ID_@6s5<_cPlqynwE~51>{{8W_eb2;7
zmTXC_zU9V`+7`M*BX7AF8mW-7y-7`-3WX5SZ`W^~g@iL@Z3xi;>iw^n@y;c&IugUW
zd*Mi-TInLyDaabNQb80L9KkS-rd0u3_uV<&`tXqBt-R_K56AWbXWd@#fxzo|#kS_`
zsgU+w@a{teq(7MrA-dYG-#H6uN6gqjOKQIXo-laXDolJG_aXnL%g{Qc{e~K5?WOD>
z93H9`2Sn2B{i8M&?a9)L%u&JgskCr9+h%VwBY9G`oHWp6GQ^yI+Xbk;Wuwf!@9sT|
zO#@Lp!(+B7=<1BYCuhaR(uF9MVpZ
z)wz+H3elC7{>9*4xBdwD!EQr)mA7nf=5?zDD%HMZP@=u%DSw}c3@@hPQZLMJQ&G()
zhCtP}Yq9p+@O|jJx1Nkj^#N%Xo*YAa@AH$9^W%B-Iyg9sF;m!;hAkmm=y;l~91~{-
ziBqr(RZ7FyA>o8b`DhQ`d@0L{A+wdoQ6o%jfC9Td^z{tqTG`az^K?nav}(uj12D?$
z{7}Gs2Lh`pfG@N2ZJ-s;)Th9H1^ulEwkEV{nEDhuq@5zP^iQQC7@9yZ=6VcXP`+Dv
zLGN|iJ_NWjNtigv#frJOF8aZ1znvHgI58QBd0MzY7T{CpT>1nI$AmsPY`FKgK51D<
zP|xf$yn`UvP#h|@!wT$x75uANKnN=`$6xsBR5AWJT
z-mKPdx_8JmE#K?-bP#&07EiG9y~#geT5&w*xywEiNyJv`ST=f}5ikSsWv=02ql7dy
zd;buX9OPT_jT$cg^J|oyl|eDATcc3Q0%F&ZhQY)TypXO#hfj0fCaJ}WgeUUDH!Ey4
zQHL8ZBD@vI1vwjwT?+o(pbU7t_$+;~iRumdS;KEwag){jh|_X!mt|hR>LXZ(wlANX
zjj;b8-?R>HKppQ|UJvOpykZTcgdmSM!Q2vxufXl-zJOJD
zr1lSR99h$Bb@{=qQRaCX?ogsyQ84gUgegx6)l`8|^UtPPIg6;^+1&{e-Ocn*yy`2t
z^&$$3`Tq{^3PQQ{f5Os@>I^zJhj}Qr`m~@$7c48kKVp-I%UKG7+{citbJ7sttHJ~b
zxt%}2E>m1gC6|U_JOOWa2=!tMDgZujSD-o~WFG}yAOY{mmmIlOCH7w-W6S|AxfY`E
zLoQej9K6w|i**A(-r>Xj{o=k$(EUw0<5d3b;|T9-zI%Pq#q{_sJx>+9oicBF{^Ikr
zg~ZWpM|ozp9Fd9mSv}FWzH}8W-dDp;3lc)#BJ$F6L<-{FGEWdwzK^T;yG8s`qbLxM
zplehAj1C`_^s@>ef~e{Sn>UxR0;!h;qOlTR>5D98iar2`Heh4@NJeF6>KD`N;5aC)
z8gACFok+ZURgYwX~4NvA9&Y!BdsK0EAZ(ZQ*oVEq01vp6>GLPy@^xf
zDvvZ3h&Z>n9esf|p;30DAxjGeaiNz=Hxf?^K;5KKZ5-pEhP1oY>V_vp-CUUV5hEs*
z1cr&otsAyR5#L!bPPB=?)^W6%8*aKZJ_afR0P6zw07(L{CK<4%pgzbgf?z3)j;~BFyWJ3?^ZbpEC<-`pMG(E+?5^^RXQ^p?D
z@WfY!mjXw3VApQ+)Dm)|twfhq|FpO+RYAx3hNUPz)U7tvNf2CG5PlA{EjaoTYRXFr
z!7eZvJ;m0b?!*0Ru^AH3A2E`M99Ch<`7+zmBf+-FffYSpbe!BCPczcckeK2EdKLwS
z&5$v;_arhC;A9jUC?|J+1~?hap#el$nivR>p~v{x-V=nS~)d#(lxurJpqwkTh~2oO@Eh)6|2y1x~_
zD?-Z`1}vb_Y+IH1^xHq(jC&?r0$CyIlK@p4CIa(Fo0txmM}#seLKY0@G{q2d_CVzO
z%+YSfhcHVMZ~c!j*QA{RVRmwr_vGjlheHFW?!%w)f=flO{q@+-BU;Jgs_>U0Aj`=#
zXZQZ0=WFr3tz;9&30sQ1)*HAdhcp%x_J`<8-uFgQtv;#_o`@+Nv#K(uB6UZ%3Nu5x
zjP`jx$VWmJE70F0tffCEh+T5xQ~y}Sn^FZK-?NhMJDS^yPPA{nZ(WKzV@(Wui+A9J
z?5Fj9GpHHkT^XDEsYuHamfplr&waXUOOP0!I{R_()mm$B>rz)v2ICFY8~8IGD05dp
z$i2nvKN>fwcr7}~KCNo;`HRy3xI1S5V_DG8Hs^!foNFK%`R9p%KcU9AEbs);&K-76
z{;jT={CD5LFOO1RzcyPt(Mt4=r3oNI-ISvmXdUkUj@2X-a$wW$v-so^%OHm0u*P>W
z@RPavnS?&Y=GNbEvY8Y_+Mk%fjyGigbaLDGr9P-ibtTLxJTEjq5MwsYXAo-ph2c{T2-zO15%|cxK=@zMLuEz>
zSHD*l`3j0YIrV=ehQD$JG(P}If})~^?MLSh_daQ2qpaoIe+M^lOXS2qVa}AY*{5B;
zGNjP+3&UKCv(ZW0yb#~jXpFS1RxSfG{%qIc5a7$*5i}Qb#M?SJa-9H*3h@-JTvcZA
z1z_+iKA4ksP+xWnA|0d(MI&8|=^J_&AYwuY9`}qB?3kOfZLe__rbl{jk#y$SwKA
zC^pUli!c47<-M+BVdya$14LyU*0bQ3wrOIrD>X|eD1C~#eSVHe))&<483k|z|PcFE){Zcu(^w3}v;<##;^@g7d1K2!oYdrH^O4Qbc?;b0K
zV=3zkOw)z4)MQNQUvKftag*s|@q?(~vECQ2qjapjQ_;be(rveV{0a3F$-5WPaTX-8
z#L5}mQU}_&s^omMqgZm~xVon@kVoN;W76x|x~9Z4JYEX=s3*xZ>hYH^OVtfGp1i9w
znLU@XoSnZ;o2Ec@uPTeP=##6bh?9HdH{w~0%=qmJT>QMoMtC8rAMcZ4ho|Aze2oZs
zg!w`En{$fX-rZcTco|p`(ug=4Q5j{gw*(hvpP6;A`>OAWg%)C_r81KHWAr%JTb45%
z%NxJ3#lc26IwkO4bwTnFv~-#2tw@VAyEH*#N!L7_ZW#1di(YUGdgDC^#Hh3w`l|*neL{%=O
zkTtYr-Y>tCvaAtc9(Z)~MVK0brfBy9L;y>y%Dx(I9uk$+|Bm)rnYqq)_gf&rY?-=Y
zmtu{fCUvCBkaLm#p!`I77XGqo`@DRMY&VN1Zv1Zk3)%_Uy*U4vTH(P>t375kWIjQ^_V7p
zKdXFO{zBzx{B9OTQO`G!lUJPvi9(LK>t>I0{uVFNCfL|s!HE!=+_7ZmOiE!fw%1lo
z1V37=G7MKtUOz9EG}mF?-v7+z8>2L4#H5vfx9_17<3hFny6Za;{8LIriO8c!QMnVys581kqr?v$fht!N=SJ2gIS
zCPN8F>V?3fV29D*=p(#Ei(Ms+ZDyONS##~$l3VXK2*PFX@wGS5$L=EuT|a~gd4(j)
z*miwE#+dz*mkskuZC)0HMOTTb#pr13;6FKwG$%Px-stF99oBt3^i$8nzukk#L@S`;m5q+-2@Ve!^+h3f_T6^
z6=6oSk~LPrlfp-rYI+8Y`WR|Dw012518REGxRQ?W@!RLvY6dS41w;#LF{D2}QULd{
z?x1PUE)YkO=GN~GcvX7LF5q`j=1yBIIaPXe>5%p~QhKsR^J+}wfOZ;)yC5NE^QEZz
zieaW9bCcElBsIBnwIpD^NV@q)=w7Vg8mX#6qi}u5JH)o+8BnAFi&ivf+Nq#RO;XvU#wA^MEIxJ7Uf*S!-kV$%Putpg>jpVMC|2
zw$12-b*bF@xo1?cRfR#d-PV?B^`v?JbG50=6F_;Xq+z$Ad^^ItA}VVDC8RyMPcK|w
zF$A|gdE({jwQa-%;wLsQ>*q%+3Qi>LuhD5*84+bJ2nC68J{}nei#h2U_I|dFfq&9u
z?bzFF3;*5{2F`J@PrTfaY7|SH%4!kT(``5z4j$DG299&FK|ImmiYzEmIJK{16
zCx4y^bDjw+m$AcwWRyx{nLGu*rafvz72OWIBg-DO!*7i_k9a&SH}nX3%;7kIC2i#e
zmDk~vfp1+hjR_*WV}1@&T}B@nwHuPfv#*^?W~{Qp7<`sy=K-(QXXpz#xG7d*Z{@#%
zm^}ytirw$xvCt1me6R=)h{5?laOulxJbFNZ^d|40)`7)1*Ye6uN4_^N65)>q
zRV>>IkpsMpcGEf+AT8&ySqg5V--xcAvteM=Jj_%#|v35c>K7aPAw*&-32xpk8&
zoygJEE1HSyIFQhjHGMxG+#TZZIe6UT3n8kplXGQwT=Nomoa1vLTISZfP^qv-sO~pl
zPu!gn%y3cyLa)0SVQQ^;etGh^dMrOq)i#5WORE>4LzT~cg@o<3LewM6s7Ep%wP?bZ
z<)v{y0xa&r0FLWD4(9(lLlbbkN%1)zD6jIsK+A*i#|2LS!5DxWmLz{*5Yu>
zgGSjn%r^TfNVH2?e{&qdx(0N0v0ybc~$e~L1;9_1(@
z)?;?o1E);38+=<9*Xs--kQWZ)p|UKyg~VCR&Nr8@&;&3$5YdExJCC649}61|-q;br
zKQ_og=&SwKdwK&C(z64}^JvT_7NN{_xbWslBo?kxGdlGSo(u$T5I5!!3jsF{+rqlP
zZY1RLYb_EB0`*;Qfy7QLQkorj{OHW8P+a((Lt>@rWcGUfNZS4pX52Zo5i?;(9PyxB
zIZ0go<1r^%HV#~NBwE}4x`k-t@iw?{iF07Be7lgF|L7MFU&=x=VJ9_dCS2zEJeeSQ
zsiazXLwgi&42@JM>XCfbxAN_9qVj4^gP(CJxV>f@1BbH}XL#S9`l+0FFnw3kVmaeu
zoKW1$%T1{zR=Ci3nz&ndp*F6ljZWvu)T?+$gMX?)C>gG=08maM1)A5xBd2_f-0Hd)
zzzx*$#=+kY6Dpi(JjEAL*S$Ur<P3`U
z@AD6CPsI8hQFxC=>%|0*bEP3jI~D`cf$lC@3ECjshqL&H?e
zT#N)=YYLXt5Rbn$rEf)_n`ZBS&tov*6^Lm6;iOYo>;-yg!E$fxY3=QdfiOtpP)?S^
z;my4fhvi;dAg2{CN@vn#Sr>6NoiTl5aMP^2K{f>gotl9p7h)**;8hSRY}kGpT+i|)
zxC?}cW^<_&aI+x@uHoM!25UNn7W(L$^AH-tfKl;S?gFxAKyt8z?;oN28JX9-Ou)gQ
z^UX62?xy^*S1l~&e*g=f1*FolX1r^1TQ})Hpi?erj}T#hQ1pG@dRm&*=N-Q2LQ2Ta
z@ByqJ$&O{P_j{(*QVg_U)jtlX4_H@qjv_^DZI}!k|HY
zr5{LRNJ9Z7|(iPMshkE(1e$GtNJZS?y!#e~wm-69wu7Yg^BJ0Z)*$rBa}IaDE46
zQ?6e6OCO*To*EIJ_XX~ou@?=%=?@!qc&IHO_F;hL!P=0N!#)d4{oz#s@m#)ZR*+ce
ztDNa?GOj#0yF5PvzBb5dNZj2yckWO%Zq{f}
zlNQj`;oikGDU!;gS%B8BjIn1IQBNF+tT?1uK!KqWHyij{2o^&rjxdPRK+!W|I_NL?
zP&2eh&4NCI=bd?8MVNcCc=%-Vw+6N59$<F9WRLjCv1dL7Bz?;YDvBoyxiOM(ts|;?yKN!xM!3Z8
z%E(QXykcANVF1Wu0r2e$HzO7^vt>EtyHP9QK|9*<|Aw
z*>SxGgh-4yJFhY5zBYA&Cf&h;EI=))kEC<`@EziMn@Do;fTx80Mj<*p;k$FTeq8(4
z7NGS8bQ;3w^UD7B67ET9yKKX;gvR0KB&5k6C60%B2BFgd+KwOMx|+kO*Dx%3h_RFL
zv3a8n3_|OPm4v^{clPE-;c*B{>R|T!v-b5w(GaInyBcU^+L>^01A(NjfUboN*nvkF
zfZm{R2C^>OR3?tGhd#(i2?sihxC(&HBLa(-a`h>opO6R!OhiMNP0iZ4?=e{;5iUzE
z%P1_lUtP|B@EKH}VX*cXh-xh2Ow<6mEDO*tbYW+FQV=k(fkjd0YTq9#dnyPPSiz}>eJ?nGnR^HA*Dyu|JTyuz~}g7+9Ui8(r~}TB)!Ys&QIDgKtKya!NBW;e@4iuz(ryhtQ=#dcqA0
z2Lf7VV>gBYpp#4qh5yAEermB?Sr?!gGcudWA>#?Kd>Xr7uvjt=zbjOUOE%_2s!rwT
znknuOjQB|`s@0dxUcz_)x@N(*RQ
zMx~ksGv4yLhEJ303AW+cWtdtfc6)`L#j4b+bNIxDvr^x(C^nw+vJ8gp)%auA5i2?Y
zqMO|ppw18|WpkkMo=1b`gMc(Bz(JtsWZ=cJ+PP$`U7OTon3r}O_zTz}!z_;+E5;x$
zr8IzQziZU&rXAOQuO(D))5kyDAkX98G3CTJhKWGMV?6xG68ao17bi+>-c{Rr^oz54
zb-25DcQ!;Mk?ySrDSicm+PtIoFaf$w#(gpc+7DK6%esVh-QqKGOFM{8rAd`^7t&YY
zl+#)1Waq5k=q!g`xyfUztEQ|9{}CKl&}p|UzA8(LGk+9U-W=cbd@x{nnY;~B1Ke2b
zvX)`NnSUKchv>G5rO#s&&}kQA+LzRZshDcPhTukI3<$ScYpJ*p!F_}#`DL>C<>eo>
zB#PO14diWjEbh+l-*Q;fOWPu!^u`|3OM|;Sstbgf7goAHk|<>p0y1?HFvUPKadn|E
zJJ7rsq|L=?CFEg_7KxYt7tM1)X+CNJ>no7#r!82Mh4}!HEe!OrD8fJD4VRGL_eXvo
zj-J(V&87f!yTKtA$$kgT4#zBD=>t4}p8uQY1M5Eqxj%$DoBHT@_4P=Ku(Vb24UqKp
zBa@x#x-f)1a$1FLIcq~i3amof|dho>Z
zJ$W0H>8A*LKK>B$Ncn^5{}T0ZcR#I!E_v{yuMh71Z%p4ub#jdL70UFMVD$&lKbZc{
zF#}8wsDd&*3zC=Ed{eHbEKdl|IQ7Z2A?4y5QSP%^^r)D2D7K4_
zQ+L4E|CYJ|p?^r-fc*b8bpyTrKc{Ybz?A=o)D2|l6)@*>JDJ-CT@Mi?uP{~9I-;on
z?TtPUjEgHMheKJL0x*{t*f0V6uE+uL6Byv9ywDBY2Iu%9K-xv{b^YLP#Jfko`5y*x
zR9^}EVJlbrTLii@OFo2Z!~&%QDFV#t-xrKZcx9wzOpOS7Cj+oLOXL270Psc#47}|4
zW+aRRFerWY>urWcYwyis8DDf_48FT_yZf
zmj<0DuQ6pvaS|HwIPS
z8KWLy{@^U@;%Ch*o^XQ;%?g{**=U33kWjf5p{RZ392#lFVPXLnIxx6p5NOV!zZ;(F
z;ZW;Hu@gmtRBSG=zD7TIwkmbMZbO$@ylW`Rlos?+32Lzkt1`Q+*g;I1Xi6jJde
zbhMTZB)teh2?=I+v*X>Y&%T}$re$W^#%V{<+QpU1trMnEpkgG5#1rV6JWZ^ZE*j|8
zHAyxO*EJzYZU$;_G*?zEH3zmJqd6cCvOa@P1_Vi|KaK@1LA0!t&!js2=2jMf`p*KT|zifjzAJn
z+p_`R_10X2p~z{k^8F4VpohA|iE|AXTA5D3B#A2<1^{@li~)egQs#U`VA!_U4iH23
zb(s>U8ql@s0ani!X#R+4h54>7+@7M&CruZ&{d|e0&*k|Abj#H4Rr`peG8=uPhzy33
z5n0_>QJnZ@(kz**LWyQpz{>k>EGrBxLpxEd?c0!k(txlGKf;docn=_TQwx8#PgeY=
z4L8gviIZ=LLQx3QSIQATz4%h;2p7CFNtU>ip)?Um>tkT(pT`Db?=z+`thAh1KDBP_
zI=lbGmsr^yaUsoOgXdV&3WnOA2b}`5nv3sVKjf$CxL$^YKj<}Ka!!R<5KIPWpXL!E
z#x*8C+#PvvFbf+^w0A2fM;d|+hAcTWV9hq@6{I*yBY-2~#(5Mjz-4T4C!)cuaIY>M
zfFm-^DQ5AyihM9!S&btx^Rt#ER`n0|hLEDSNqOX-pSItavD(Oi+8CtrhZPxBrzB@C
zti@W0!#+fI12Z$8tMIY((s}-dG-?ybK|7PhSu%UD5tTqri2`_#)bO>C@%rt>m#z5@
zez=y8#@Y3P8TwyoJ>ViG71fi>fK;)ivZplPv`B!MPU3
zGP`o6h~nLb=*6SXwY9{N*oJcBuJncr);8}5F=|Z?yAuuR!N8%XID-H5#>ZG;_p}Ug
zQ`8T*69Wgh8K`Yh#Qk{UvS0*PdbCn2);f2;bJoaSIMNK?YG;iF?{_Ul?6m`8^`C?nBbYRRQ%t!XNQ4Q2Qs)`}H}(B0
z_TSWZ2F!^UnE`Yi5D&1*+TnkbWxUAmj2xbv5tSzF`GyW9A|IYJ-%?n#;?XI(vWYhgj
zS>V#Q9atgVt8gc2tq;!IWPiG-v-7=|7c{AVM!-EyyW_Xw?vK}
zg8SjdQ^p*)%7xzlVO#ZG*!)FaedW8nFI04V=7u%Hg)(1US^R$HnDY@53qAOi9<-!N
z{`;OjmE$~dLnF>YUTZVTK2OAMFDgi@<5<1E8m{OvmBNh&3X$^^(v-MA&Dg$GJBn!;
z#5$df(mE!jO67llBTQr&g8slw4*lh#>E-EhlI!Y9R3=emX!BCeM_+Veg#CW2q~v*TqYq>J(0oC{WQ|AghvpdeW`MBM(vm;lKz{dQ6
zGPL)Pc+2Vv`zpq(-&1!)H0?5KG%vcc_>a^E{TyVFqv
z-`1^ek*5_e^|p6z$LXjo@3tn&WVP=9X6eT;Ood@rc4(<
zI)ryNA&gNgC8un?T>P#JtPE!U6r@T`T3r6uJNuE-evrraI4gt&G)V~laECV
zjc79{E)vqNc;3;}cZz8|UmpmZCz7wW8jI7!OH~SCq@Mg?bof{
z95s%V+!XGbmw18#SgIW&Owk)jSbf^lZ^KdrOEEnyb+-v$eHSe65?)Pd&Fxz;{gpBD
z*lTNWGEaC_)N4f@H&9$R(Dw21I_penoX(|Le5>@#F5yY9u$gutRny;A>2ahW_3muJ
zX)j~v;CRp>x_U>^Eu&TF_0fE1O?4|lG8Bu4$m7@j4W7d4^tYV{@cN&f^e?YkNs?b&~ah9+t?_Qx0`3o
zvi(N6oqpBbV3XSN$Fp33a
z?NBzmJiXVdj<_;#`+Zp`>@#4B#}G|$JiZ|WYL;Ha`uB!e1!n*6qJGue66$2POFJ-{|Y((?_8dibI}wMR*_ZzaO98Z?<6ga7_?CkOTjL`v`1-b<@}Q
z72LnvigbVd<%#^nw*uCn*G@}|wNsHP#~0w(dBK8xM=zn+v6Gq@_i{;M*B*mN!P)e-
z)xImd&bMu@KARxZn{KW@h-Q)Ctr*rMehpg1m^X1-S=iWanKHz#CFXx{(7H)gXpCM=(E4gR;TyM|9#J2==
zPcI&j#R|Yd9wE#=71*0(W#ACzA7xLxU3g6WxD{Y&=9TeqH^>Is1Yr8ekE7zLM`$>!DIyUdY;xh(_$L5lgm#d#4ooc#L&bh1W
z;coRCWMw!0+$-zht7`R~v8z)8y5pA3U3ka#Js;?w-|ZN6*I8`V0T!&_^ysL6sAA!o
zUQFH1yLsaBeqE;&yw^U8-aTL!5KKXNp>L1-fR#|+GWNlJ`-Q*qW%DNb=-PBO;*g#b
z{}b!;1V+??;xnHQ^>+Y`IRfH?1Otj5%cC9ftO%cvSJ)2l|IsRnVrBfP1-8=OGL+R0@gp&cp0c)So5}hNqBp^RGS3o((~$+5y_a7v!p|m#bMFpZ$J~)S
zu8KoF`@K(NioXthdbY3{+M-|2dejyDx3$Hv-zA*){4dweqW_`J?&<1?Vzpb5v0A&p
zr2ZRP3X0znYh&^R^P4)~$DKOgr=&$+-#cjuJm8k3w8i!R-#VJMhf_1}ySt~mA;<+j
z1l8X*-KX8};d_#$uw9_%TKvE{ro7NG->7Y!w?}2TvYkok^^6S&jamBWdOv8PTv2Af
zd}ZNj<}t~lji!iq2bv&~9$7=z3^}Mvac6$gxi$ECzmab&t=#)v=Ol$7H_bb5#!gMA
zD6dU6D^ZblZYF4{IYt8^a}ihpbTZH!V{0+>c@t2E^&a)ln~v->6YMWYM4V!~FNj|w
z9K0<`P|1nZm#tA1j@X4qIi2f(t4c`%?P>BWC7L^xzl!;1#Sf}XYpiwbpARTuDT$va
z(*w!)`B?QcC3QHI{=BqRn{jy9d$=$0{Z!R5*R0B4s2R&zx8{F>*`MfQp0Hraira*o
zKg-_q?kK1&8JU0=nA+o^&L#+gTuW3~pi8`Li3xpg7P`x#2doHU+>%JsmkU(CLdQ${
z2rLOw=opVEs`!JB_n3}a+>PHn?wH$Iuc?O3j!R{OHU1K6Vc%)3-($NDB92Gyv$**n
zEq;ITmDZ_-G(LZm`yR5RjhXZ5oW(#){H24JIy({Re0j5mJxG<9v`hR`j*NJ*n@6VD
zttLP3LO$EPLeY)2eSD?(b?1OXs4pj;enw{Yb-<^$8wdHAtyAj(W4&7HM9IRW`ejHL3!O$
zTg#=lv5B*4_{b>qaQliq%Wvs(WZcCM6qA$%1Lk-1pTOMxMFsb__ndQzXj~&8eT6`5
z141hJAVp$}a=OmjK53cfwL^MB15In>6ch>Ew&$EGGq13_SjwsWH_oPVx^mR`Ynl3F
z2d!vDHH3uue}*oD8+CCWBlVt&=&68ZLryiLyFElr&|axzx%rBsm%ZPoHQH-aj3vlE?+{i=+u&MI?fB>CA>JI8)K1^rN}5Z0xYKTBu-fNVX!HN
zvY}yy1)H=I^<2RZ9W?uDRGma!kA|2owAdU${O`M0a0e{g5MIT%dAM^oJG;mKY6ZJ<
zdo_L`JJvi?YPA0rxKE=Qrm5ukeV^Gng1$lF5P0
zEAD33wH;X~HnA&xgAlf0jfZT;^xw9$-}=lb>A%Te8@t7NR?^%PHLd&y%D&cL;8>4e
z4ULRX!gv*goHz1HH6$z|Y|Ur`2D^o8DfmCO(X3H+!Fm;dV|ZgN28X4G4hDz8OI&=?
zBcUrxc!B;3DReh2idR9G1;w|w>`uthCb?b%$03;^>PDv9EULc#+eHe-Y%1Xt#GT4)
zH{Uv_thy|6M=ISnW{pVGDP6em{_X~M<^vQ3?EAFOEJ1qn_^~ac>;O&Q(?@M@im2Yr
z-~@`c;@u(XEs9p;sA=C}p@z+?_ps<_2z&+D`ko5>a66oU6GLR^7Q#K5Qe
z#Nhv!$-y4~qJ&a?1f#rzJ!s{Gr0!+zrE9A^T*3@s#
z6eEBUb~zQ3?lyf@^gJ5BHO3>SOFxfB_o)PlcJAlVsF1@(>u;ut6nbNb@1opo8uxeH
zhBd~A+V@t7B#Z0GzEH@*d|Bz~e5yvg_S+b|c4L~NpeqMIN{cyG;c0&gA_g-#d!svuUw|-!wP=_iaqBm%lZDn!bH&?CekgI09v0UBkndmlprn9Hr
z%?fVoN!!!R7@gX#F%U6rEEmf7<$RnmN?NJ!BK&F$nm*c0Y$K3?V(2z%hG|&N4_Mdq
z_${US&cf%kwF77^1cRZYX3#|@mf$FJcK(b8yW$E%KH>w(9j@!Ic=*$7QiFh1n&9oi
zJ0m}8*817dX3&Np0@%jKu*61j-&kks{QF()0P|Y{r$k^!DNO8kx)`--qf2$_4sl||
ztYG^48&hi=1MvF~i%C92B)jCTOb@7yiztfucYd#a5Pay6=B8IC70G_5)?r}c1B>S+
zb&ibJWX8>&4~J<%{`%8(%=Wa&3F9!e$zl2g>94iXyH@{54~27N?4~|$cDn4eNkOr0
z=)2+KutFrBXuzQ=PNa0Vujb1sv(u-%dKi9Hk3>lJN%K$;(YObkzDM*jImR=Au=Zj_
z#&Q(5{)nvob>nA*!%JkKfN00%W0)LR*fDJa`Pz$x9#=6Ari2$}xkG)kps|UBC>I*8
zt0=|yY2z;CD?4349f=5sR&60xRKi1Dm4T>)K-Fz5qiMt+T^0f1>f5o%D%fvYd!;b}t-5t-JwhS@Xy>vTjol+IiaB1}6-$|wlw
z-ch;Nu-p@Et_U@_SF+sfZo>!AjFg7*QZ&14t}7N9fWaRoHCulO#lQ-z8Hgf|mSV=7
zjxX(l-hgdgzC=cEYb3?OIu*m4cyZS?CZeb7kWp^tG<=jsB)#!KAc97Q`99;3_PyOA
zKXmlu5Ql*6D?Q`SjhSvZ=v5pZId-PiPgpi#@S6JGmFN4L5g`@M35>Zr7@$>0!b=>I
z?~xXM=?Y}eg!}y5A~Xf`(J;~BOZf7qG|j=z3{L2BKpMfIf3&Mo;my5yLA;!eemEuj
z{0w%qIiGaL5~SQImjK$HVS8g3rQ5vTIixZ1=^}sW9GnXeY9U{0J-UotBm3vJ+MMtnQ3Co+4(W5leDMUH--C2oVmJ5pb
z8#kXBzv0t&_j%FtKg8&)XX4+(6soB&kJ!yT8lBTj@FI9z{>ndj_yILoHFQ;<`Qsez
zSwA
z@gDGt^-5Ql$^Tl@p2dD6{KG|0UMj_f?Db}jwx;y|X8!fB3*g_;#wg$*%Krv2fs>F`
zPTSeOMHHD=M*?x=7^fjQoOh5i8$Ka67q6e;f$!)B(F_eMlUTk?3!Lb4
zb)TVM7{5NH1yg9TjE=MMYPABjo}m%36Q|uRakIB4g7lLm$J-{XIXg0+;lk(%$lS@>*{JlYE}Tn-#Cw$V03r3}X@=?_wZMs@dodGTt3Y
zet8}ouL8yEPrh9jJtil?dw@LIC6o7q_h2t-0Sa(-=9klE+awE%bw390MT8ZSNm{`T{g=F4$j1p|RE8>yFSMQ6c#(!I)BjmN+0_x(eV5XYJrZ);3WCLV6Fag(6s
zwD5`0(dbM!>=3J8Yk7C))C_FP*Cx6y9bNFBcJDCY`8PLh-FJ)vPJr_h
zW7~-&-}4O{a1sJKqdg0BzT^Ogh+?>rjTZj19#a2}`HTxIk>COU`{xrkDsu3@;QF%|
z9W=(HCj%QlAI)bWu;BLwA)67+s&xQ*qYfO{)?`%R%*-@)1KPb=hu#2zyUnPvV53UA
z7nbSjt%3nFTJm%Ivp1?v%pyIXcU1KSW0D#ViC3ZEK;i)n8}@BIYu+7hQ!5@y_3&+YXgHqb7F02J6Gi@Dp)!4x6_6pg^cu)a|Zg&haTVqe8h-V}n1e`AXtdcYwiu?MZocy+{-VpFeQXimC-ouUQx6&}*}
z=9T6eUO_kLAnY150@C{GITthXlfE20lJDrKEaiWS+^WegFRIpwVM9D>U4QR??)|RDaU~lG*
z7c0scl?iaf6jf}TCrm|u4k07bw9$iogX!tWhtUiu8up7fn4_N^sKAUw%`OQ2b4N{x
zZw!0gScz;7P-7hC8aj&f0AbV7jGF6}cB;4fh5YK(>ZO*)8~ZBP=PkBE!>&!*hqlz1
zz)(1KQnGdjs3R+$&DB|`U)SLlh-qOa0m(xrIbU~S_6#uj8B?R3)tfyI!sU!C-*Q1i
zx~BL(HmJ)q7i@*LcyZ7-Xvp{&UrKvRrh4fzp{4tf?)eF5ho(Si>M~1PRhP_O!nHV7
z4*XE$Kgw&?*6TPJvRKHaEMQ#{zt}~Pbe$ZK@4>rLE<9478zk_cM{FRuT`jHjO|>oe
zWLO`P{ot8juYApBHHz~np>kK^!F?v+*W^ZXYQc1-EneMoxR)Fgi2CeAM9
zK5dbv+{aJ0aTwJ~pGd@Iz!qu~YZvF0V&bfw=A80YV%EBQ+q1Z|^z@NnS!XyX^ovE|~-U~AAj>F&*jDu#sf-X)~Gk_Ph!
zCRJ>CZc6T>x>RL~lOr{Ke9LboH9k9Iy*rQX52Gm@egt})pxSXR@90#%hD20hCpdM%
z^PykwS0vq{;oPxO`2mp6%`q_Y!HJ$Au7h6rOC*v~I=V^o6?EM7%Eb81D4(YztGm~q
z-!joB?R#+%zlwL1qb?Tic9erVE^Dq6YF{yolIn~jwWv#ZA#QySz=6Km-}Ag=qi)GW
z>Nt`nnCx4O#=JPENEL5LL>CS_%S%5Ah+`m_EYQ5Ts5dGqIG!d+9C4G|W&21O
z5ESV@v`%3|ZEqW03GbecymhLU_%VNLSR$YbF@)bYFTbjOqE+m7MPZcrMdcFp-F5Wg
z0QtHhK{otF3=FJi%2_E+@U*|X$4_1s)7UiP(T%_X<-6_t^Dn(h>YeC_z
z*O@XULYBc5j*B%xHc{Clh|&)pe+0=|UJCN#w+IlHXyPmD7n*Bp1twc&bgW8+s^Omq
z_h<>aXJnfijebzOThs}?B8^xnMY2`8j+5WVX0Lz>4CTJmE78il=HZS=W=hyL?d@NZCf4&CJjd*_5?!n-Xt(
zXVk!1>-t8(cvi%j`)umibGl;1!V~Nqo>)N+6HEQ{Tw?4nqZ602X5pH!RNn`M@e96|
zR-&Dqi78&@xBo`kv|@QQbz(Fl<+?^Cdp)AQMC^*?MSrwn^EL@}IeQDr3RP0jT)w05*gr3MaVTWMfqNUvw*<0{i<=*VxA?J|Ge
z8c`T0x6kxkZg>=*YdDdd3YG*o-0%*6K5%fjO|&f9vL&$JjhRE^J&^#m-qFfePxlM{
z-!|#&80>%IB`_T1olwhdysH0P)7l-SOk8OeS|d<2CT0D$iXySrQ?8b#?-Qd5o{P=e>Nhc{UYfp9fpRc
zDjt@yd4_>O3pp&wkQg={TTvDpf-Wm%Z#U*2(XjD;oKoBnP-JRM8z8ItjI(MHxh51-
zP_R#XnwEqlh>J5RK#n@Cd(xZ`m>A>|dow}SFCZJb$J(|T9#SBgp8hjh0;2`Vo2L6B
ziWi6Hf*kT~Y@(VCuzobjeA!E!;qbNMM)wbNJSZtVkT;-X-N&gG2ye8UtC{79m?x9e
zYlq%hCf6X20$4wQdAk10E@O8ef&^?&&s)^2uN9L_uY$}_akQVq1~eTRtl
z2u5#KeAS-5I(Z26Ml<-HwXa<$RR|Y>gu&F!!U9FFX*spsRANN4Qh{Y5=PrC%%}1;b
z^V}+kHhnYo!$}nxzlP;(c+Y1nQ*5x4SbZwh_RnugdARHxRGcGa{q92+{+~Bv3mMI!
z^V-hpm2Shz`jBfL32D6F00wrg+AZAZvi3d8UKR+A`gwevzbn)9PbUGMC(^iT&BtPo
zvo|3wq36zNNz-|@*8Q)Q7cc%1^&l(S_&1y#4d!75!oovCzUy{%vDYlv`=$Wm``po|x;_{tEU
zuASU86BLA!chq}I6ECNotg>KBrYu$zi-4SWH1ofqz3)L5)OuRoiIYc%o3acERsF05
zi`xBhDnCtN#Vne>LWqgDzQxW8Iv~GR(#e3qnKIfM(Z1V|KFtYhSHqFM+n}ezf#@@a
z<#?aMqoKr>O;l;!Z6IA?CoFQIe0XYXwiBQ$66A+e=J13^mfP+uo>G1l~*d%n>
z)v3s9hnjnDMf>|!QklZzPs6ksqoGsc{412xD0Zg@vH3(9z>~Sg9MVH*
z+P~RB)|=(S++-kzb&n6u1#^2GsS=;jv7&@W4ZNg&$bBO*u+D&9wBC*;o=Y0l7(^z^&?
zoPqmq_X!%N4&mH#bxcdyWWPen4vfIWK)b|277duzWYWBq!qrHza|8Vn)|H+no2{l#2iB1tJM&oOrYsi(tPn_zs&(uMx5s
zN<^(s$hrBp$VvYAI`lhR%8Tg|pJ6
zoWGpFp(RM=N$s~nQbmT=1=PBpMUMMS*o(<^5-Ux`tYOd#qKtp}PK?vM9vJ_EJGIcL
zgG31k^<81a_yA-!?$$okv@U^xv|=wdux`$$+U55)b<7d`HnniQokR(IFgIU)KW(2K
z;_#wPtT>P*W``Xg^^JelxT@7s3*ysfJZT-5MKPRq?MiQN^b(D^wA6BR5c~
z(h}=oNO77FpD1bjZaGL37!1Hp{JuiwRupm}KQrZFEV5dhcUWvL7<>dLq(C^*3%g-q
z5XiU0?r*#vhOb#|Xlhsz0gK|b&2O2)2`Q{0>fEon0!9`&NHf?IQVVG^tUEsn{zYoB
z#fw*V4$Giws!J`3V|A>Wu@oG`iax9mWC9!Z#C6qRvlqJ4!tz8^t
z{Rs7&1VWG)jl)LfzOH6h)=L@WYuJrYRLmegu?DQ7Eg6DNDn1+r5P$XCPBU!OF>+`pu^$LTHESrbk
z%cF;xw&DY}N>x0o*@Hbc{n8^Bm&)DZhg9u5sm_*~IE;U)A~<
z^1dBMfiIh|&@mFYOH}grL(hX-0KumHSP34h)RaF)j{DhKgT(4ta}mW?WO=>dc_mkF
zgBJgDPRM-WV7G`W@2B0l=&b%8RJ(8rmN=-$$V+Jmu9$)T*_j7szuu-HcnBrP=3!hhy*$c~|`|
z@tmbm#t;r?s+nAg`vf3LQgt8-&ILEklC8iQJW?ISXu>1n3+d~4mUrxujNrtj1J;n6>mgZcrcXM(~7|%Jn54UL&
zFunhkMrhM2V5-YCm94ka^kLfouC-XaovC3zhE1!EyW|}IaY+shcJZ3+!%ropL&YL9w*9;7zwSX*p$BKmlj?z?Cg*}mXrUycH`j*O!~5GJr7Mc_0$20KcYi}xoa#@DT5LXk
zOeiK#m^rPMf<(v<2?+dR9V{GOHhe@0CpN2OMA&Y-Gp~Y#{J1d{f5jZ}t8q-59c&7G
z)gXY1!$OxWu1Ez!#m5=XqQgFlRNK+V98|8p=mXK>=EN_C8{@V_C>c)?l`2JMU*F3{
z#%3dMhLd;!ZRV=~gJ8J16(qT-MDpiMhpA4nq;)eeE_k^O$Qj4uu!gHneF##&FiN7K
z<~)1(XxbkrZ!wMS(mq6yycZIv%~6c&yzmV84rG{cB3
z6494@SwZ$s)T7WJckGf8P~Y5lLiSHJ<{b~w5GL6Fp=n?NwC8R%XCOlXKQ}&o+rAwx
zPIz`2d#QU3sA6sUtbC>!#Hw{-xp!%*UwUySR-QsTCgpCE{4iS;VsYCneaAyE`7-gHd|3m>@0SJf=T-|jK?W)3cXZ5~
ztk>J6X{hkcB)wT=2|Q}}oNp`b<3cij_g26B>RV*o;VCAg-F~0MMC=R`nH-Gqr6{|j
zNd`=u7UAYYe!`D4QXG;^&KRaQT1>yr99J?YEx#c&nit+Zu&iK4C(!hL)0SaJSOX_b
zVQWV8ZVBlvvD`ycaEUh*{?7j$3@e6J#$C6m-*4dIK#);yLjw-0%sitbCyFJjWF>oU
z6Tl*r8{#sz_?=G;4KkB?4?Il>*vX?{_gzS7eD~eooVy?fr_+0DW!~#ZTB8?-2ZY}j
zcqBdh!_*$g9*7pBH#VqisB1WQP49lCA-<#N%XXvvaN|D<_lt5*e=hfkKApoP4C49i
z2j*L{+R|mTl-h@GzrZ;rg~5tIml<_=QR^C?bShP_dFr=h4ipvQ*0gzkl^+o>
z9uq|e;Bq2eY&Dcw2F9wsFy#0Li4ftN0ueV}wZnh-)%Z#cp{aVz$zy{gQIzEIK-E}h
zXP+FtsmwL787jW8bq#}f6G73Q{hbE)f6x&T3IlnqNse+I95=P^8yUB}FHuFK5`Yht
zc}&h~-~5A=m|zm!)J_de)6`*Dk8`sn(f&VUWBez75scP~tRyP_zXsaOKSmD;?MM0q
zktwDuzQQF9u-vcX>^MZQ6!SrD=kaaa6uqRCG9wxy1)dNs=(+JW&YoV91PUDyqsI%;
zn2`-i$%{j$2OjULq@5KPj>{^T_dES39!s*`gsa-E_z}*}=mEW;w*^5d0(5W|&J$2Ke%;k|+Urt=`DH6MRaE}?)0*QizOPM_uRlxv{jH-jFhe`ppXpy{)=v*~
z=HLQx!
zQ%uIAW6?8;cU*vC^~v2{*k&j>CvptqHJCGM4N2~V>q3y4E?K7ex3ak+N?sF`N=7v7
z4rPDrm$Vu;X5;7j_|kEl+-A1X^B*oQ%XYStBpV0~-*LQwis0z$#J33Of5L8{TD;>2
zAPs#xE*s9DlfZMiRd+8N4oEd)_(}>rqg)t%9)m}-vRXW@O?}h57Um!|K+sEy*x$+l
zjp4kjX;or$pm2b+UjU+8D2W}EaO-HyGfJyF*$I}ToM(KCM=Rhn9$-R`fEG*E_D-}a
z&L81dREP`RJR`e%{NXNbbnG(Vx?9dR9@Q#9u;pB*ctn5wBiN@B5NA`>svJxnk0uQeUKmjagAl(G8(edh66;`FITa=NXG`@GtIv552*hJ
zTGKG|_Xqzs%&tAagI2`kQ1P1=I?d4ZOL0rwYl803nn8?O=a#pZ2>PXGXK>|-DV!IU
zqlUot(ASxo#k$%jmba!~^h=$ey~jk8oxlvZ`Yq}v^n6G>)cr&Bg;wtfwGigU{x|a8
zW{!z2yEw~S>7>6J9BZD7v`Rh6XcGaU7W$J)<1}`UUnJ#mB1CK;PhMvvdrnhS=l6|#
zaLO&WuR}*o=o7V2m`7!dJ{>s8nw+%H&040C}yU|I6ad-<+~5#jn-jeA{wl55jP1?8|B!##asv@1sZ%so}pgRj*j17J{%)4ip-SN_)s0gCgHB$(#UR
z6l860h=C?NA>K#;=^6=aey7e|ZoN)k9>+L^3yTfR#h)9OvEffHT
zuw?jj587(qJm_@^|qz)GOWtSCq;s(d;Dg5jBIx>dk1C@D^^;>13-DPJZ1gjagumdv;Comi1-
zeZfg9#J(#av#{H8R32^iWq+_-$yFl+SQ*I2(7e3vz}Oa`0w-vc(89XKOCBV=-q<;*
z2Hnd_Ezq5Hc7uT1aJL#eyVRik-|HxPIj|<<0~)nAaG1AU!H}xYVP6tQk6z{=gosS+
zeA_;;8qe;dn3s1tx#=*(Elyq6U
z6%r!AjEMHg@??qStAUPB3i9wK!z{fz6N?7YsB?26E$vtDZJmbmrps9VK&Dudm6`u$
z=jKhgM+4cK@?<@yc)bve%{LJ^BYksGLS$D+SJ^dz=ank^f#+B6)>(SOQFe};I_E!4
z)26<~|De;UjfVefgal2Ub-e^30%hkbieIArm7(chmUcMatC7W>z-oMEn$)^c6f?Vs
zM*37=rGpzTMQl`+PFBf>uoY4IhrJgXNw)cQcd|Ze{s@<0`pNW{vdH*7^d42p8*w<+
z^-XVXGcl2#IkmeqLW~{TP4K;w!)g@_f~tKNGH+8jV{=oYUz7Rf^N4vX)#ovco}!We
zen+dJvU8JwvY(2>9O@((%;lg2Zph)4W_O#~R?>2r|Lmcc%}uYXslHt(;6IS+s5iMh
zo<0A0uQ8iVn#|n(s=$=k5jjF|F_k&k2@GkpcFKPzWq%E%w5y}sI`EIJxUBs5-*O7hN{@*|gg`e!Od$BICONRBTtzWoXR?Ew
zxiei>P;+yH7To5RkxI96U`v{?DJfVH&`>O34i0f?a%&7(sWA-G()=SKb(+WV&J-0E
zcSece?Hfr`vhDmiLd}K3=8Ekbh-);4!0qsB!2S-}8Mw}C2eix;;d-W=E$TZw7uuwmNdjCY0YdCa3y
zKoCx*W0NQxOEOFhOrD`49Ltg#C9GV>j~7~@2#Rp(`cIUwXUwRy?Ob!>h5NWBb{P*a
z@i0K-!J%|RR)vfITRcE6vs_I@;eKP{cO#q&Vl48o?553Pat-5(YuO_*%Ui)~9hbjX
zr$j?HDqk)ltcO5y9D^8#k5c9~5I)_RAupV?b>IruHSa%->V8KuEU3Sde4#qRG8l
zu4H%UEQrTUBOK35`?2Q-J97_e8B0suP{PL*(C#laPnh&WqGFb$VGLHjssR^Ckwg&u
z$)~)7AJC{7(}nT%UZz>R7Oj1vsB(@zzHV;GUpJRV%6BaC$p#;^9_`b%vTJn6@3h39
z6dIxhOn=P;?OP`HCHnUWUWvhr4u0~i@HX@NMDA&{kMO&)#+);{Mg8Bu9KSHm=E!Y!
z9QN@ISQUu;1#5vcr;;GQS&Uo<4gYKo?UfE?^wIFgRnA?;#|kaqtOl_bxrS8+Eme4X
zoRSf9mvB5GIr<*CaC{nOOBkit1RPdNjVYK^Ze~VcW}(Of0Y&{X#q7~KhA;4}FS97*j(ld!}*<{FCLZSIgmL>RSRJ{2~=
zohrLO6N84bq5zif4}aL(y`t`V{xqcg`yHuBNK6xol|N2!oGLqw5n<
zMV{UVa+9Sm3HTt5e`ad7{eY)WcOx(>){E&fxTHDPIoy
zTavoawKTOCCzP>x)J%!Olxs9Sm&Tr{FAGiH`c~P
zT$!6Tm^VUVA^>meawAzan6m;SdU6!TaeSrwVpwv=;q4nI3bcSX-y9=U=(1NQW$A!9
zg1qR4cM{)a!tqV0qVP?rXhHE|sOF+{vDp}I*M|QxZW+Y^#&c%c$z{$6F!&azy{&nr
zVuho`ksTKJgE%Rp80(OGD#dU=Z6xEegnl9gUTPJTQKvD3-JDwqUpEN*P&ej?O4zpK
z+u(5f0FP}D8fd@l%Vkc1ju0H{Gw5mK_U)R%w0aR1yjGBXa($<&P2uGpqOmhr@S|@K
z1X&wAuR$=-56Aa*Z>?P$v>Nwk9|c~=WBXq2QTSf&eYIno*r=
zQsPTn*P@t&$gq=m)~%sQ0h=np5X$tVB;d+z?SQav=7a@?+=JR(r?-m9b?di!TMz
zwL+I5DY!ej4#up+6XbKrGowiOV(3Q;wnl%v^gC<_a)uXoHu+n{QfFTm$g17fqko8W
z7!>HeOum(0O?>Qn%zBqmk3T@QibhowzJzz!d}mfcIq4~yZ>pE*AgrZZd=l*_OB$2w}&B4R#eVHZ`Wq&1;!Q6zE|AJIwt91zmmJ_=bL8Xcme
zi5g|`*!k+aG_#)GL*Eo;R39%10%_r{;4^CnT38x6SX|i<2gLWp>S*D9K1$Vg9=Yf(
z4U$~{S`{y8%fH+z4ntFwz%tXY=PYn;u|H-cL|gOIDAd>g^2M}FLxICW3j5aHO3Ndu
zGr0?l8qQrrD)3}RTu#QYV}K9kGt|6LZdGY17@5RwNIPsUb1-Rvlg8lCxAY>gr4Ue#Q^f-vK4+nEGHIR6XHT@W6Xy6M
zP|V~mM@%`_-*T(%=^v{0>hv{K-Q;cpYNOa>MPerVk(;{{G`UIe`<7j{?a~lXbi(
ztcbP_Yo$%NGQ4m=PB@pPVjGww;W|?}U)Q!45_n@gE{%Y8Eh{=665ms_pW`3Q6{B*Q
zmFH(lfybR{<5z{HoMY8wS$J1Sxo$;Q0u`5oLlt3?x!J&Dz6aI}3`C>ijP#3dg}jYR
zTcv7FalfpHr4*8@!_4cuBEiU$mdMQng^(xB
ziV;J;cnsY9j_m6Q>(Zx5>}xnQnP?Dk!(J!>U75T^=EWRzR3|NDUhWrjW)7ovQq05)
z&ApYZwLk3x$!kANVf_yRtcG7`vZB4zrReSnS!1f)9P5`Nxz-U>eMx~cFBLud{CIR
zPR1Ttn#;{n`!h>5yfmJhvKEeb`tl{Aj;w{2^~Y+G6^!1weFbri8CCHdTmXEYRD*Yo
z{LB2AJfr5YTIAVJig4HEKW5FhBwjs&jbm52}iHC7lQ4C3yyV(~x$>K10tVq!|h
z!*ZkYW=|iOxa#T*DSZh{QBm=b+3L>#)m`WtOD(=9FRvhOC=cAHM92-{)!|lE;4ROQ
z*iwj+pzn;XI|KH+85Ke1;h7Ul)o8hwj9SSFh=%TQSXdgo%ot<*i*P|?AP5IwHjd)&
zzlE3^X%J^_Ex!{4O_y-P9I$<0=!@|W(zbv}yj~phuK|picfu%$*+Mu<;Cdj6md
zwJ03j)i(O+cE{8c>O01s6T~=3K5ysJOF8nD#VwuL^iD@iI@lflP42U0y9><
zdiF^OoBqJlH!4bg5Zi{LC4Rq!AtXp0zhawKdTTE4;D-xj1%pE+aJ>j}5JCbLF0Oh)
z0vRAJFf>Pl|4z0ra@n^CTvp(=9LpXCl2PeULIQ?yW8I1&7B(Qnqu{qJz&!_(|Hs!`
zK-IBy-J(cvcMp)@?k>TDy99T45AG7&-Q5YU!6CT226uOTP0so5_mBU+_r_q4U3+y^
zOEukV)||7e0bOCo1pb}+0Jv%+fDE~O=c={q_h&P^ZYw8N`PQ}m6&MbQ{v$BZ(KUUq
z2e@>A(+=dV$^c$zVG+MwpQ9xJPXM`QK=(MRkr9TdJ6Q5>N6aB~C+^C{g
zUK;oz;UQYVln^gj@|68>l_b91U1XzAlY>4-~zq0C2`frUQc%a5R
zPtWsjl}%GxPmcv`@yC1DPwS-NpmtKtA{3Drx!7m}0CdCO0zLpeWfvZ-Nft?2Wr;q(
zp@KXxrt3th^N5knQK$)|{re$54zj;03!SbsK0sLgFt}z0(cufWQ`2v$J2^ZZ7Na`y
z|C{O`zA}^joBZd2q)(}Sz4Nf-M_(lZ_lrji$o)cPS38{Q=R5U(;pIB*{iQpkSbu1^}XIh4^SQ6Q5!x$VEz5y8TN(*n!jrcu+AP8(>#b<
zhKfn8&c*AugsDQV(#y~N*i2=9MG!Z54{+LYZ#islbmErOqz7g;;`$m?nIv<5fh3GR
zH-Y$KvG-sNHj0j4QNh(!Jq*S58P(s5TN6V9j9-oi>CbB)^k
zvAycm(Ik-7mH7j)z53OOKyhqDBlccDp6kwE$1Amfe~u~zyI@rXIqzQUgMPdiwYzuc
z*>~i;UQ_9CDr^t5ygE~X@Xb4(5P5yzqC4IGx5_B$djJ@SUfe;c*Aa=@$=-aW@z;vI
z)*ctS&pU}NabD7bn60#(CM?asE-!Ho9Nw2Z>D864rxtd$nU!%L_BS7}`J{NlqqZo7
zV0|iGKV3;v?7VWuJS)@o4s;*L8G_A=w7$umG!X`#dczB?naKTcm|PabBhP;F7Ns5*
z0argu&IYJqxYIWUdDT)t80yH`iQ56_j2HI0wF9BMw{VrjFvvR6JDqpn$#h1Z`&1Pcm`6|hP8PrGLI6?x_gedhX8fxQ{iPY3O|2a!%as3uaGL*IX{Lb%&PJ1E
z1(BQg>Mky1ft=!*gae+LPjylUtX4N}jXtMp&bH$eL6qCCM)l+OCzjJx(a`W$|dw*P%
zfx_&zKB1!}UO7hfCjK7Sdc9J_utor?UF^a8(_((#ww)V%Z8XSL63!)%3&hQk)?bfY
z<&wo3Pk#UnE+)-6ZZn-)rd&j52^??w@u5>NBu8tRRw`?py6Hr}?)#I6ri-d8LQg}b
zfI?#PKiKZ~{q6ksye;nqeinXft?yAEHBiJKb}IGuR)zdAQr4)MXpd?JR2+lyXN@Gj
z^%CY`&D;Gly0CLY3jm1}na}NRWBs`in67}?>OLpvuj*=URxXK#z*V*@nCEb?7xkyq
zf&fMvm5TJIMuQuwV?#u8Zw+g-t-&P&ixXs>4UE|kzm%y8o?Q61gw__>-ZcbMFJpe<
z;A23|A?EGh;bX`yiq@3qA=Brt&Q5kpobm}>K%&_fw~L;telJn%mH>%9FpCZ4$rpha
zhDzZ*(La@f{l8kQaz?+-?FpBl((XZi`@sA|FxFo64(1Q%?!nsPD}K<&dQf#t
zHsa?{42Tq|p>DDOeui8XX`*^w2I6O57!ic?kzAJx%X>eFz(?={?aqL6C`fb+nS=L~
zFFgQab_e#}A`9DTho?8$P+&ey*k1h#Ayi?)Nk1p7nMy5QpNgHy7?w(P|K!nz&;^j(Zfr{Aav26}2#SO0hry!0
zy8A5vMM0prhE_Kr=sKux2pEs}hq>b>ATTjVJX}Bc2HKJ{R0609Tsed~o;xOW?55zk
zryKT<%&~l*#;@ksdy@6=svEH#9bEd+7PoPerOU1UT>#hB1GsL`qi~RE1>o&<)I_k2
z>IoGv)t=ODWox?V~w=TMGJU@}VB7fO)(%fqBFU
zJFa;|r05wLs0AFtS&h#^aRI3L7I+fGj3^~SJl^sN+)#u%ZZ>sjmpY}Z0b5vhPn!P)S1rLv*dpukOCCi&G&u*~sH&dgKSNXl%s2BhSstKL|l
zED^%A0%>zo$Ra@<8(m}hj0IVPAZ%^*Q#V!{rkqBByRW@PD>=MhI(TzjAg1XZR12sa>sY(#Mf_Ky1aECVS3VjNHzuSyaB
zMR*T;TNVhC*xgNZvBrdQb3a=hkXV}@zu(Bs*_~s`DlxAGhMFr>Gsq)!KH|;&U;%E$
z!mPjQNT7@bH!UCvd0z!(i
z*6WyAd`lO)uc!B=AOPSNu``mBAT36n&V>B?FHLe~DggDj5=5OCrSBrPcvVPVe6}RE!KW<8FkK2^${(C{
zJOFxwl?Sni&A=>22&4$m3}zy4uhF9r1}%fJMJ%_R!FhxIViH$L10P6HUOs5{4|+oi
zjDC-7WPv_jYJ5yIYrU>I;yt8+Sn9;_d|*~zEN1?4bi(!sB>Q)oKtxA7^e;`YKP>r|
zCdj1+QVyPz%9k*J9eB62yte*ZClbgUq-xjZaJuha0|>w%QvZ)ZL=rHGl(n@@lojZJ
zy49Oh*T|a$1_F9xGoUxx135=Rm2EYH{td_HUBtRJr(C$GuXMoPmF#YW>(v{tr;%LP
zxxbN36%Rrv3hG0YORtO625GzESUU_>}-IM}O1R31h+~
zbc=cB*Z3f#Ra?zD##FyVc`}Rkw`*8&15ye6i9b9AQE!no5R@NEGs+l0Z_E~6Wv)IfW!jd>FK+D)#`BUYP2Aj@v1`e~U?}mLr%TPEY2OteOy(h`uECVb
zOy;Seqx1eL&CL{}cb~Y5FH1d@!SLp%(^Q_hEVl5|_Fk#-;~XlJZcJHpzkV7wb($><
zYS?}N&f8hX=m=`|8IbeW-q@JY!rt9K_iuR8H#u+KqGe@ltaX8FkAKPZ2M%Nd5K{GU
zFt$`N2zmg9zbpVS+yH>#aR3bG`367(BLIf~DhFV=%{vSq&Fnfnq&rv*d@Z*(n#?Oe
z1aqsKdZ*KmHoekh|DMnid2Ahv^lpYqy}0e-BE7-omI_HXY04KALAaNTn@^P*v5#uP
zPN`h--$tiq6snVp3w4R*Pr42jWUVoKo$p
znq-&$5Er8en!+3a?dpJLW+x}gdv!OrJ^C+!yRaW<4!rVw`YJG>)etM_oTNKs(5nFq
z*O?0m0zpHmj$XZ=@dGRo)2g^hNxswG)QQs4Yv@bo)Zg)lHw+O#j_3OH$u7q9v-y*X
z8;%P-0(9zEpQ#g3m}+Eg0@Q3wny)_i<>Ef-)kL6E{)VX8*Nfhng
zXc?^!aBaO)b6%dl@7G;t8Lciv)h)cr?(AhNV9weZ@&ia$U?lZdDBiScDtH%)p}2p|
zS!o8J*4%ZqSfbf`{vFGA3;ma
zO!+~`hXu=#nVWU_6^p3O3DX{ohUa2xsUg?nt1Vg}crp4HlLCZkJ%<`vecd~Qn&b^-
zkedjcxS3qtNh2h)1==hUMGJ>g7fE|m3ab9XTxxIqoZ$`Jus2Me@1zTL@SM9BUV}yN
zBwr=2Xoca=@!VHfxbx=(8cXz>xN*YIerRE1C9?~TA#gbQlyG>$F98su$by*IU(U;*
zJuHb;9Dl;8jrOVNLm;IYKp<&^&XS4EdkofnRrt=_y|GV@-}#w8WOhH{0L&)e&H>6Hvk>
zLAm0$Ia~ctTZiNPND71>(jbV__L=n@A^R-RxMj_+Zt
zcrY%tdGb}ZQn%6e)C4IjO#Xwy|FA}FY2>`c6SqGE@5b~V6*QCI|31TrnemZvH>{0h
zu7pOiqKiw2+LWUVOHNZekKxJ`nIGzJ^9+Pgij(+ue7cCmM+tA6&oWmK8YFoo@KRDV
z+5CFF2kI>h*27L3%6p?M>lbx18LQT?NAeH!Cb0OQ(0gyvyHMz?;l_Xq{c;~bWU&h_
zh?R#&&$7fMNmCpO$5GDU3dah#?vd?!>>kpjREP?=VzDoS3BlsdfSZc=D8|UU4v(Vv
z>tEK&1$+vqd(`H!Q^?f5&ye>6c8wPPLY5(qf`rnARmNesQhU2PPf1|-Q#dWnv$l^E
znUS3mBhMmSnSS)rM`{Dt8jNDl%x~9pG>AG3ymA9Xt8mnYuDxLB|6uRYA`gr-&Rt>fNZJj~b7coD5k(X-1PfF_ik{We2d7?X&JyLS9-rl7!A
zejW{13Tv$tM-%lv-8hcIDAAzB?5juD;SDgtvTLQ4)*KG4hq~e{POEyZmxV
zU=#wNwF{pjS}F6eCy6Qu6~UOg0bg`;Sag+iJ>R}NX)sLW5)HJAPkn@1~9Rlm?-NND2C*e!Gr_@&iG0Shj
zseO45se5&!fQ93ohZJ;Vg$|)_O?WBIQGo)BGR-1$Waa0)88gky>dFqjv7EvvGCyuj
z(NqM()YDKa>S3sJA;hKs5;$$VZDM8SNIG0b6w&_(sFS&fg~Aq9&yuF=B9OWUBvA^R
z$b#t&GJ5bK9z(P>yO4bp{dGK)3wf;qFf?BQcc|D~wZdE~d(~
z-Y6A~%m4pOj`@RzAVBG0{sY+l)qWy@96|XPup#{~gBS#$-u(51dO>W=a#fdBHWdT{
zn$NpI%)WYe*8E?K7dh$h(v-&xVaO+S|TUI>LcWGs?Qs-UJXp_p5KVCo@o
z`KH8lUs;4g?7Dz`q}y1h?FT9W*)3rc41i&9_ep>s4;7%$Ab@`V>$V2KH1$6wzRoh`
zcN3p|`3As+9=55wx#awVpg94q^zU9_5x^GwbA~Q3_85@N#^Um~JOTHY)N;cS-2+~5
zpwQUE2Kj*w66c=OtlTgsfTzYdRkzc57k(j3Fz!kHliMA}uK&sHyXgoOM|~eSO`;kc
zkiLicQ>gv^+hnoxvHv#F0U+nnnIYgfO>86{Z@tEkWDPFc3qLp#9%yHu#+k7}I1&b{
z++}x7xKSgtL}3DH+`~%XE+$uGHF(XX_{nrK=m#8<)89J2RsSc+r(Odn?7vAK)bq@t
z9%=ezt-2S;rn*Eq`JleS
ze+0pFG+;12P5@L^U!XfWPv--nqX9do?l|u;TBohxCLkDgN>0+r&0iZ`zwc!`nzvlJ
z0tfguH{681wOg>}a-RmZee$aQ#&qfXRW%d8hdlY4*@c~&d95r*z6s@s+;wSO_Ad3;
z#zrk>vXc2(0
zP|vDKwD}zi7yp@8ltmTsAMy+hSZ#^!{`&dojgs$xHBZwW0mfvT*ywCDzj)LYi5jLR
z+wn*a)3Rey4%3mva2k|w0c!@Xp2d=D11$;ST96VU8S%dJSGG35Q@H6pwFkN7*%e0I)p2d!PR`=J~njsi9OG0Pg1hxStKmm?OqS
z#(y4!B>l&oSC6fLi>;wNvC&?ZPs1<-?NI9mCLa_JNKF_t_uEdifLa77iA58+?@FRP
zpd_w|cmCYhawJe!yH|q&qJI%cr(!9DBx`aA0Kp!MLzm4CD^8PBcwAo;IZ}x~bI$m_I
zQ@x&T%g;M$E+=u&D-PS}X{XT`dPh*O#fW2r_~W$STRkcIt>5Kn(Glj%{?DtmIPcAi
zQ5D+S-1XV&$d!p4E-bwB9k{wzI#^
zi?2F*@;~6x{^S3lcvpkItFuP&Ch7fPwB~HvU0r$ku*hiNdKa@!o$LCgIY39!kGh-i
ztR~syxr1cQ?J`%Xg~-lY9a;3O%kO-k#7p3jPliAlu6w;%0?RNp_Hg8!ymko#=3?N-
zagEXFkL@!Jl-I|(s;}$wjmzWi^_OT5{SbfXzZbwUML7)n$(>Yt3f!0Jp7_z^nF`#)
z8Jwj#eH6~9g3CQ0l|z*GA|B>Jv`F7`eF}{JW#7>=qFd3f5i^G5BV^9WOk;8Gq`I9i
zAg*c{mTQxL4)&$xlb+JIwQeD+lOHrx(;BW}HC8tPduBg*nABUmrpuiL@Ao>R!wC^k>$$QLlrmp`nGljHrS3
z4ye+Kez8yTAin19m_Z43LD5b^3TTA1g`0Zde5?z7MqV@e@)&5Db0G5q93cW+ozCw$
z$0Dyhoql)1uN~1WY^x>fO+;;`WT#8J>2y};EhWNd5Gk5#30`}^7VE2#)0c>0#@;6TDEw9RgL3YA%_muP8B^5{O1Z!8zUH_G{-^rb?>JHHB@A(C
z_9OPnuQ^3Qjv{iThZYvD?uZ9|*;Gtcf^YCjC?|eCR8uYjuG7fxC54|v9gO|lN+>0N
zM3nQ-+|IjI8;Q7-^YsP1@}vw)L$2Wj{`nDUS{iZ-x0rf5vtGpFPxa|VP!8kZDNu9y
zS8p|fK{AOffnM@>CczQs$e#4_ERD;do2wKJ}jNStX}oCoU}`B?E?FREhwr7wU7agUq|@FR)eL!lM5
zks0e49VYkJ43Wu1DAxFODPFco8L_zJm^F0MNXJ8zC^?3D(uG;~l*v5EH9jBTW8&wk
zHPx8YNY-_6U%mB-j~HG$`7#5XNW)efTi{6JBrqp*9(}=Wj3R?W<9W9EMeiJ8hy?1f
zPVuj2y!1Z{n5o8ACoaiKkrZ!9pw7uZ`38Ov(J=j%opTHR6hd_!8c?2f3w{+c^wX9+
zJL4AoD8#(Vo?KzN`4*RtqOLo`$9M_J@^g^FcihVsP5qL!ExA40-y>15y<>pcSq?Be
zm$m?YjnsF)Mj_zfNQbQt6ubKq9}n59h|wxf<;FD~9U)YaBM)1g|IS_0Cty*U>3&rf
z$dg0MH}nOjL8=A)6BrNCi-lAU*^32uqjl@>5TF?V8dwzjgbe(1I5XU$?gSJjxX}9z
zH+<5XfuTS8RK%QXh0i#UNT~UJjFA>b_*F19=)30$!F+Jtu1#3@rp3qc!Qv*w@8ZPm
zg=Q^v`fYD%P8q3hkY41$l4yNV9f~jPx7=5l^e0ICFETId3uiQpz_b^eE{AXZUgVYo
z>P1WIC2x378LS_@<1D<3UTuzD^f_*?v;Gb9n!Fs@yed9%-*_8z#C88HdaHQJcAsuRu(H??zLPyfPvtP8Z
z9Uc?p{5;ZSnOAPdiwI_`v^m)M*V^{B#%)~i^tHD*f~(He7S!cGo)EIWY%D~1?>*?!
z8*vDzyvp!TUf5>dK0UmFPVjUc!5Uc`Iyl-J=~=y(Z1l}xnTQyO-oJTx=#{?P8WGXU
z>6sY8(kmD_*f`l67CD3*_u=3>1wVi8Sctghh$ym5f{+iRi_xfQo|uehdBkEv^Mi
zFXHMbrs$~WX!PDxjG2fT*n$Eqy%-A-%fBU7BG&g3A0I5esExJbdp%J$BDQ~)in0^2
z|FiUcxwMg?nVz7HE0G2R5MAe|`TB;x#Mz5hA%_n-gQp*y^<&Izc@2um+%X6Qhq@!mrUTENcy
z+hdIHd-89)f0q)`t9?@kqW|41B8LCjg7LpC{^?{PJx4uD8ZCH+2U&p=6
zOi(+W&b;34-e#G#l=srPoDO*omG0n)K-6;ZI7}-t)aB~j&hla?(FF79jBV(<5*(Z419^l5d2K0UZoo&51lvFk
zI#Xw>D%)dhA?)0T%uWOK0S8BB0L6*|hq64W{RvvwMT6|)LjbFb3E~3IjzB?Vo1N6u
z@A~>h0K_hw4Wd8ISsPZTX>_K=}{or^zsg3x_oxHuKJjidVyryO`4+N8C)O1kbRc#
zT>z>k#K6{udZUc|7kGBweZ3TO^g#a94d{l9c{c)j3#cyif-g7>7_y&K6Xaz!e1GQ$
zQ(uS)h{jH5Juy;@=<*lf*#hOSR){nZzWZ~KqjFCHBMRZZHuSStFYbOQwUtX|SQ
zj|sAgv0{T+&2*>VQf1~R+C8{BR63lm)}ar)T`HbC+S~%QCyB?fvKX=9wSr6{IQy0R
zJs)qH4~(b7j2YNd@kWuvRSqD{!i*Z6e%-8>zZvr)5`p03szFn>wo%~vPkc%9hBMy3
z&tS+p{?LtJ)Z_Swpc?KpjXg7sU0%SL{KY`-i@`<_V{tZPvarE~umMBjAILpdSnGUQ
zfzM@cp^*X<{$_9CyKe;(YR`_jLH2I2bCcLJlZp%xvxejW*SD1Wq(&3m3_n
z`~}lh2DaA4e7}N@1C)Te6WG`RHTMv>Pgz=Fqo5NYAfHBbwWT6J&e5O6Y9gW$xnr%s
zAYDloceAm%Vy$2ovq_eD@&$}*XhFip2dhDLJ9*R~t*Y@czmU`TmP|lO5&DUDB>
zu5V;B!ma@K2Vj|{KgdX+%)Xz7UW&{t=rA$JA_JEFwK94)G(`Z2l#k4QK$v|b2);VK7<)MF0PBX{h9T?j}zSAp+#t~**lN&cad
z&);dm(*K79)|Nj#;OQr-%q1fo1_&RKP50*&b$jTwAP0s(Oa0P-JX>VApRoGpG(45e
zAp-1{EwP`W&f#34y*zM+&Ze|C=Wnt6KEKlPAkiP-s)6%PaB=L@ctxarLlp)`T)0jE
z%~$K8B^gJX@+JgXfn)D#p9mrZi9({wR`g}`GsT0Yo4e=8PEbfJR2m{Y?Clg1o(U)py(Jg4x
zUKAfSkO(#Cyc;eg2O>nT5zt3R2RdE8+=o%RANebqU!F1^c-nbw4B+>+a2
z6aH7tsPE0a!93Y#}%n
zzt4e$^I`NMzr7tleh>eI{hNCS-lq4ROUmoq=d>t%j_#ily!IQv2S0o0Ja;mSDF8%N
z9}F5*U8Fy>cS&xr?3C>CIo_f~jf>$X-m#|HOo}0J=0pUF8rez9*@V<+ezG#&)i2$
zFHe)fX70l&!e--nC*pOyzVta*T5^b9*Ay;u2~UmIgO~g*q{DB&6KTBfFg=R7mOD;N
zTD?U-meXA8&)IqmD=8Hk?1h~n8*FibT7b41
zZU01KKTBa_Wp`1?!nR4y|KY@}tXUOQRMV=!FN85yHo;+FJpHlPJ(qv64yGR=BOg=R
zN030(?tRqObzKM2mY$nt!oBy{a@*^
z!}=hL<~hkr(T%!Ca3%};%@pwtkIai!7`{*4pjnV9sVYYjva6I}Gq*gq$Q#WkM$+KO
zt1qDsWL<
zU6l&XR+wkgW~(%ovM?(t=J^X+nP;3vs4>
zLYG5Q4g&M!$ooZ!F2}AMlnZ)8-R|kXR+!>8pn&rO(*NN*s1_BORklQh_Tr@Cta?gT
zu=_~iMb5>Q)Wi4c0mF}kxHewX{A)2
zUAip%Ho4{q%7+x#Rkcci7p29u@HD!)JwctBZQ#6kPsa>Jtg7Zn^^K$E3e63sUfM{i
zv(vRQPTQrNITEaU4YCTCSEaip-*bkjSd;8BW*9ie^M@64hk{RXzKnmD`^kDedr(Gf
zB_4v@kOeiTH0X+fm+Du3Un(?P!OJ_F^TkQlLRLek&0U5<7wP6WCO@;Xbj}CXNt&wR
zrcY;KL$QoqRN37ED)I+elJ+WtxYO*orGj`9*TiI@y+(h0i3*RJEi4~+Enqf*!}U5=z&ZK7?|HDDjY`N
zz4k4fXp>`^L6J*nA!}eO{HQf7ZnV!OyJWtk+)f>zkX0mTJ5|?(s930;an!2NeE6ZA
zf1%>xquVA3*MtEyaK3tygJa