diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile
new file mode 100644
index 00000000..143ddb6e
--- /dev/null
+++ b/.buildkite/Dockerfile
@@ -0,0 +1,54 @@
+FROM docker.io/buildpack-deps:22.04-curl
+
+ENV PYTHONUNBUFFERED=1 \
+ PYTHONDONTWRITEBYTECODE=1 \
+ PIP_NO_CACHE_DIR=1 \
+ PIP_DISABLE_PIP_VERSION_CHECK=on \
+ PIP_DEFAULT_TIMEOUT=100 \
+ PATH="$PATH:/root/.local/bin/:/.pyenv/bin/" \
+ PYENV_ROOT="/.pyenv"
+
+RUN apt-get -y update && \
+ apt-get -y install curl build-essential gcc git swig libffi-dev libncurses5-dev zlib1g zlib1g-dev libssl-dev libsqlite3-dev liblzma-dev libreadline-dev libbz2-dev && \
+ apt-get -y clean
+
+RUN curl -fsSL https://pyenv.run | bash && \
+ eval "$(pyenv init -)" && \
+ pyenv install 3.11 && \
+ pyenv global 3.11 && \
+ pyenv install 3.10 && \
+ pyenv install 3.9 && \
+ pyenv install 3.8 && \
+ pyenv rehash && \
+ python --version && \
+ pyenv exec pip --version
+
+RUN mkdir -p /gcloud/ && \
+ curl -fLO https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-408.0.1-linux-x86_64.tar.gz && \
+ tar -xf google-cloud-cli-408.0.1-linux-x86_64.tar.gz -C /gcloud/ && \
+ /gcloud/google-cloud-sdk/install.sh --quiet --usage-reporting false && \
+ rm google-cloud-cli-408.0.1-linux-x86_64.tar.gz
+
+RUN eval "$(pyenv init --path)" && \
+ eval "$(pyenv init -)" && \
+ pyenv global 3.11 && python --version && python -m pip install \
+ --no-cache-dir pipx && \
+ pyenv rehash && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.3.0 --suffix 23 && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.4.0 --suffix 24 && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.5.0 --suffix 25 && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.6.0 --suffix 26 && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.7.0 --suffix 27 && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.8.0 --suffix 28 && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.9.0 --suffix 29 && \
+ pipx install --pip-args='--no-cache-dir' pdm~=2.10.0 --suffix 210 && \
+ pdm23 --version && \
+ pdm24 --version && \
+ pdm25 --version && \
+ pdm26 --version && \
+ pdm27 --version && \
+ pdm28 --version && \
+ pdm29 --version && \
+ pdm210 --version
+
+ENTRYPOINT ["bash"]
diff --git a/.buildkite/build-ci-runner.sh b/.buildkite/build-ci-runner.sh
new file mode 100755
index 00000000..c5d2b0e6
--- /dev/null
+++ b/.buildkite/build-ci-runner.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -eo pipefail
+
+GCR_PATH=gcr.io/embark-shared/ml/ci-runner
+
+buildah build-using-dockerfile \
+ --tag $GCR_PATH \
+ --file Dockerfile \
+ --layers \
+ --format=docker \
+ .
+
+buildah push --digestfile digest.txt --format=v2s2 $GCR_PATH
+echo "π CI base image pushed: \`$GCR_PATH@`cat digest.txt`\`"
+sed -i "3s|.*| image: gcr.io/embark-shared/ml/ci-runner@`cat digest.txt`|" pipeline.yml
diff --git a/.buildkite/check-line-endings.sh b/.buildkite/check-line-endings.sh
new file mode 100644
index 00000000..0f5bc49e
--- /dev/null
+++ b/.buildkite/check-line-endings.sh
@@ -0,0 +1,15 @@
+set -eo pipefail
+
+cr="$(printf "\r")"
+
+any_matches=1
+
+grep --exclude-dir=".git" -Ilsr "${cr}$" . || any_matches=0
+
+
+if [[ $any_matches -gt 0 ]]; then
+ buildkite-agent annotate --style "error" --context validate-changes "Repository contains CRLF line-endings. To avoid diff issues and cross-platform issues we require that all commits are done using a LF-style.
+
+If you're doing development on Windows, use \`git config --global core.autocrlf true\` to let Git fix this for you on commit."
+ exit 1
+fi
diff --git a/.buildkite/install-repo.sh b/.buildkite/install-repo.sh
new file mode 100644
index 00000000..d4589b7a
--- /dev/null
+++ b/.buildkite/install-repo.sh
@@ -0,0 +1,16 @@
+set -eo pipefail
+
+echo --- Setting up google-cloud-sdk
+
+if [ -f '/gcloud/google-cloud-sdk/path.bash.inc' ]; then . '/gcloud/google-cloud-sdk/path.bash.inc'; fi
+gcloud config set account monorepo-ci@embark-builds.iam.gserviceaccount.com
+
+echo --- Installing dependencies
+
+eval "$(pyenv init --path)"
+eval "$(pyenv init -)"
+
+${PDM_COMMAND:1:-1} use ${PYTHON_VERSION:1:-1}
+${PDM_COMMAND:1:-1} install --plugins
+${PDM_COMMAND:1:-1} install -d -G ci
+${PDM_COMMAND:1:-1} torch install cpu
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
new file mode 100644
index 00000000..2a7264ba
--- /dev/null
+++ b/.buildkite/pipeline.yml
@@ -0,0 +1,105 @@
+plugin_base: &plugin_base
+ service-account-name: monorepo-ci
+ image: gcr.io/embark-shared/ml/ci-runner@sha256:dac3595ade7e3e92ed006f6c29f461b71bb3a6b0ade8d3afb88ba8e55b9601d6
+ default-secret-name: buildkite-k8s-plugin
+ always-pull: false
+ use-agent-node-affinity: true
+
+agents: &agent
+ cluster: builds-fi-2
+ queue: monorepo-ci
+ size: small
+
+tiny: &tiny
+ agents: *agent
+ plugins:
+ - EmbarkStudios/k8s#1.2.10:
+ << : *plugin_base
+ resources-limit-cpu: 3
+ resources-limit-memory: 10Gi
+
+
+
+small: &small
+ agents: *agent
+ plugins:
+ - EmbarkStudios/k8s#1.2.10:
+ << : *plugin_base
+ resources-limit-cpu: 7
+ resources-limit-memory: 20Gi
+
+large: &large
+ agents: *agent
+ plugins:
+ - EmbarkStudios/k8s#1.2.10:
+ << : *plugin_base
+ resources-limit-cpu: 14
+ resources-limit-memory: 35Gi
+
+
+env:
+ PDM_COMMAND: pdm210
+ PYTHON_VERSION: '3.10'
+
+
+steps:
+ - group: ":passport_control: Validating PR"
+ steps:
+ - label: ":hourglass: Validating branch age"
+ command: bash .buildkite/validate-branch-age.sh
+ << : *tiny
+
+ - label: ":straight_ruler: Checking line-endings"
+ command: bash .buildkite/check-line-endings.sh
+ << : *tiny
+
+ - label: ":lock: Checking lockfile"
+ command: bash .buildkite/validate-lockfile.sh
+ << : *tiny
+
+ - wait
+
+ - group: ":vertical_traffic_light: Validating changes"
+ steps:
+ - label: π Publish docs
+ command: bash .buildkite/publish-docs.sh
+ << : *tiny
+
+ - label: ":python-black: Validate black"
+ command: bash .buildkite/run-black.sh
+ << : *tiny
+
+ - label: ":isort: Validate isort"
+ command: bash .buildkite/run-isort.sh
+ << : *tiny
+
+ - label: ":bandit: Validate bandit"
+ command: bash .buildkite/run-bandit.sh
+ << : *tiny
+
+ - label: ":lint-roller: Validate flake8"
+ command: bash .buildkite/run-flake8.sh
+ << : *tiny
+
+ - label: ":pytest: Run tests"
+ command: bash .buildkite/run-pytest.sh
+ << : *large
+
+ - wait
+
+ - label: ":package: Validate packaging"
+ command: bash .buildkite/run-package.sh
+ << : *tiny
+
+ - label: ":packagecloud: Triggering Cloud-Training build"
+ trigger: erupt
+ if: 'build.pull_request.labels includes "trigger: cloud" || build.branch == "main"'
+
+ build:
+ branch: main
+ env:
+ EMOTE_CREATOR: "${BUILDKITE_BUILD_CREATOR}"
+ EMOTE_BRANCH: "${BUILDKITE_BRANCH}"
+ EMOTE_COMMIT: "${BUILDKITE_COMMIT}"
+ EMOTE_MESSAGE: "${BUILDKITE_MESSAGE}"
+ EMOTE_TAG: "${BUILDKITE_TAG}"
diff --git a/.buildkite/publish-docs.sh b/.buildkite/publish-docs.sh
new file mode 100644
index 00000000..742ecbbe
--- /dev/null
+++ b/.buildkite/publish-docs.sh
@@ -0,0 +1,27 @@
+set -eo pipefail
+
+
+source .buildkite/install-repo.sh
+
+echo --- Building docs
+pushd docs
+EXIT_CODE=0
+
+export TZ=UTC
+
+PDM=${PDM_COMMAND:1:-1} ${PDM_COMMAND:1:-1} run make deploy || EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "sphinx"
+:warning: Failed building documentation. Please check logs below, or build docs locally using \`make deploy\` to check for errors.
+EOF
+ exit 1
+else
+ if [[ "$BUILDKITE_BRANCH" = "main" ]]; then
+ gsutil rsync -r ./_build/dirhtml gs://embark-static/emote-docs
+ buildkite-agent annotate "β
New documentation deployed at https://static.embark.net/emote-docs/" --style "success" --context "sphinx"
+ else
+ buildkite-agent annotate "β
 Documentation built successfully" --style "success" --context "sphinx"
+ fi
+fi
+popd
diff --git a/.buildkite/run-bandit.sh b/.buildkite/run-bandit.sh
new file mode 100644
index 00000000..947bb0bb
--- /dev/null
+++ b/.buildkite/run-bandit.sh
@@ -0,0 +1,23 @@
+set -eo pipefail
+
+source .buildkite/install-repo.sh
+
+echo --- Running bandit
+
+EXIT_CODE=0
+${PDM_COMMAND:1:-1} run bandit -r emote experiments tests -ll > diff.txt || EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "bandit"
+:warning: \`bandit\` found issues with your code. Please fix the below issues, and update your PR.
+
+\`\`\`diff
+$(cat diff.txt)
+\`\`\`
+
+EOF
+else
+ buildkite-agent annotate "β
\`bandit\` found no code issues." --style "success" --context "bandit"
+fi
+
+exit $EXIT_CODE
diff --git a/.buildkite/run-black.sh b/.buildkite/run-black.sh
new file mode 100644
index 00000000..50375475
--- /dev/null
+++ b/.buildkite/run-black.sh
@@ -0,0 +1,23 @@
+set -eo pipefail
+
+source .buildkite/install-repo.sh
+
+echo --- Running black
+
+EXIT_CODE=0
+${PDM_COMMAND:1:-1} run black --check --diff emote tests experiments > diff.txt || EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "black"
+:warning: Your code isn't formatted by \`black\`. Please fix the below diffs, or run \`pdm run black emote tests experiments\` to automatically format it.
+
+\`\`\`diff
+$(cat diff.txt)
+\`\`\`
+
+EOF
+else
+ buildkite-agent annotate "β
Code formatted correctly " --style "success" --context "black"
+fi
+
+exit $EXIT_CODE
diff --git a/.buildkite/run-flake8.sh b/.buildkite/run-flake8.sh
new file mode 100644
index 00000000..5ffdf782
--- /dev/null
+++ b/.buildkite/run-flake8.sh
@@ -0,0 +1,23 @@
+set -eo pipefail
+
+source .buildkite/install-repo.sh
+
+echo --- Running flake8
+
+EXIT_CODE=0
+${PDM_COMMAND:1:-1} run flake8 emote experiments tests > diff.txt || EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "flake8"
+:warning: \`flake8\` found issues with your code. Please fix the below issues, and update your PR.
+
+\`\`\`diff
+$(cat diff.txt)
+\`\`\`
+
+EOF
+else
+ buildkite-agent annotate "β
\`flake8\` found no code issues." --style "success" --context "flake8"
+fi
+
+exit $EXIT_CODE
diff --git a/.buildkite/run-isort.sh b/.buildkite/run-isort.sh
new file mode 100644
index 00000000..20db4851
--- /dev/null
+++ b/.buildkite/run-isort.sh
@@ -0,0 +1,23 @@
+set -eo pipefail
+
+source .buildkite/install-repo.sh
+
+echo --- Running isort
+
+EXIT_CODE=0
+${PDM_COMMAND:1:-1} run isort --check --diff emote tests experiments > diff.txt || EXIT_CODE=$?
+cat diff.txt
+
+if [ $EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "isort"
+:warning: Your imports aren't sorted by \`isort\`. Please fix the below diffs, or run \`pdm run isort emote tests experiments\` to automatically format it.
+
+\`\`\`diff
+$(cat diff.txt)
+\`\`\`
+EOF
+else
+ buildkite-agent annotate "β
Imports sorted correctly " --style "success" --context "isort"
+fi
+
+exit $EXIT_CODE
diff --git a/.buildkite/run-package.sh b/.buildkite/run-package.sh
new file mode 100644
index 00000000..18a4c7db
--- /dev/null
+++ b/.buildkite/run-package.sh
@@ -0,0 +1,23 @@
+set -eo pipefail
+
+source .buildkite/install-repo.sh
+
+echo --- Packaging emote
+
+EXIT_CODE=0
+${PDM_COMMAND:1:-1} build > errors.txt || EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "package"
+:warning: Packaging failed. Please see below errors and correct any issues. You can try packaging locally with \`pdm build\`.
+
+\`\`\`shell
+$(cat errors.txt)
+\`\`\`
+
+EOF
+else
+ buildkite-agent annotate "β
Packaging succeeded." --style "success" --context "package"
+fi
+
+exit $EXIT_CODE
diff --git a/.buildkite/run-pytest.sh b/.buildkite/run-pytest.sh
new file mode 100644
index 00000000..4969b17f
--- /dev/null
+++ b/.buildkite/run-pytest.sh
@@ -0,0 +1,24 @@
+set -eo pipefail
+
+source .buildkite/install-repo.sh
+
+echo --- Running pytest
+
+
+EXIT_CODE=0
+${PDM_COMMAND:1:-1} run pytest --color=yes tests emote > errors.txt || EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "pytest"
+:warning: Tests failed. Please see below errors and correct any issues. You can run tests locally with \`pdm run pytest tests emote\`.
+
+\`\`\`term
+$(cat errors.txt)
+\`\`\`
+
+EOF
+else
+ buildkite-agent annotate "β
All tests passed." --style "success" --context "pytest"
+fi
+
+exit $EXIT_CODE
diff --git a/.buildkite/validate-branch-age.sh b/.buildkite/validate-branch-age.sh
new file mode 100644
index 00000000..eab3fa4a
--- /dev/null
+++ b/.buildkite/validate-branch-age.sh
@@ -0,0 +1,9 @@
+merge_base=$(git merge-base -a HEAD origin/main)
+last_merge=$(git log -1 "$merge_base" --format="%at")
+last_main_commit=$(git log -1 origin/main --format="%at")
+time_since_merge=$(( last_main_commit - last_merge ))
+
+if [[ $time_since_merge -gt 604800 ]]; then
+ buildkite-agent annotate --style "error" --context validate-changes "This branch is more than one week out of sync with main. Please rebase/merge main to unblock CI."
+ exit 1
+fi
diff --git a/.buildkite/validate-lockfile.sh b/.buildkite/validate-lockfile.sh
new file mode 100644
index 00000000..5e608416
--- /dev/null
+++ b/.buildkite/validate-lockfile.sh
@@ -0,0 +1,19 @@
+set -eo pipefail
+
+${PDM_COMMAND:1:-1} install --plugins
+
+EXIT_CODE=0
+TORCH_EXIT_CODE=0
+
+${PDM_COMMAND:1:-1} lock --check || EXIT_CODE=$?
+${PDM_COMMAND:1:-1} torch lock --check || TORCH_EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ] || [ $TORCH_EXIT_CODE -ne 0 ]; then
+ cat << EOF | buildkite-agent annotate --style "error" --context "lockfile"
+:lock: Lockfiles are outdated. Please run \`pdm lock && pdm torch lock\` and commit the result.
+EOF
+ exit 1
+else
+ buildkite-agent annotate --style "success" --context "lockfile" ":lock: Lockfile is up to date."
+ exit 0
+fi
diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000..af665900
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,3 @@
+[flake8]
+max-line-length = 100
+extend-ignore=E203,E501
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000..37645a51
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,4 @@
+# Documentation for this file can be found on the GitHub website here:
+# https://docs.github.com/en/free-pro-team@latest/github/creating-cloning-and-archiving-repositories/about-code-owners
+#
+* @singhblom
diff --git a/.github/release-action-config.json b/.github/release-action-config.json
new file mode 100644
index 00000000..9b7d6da9
--- /dev/null
+++ b/.github/release-action-config.json
@@ -0,0 +1,22 @@
+{
+ "categories": [
+ {
+ "title": "## π Features",
+ "labels": ["enhancement"]
+ },
+ {
+ "title": "## π Fixes",
+ "labels": ["bug", "t: bug"]
+ },
+ {
+ "title": "## π§ͺ Tests",
+ "labels": ["test", "a: test"]
+ },
+ {
+ "title": "## π§ͺ Development",
+ "labels": ["a: ci", "documentation"]
+ }
+ ],
+ "template": "Merged PRs since last release:\n\n${{CHANGELOG}}\n\n## Uncategorized:\n\n${{UNCATEGORIZED}}\n",
+ "pr_template": "- ${{AUTHOR}}: ${{TITLE}} (#${{NUMBER}})"
+}
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 00000000..12eba961
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,64 @@
+name: Deploy static content to Pages
+
+on:
+ pull_request:
+ branches: [main]
+ push:
+ branches: ["main"]
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+concurrency:
+ group: "pages"
+ cancel-in-progress: true
+
+jobs:
+ build-docs:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Accept Repository Changes
+ run: |
+ sudo apt-get --allow-releaseinfo-change update
+ - name: Setup Graphviz
+ uses: ts-graphviz/setup-graphviz@v1
+ - name: Setup Pages
+ uses: actions/configure-pages@v2
+ - name: Setup PDM
+ uses: pdm-project/setup-pdm@v3
+ with:
+ python-version: '3.10'
+ architecture: x64
+ version: 2.10.1
+ prerelease: false
+ enable-pep582: true
+ cache: true
+ cache-dependency-path: '**/pdm.lock'
+ - name: Install dependencies
+ run: pdm install --plugins && pdm install -d -G ci && pdm torch install cpu
+ - name: Build docs
+ run: cd docs && make deploy
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v1
+ with:
+ name: doc-build
+ path: 'docs/_build/dirhtml'
+
+ deploy:
+ needs: build-docs
+ if: github.ref == 'refs/heads/main'
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v1
+ with:
+ artifact_name: doc-build
diff --git a/.github/workflows/pre-release.yaml b/.github/workflows/pre-release.yaml
new file mode 100644
index 00000000..891528d6
--- /dev/null
+++ b/.github/workflows/pre-release.yaml
@@ -0,0 +1,90 @@
+name: Publish pre-release package
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+ workflow_dispatch:
+
+concurrency:
+ group: "pre-release"
+ cancel-in-progress: true
+
+jobs:
+ check_date:
+ runs-on: ubuntu-latest
+ name: Check latest commit
+ outputs:
+ should_run: ${{ steps.should_run.outputs.should_run }}
+ steps:
+ - uses: actions/checkout@v3
+
+ - id: should_run
+ continue-on-error: true
+ name: check latest commit is less than a day
+ if: ${{ github.event_name == 'schedule' }}
+ run: test -z $(git rev-list --after="24 hours" ${{ github.sha }}) && echo "should_run=false" >> "$GITHUB_OUTPUT"
+
+ pre-release:
+ name: "Pre Release"
+ runs-on: "ubuntu-latest"
+ needs: check_date
+ if: ${{ needs.check_date.outputs.should_run != 'false' }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Fetch tags
+ run: git fetch --prune --unshallow --tags
+
+ - name: Update tag
+ uses: richardsimko/update-tag@v1
+ with:
+ tag_name: latest
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - uses: pdm-project/setup-pdm@v3
+ name: Setup PDM
+ with:
+ python-version: '3.10'
+ architecture: x64
+ version: 2.10.1
+ prerelease: true
+ enable-pep582: true
+ cache: true
+ cache-dependency-path: '**/pdm.lock'
+ - name: Install dependencies
+ run: pdm install --plugins -d -G ci
+
+ - name: Build Packages
+ run: pdm build
+
+ - name: "Build Changelog"
+ id: github_release
+ uses: mikepenz/release-changelog-builder-action@v3.4.0
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ configuration: ".github/release-action-config.json"
+ toTag: ${{ github.ref }}
+
+ - name: Delete old release assets
+ uses: mknejp/delete-release-assets@v1
+ with:
+ token: ${{ github.token }}
+ tag: latest
+
+ assets: 'emote*'
+
+ - name: Create Release
+ uses: mikepenz/action-gh-release@v0.2.0-a03 #softprops/action-gh-release
+ with:
+ body: ${{ steps.github_release.outputs.changelog }}
+ prerelease: true
+ target_commitish: ${{ github.sha }}
+ tag_name: "latest"
+ name: "Nightly release"
+ files: |
+ dist/*
+ LICENSE-*
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000..cd35ab15
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,59 @@
+name: Release
+on:
+ push:
+ tags:
+ - v*
+
+jobs:
+ create-release:
+ name: release
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@master
+
+ - name: Fetch tags
+ run: git fetch --prune --unshallow --tags
+
+ - uses: pdm-project/setup-pdm@v3
+ name: Setup PDM
+ with:
+ python-version: '3.10'
+ architecture: x64
+ version: 2.10.1
+ prerelease: true
+ enable-pep582: true
+ cache: true
+ cache-dependency-path: '*.lock' # we have pdm.lock and torch.lock
+
+ - name: Install dependencies
+ run: pdm install -d -G ci
+
+ - name: Build Packages
+ run: pdm build
+
+ - name: "Build Changelog"
+ id: github_release
+ uses: mikepenz/release-changelog-builder-action@v3.4.0
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ configuration: ".github/release-action-config.json"
+ toTag: ${{ github.ref }}
+
+ - name: Release to GitHub
+ uses: mikepenz/action-gh-release@v0.2.0-a03
+ with:
+ body: ${{ steps.github_release.outputs.changelog }}
+ prerelease: false
+ tag_name: ${{ github.ref }}
+ name: "Release ${{ github.ref_name }}"
+ files: |
+ dist/*
+ LICENSE-*
+
+ - name: Publish Packages to PyPi
+ env:
+ PDM_PUBLISH_PASSWORD: ${{ secrets.PYPI_TOKEN }}
+
+ run: pdm publish -r pypi -u __token__
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..144d8197
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,28 @@
+# Miscellaneous editor files
+.vscode
+.dir-locals.el
+
+# Output from Python and Python tools
+__pycache__/
+*.egg-info
+.coverage
+
+# Files generated by the docs publishing systems
+docs/_build
+docs/generated
+docs/coverage.rst
+
+# Outputs from Emote and tests
+*.onnx
+runs/**
+/.pdm.toml
+/dist/
+/.buildkite/digest.txt
+/docs/adr/
+wandb/**
+
+__pypackages__/
+
+.DS_STORE
+.pids
+.pants.d
diff --git a/BUILD b/BUILD
new file mode 100644
index 00000000..8fa22376
--- /dev/null
+++ b/BUILD
@@ -0,0 +1,87 @@
+TORCH_VERSION = "1.12.0"
+CUDA_VERSION = "cu116"
+
+TORCH_VARIANTS = {
+ "base": f"=={TORCH_VERSION},!={TORCH_VERSION}+cpu,!={TORCH_VERSION}+{CUDA_VERSION}",
+ "cpu": f"=={TORCH_VERSION}+cpu,!={TORCH_VERSION}+{CUDA_VERSION}",
+ "gpu": f"=={TORCH_VERSION}+{CUDA_VERSION},!={TORCH_VERSION}+cpu",
+}
+
+
+if is_standalone():
+ __defaults__(
+ {
+ python_source: dict(resolve=parametrize(*resolves)),
+ python_sources: dict(resolve=parametrize(*resolves)),
+ }
+ )
+ resolves = ["cpu", "gpu", "base"]
+
+ TOOLS = {
+ "pytest": [
+ "pytest-cov!=2.12.1,<3.1,>=2.12",
+ "pytest-xdist<3,>=2.5",
+ "pytest~=8.0",
+ "pytest-platform-markers",
+ "pytest-rerunfailures",
+ "pytest-benchmark==4.0.0",
+ ],
+ "black": ["black>=22.6.0,<24"],
+ "ipython": ["ipython>=7.27.0,<8"],
+ "isort": ["isort[pyproject,colors]>=5.9.3,<6.0"],
+ "docformatter": ["docformatter[tomli]"],
+ "apibook": ["apibook~=0.1.0"],
+ }
+
+ for tool, reqs in TOOLS.items():
+ python_requirement(
+ name=tool,
+ requirements=reqs,
+ resolve=tool,
+ )
+
+ for resolve in resolves:
+ python_requirements(
+ name=resolve,
+ source="pyproject.toml",
+ resolve=resolve,
+ module_mapping={
+ "protobuf": ["google.protobuf"],
+ "opencv-python": ["cv2"],
+ },
+ overrides={
+ "gymnasium": {"dependencies": [f":{resolve}#box2d-py", f":{resolve}#pygame"]}
+ },
+ )
+
+ python_requirement(
+ name=f"pytest-{resolve}",
+ requirements=TOOLS["pytest"],
+ resolve=resolve,
+ )
+
+resources(
+ name="adr",
+ sources=["adr/*.md"],
+)
+
+resources(name="package_data", sources=["pyproject.toml", "README.md"])
+
+python_distribution(
+ name="package",
+ dependencies=[
+ ":package_data",
+ emote_dependency_path("/emote:emote@resolve=base"),
+ emote_dependency_path("/emote/algorithms@resolve=base"),
+ emote_dependency_path("/emote/algorithms/genrl@resolve=base"),
+ emote_dependency_path("/emote/memory@resolve=base"),
+ emote_dependency_path("/emote/nn@resolve=base"),
+ ],
+ provides=python_artifact(
+ name="emote",
+ version="0.1.0",
+ long_description_content_type="markdown",
+ ),
+ long_description_path="./README.md",
+ interpreter_constraints=[">=3.10,<3.11"],
+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..60003c8f
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,77 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+### Features
+
+- `OnnxExporter.add_metadata` allows setting metadata to export with the policy. The key has to be a string, and the value has to be convertible to string.
+
+### Changes
+
+#### New memory serialization format.
+A new version of memory Table export has been introduced. This removes the need for cloudpickle,
+while focusing on restoring data. Old memories can still be imported, and you can also force the old
+format by passing `version = TableSerializationFormat.Legacy` in `Table.store`. The new format works
+by simply ingesting all data from the memory using the regular `add_sequence`, instead of filling
+the data stores directly. As part of this, the `TableArray.store` and `TableArray.restore` functions
+have new arguments to handle versioning.
+
+ - New functions:
+ - `Column.configuration` and `Column.configure` for save and load respectively
+ - `Strategy.clear` to clear all state
+ - `Strategy.state` and `Strategy.load_state` for save and load of strategy data.
+ - `Strategy.begin_simple_import` and `Strategy.end_simple_import` to bookend the import process.
+ - `Strategy._in_simple_import` to allow derived classes to bypass work while import is happening.
+
+#### Other changes:
+
+ - Now targeting torch version 1.12, up from 1.11.
+ - `OnnxExporter` accepts a `device` argument to enable tracing on other devices.
+ - `FinalRewardTestCheck` can now be configured with another key and to use windowed data.
+ - `begin_training` has been split into `restore_state` followed by `begin_training`
+ - `CoverageBasedSampleStrategy` has been added which allows memory sampling that prioritises unvisited experiences. This can speed up training.
+
+### Deprecations
+
+- `emote.callbacks` has been converted to a package. Future built-in
+ callbacks will not be re-exported from `emote.callbacks`, and should
+ instead be imported from their internal location.
+- `emote.callbacks.LoggingMixin` is now in the `emote.mixins.logging` module instead.
+
+### Bugfixes
+
+- Fix `FeatureAgentProxy.input_names` to use `input_key` when configured.
+- `Callback.cycle` can now be `None`
+- Fixed a deprecation warning with `np.bool_` being used.
+
+## [23.0.0] - 2023-03-03
+
+### Breaking
+
+* The minimum required Python version is now 3.9 (#87)
+* The `torch-cpu` feature has been renamed to `torch` as it wasn't limited to CPU-only variants. (#76)
+
+### Added
+
+* Enable exporting ONNX policies for Gaussian MLPs in `emote.extra.onnx_exporter`. This allows you to periodically write ONNX files to disk. (#80)
+* Add system performance logger in `emote.extra.system_logger`. This'll log memory and CPU usage to Tensorboard. (#81)
+* Add memory warmup waiter in `emote.memory.memory` to ensure the memory has enough data before starting to sample. This avoids the collector having to block the training loop when training async. (#78)
+
+### Changed
+
+* Our PDM plugin for torch management has been split off into a [separate repository](https://github.com/EmbarkStudios/pdm-plugin-torch/) and [published to PYPI](https://pypi.org/project/pdm-plugin-torch/). (#88)
+* Switch to PDM 2.3 as default version for testing (#62)
+* The input key used for Feature Agent Proxies can now be customized (#79)
+
+## [22.0.0] - 2022-10-28
+
+This is the initial release
+
+[Unreleased]: https://github.com/EmbarkStudios/emote/compare/v23.0.0...HEAD
+[23.0.0]: https://github.com/EmbarkStudios/emote/releases/tag/v23.0.0
+[22.0.0]: https://github.com/EmbarkStudios/emote/releases/tag/v22.0.0
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..7d03b675
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at opensource@embark-studios.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..eaf5572a
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,75 @@
+# Embark Contributor Guidelines
+
+Welcome! This project is created by the team at [Embark Studios](https://embark.games). We're glad you're interested in contributing! We welcome contributions from people of all backgrounds who are interested in making great software with us.
+
+At Embark, we aspire to empower everyone to create interactive experiences. To do this, we're exploring and pushing the boundaries of new technologies, and sharing our learnings with the open source community.
+
+If you have ideas for collaboration, email us at opensource@embark-studios.com.
+
+We're also hiring full-time engineers to work with us in Stockholm! Check out our current job postings [here](https://www.embark-studios.com/jobs).
+
+## Issues
+
+### Feature Requests
+
+If you have ideas on how to improve our projects, you can suggest features by opening a GitHub issue. Make sure to include details about the feature or change, and describe any use cases it would enable.
+
+Feature requests will be tagged as `enhancement` and their status will be updated in the comments of the issue.
+
+### Bugs
+
+When reporting a bug or unexpected behaviour in a project, make sure your issue describes steps to reproduce the behaviour, including the platform you were using, what steps you took, and any error messages.
+
+Reproducible bugs will be tagged as `bug` and their status will be updated in the comments of the issue.
+
+### Wontfix
+
+Issues will be closed and tagged as `wontfix` if we decide that we do not wish to implement them, usually due to being misaligned with the project vision or out of scope. We will comment on the issue with more detailed reasoning.
+
+## Contribution Workflow
+
+### Open Issues
+
+If you're ready to contribute, start by looking at our open issues tagged as [`help wanted`](../../issues?q=is%3Aopen+is%3Aissue+label%3A"help+wanted") or [`good first issue`](../../issues?q=is%3Aopen+is%3Aissue+label%3A"good+first+issue").
+
+You can comment on the issue to let others know you're interested in working on it or to ask questions.
+
+### Making Changes
+
+1. Fork the repository.
+
+2. Create a new feature branch.
+
+3. Make your changes. Ensure that there are no build errors by running the project with your changes locally.
+
+4. Open a pull request with a name and description of what you did. You can read more about working with pull requests on GitHub [here](https://help.github.com/en/articles/creating-a-pull-request-from-a-fork).
+
+5. A maintainer will review your pull request and may ask you to make changes.
+
+## Code Guidelines
+
+### Rust
+
+You can read about our standards and recommendations for working with Rust [here](https://github.com/EmbarkStudios/rust-ecosystem/blob/main/guidelines.md).
+
+### Python
+
+We recommend following [PEP8 conventions](https://www.python.org/dev/peps/pep-0008/) when working with Python modules.
+
+### JavaScript & TypeScript
+
+We use [Prettier](https://prettier.io/) with the default settings to auto-format our JavaScript and TypeScript code.
+
+## Licensing
+
+Unless otherwise specified, all Embark open source projects shall comply with the Rust standard licensing model (MIT + Apache 2.0) and are thereby licensed under a dual license, allowing licensees to choose either MIT OR Apache-2.0 at their option.
+
+## Contributor Terms
+
+Thank you for your interest in Embark Studios' open source project. By providing a contribution (new or modified code, other input, feedback or suggestions etc.) you agree to these Contributor Terms.
+
+You confirm that each of your contributions has been created by you and that you are the copyright owner. You also confirm that you have the right to provide the contribution to us and that you do it under the Rust dual licence model (MIT + Apache 2.0).
+
+If you want to contribute something that is not your original creation, you may submit it to Embark Studios separately from any contribution, including details of its source and of any license or other restriction (such as related patents, trademarks, agreements etc.)
+
+Please also note that our projects are released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md) to ensure that they are welcoming places for everyone to contribute. By participating in any Embark Studios open source project, you agree to keep to the Contributor Code of Conduct.
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 00000000..59aba4b2
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2022 Embark Studios AB.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 00000000..5e90380b
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Embark Studios AB.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..4eb3f573
--- /dev/null
+++ b/README.md
@@ -0,0 +1,163 @@
+
+
+
+
+
+
+
+
+# `π emote`
+
+**E**mbark's **Mo**dular **T**raining **E**ngine - a flexible framework for
+reinforcement learning
+
+[](https://embark.dev)
+[](https://discord.gg/dAuKfZS)
+[](https://buildkite.com/embark-studios/emote)
+[](https://embarkstudios.github.io/emote/)
+[](https://pdm.fming.dev)
+
+π§ This project is a **work in progress**. Things can and will change. π§
+
+
+
+
+## What it does
+
+Emote provides a way to build reusable components for creating reinforcement learning algorithms, and a
+library of premade components built in this way. It is strongly inspired by the callback setup used
+by Keras and FastAI.
+
+As an example, let us see how the SAC, the Soft Actor Critic algorithm by
+[Haarnoja et al.](https://arxiv.org/abs/1801.01290) can be written using Emote. The main algorithm in
+SAC is given in [Soft Actor-Critic Algorithms and Applications](https://arxiv.org/abs/1812.05905) and
+looks like this:
+
+
+
+
+
+
+
+Using the components provided with Emote, we can write this as
+
+```python
+device = torch.device("cpu")
+env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+table = DictObsTable(spaces=env.dict_space, maxlen=1000, device=device)
+memory_proxy = TableMemoryProxy(table)
+dataloader = MemoryLoader(table, 100, 2, "batch_size")
+
+q1 = QNet(2, 1)
+q2 = QNet(2, 1)
+policy = Policy(2, 1)
+ln_alpha = torch.tensor(1.0, requires_grad=True)
+agent_proxy = FeatureAgentProxy(policy, device)
+
+callbacks = [
+ QLoss(name="q1", q=q1, opt=Adam(q1.parameters(), lr=8e-3)),
+ QLoss(name="q2", q=q2, opt=Adam(q2.parameters(), lr=8e-3)),
+ PolicyLoss(pi=policy, ln_alpha=ln_alpha, q=q1, opt=Adam(policy.parameters())),
+ AlphaLoss(pi=policy, ln_alpha=ln_alpha, opt=Adam([ln_alpha]), n_actions=1),
+ QTarget(pi=policy, ln_alpha=ln_alpha, q1=q1, q2=q2),
+ SimpleGymCollector(env, agent_proxy, memory_proxy, warmup_steps=500),
+ FinalLossTestCheck([logged_cbs[2]], [10.0], 2000),
+]
+
+trainer = Trainer(callbacks, dataloader)
+trainer.train()
+```
+
+Here each callback in the `callbacks` list is its own reusable class that can readily be used
+for other similar algorithms. The callback classes themselves are very straightforward to write.
+As an example, here is the `PolicyLoss` callback.
+
+```python
+class PolicyLoss(LossCallback):
+ def __init__(
+ self,
+ *,
+ pi: nn.Module,
+ ln_alpha: torch.tensor,
+ q: nn.Module,
+ opt: optim.Optimizer,
+ max_grad_norm: float = 10.0,
+ name: str = "policy",
+ data_group: str = "default",
+ ):
+ super().__init__(
+ name=name,
+ optimizer=opt,
+ network=pi,
+ max_grad_norm=max_grad_norm,
+ data_group=data_group,
+ )
+ self.policy = pi
+ self._ln_alpha = ln_alpha
+ self.q1 = q
+        self.q2 = q
+
+ def loss(self, observation):
+ p_sample, logp_pi = self.policy(**observation)
+ q_pi_min = self.q1(p_sample, **observation)
+ # using reparameterization trick
+ alpha = torch.exp(self._ln_alpha).detach()
+ policy_loss = alpha * logp_pi - q_pi_min
+ policy_loss = torch.mean(policy_loss)
+ assert policy_loss.dim() == 0
+ return policy_loss
+```
+
+## Installation
+
+For installation and environment handling we use `pdm`. Install it from [pdm](https://pdm.fming.dev/latest/#installation). After `pdm` is set up, set up and activate the emote environment by running
+
+```bash
+pdm install
+```
+
+or for a full developer installation with all the extra dependencies:
+
+```bash
+pdm install -d -G :all
+```
+
+### Common problems
+
+**Torch won't install:** Check that your python version is correct. Try deleting your `.venv` and recreating it with
+
+```bash
+pdm venv create 3.10
+pdm install -G :all
+```
+
+**Box2d complains:** Box2d needs swig and python bindings. On apt-based systems try
+
+```bash
+sudo apt install swig
+sudo apt install python3.10-dev
+```
+
+**Python 3.10 is tricky to install:** For Ubuntu based distros try adding the deadsnakes PPA.
+
+## Contribution
+
+[](../main/CODE_OF_CONDUCT.md)
+
+We welcome community contributions to this project.
+
+Please read our [Contributor Guide](CONTRIBUTING.md) for more information on how to get started.
+Please also read our [Contributor Terms](CONTRIBUTING.md#contributor-terms) before you make any contributions.
+
+Any contribution intentionally submitted for inclusion in an Embark Studios project, shall comply with the Rust standard licensing model (MIT OR Apache 2.0) and therefore be dual licensed as described below, without any additional terms or conditions:
+
+### License
+
+This contribution is dual licensed under EITHER OF
+
+* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
+* MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
+
+at your option.
+
+For clarity, "your" refers to Embark or any other licensee/user of the contribution.
diff --git a/adr/01-continuous-releases.md b/adr/01-continuous-releases.md
new file mode 100644
index 00000000..6b03035b
--- /dev/null
+++ b/adr/01-continuous-releases.md
@@ -0,0 +1,23 @@
+# 1. "Nightly" continuous releases
+
+Date: 2022-10-21
+
+## Status
+
+Accepted
+
+## Context
+
+It would be useful for CI purposes, testing, and local development to be able to install wheels that have gone through
+CI; rather than pulling the whole git repository and installing. This somewhat aligns with the
+`git+ssh://.../owner/repo#egg=...` syntax, but that is still a repo pull and not easily distributable.
+
+## Decision
+
+Each night there'll be a nightly build done on the latest main; IFF there have been commits in the last 24 hours. This
+will be tagged as `latest` and released as `pre-release` on GitHub.
+
+## Consequences
+
+We'll need to maintain somewhat reasonable stability and testing on average builds to support nightly builds. Nightly
+builds don't need to be as thoroughly tested.
diff --git a/adr/02-versioning.md b/adr/02-versioning.md
new file mode 100644
index 00000000..d8291967
--- /dev/null
+++ b/adr/02-versioning.md
@@ -0,0 +1,23 @@
+# 2. Versioning
+
+Date: 2022-10-24
+
+## Status
+
+Accepted
+
+## Context
+
+We need to follow a [PEP440](https://peps.python.org/pep-0440/) compatible versioning scheme. This is required to allow
+other tools to resolve versions and compatibility properly.
+
+## Decision
+
+We will follow a versioning on the pattern `YY.compatibility.patch`.
+
+## Consequences
+
+* The YY is always set to the last two digits of the current year. When increasing this field the other two fields are
+ reset to 0.
+* The compatibility field is increased whenever we make API-incompatible changes.
+* Otherwise, the patch field is increased.
diff --git a/adr/03-release-flow.md b/adr/03-release-flow.md
new file mode 100644
index 00000000..13bf7bc1
--- /dev/null
+++ b/adr/03-release-flow.md
@@ -0,0 +1,31 @@
+# 3. Releases flow
+
+Date: 2022-10-24
+
+## Status
+
+Accepted
+
+## Context
+
+In order to publish packages with high quality to PyPi and as tagged releases we need to have a consistent workflow that
+is easy to follow and reproducible for all users.
+
+## Decision
+
+We will use tagged releases on GitHub to publish to PyPi. These releases will follow the versioning scheme described in
+[02-versioning.md](02-versioning.md).
+
+## Consequences
+
+The flow will be as follows:
+
+* Upon needing a release, create a PR:
+ * Update `CHANGELOG.md` to ensure it contains all relevant changes. You can base this off of the nightly changelog.
+ * Based on the above changes, set a new version in `pyproject.toml`.
+ * Replace the heading in the changelog
+ * Add diff labels at the bottom.
+
+* Pull the new main, and tag it with `git tag -a vNEW_VERSION COMMIT_HASH`.
+* Push the tag with `git push origin vNEW_VERSION`
+* Make a new PR that adds back the "Unreleased" heading in the changelog.
diff --git a/adr/README.md b/adr/README.md
new file mode 100644
index 00000000..36813c81
--- /dev/null
+++ b/adr/README.md
@@ -0,0 +1,57 @@
+# ADRs
+
+For development of Emote we use `Architecture Decision Records`. They are a type of RFC but smaller in scope and more
+exact in the decision. The goal of adding an ADR is to summarize a discussion or fact-gathering effort. An RFC is the
+start of a discussion and may occur before in-depth fact-finding occurs.
+
+On the other hand, not every decision is an ADR. ADRs have to be significant. Things like naming, local APIs, or code
+structure rarely meet these criteria. These may be better as open discussions or RFCs, which may not lead to an easily
+summarized conclusion. Instead, reach for an ADR when you can summarize the decision in a few sentences, at most. A good
+ADR should fit on the format "When doing ..., we do ... because of ...".
+
+```{admonition} [Significance criteria](https://engineering.atspotify.com/2020/04/when-should-i-write-an-architecture-decision-record/)
+An ADR should be written whenever a decision of significant impact is made; it is up to each team to align on what defines a significant impact.
+```
+
+## ADR Process
+
+The ADR process is meant to be very fast, with few fixed steps.
+
+1. Identify need for a decision
+2. Write an ADR using the below template
+3. Open a PR
+4. Once PR is accepted and merged, implement the decision.
+
+### Template
+
+```
+# SEQUENCE_NUMBER. TITLE
+
+Date: DATE WHEN PROPOSED
+
+## Status
+
+
+Accepted
+
+## Context
+
+Describe when this decision would be relevant and why.
+
+## Decision
+
+An exact decision of what we will do when the context applies.
+
+## Consequences
+
+The end result of applying the decision.
+```
+
+## Accepted ADRs
+```{toctree}
+---
+glob: true
+maxdepth: 1
+---
+*
+```
diff --git a/docs/BUILD b/docs/BUILD
new file mode 100644
index 00000000..f790975d
--- /dev/null
+++ b/docs/BUILD
@@ -0,0 +1,85 @@
+python_sources(name="py")
+
+DOC_DEPS = [
+ emote_dependency_path(":adr"),
+ emote_dependency_path("/emote@resolve=base"),
+ emote_dependency_path("/emote/algorithms@resolve=base"),
+ emote_dependency_path("/emote/algorithms/genrl@resolve=base"),
+ emote_dependency_path("/emote/callbacks@resolve=base"),
+ emote_dependency_path("/emote/env@resolve=base"),
+ emote_dependency_path("/emote/env/box2d@resolve=base"),
+ emote_dependency_path("/emote/extra@resolve=base"),
+ emote_dependency_path("/emote/memory@resolve=base"),
+ emote_dependency_path("/emote/mixins@resolve=base"),
+ emote_dependency_path("/emote/models@resolve=base"),
+ emote_dependency_path("/emote/nn@resolve=base"),
+ emote_dependency_path("/emote/utils@resolve=base"),
+ emote_dependency_path("/experiments/gym@resolve=base"),
+]
+
+pex_binary(
+ name="adr",
+ entry_point="adr.py",
+ resolve="base",
+)
+
+resource(
+ name="template",
+ source="./SUMMARY.tmpl",
+)
+
+pex_binary(
+ name="apibook",
+ entry_point="apibook.cli:main",
+ dependencies=["//:apibook#apibook"],
+ resolve="apibook",
+)
+
+adhoc_tool(
+ name="build-apibook",
+ runnable=":apibook",
+ args=["../emote", "src", "--summary-template-file", "SUMMARY.tmpl", "--verbose"],
+ execution_dependencies=DOC_DEPS
+ + [
+ ":template",
+ ],
+ log_output=True,
+ output_directories=["src"],
+ root_output_directory="/",
+)
+
+system_binary(
+ name="dot",
+ binary_name="dot",
+)
+
+pex_binary(
+ name="graphviz",
+ entry_point="graphviz.py",
+ resolve="base",
+)
+
+resources(
+ name="dot-files",
+ sources=["raw/*.dot"],
+)
+
+adhoc_tool(
+ name="build-dot",
+ runnable=":graphviz",
+ args=["./raw", "./src"],
+ execution_dependencies=[":dot-files"],
+ runnable_dependencies=[":dot"],
+ log_output=True,
+ output_directories=["src"],
+ root_output_directory="/",
+)
+
+md_book(
+ sources=["book.toml", "src/*.md", "adr.py"],
+ dependencies=DOC_DEPS
+ + [
+ ":build-apibook",
+ ":build-dot",
+ ],
+)
diff --git a/docs/SUMMARY.tmpl b/docs/SUMMARY.tmpl
new file mode 100644
index 00000000..ffc3cb92
--- /dev/null
+++ b/docs/SUMMARY.tmpl
@@ -0,0 +1,15 @@
+# Emote
+
+[Introduction](./intro.md)
+
+# Design
+
+- [Coding standard](./coding-standard.md)
+- [Editing documentation](./documentation.md)
+- [Metrics](./metrics.md)
+- [Getting started](./getting_started.md)
+- [Callback system](./callback.md)
+
+# API Reference
+
+{{apibook_toc}}
diff --git a/docs/adr.py b/docs/adr.py
new file mode 100644
index 00000000..5bc39897
--- /dev/null
+++ b/docs/adr.py
@@ -0,0 +1,91 @@
+"""Expands tabs into HTML."""
+
+import dataclasses
+import json
+import os
+import pathlib
+import sys
+
+
+@dataclasses.dataclass
+class MarkdownFile:
+ name: str
+ path: str
+ section: str
+ is_index: bool
+
+
+def find_all_markdowns(root: str) -> dict[str, str]:
+ """Find all markdown files in the given directory."""
+ markdowns = {}
+ for root, dirs, files in os.walk(root):
+ for file in files:
+ if file.endswith(".md"):
+ markdowns[file] = os.path.join(root, file)
+
+ return markdowns
+
+
+def output_chapter(path: str, number: list[int]):
+ """Read the markdown file and output the chapter."""
+ file_content = pathlib.Path(path).read_text()
+
+ # parse the title from the non-empty line
+ name = ""
+
+ for line in file_content.splitlines():
+ if line.strip():
+ name = line.strip("# ")
+ break
+
+ chapter = {
+ "name": name,
+ "content": file_content,
+ "number": number,
+ "sub_items": [],
+ "parent_names": [],
+ "path": f"{path[2:]}",
+ "source_path": f"../../{path[2:]}",
+ }
+
+ return chapter
+
+
+if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ if sys.argv[1] == "supports":
+ sys.exit(0)
+
+ context, book = json.load(sys.stdin)
+
+ config = context["config"]["preprocessor"]["adr"]
+
+ for section in config["section"]:
+ markdowns = find_all_markdowns(section["path"])
+ title = section["title"]
+ book["sections"].append(
+ {
+ "PartTitle": title,
+ }
+ )
+
+ # we process the readme first to make it the index
+ if "README.md" in markdowns:
+ p = markdowns.pop("README.md")
+ chapter = output_chapter(p, [1])
+ chapter["name"] = title
+ book["sections"].append(
+ {
+ "Chapter": chapter,
+ }
+ )
+
+ for idx, path in enumerate(sorted(markdowns.keys())):
+ chapter = output_chapter(markdowns[path], [idx + 1])
+ book["sections"].append(
+ {
+ "Chapter": chapter,
+ }
+ )
+
+ print(json.dumps(book))
diff --git a/docs/book.toml b/docs/book.toml
new file mode 100644
index 00000000..a03536a6
--- /dev/null
+++ b/docs/book.toml
@@ -0,0 +1,33 @@
+[book]
+title = "Emote"
+authors = ["Embark Studios begin_training -> begin_cycle -> begin_batch -> backward -> end_batch;
+
+ end_batch -> begin_batch [constraint=no];
+ end_cycle -> begin_cycle [constraint=no];
+
+ end_batch -> end_cycle [style=dashed]
+ end_cycle -> end_training [style=dashed]
+ }
diff --git a/docs/src/callback.md b/docs/src/callback.md
new file mode 100644
index 00000000..58a7d257
--- /dev/null
+++ b/docs/src/callback.md
@@ -0,0 +1,14 @@
+# Callback system
+
+
+In this module you'll find the callback framework used by Emote. Those
+who have used FastAI before will recognize it, as it's heavily
+inspired by that system - but adapted for RL and our use-cases.
+
+## The `Callback` interface
+
+The callback is the core interface used to hook into the Emote framework. You can think of these as events - when the training loop starts, we'll invoke `begin_training` on all callback objects. Then we'll start a new cycle, and call :meth:`Callback.begin_cycle` for those that need it.
+
+All in all, the flow of callbacks is like this:
+
+
diff --git a/docs/src/cart_pole.gif b/docs/src/cart_pole.gif
new file mode 100644
index 00000000..96365f6a
Binary files /dev/null and b/docs/src/cart_pole.gif differ
diff --git a/docs/src/coding-standard.md b/docs/src/coding-standard.md
new file mode 100644
index 00000000..1c91fa8e
--- /dev/null
+++ b/docs/src/coding-standard.md
@@ -0,0 +1,65 @@
+# 📐 Coding standard
+
+In emote we strive to maintain a consistent style, both visually and
+implementation-wise. In order to achieve this we rely on tools to
+check and validate our code as we work, and we require that all those
+tools are used for CI to pass.
+
+To have a smooth developer experience, we suggest you integrate these
+with your editor. We'll provide some example configurations below; and
+we welcome contributions to these pages. However, we strive to avoid
+*committing* editor configurations to the repository, as that'll more
+easily lead to mismatch between different editors - the description
+below is authoritative, not any specific editor configuration.
+
+We also require that all commits are made using LF-only line endings. Windows users will need to configure using the below command, or set up their editor appropriately. This helps keep emote platform-generic, and reduces risk for spurious diffs or tools misbehaving.
+
+ $ git config --global core.autocrlf true
+
+## Tools
+
+
+### black
+
+
+[`Black`](https://github.com/psf/black) is an auto-formatter for Python,
+which mostly matches the PEP8 rules. We use black because it doesn't
+support a lot of configuration, and will format for you - instead of
+just complaining. We do not allow overrides to these styles, nor do we
+allow disabling of formatting anywhere.
+
+To run black manually, you can use the command:
+
+ pdm run black emote/ tests/
+
+Which will format all code in emote.
+
+### isort
+
+
+[`isort`](https://github.com/PyCQA/isort) is another formatting tool,
+but deals only with sorting imports. Isort is configured to be
+consistent with Black from within `pyproject.toml`.
+
+To run isort manually, you can use the command:
+
+ pdm run isort emote/ tests/
+
+
+## Example configurations
+
+
+### emacs
+
+
+```lisp
+(use-package python-black
+ :demand t
+ :after python
+ :hook (python-mode . python-black-on-save-mode-enable-dwim))
+
+(use-package python-isort
+ :demand t
+ :after python
+ :hook (python-mode . python-isort-on-save-mode))
+```
diff --git a/docs/src/documentation.md b/docs/src/documentation.md
new file mode 100644
index 00000000..079d0911
--- /dev/null
+++ b/docs/src/documentation.md
@@ -0,0 +1,22 @@
+# 📚 Documentation
+
+To write documentation for *emote* we support both classic *reStructuredText* (`.rst`) and modern `Markdown` (`.md`) files. These can also reference each other, though for ease of use a tree should maintain the same type.
+
+To include RST text into Markdown code, use the following pattern:
+
+
+ ```{eval-rst}
+ .. include:: snippets/include-rst.rst
+ ```
+
+That is to say, a code-block with the `eval-rst` directive and then a verbatim include of the RST contents. The opposite, including Markdown in ReST can be achieved with this recipe:
+
+ .. include:: include.md
+ :parser: myst_parser.sphinx_
+
+See the [Myst documentation](https://myst-parser.readthedocs.io/en/latest/faq/index.html) for more recipes and directives.
+
+## Helpful commands
+
+* To build the docs: `pdm run docs`
+* To view the docs in your browser: `pdm run docs-serve`
diff --git a/docs/src/getting_started.md b/docs/src/getting_started.md
new file mode 100644
index 00000000..1f2ed253
--- /dev/null
+++ b/docs/src/getting_started.md
@@ -0,0 +1,25 @@
+# 🔥 Getting Started
+
+In the `/experiments` folder, example runs can be found for different Gymnasium environments.
+
+For example, you can run the cartpole example using DQN with the following command:
+
+```bash
+pdm run python experiments/train_dqn_cartpole.py
+```
+
+
+
+This comes with a lot of predefined arguments, such as the learning rate, the amount of hidden layers, the batch size, etc. You can find all the arguments in the `experiments/train_dqn_cartpole.py` file.
+
+## 📊 Tensorboard
+
+To visualize the training process, you can use Tensorboard. To do so, run the following command:
+
+```bash
+pdm run tensorboard --logdir ./mllogs
+```
+
+This will start a Tensorboard server on `localhost:6006`. You can now open your browser and go to `localhost:6006` to see the training process where you can see the rewards over time, the loss over time, etc.
+
+
diff --git a/docs/src/haarnoja_sac.png b/docs/src/haarnoja_sac.png
new file mode 100644
index 00000000..607babb7
Binary files /dev/null and b/docs/src/haarnoja_sac.png differ
diff --git a/docs/src/intro.md b/docs/src/intro.md
new file mode 100644
index 00000000..df269982
--- /dev/null
+++ b/docs/src/intro.md
@@ -0,0 +1,23 @@
+
+# 🍒 Emote
+
+
+**Emote** — **E**mbark's **Mo**dular **T**raining **E**ngine — is a flexible framework
+for reinforcement learning written at Embark.
+
+## Installation
+
+
+Install [`PDM`](https://pdm.fming.dev/latest/#installation) following the instructions on the
+PDM site. Then install the package using:
+
+ pdm install
+
+Currently, our supported version of PDM is 2.10 -- other versions may work, but there are no guarantees.
+
+## Ideas and Philosophy
+
+We wanted a reinforcement learning framework that was modular both in the
+sense that we could easily swap the algorithm we used and how data was collected
+but also in the sense that the different parts of various algorithms could be reused
+to build other algorithms.
diff --git a/docs/src/metrics.md b/docs/src/metrics.md
new file mode 100644
index 00000000..ca1bf2ed
--- /dev/null
+++ b/docs/src/metrics.md
@@ -0,0 +1,32 @@
+# 💡 Metrics
+
+Emote can log metrics from two locations: inside the training loop, and outside the training
+loop. The base for this is the [`LoggingMixin`](emote.callbacks.logging.LoggingMixin) class, which in
+both cases adds logging functionality to anything. However, it doesn't do any actual logging.
+
+On the training side, the second part of the puzzle is a LogWriter, for example
+[`TensorboardLogger`](emote.callbacks.logging.TensorboardLogger). We also provide a built-in
+[`TerminalLogger`](emote.callbacks.logging.TerminalLogger). These accept a list of objects derived from
+[`LoggingMixin`](emote.callbacks.logging.LoggingMixin), and will execute the actual writing of values from
+the previously of values. This makes implementing log-data-providers easier, as they do not have to
+care about *when to write*, only how often they can record data.
+
+```python
+logger = SystemLogger()
+tensorboard_log_writer = TensorboardLogger([logger], SummaryWriter("/tmp/output_dir"), 2000)
+trainer = Trainer([logger, tensorboard_log_writer])
+```
+
+Things behave slightly differently on the data-generation side. Our suggested (and only supported
+method) is to wrap the memory with a [`LoggingProxyWrapper`](emote.memory.memory.LoggingProxyWrapper). Since all data going into the training loop passes through the memory, and all data has associated metadata, this will capture most metrics.
+
+Our suggestion is that users primarily rely on this mechanism for logging data associated with the
+agents, as it'll get smoothed across all agents to reduce noise.
+
+
+```python
+env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+table = DictObsTable(spaces=env.dict_space, maxlen=1000, device="cpu")
+table_proxy = TableMemoryProxy(table, 0, True)
+table_proxy = LoggingProxyWrapper(table_proxy, SummaryWriter("/tmp/output_dir"), 2000)
+```
diff --git a/docs/src/tensorboard.png b/docs/src/tensorboard.png
new file mode 100644
index 00000000..00cd8a83
Binary files /dev/null and b/docs/src/tensorboard.png differ
diff --git a/emote/BUILD b/emote/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/__init__.py b/emote/__init__.py
new file mode 100644
index 00000000..3120f477
--- /dev/null
+++ b/emote/__init__.py
@@ -0,0 +1,31 @@
+"""
+Emote
+=====
+
+In order to do reinforcement learning we need to have two things:
+A **learning protocol** that specifies which losses to use, which network
+architectures, which optimizers, and so forth. We also need some kind of
+**data collector** that interacts with the world and stores the experiences
+from that in a way which makes them accessible to the learning protocol.
+
+In Emote, data collection is done by Collectors, the protocol for the
+learning algorithm is built up of Callbacks, and they are tied together
+by a Trainer.
+"""
+
+from . import nn, utils
+from .algorithms import sac
+from .callback import Callback
+from .trainer import Trainer, WeakReference
+
+
+__all__ = [
+ "Callback",
+ "Trainer",
+ "WeakReference",
+ "sac",
+ "nn",
+ "utils",
+]
+
+__version__ = "0.1.0"
diff --git a/emote/algorithms/BUILD b/emote/algorithms/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/algorithms/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/algorithms/__init__.py b/emote/algorithms/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/emote/algorithms/amp.py b/emote/algorithms/amp.py
new file mode 100644
index 00000000..54be8357
--- /dev/null
+++ b/emote/algorithms/amp.py
@@ -0,0 +1,181 @@
+from typing import Callable
+
+import torch
+
+from torch import Tensor, nn
+
+from emote import Callback
+from emote.callbacks.logging import LoggingMixin
+from emote.callbacks.loss import LossCallback
+
+
+def gradient_loss_function(model_output: Tensor, model_input: Tensor) -> Tensor:
+ """
+ Given inputs and outputs of an nn.Module, computes the sum of
+ squared derivatives of outputs to the inputs
+ Arguments:
+ model_output (Tensor): the output of the nn.Module
+ model_input (Tensor): the input to the nn.Module
+ Returns:
+ loss (Tensor): the sum of squared derivatives
+ """
+ # grad can be implicitly created only for scalar outputs
+ predictions = torch.split(model_output, 1, dim=0)
+ inputs_grad = torch.autograd.grad(
+ predictions, model_input, create_graph=True, retain_graph=True
+ )
+ inputs_grad = torch.cat(inputs_grad, dim=1)
+ inputs_grad_norm = torch.square(inputs_grad)
+ inputs_grad_norm = torch.sum(inputs_grad_norm, dim=1)
+ return torch.mean(inputs_grad_norm)
+
+
+class DiscriminatorLoss(LossCallback):
+ """This loss is used to train a discriminator for adversarial training."""
+
+ def __init__(
+ self,
+ discriminator: nn.Module,
+ imitation_state_map_fn: Callable[[Tensor], Tensor],
+ policy_state_map_fn: Callable[[Tensor], Tensor],
+ grad_loss_weight: float,
+ optimizer: torch.optim.Optimizer,
+ lr_schedule: torch.optim.lr_scheduler._LRScheduler,
+ max_grad_norm: float,
+ input_key: str = "features",
+ name: str = "Discriminator",
+ ):
+ super().__init__(
+ lr_schedule=lr_schedule,
+ name=name,
+ network=discriminator,
+ optimizer=optimizer,
+ max_grad_norm=max_grad_norm,
+ data_group=None,
+ )
+ self._discriminator = discriminator
+ self._imitation_state_map_function = imitation_state_map_fn
+ self._policy_state_map_function = policy_state_map_fn
+ self._grad_loss_weight = grad_loss_weight
+ self._obs_key = input_key
+
+ def loss(self, imitation_batch: dict, policy_batch: dict) -> Tensor:
+ """
+ Computing the loss
+ Arguments:
+ imitation_batch (dict): a batch of data from the reference animation.
+ the discriminator is trained to classify data from this batch as
+ positive samples
+ policy_batch (dict): a batch of data from the RL buffer. the discriminator
+ is trained to classify data from this batch as negative samples.
+ Returns:
+ loss (Tensor): the loss tensor
+ """
+ imitation_batch_size = imitation_batch["batch_size"]
+ policy_data_batch_size = policy_batch["batch_size"]
+
+ pos_obs: Tensor = imitation_batch["observation"][self._obs_key]
+ pos_next_obs: Tensor = imitation_batch["next_observation"][self._obs_key]
+ neg_obs: Tensor = policy_batch["observation"][self._obs_key]
+ neg_next_obs: Tensor = policy_batch["next_observation"][self._obs_key]
+
+ pos_obs = self._imitation_state_map_function(pos_obs)
+ pos_next_obs = self._imitation_state_map_function(pos_next_obs)
+ pos_input = torch.cat([pos_obs, pos_next_obs], dim=-1)
+ pos_input.requires_grad_(True)
+
+ neg_obs = self._policy_state_map_function(neg_obs)
+ neg_next_obs = self._policy_state_map_function(neg_next_obs)
+ neg_input = torch.cat([neg_obs, neg_next_obs], dim=-1)
+
+ pos_output = self._discriminator(pos_input)
+ neg_output = self._discriminator(neg_input)
+ assert pos_output.shape == (imitation_batch_size, 1)
+ assert neg_output.shape == (policy_data_batch_size, 1)
+
+ pos_loss = torch.mean(torch.square(pos_output - 1.0)) # Positive samples should label to 1.
+ neg_loss = torch.mean(
+ torch.square(neg_output + 1.0)
+ ) # Negative samples should label to -1.
+
+ grad_penalty_loss = self._grad_loss_weight * gradient_loss_function(pos_output, pos_input)
+
+ loss = pos_loss + neg_loss + grad_penalty_loss
+
+ self.log_scalar("amp/loss/pos_discrimination_loss", pos_loss)
+ self.log_scalar("amp/loss/neg_discrimination_loss", neg_loss)
+ self.log_scalar("amp/loss/grad_loss", grad_penalty_loss)
+ self.log_scalar("amp/loss/total", loss)
+ self.log_scalar("amp/predict/positive_samples_mean", torch.mean(pos_output))
+ self.log_scalar("amp/predict/positive_samples_std", torch.std(pos_output))
+ self.log_scalar("amp/predict/negative_samples_mean", torch.mean(neg_output))
+ self.log_scalar("amp/predict/negative_samples_std", torch.std(neg_output))
+
+ return loss
+
+
+class AMPReward(Callback, LoggingMixin):
+ """Adversarial rewarding with AMP."""
+
+ def __init__(
+ self,
+ discriminator: nn.Module,
+ state_map_fn: Callable[[Tensor], Tensor],
+ style_reward_weight: float,
+ rollout_length: int,
+ observation_key: str,
+ data_group: str,
+ ):
+ super().__init__()
+ self._discriminator = discriminator
+ self._order = 0
+ self.data_group = data_group
+ self._style_reward_weight = style_reward_weight
+ self._state_map_function = state_map_fn
+ self._rollout_length = rollout_length
+ self._obs_key = observation_key
+
+ def begin_batch(
+ self, observation: dict[str, Tensor], next_observation: dict[str, Tensor], rewards: Tensor
+ ):
+ """
+ Updating the reward by adding the weighted AMP reward
+ Arguments:
+ observation: current observation
+ next_observation: next observation
+ rewards: task reward
+ Returns
+ dict: the batch data with updated reward
+ """
+ obs = observation[self._obs_key]
+ bsz = obs.shape[0]
+ rollouts = obs.reshape(bsz // self._rollout_length, self._rollout_length, -1)
+ next_obs = next_observation[self._obs_key]
+
+ next_obs = next_obs.unsqueeze(dim=1)
+ combined_obs = torch.cat((rollouts, next_obs), dim=1)
+ next_obs = combined_obs[:, 1:]
+
+ next_obs = next_obs.reshape(bsz, -1)
+
+ state = self._state_map_function(obs)
+ next_state = self._state_map_function(next_obs)
+
+ consecutive_states = torch.cat([state, next_state], dim=-1)
+
+ predictions = self._discriminator(consecutive_states).detach()
+
+ style_reward = 1.0 - 0.25 * (predictions - 1.0) ** 2.0
+ scaled_style_reward = self._style_reward_weight * style_reward
+ assert scaled_style_reward.shape == rewards.shape
+
+ total_reward = rewards + scaled_style_reward
+
+ self.log_scalar("amp/unscaled_style_reward", torch.mean(style_reward))
+ self.log_scalar("amp/task_reward", torch.mean(rewards))
+ self.log_scalar("amp/scaled_style_reward", torch.mean(scaled_style_reward))
+ self.log_scalar("amp/total_reward", torch.mean(total_reward))
+ self.log_scalar("amp/predicts_mean", torch.mean(predictions))
+ self.log_scalar("amp/predicts_std", torch.std(predictions))
+
+ return {self.data_group: {"rewards": total_reward}}
diff --git a/emote/algorithms/dqn.py b/emote/algorithms/dqn.py
new file mode 100644
index 00000000..62ffcf7a
--- /dev/null
+++ b/emote/algorithms/dqn.py
@@ -0,0 +1,115 @@
+from __future__ import annotations
+
+import copy
+
+from typing import Optional
+
+import torch
+
+from torch import nn, optim
+
+from emote.algorithms.sac import soft_update_from_to
+from emote.callback import Callback
+from emote.callbacks.loss import LossCallback
+from emote.mixins.logging import LoggingMixin
+from emote.utils.gamma_matrix import discount, make_gamma_matrix, split_rollouts
+
+
+class QTarget(LoggingMixin, Callback):
+ def __init__(
+ self,
+ *,
+ q_net: nn.Module,
+ target_q_net: Optional[nn.Module] = None,
+ gamma: float = 0.99,
+ reward_scale: float = 1.0,
+ target_q_tau: float = 0.005,
+ data_group: str = "default",
+ roll_length: int = 1,
+ ):
+ """Compute and manage the target Q-values for Q-Learning algorithms.
+
+ Parameters:
+ q_net (nn.Module): The Q-network.
+ target_q_net (nn.Module, optional): The target Q-network. Defaults to a copy of q_net.
+ gamma (float): Discount factor for future rewards.
+ reward_scale (float): A scaling factor for the reward values.
+ target_q_tau (float): A soft update rate for target Q-network.
+ data_group (str): The data group to store the computed Q-target.
+ roll_length (int): The rollout length for a batch.
+
+ Methods:
+ begin_batch: Compute the target Q-value for a batch.
+ """
+ super().__init__()
+ self._order = 1 # this is to ensure that the data_group is prepared beforehand
+ self.data_group = data_group
+ self.q_net = q_net
+ self.target_q_net = copy.deepcopy(q_net) if target_q_net is None else target_q_net
+ self.reward_scale = reward_scale
+ self.tau = target_q_tau
+ self.rollout_len = roll_length
+ self.gamma_matrix = make_gamma_matrix(gamma, self.rollout_len)
+
+ def begin_batch(self, next_observation, rewards, masks):
+ next_q_values = self.target_q_net(**next_observation)
+ max_next_q_values = next_q_values.max(1)[0].unsqueeze(1)
+ last_step_masks = split_rollouts(masks, self.rollout_len)[:, -1]
+ max_next_q_values = torch.multiply(max_next_q_values, last_step_masks)
+ scaled_reward = self.reward_scale * rewards
+ scaled_rewards = split_rollouts(scaled_reward, self.rollout_len).squeeze(2)
+ q_target = discount(scaled_rewards, max_next_q_values, self.gamma_matrix).detach()
+ self.log_scalar("training/scaled_reward", torch.mean(scaled_reward))
+ self.log_scalar("training/q_target", torch.mean(q_target))
+
+ return {self.data_group: {"q_target": q_target}}
+
+ def end_batch(self):
+ super().end_batch()
+ soft_update_from_to(self.q_net, self.target_q_net, self.tau)
+
+
+class QLoss(LossCallback):
+ """Compute the Q-Learning loss.
+
+ Parameters:
+ name (str): Identifier for this loss component.
+ q (nn.Module): The Q-network.
+ opt (optim.Optimizer): The optimizer to use for the Q-network.
+ lr_schedule (optim.lr_scheduler._LRScheduler, optional): Learning rate scheduler.
+ max_grad_norm (float): Maximum gradient norm for gradient clipping.
+ data_group (str): The data group from which to pull data.
+ log_per_param_weights (bool): Whether to log weights per parameter.
+ log_per_param_grads (bool): Whether to log gradients per parameter.
+ """
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ q: nn.Module,
+ opt: optim.Optimizer,
+ lr_schedule: Optional[optim.lr_scheduler._LRScheduler] = None,
+ max_grad_norm: float = 10.0,
+ data_group: str = "default",
+ log_per_param_weights=False,
+ log_per_param_grads=False,
+ ):
+ super().__init__(
+ name=name,
+ optimizer=opt,
+ lr_schedule=lr_schedule,
+ network=q,
+ max_grad_norm=max_grad_norm,
+ data_group=data_group,
+ log_per_param_weights=log_per_param_weights,
+ log_per_param_grads=log_per_param_grads,
+ )
+ self.q_network = q
+ self.mse = nn.MSELoss()
+
+ def loss(self, observation, q_target, actions):
+ indices = actions.to(torch.int64)
+ q_value = self.q_network(**observation).gather(1, indices)
+ self.log_scalar(f"training/{self.name}_prediction", torch.mean(q_value))
+ return self.mse(q_value, q_target)
diff --git a/emote/algorithms/genrl/BUILD b/emote/algorithms/genrl/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/algorithms/genrl/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/algorithms/genrl/__init__.py b/emote/algorithms/genrl/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/emote/algorithms/genrl/proxies.py b/emote/algorithms/genrl/proxies.py
new file mode 100644
index 00000000..514e7415
--- /dev/null
+++ b/emote/algorithms/genrl/proxies.py
@@ -0,0 +1,44 @@
+from typing import Dict, Optional
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from emote.memory.memory import TableMemoryProxy
+from emote.memory.table import Table
+from emote.typing import AgentId, DictObservation, DictResponse
+
+
+class MemoryProxyWithEncoder(TableMemoryProxy):
+ def __init__(
+ self,
+ table: Table,
+ encoder: nn.Module,
+ minimum_length_threshold: Optional[int] = None,
+ use_terminal: bool = False,
+ input_key: str = "obs",
+ action_key: str = "actions",
+ ):
+ super().__init__(table, minimum_length_threshold, use_terminal)
+ self.encoder = encoder
+ self._input_key = input_key
+ self._action_key = action_key
+
+ def add(
+ self,
+ observations: Dict[AgentId, DictObservation],
+ responses: Dict[AgentId, DictResponse],
+ ):
+ updated_responses = {}
+ for agent_id, response in responses.items():
+ actions = np.array(response.list_data[self._action_key])
+ if np.size(actions) == 0:
+ updated_responses.update({agent_id: response})
+ else:
+ actions = torch.from_numpy(actions).to(torch.float)
+ obs = torch.from_numpy(observations[agent_id].array_data[self._input_key])
+ obs = obs.to(torch.float)
+ latent = self.encoder(actions, obs).detach().cpu().numpy()
+ new_response = DictResponse(list_data={self._action_key: latent}, scalar_data={})
+ updated_responses.update({agent_id: new_response})
+ super().add(observations, updated_responses)
diff --git a/emote/algorithms/genrl/vae.py b/emote/algorithms/genrl/vae.py
new file mode 100644
index 00000000..0875db53
--- /dev/null
+++ b/emote/algorithms/genrl/vae.py
@@ -0,0 +1,81 @@
+from typing import Callable
+
+import torch
+import torch.nn.functional as F
+
+from torch import nn, optim
+
+from emote.callbacks import LossCallback
+from emote.nn.initialization import normal_init_
+
+
+class VariationalAutoencoder(nn.Module):
+ def __init__(
+ self,
+ encoder: nn.Module,
+ decoder: nn.Module,
+ device: torch.device,
+ beta: float = 0.01,
+ ):
+ super().__init__()
+ assert encoder.output_size == decoder.input_size
+ self.latent_size = encoder.output_size
+ self.device = device
+ self.encoder = encoder
+ self.decoder = decoder
+ self.beta = beta
+ self.encoder.apply(normal_init_)
+ self.decoder.apply(normal_init_)
+
+ def forward(self, x, condition=None):
+ mu, log_std = self.encoder(x, condition)
+ var = torch.exp(log_std)
+ eps = torch.randn_like(var).to(self.device)
+ latent = eps.mul(var).add(mu)
+ x_hat = self.decoder(latent, condition)
+ x_hat = x_hat.view(x.size())
+ return x_hat, mu, log_std, latent
+
+ def loss(self, x, x_hat, mu, log_std):
+ restore_loss = F.mse_loss(x_hat, x)
+ var = torch.exp(log_std)
+ kld = torch.sum(-log_std + (mu**2) * 0.5 + var, 1) - self.latent_size
+ kl_loss = kld.mean()
+ info = {"restore_loss": restore_loss, "kl_loss": kl_loss}
+ loss = restore_loss + self.beta * kl_loss
+ return loss, info
+
+
+class VAELoss(LossCallback):
+ def __init__(
+ self,
+ *,
+ vae: VariationalAutoencoder,
+ opt: optim.Optimizer,
+ lr_schedule=None,
+ max_grad_norm: float = 10.0,
+ name: str = "vae",
+ data_group: str = "default",
+ input_key: str = "obs",
+ conditioning_func: Callable = lambda _: None,
+ ):
+ super().__init__(
+ name=name,
+ optimizer=opt,
+ lr_schedule=lr_schedule,
+ network=vae,
+ max_grad_norm=max_grad_norm,
+ data_group=data_group,
+ )
+ self.vae = vae
+ self.conditioning_func = conditioning_func
+ self._input_key = input_key
+
+ def loss(self, observation, actions):
+ condition = self.conditioning_func(observation[self._input_key])
+ samples, dist_mean, dist_log_std, _ = self.vae.forward(actions, condition)
+ loss, info = self.vae.loss(actions, samples, dist_mean, dist_log_std)
+ self.log_scalar("vae/restore_loss", torch.mean(info["restore_loss"]))
+ self.log_scalar("vae/kl_loss", torch.mean(info["kl_loss"]))
+
+ return loss
diff --git a/emote/algorithms/genrl/wrappers.py b/emote/algorithms/genrl/wrappers.py
new file mode 100644
index 00000000..e88efb10
--- /dev/null
+++ b/emote/algorithms/genrl/wrappers.py
@@ -0,0 +1,127 @@
+from typing import Callable
+
+import torch
+
+from torch import Tensor, nn
+
+from emote.nn.gaussian_policy import GaussianMlpPolicy
+
+
+class DecoderWrapper(nn.Module):
+ def __init__(
+ self,
+ decoder: nn.Module,
+ condition_fn: Callable,
+ latent_multiplier: float = 3.0,
+ ):
+ super().__init__()
+ self.device = decoder.device
+ self._latent_multiplier = latent_multiplier
+ self.latent_size = decoder.input_size
+ self.output_size = decoder.output_size
+ self.condition_size = decoder.condition_size
+ self.condition_fn = condition_fn
+ self.decoder = decoder
+
+ for param in self.decoder.parameters():
+ param.requires_grad = False
+
+ def forward(self, latent: torch.Tensor, observation: torch.Tensor = None) -> torch.Tensor:
+ """Running decoder.
+
+ Arguments:
+ latent (torch.Tensor): batch x latent_size
+ observation (torch.Tensor): batch x obs_size
+
+ Returns:
+ torch.Tensor: the sample (batch x data_size)
+ """
+ latent = latent * self._latent_multiplier
+
+ latent = latent.to(self.device)
+ condition = None
+ if observation is not None:
+ observation = observation.to(self.device)
+ condition = self.condition_fn(observation)
+
+ sample = self.decoder.forward(latent, condition)
+
+ return sample
+
+ def load_state_dict(self, state_dict, strict=True):
+ model_dict = self.state_dict()
+ new_state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
+ assert new_state_dict != {}
+ model_dict.update(new_state_dict)
+ super().load_state_dict(model_dict)
+
+
+class EncoderWrapper(nn.Module):
+ def __init__(
+ self,
+ encoder: nn.Module,
+ condition_fn: Callable,
+ ):
+ super().__init__()
+ self.encoder = encoder
+ self.device = encoder.device
+ self.action_size = encoder.input_size
+ self.latent_size = encoder.output_size
+ self.condition_size = encoder.condition_size
+
+ self.condition_fn = condition_fn
+
+ for param in self.encoder.parameters():
+ param.requires_grad = False
+
+ def forward(self, action: torch.Tensor, observation: torch.Tensor = None) -> torch.Tensor:
+ """Running encoder.
+
+ Arguments:
+ action (torch.Tensor): batch x data_size
+ observation (torch.Tensor): batch x obs_size
+
+ Returns:
+ torch.Tensor: the mean (batch x data_size)
+ """
+ action = action.to(self.device)
+ condition = None
+ if observation is not None:
+ observation = observation.to(self.device)
+ condition = self.condition_fn(observation)
+
+ mean, _ = self.encoder.forward(action, condition)
+ return mean
+
+ def load_state_dict(self, state_dict, strict=True):
+ model_dict = self.state_dict()
+ new_state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
+ assert new_state_dict != {}
+ model_dict.update(new_state_dict)
+ super().load_state_dict(model_dict)
+
+
+class PolicyWrapper(nn.Module):
+ def __init__(
+ self,
+ decoder: DecoderWrapper,
+ policy: GaussianMlpPolicy,
+ ):
+ super().__init__()
+ self.latent_size = decoder.latent_size
+ self.decoder = decoder
+ self.policy = policy
+
+ def forward(self, obs: Tensor, epsilon: Tensor = None):
+ # we need to discard the extra dimensions of epsilon.
+ # the input epsilon is given for the original action space
+ # however, the policy outputs latent actions.
+ if epsilon is not None:
+ epsilon = epsilon[:, : self.latent_size]
+
+ if self.training:
+ sample, log_prob = self.policy.forward(obs, epsilon)
+ action = self.decoder(sample, obs)
+ return action, log_prob
+
+ return self.decoder(self.policy.forward(obs, epsilon), obs)
diff --git a/emote/algorithms/sac.py b/emote/algorithms/sac.py
new file mode 100644
index 00000000..f6a59dc0
--- /dev/null
+++ b/emote/algorithms/sac.py
@@ -0,0 +1,443 @@
+from __future__ import annotations
+
+import copy
+
+from typing import Any, Dict, Optional
+
+import torch
+
+from torch import nn, optim
+
+from emote.callback import Callback
+from emote.callbacks.loss import LossCallback
+from emote.extra.schedules import ConstantSchedule, Schedule
+from emote.mixins.logging import LoggingMixin
+from emote.proxies import AgentProxy, GenericAgentProxy
+from emote.utils.deprecated import deprecated
+from emote.utils.gamma_matrix import discount, make_gamma_matrix, split_rollouts
+from emote.utils.spaces import MDPSpace
+
+
+def soft_update_from_to(source, target, tau): # From rlkit
+ for target_param, param in zip(target.parameters(), source.parameters()):
+ target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
+
+
+class QLoss(LossCallback):
+ r"""A MSE loss between the action value net and the target q.
+
+ The target q values are not calculated here and need to be added to
+ the state before the loss of this module runs.
+
+ :param name (str): The name of the module. Used e.g. while logging.
+ :param q (torch.nn.Module): A deep neural net that outputs the
+ discounted loss given the current observations and a given
+ action.
+ :param opt (torch.optim.Optimizer): An optimizer for q.
+ :param lr_schedule (torch.optim.lr_scheduler._LRSchedule): Learning
+ rate schedule for the optimizer of q.
+ :param max_grad_norm (float): Clip the norm of the gradient during
+ backprop using this value.
+ :param data_group (str): The name of the data group from which this
+ Loss takes its data.
+ :param log_per_param_weights (bool): If true, log each individual
+ policy parameter that is optimized (norm and value histogram).
+ :param log_per_param_grads (bool): If true, log the gradients of
+ each individual policy parameter that is optimized (norm and
+ histogram).
+ """
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ q: nn.Module,
+ opt: optim.Optimizer,
+ lr_schedule: Optional[optim.lr_scheduler._LRScheduler] = None,
+ max_grad_norm: float = 10.0,
+ data_group: str = "default",
+ log_per_param_weights=False,
+ log_per_param_grads=False,
+ ):
+ super().__init__(
+ name=name,
+ optimizer=opt,
+ lr_schedule=lr_schedule,
+ network=q,
+ max_grad_norm=max_grad_norm,
+ data_group=data_group,
+ log_per_param_weights=log_per_param_weights,
+ log_per_param_grads=log_per_param_grads,
+ )
+ self.q_network = q
+ self.mse = nn.MSELoss()
+
+ def loss(self, observation, actions, q_target):
+ q_value = self.q_network(actions, **observation)
+ self.log_scalar(f"training/{self.name}_prediction", torch.mean(q_value))
+ return self.mse(q_value, q_target)
+
+
+class QTarget(LoggingMixin, Callback):
+ r"""Creates rolling averages of the Q nets, and predicts q values using
+ these.
+
+ The module is responsible both for keeping the averages correct in
+ the target q networks and supplying q-value predictions using the
+ target q networks.
+
+ :param pi (torch.nn.Module): A deep neural net that outputs actions
+ and their log probability given a state.
+ :param ln_alpha (torch.tensor): The current weight for the entropy
+ part of the soft Q.
+ :param q1 (torch.nn.Module): A deep neural net that outputs the
+ discounted loss given the current observations and a given
+ action.
+ :param q2 (torch.nn.Module): A deep neural net that outputs the
+     discounted loss given the current observations and a given action.
+ :param q1t (torch.nn.Module, optional): target Q network. (default: None)
+ :param q2t (torch.nn.Module, optional): target Q network. (default: None)
+ :param gamma (float, optional): Discount factor for the rewards in time.
+     (default: 0.99)
+ :param reward_scale (float, optional): Scale factor for the rewards.
+     (default: 1.0)
+ :param target_q_tau (float, optional): The weight given to the latest
+     network in the exponential moving average. So NewTargetQ =
+     OldTargetQ * (1-tau) + Q*tau. (default: 0.005)
+ :param data_group (str, optional): The name of the data group from which
+     this Loss takes its data. (default: "default")
+ :param roll_length (int, optional): Rollout length. (default: 1)
+ :param use_terminal_masking (bool, optional): Whether to use terminal
+     masking for the next values. (default: False)
+ """
+
+ def __init__(
+ self,
+ *,
+ pi: nn.Module,
+ ln_alpha: torch.tensor,
+ q1: nn.Module,
+ q2: nn.Module,
+ q1t: Optional[nn.Module] = None,
+ q2t: Optional[nn.Module] = None,
+ gamma: float = 0.99,
+ reward_scale: float = 1.0,
+ target_q_tau: float = 0.005,
+ data_group: str = "default",
+ roll_length: int = 1,
+ use_terminal_masking: bool = False,
+ ):
+ super().__init__()
+ self._order = 1 # this is to ensure that the data_group is prepared beforehand
+ self.data_group = data_group
+ self.policy = pi
+ self.q1t = copy.deepcopy(q1) if q1t is None else q1t
+ self.q2t = copy.deepcopy(q2) if q2t is None else q2t
+ self.ln_alpha = ln_alpha
+ self.q1 = q1
+ self.q2 = q2
+ self.reward_scale = reward_scale
+ self.tau = target_q_tau
+ self.gamma = torch.tensor(gamma)
+ self.rollout_len = roll_length
+ self.gamma_matrix = make_gamma_matrix(gamma, self.rollout_len).to(ln_alpha.device)
+ self.use_terminal_masking = use_terminal_masking
+
+ def begin_batch(self, next_observation, rewards, masks):
+ next_p_sample, next_logp_pi = self.policy(**next_observation)
+ next_q1t = self.q1t(next_p_sample, **next_observation)
+ next_q2t = self.q2t(next_p_sample, **next_observation)
+ min_next_qt = torch.min(next_q1t, next_q2t)
+ bsz = rewards.shape[0]
+
+ alpha = torch.exp(self.ln_alpha)
+ next_value = min_next_qt - alpha * next_logp_pi
+ scaled_reward = self.reward_scale * rewards
+
+ last_step_masks = split_rollouts(masks, self.rollout_len)[:, -1]
+ scaled_reward = split_rollouts(scaled_reward, self.rollout_len).squeeze(2)
+
+ if self.use_terminal_masking:
+ next_value = torch.multiply(next_value, last_step_masks)
+
+ qt = discount(scaled_reward, next_value, self.gamma_matrix).detach()
+ assert qt.shape == (bsz, 1)
+
+ self.log_scalar("training/next_logp_pi", torch.mean(next_logp_pi))
+ self.log_scalar("training/min_next_q_target", torch.mean(min_next_qt))
+ self.log_scalar("training/scaled_reward", torch.mean(scaled_reward))
+ self.log_scalar("training/q_target", torch.mean(qt))
+
+ return {self.data_group: {"q_target": qt}}
+
+ def end_batch(self):
+ super().end_batch()
+ soft_update_from_to(self.q1, self.q1t, self.tau)
+ soft_update_from_to(self.q2, self.q2t, self.tau)
+
+
+class PolicyLoss(LossCallback):
+ r"""Maximize the soft Q-value for the policy.
+
+ This loss modifies the policy to select the action that gives the
+ highest soft q-value.
+
+ :param pi (torch.nn.Module): A deep neural net that outputs actions
+ and their log probability given a state.
+ :param ln_alpha (torch.tensor): The current weight for the entropy
+ part of the soft Q.
+ :param q (torch.nn.Module): A deep neural net that outputs the
+ discounted loss given the current observations and a given
+ action.
+ :param lr_schedule (torch.optim.lr_scheduler._LRSchedule): Learning
+ rate schedule for the optimizer of policy.
+ :param opt (torch.optim.Optimizer): An optimizer for pi.
+ :param q2 (torch.nn.Module): A second deep neural net that outputs
+ the discounted loss given the current observations and a given
+ action. This is not necessary since it is fine if the policy
+ isn't pessimistic, but can be nice for symmetry with the Q-loss.
+ :param max_grad_norm (float): Clip the norm of the gradient during
+ backprop using this value.
+ :param name (str): The name of the module. Used e.g. while logging.
+ :param data_group (str): The name of the data group from which this
+ Loss takes its data.
+ :param log_per_param_weights (bool): If true, log each individual
+ policy parameter that is optimized (norm and value histogram).
+ :param log_per_param_grads (bool): If true, log the gradients of
+ each individual policy parameter that is optimized (norm and
+ histogram).
+ """
+
+ def __init__(
+ self,
+ *,
+ pi: nn.Module,
+ ln_alpha: torch.tensor,
+ q: nn.Module,
+ opt: optim.Optimizer,
+ lr_schedule: Optional[optim.lr_scheduler._LRScheduler] = None,
+ q2: Optional[nn.Module] = None,
+ max_grad_norm: float = 10.0,
+ name: str = "policy",
+ data_group: str = "default",
+ log_per_param_weights=False,
+ log_per_param_grads=False,
+ ):
+ super().__init__(
+ name=name,
+ optimizer=opt,
+ lr_schedule=lr_schedule,
+ network=pi,
+ max_grad_norm=max_grad_norm,
+ data_group=data_group,
+ log_per_param_weights=log_per_param_weights,
+ log_per_param_grads=log_per_param_grads,
+ )
+ self.policy = pi
+ self._ln_alpha = ln_alpha
+ self.q1 = q
+ self.q2 = q2
+
+ def loss(self, observation):
+ p_sample, logp_pi = self.policy(**observation)
+ if self.q2 is not None:
+ q_pi_min = torch.min(self.q1(p_sample, **observation), self.q2(p_sample, **observation))
+ else:
+ # We don't actually need to be pessimistic in the policy update.
+ q_pi_min = self.q1(p_sample, **observation)
+ # using reparameterization trick
+ alpha = torch.exp(self._ln_alpha).detach()
+ policy_loss = alpha * logp_pi - q_pi_min
+ policy_loss = torch.mean(policy_loss)
+ self.log_scalar("policy/q_pi_min", torch.mean(q_pi_min))
+ self.log_scalar("policy/logp_pi", torch.mean(logp_pi))
+ self.log_scalar("policy/alpha", torch.mean(alpha))
+ assert policy_loss.dim() == 0
+ return policy_loss
+
+
+class AlphaLoss(LossCallback):
+ r"""Tweaks the alpha so that a specific target entropy is kept.
+
+ The target entropy is scaled with the number of actions and a
+ provided entropy scaling factor.
+
+ :param pi (torch.nn.Module): A deep neural net that outputs actions
+ and their log probability given a state.
+ :param ln_alpha (torch.tensor): The current weight for the entropy
+     part of the soft Q.
+ :param lr_schedule (torch.optim.lr_scheduler._LRSchedule | None):
+     Learning rate schedule for the optimizer of alpha.
+ :param opt (torch.optim.Optimizer): An optimizer for ln_alpha.
+ :param n_actions (int): The dimension of the action space. Scales the
+     target entropy.
+ :param max_grad_norm (float): Clip the norm of the gradient during
+     backprop using this value.
+ :param name (str): The name of the module. Used e.g. while logging.
+ :param data_group (str): The name of the data group from which this
+     Loss takes its data.
+ :param t_entropy (float | Schedule | None): Value or schedule for the target entropy.
+ """
+
+ def __init__(
+ self,
+ *,
+ pi: nn.Module,
+ ln_alpha: torch.tensor,
+ opt: optim.Optimizer,
+ lr_schedule: optim.lr_scheduler._LRScheduler | None = None,
+ n_actions: int,
+ max_grad_norm: float = 10.0,
+ max_alpha: float = 0.2,
+ name: str = "alpha",
+ data_group: str = "default",
+ t_entropy: float | Schedule | None = None,
+ ):
+ super().__init__(
+ name=name,
+ optimizer=opt,
+ lr_schedule=lr_schedule,
+ network=None,
+ max_grad_norm=max_grad_norm,
+ data_group=data_group,
+ )
+ self.policy = pi
+ self._max_ln_alpha = torch.log(torch.tensor(max_alpha, device=ln_alpha.device))
+ # TODO(singhblom) Check number of actions
+ # self.t_entropy = -np.prod(self.env.action_space.shape).item() # Value from rlkit from Harnouja
+ t_entropy = -n_actions if t_entropy is None else t_entropy
+ if not isinstance(t_entropy, (int, float, Schedule)):
+ raise TypeError("t_entropy must be a number or an instance of Schedule")
+
+ self.t_entropy = (
+ t_entropy if isinstance(t_entropy, Schedule) else ConstantSchedule(t_entropy)
+ )
+ self.ln_alpha = ln_alpha # This is log(alpha)
+
+ def loss(self, observation):
+ with torch.no_grad():
+ _, logp_pi = self.policy(**observation)
+ entropy = -logp_pi
+ error = entropy - self.t_entropy.value
+ alpha_loss = torch.mean(self.ln_alpha * error.detach())
+ assert alpha_loss.dim() == 0
+ self.log_scalar("loss/alpha_loss", alpha_loss)
+ self.log_scalar("training/entropy", torch.mean(entropy).item())
+ return alpha_loss
+
+ def end_batch(self):
+ super().end_batch()
+ self.ln_alpha.requires_grad_(False)
+ self.ln_alpha = torch.clamp_max_(self.ln_alpha, self._max_ln_alpha)
+ self.ln_alpha.requires_grad_(True)
+ self.log_scalar("training/alpha_value", torch.exp(self.ln_alpha).item())
+ self.log_scalar("training/target_entropy", self.t_entropy.value)
+ self.t_entropy.step()
+
+ def state_dict(self):
+ state = super().state_dict()
+ state["network_state_dict"] = self.ln_alpha
+ return state
+
+ def load_state_dict(
+ self,
+ state_dict: Dict[str, Any],
+ load_weights: bool = True,
+ load_optimizer: bool = True,
+ load_hparams: bool = True,
+ ):
+ saved_ln_alpha = state_dict.pop("network_state_dict")
+
+ self.ln_alpha.requires_grad_(False)
+ self.ln_alpha.copy_(
+ saved_ln_alpha.detach()
+ ) # We copy to the existing tensor instead of creating a new one to keep references used by other loss functions, such as PolicyLoss, valid.
+ self.ln_alpha.requires_grad_(True)
+
+ # TODO(singhblom) Set the right device
+ super().load_state_dict(state_dict, load_weights, load_optimizer, load_hparams)
+
+
+class AgentProxyWrapper:
+ def __init__(self, *, inner: AgentProxy, **kwargs):
+ super().__init__(**kwargs)
+ self._inner = inner
+
+ def __call__(self, *args, **kwargs):
+ self._inner(*args, **kwargs)
+
+ @property
+ def input_names(self):
+ return self._inner.input_names
+
+ @property
+ def output_names(self):
+ return self._inner.output_names
+
+ @property
+ def policy(self):
+ return self._inner.policy
+
+
+class FeatureAgentProxy(GenericAgentProxy):
+ """An agent proxy for basic MLPs.
+
+ This AgentProxy assumes that the observations will contain a single
+ flat array of features.
+ """
+
+ @deprecated(reason="Use GenericAgentProxy instead", version="23.1.0")
+ def __init__(self, policy: nn.Module, device: torch.device, input_key: str = "obs"):
+ """Create a new proxy.
+
+ :param policy: The policy to execute for actions.
+ :param device: The device to run on.
+ :param input_key: The name of the features. (default: "obs")
+ """
+
+ super().__init__(
+ policy=policy,
+ device=device,
+ input_keys=(input_key,),
+ output_keys=("actions",),
+ )
+
+
+class VisionAgentProxy(FeatureAgentProxy):
+ """This AgentProxy assumes that the observations will contain image
+ observations 'obs'."""
+
+ @deprecated(reason="Use GenericAgentProxy instead", version="23.1.0")
+ def __init__(self, policy: nn.Module, device: torch.device):
+ super().__init__(policy=policy, device=device, input_key="obs")
+
+
+class MultiKeyAgentProxy(GenericAgentProxy):
+ """Handles multiple input keys.
+
+ Observations are dicts that contain multiple input keys (e.g. both
+ "features" and "images").
+ """
+
+ @deprecated(reason="Use GenericAgentProxy instead", version="23.1.0")
+ def __init__(
+ self,
+ policy: nn.Module,
+ device: torch.device,
+ input_keys: tuple,
+ spaces: MDPSpace = None,
+ ):
+ """Create a new proxy.
+
+ Args:
+ policy (nn.Module): The policy to execute for actions.
+ device (torch.device): The device to run on.
+ input_keys (tuple): The names of the input.
+ """
+ super().__init__(
+ policy=policy,
+ device=device,
+ input_keys=input_keys,
+ output_keys=("actions",),
+ spaces=spaces,
+ )
diff --git a/emote/callback.py b/emote/callback.py
new file mode 100644
index 00000000..40827200
--- /dev/null
+++ b/emote/callback.py
@@ -0,0 +1,250 @@
+from __future__ import annotations
+
+import inspect
+import logging
+import warnings
+
+from abc import ABCMeta
+from functools import wraps
+from typing import Any, Dict
+
+
+def _get_complex(obj, func, arg_names):
+ keys_from_member = getattr(func, "__keys_from_members__", {})
+
+ complex_kwargs = {}
+ if keys_from_member:
+ for arg_name, key in keys_from_member.items():
+ key_value = getattr(obj, key)
+ del arg_names[arg_names.index(arg_name)]
+ complex_kwargs[arg_name] = key_value
+
+ return complex_kwargs
+
+
+def _make_proxy(func):
+ @wraps(func)
+ def _proxy(*args, **kwargs): # noqa pylint: disable=unused-argument
+ return func()
+
+ return _proxy
+
+
+def _make_no_group(func, arg_names, complex_kwargs):
+ @wraps(func)
+ def _inner_no_group(*args, **kwargs):
+ arg_names_ = arg_names[len(args) :]
+ kwargs_ = {v: kwargs[v] for v in arg_names_ if v in kwargs}
+ for arg_name, key in complex_kwargs.items():
+ if key not in kwargs:
+ continue
+
+ kwargs_[arg_name] = kwargs[key]
+
+ return func(*args, **kwargs_)
+
+ return _inner_no_group
+
+
+def _make_group_unpack(func, group, arg_names, complex_kwargs):
+ @wraps(func)
+ def _inner_fixed_group(*args, **kwargs):
+ arg_names_ = arg_names[len(args) :]
+
+ group_ = kwargs[group]
+ inner_args = {v: group_[v] for v in arg_names_ if v in group_}
+ outer_args = {v: kwargs[v] for v in arg_names_ if v not in group_ and v in kwargs}
+ for arg_name, key in complex_kwargs.items():
+ if key in group_:
+ inner_args[arg_name] = kwargs[key]
+
+ elif key in kwargs:
+ outer_args[arg_name] = kwargs[key]
+
+ res = func(*args, **inner_args, **outer_args)
+ if isinstance(res, dict) and group in res:
+ group_.update(res[group])
+ del res[group]
+
+ return res
+
+ return _inner_fixed_group
+
+
+def _wrap_callback_function(obj, func, *, group: str = None, use_group: bool = True):
+ args = inspect.getfullargspec(func)
+ # backward needs to pass things to loss so treated specially.
+ # TODO(singhblom) Figure out if this is the nicest way to do it.
+ if args.varargs or args.varkw:
+ if func.__name__ != "backward":
+ warnings.warn(
+ f"Deprecated: {func.__qualname__} uses *args or **kwargs, this is deprecated",
+ UserWarning,
+ )
+ return func
+
+ arg_names = args.args + args.kwonlyargs
+ if arg_names == ["self"]:
+ return _make_proxy(func)
+
+ complex_kwargs = _get_complex(obj, func, arg_names)
+
+ if not use_group:
+ return _make_no_group(func, arg_names, complex_kwargs)
+
+ return _make_group_unpack(func, group, arg_names, complex_kwargs)
+
+
+class CallbackMeta(ABCMeta):
+ """The CallbackMeta metaclass modifies the callbacks so that they accept
+ data groups."""
+
+ def __init__(self, cls, bases, fields):
+ self._callbacks = {}
+
+ if cls == "Callback":
+ self._callbacks = {
+ field: method
+ for field, method in fields.items()
+ if inspect.isfunction(method)
+ and field
+ in [
+ "restore_state",
+ "begin_training",
+ "begin_cycle",
+ "begin_batch",
+ "backward",
+ "end_batch",
+ "end_cycle",
+ "end_training",
+ ] # TODO(singhblom) Should we filter out unused methods here as well?
+ }
+
+ else:
+ for base in bases:
+ self._callbacks = {**self._callbacks, **getattr(base, "_callbacks", {})}
+
+ for name, func in fields.items():
+ if getattr(func, "__is_callback__", None):
+ self._callbacks[name] = func
+
+ super().__init__(cls, bases, fields)
+
+ def __call__(self, *args, **kwargs):
+ instance = super().__call__(*args, **kwargs)
+
+ for name, func in self._callbacks.items():
+ concrete_func = getattr(instance, name)
+
+ if concrete_func.__qualname__ == func.__qualname__:
+ logging.debug("skipping patch of %s: not overridden", func.__qualname__)
+ continue
+
+ group_name = getattr(instance, "data_group", None)
+ if not group_name:
+ group_name = getattr(instance.__class__, "DATA_GROUP", None)
+
+ concrete_func = _wrap_callback_function(
+ instance,
+ concrete_func,
+ group=group_name,
+ use_group=group_name is not None,
+ )
+ setattr(instance, name, concrete_func)
+
+ return instance
+
+ def extend(self, func):
+ func.__is_callback__ = True
+ return func
+
+ def keys_from_member(self, **arg_name_to_key):
+ def _wrap(func):
+ func.__keys_from_members__ = arg_name_to_key
+ return func
+
+ return _wrap
+
+
+class Callback(metaclass=CallbackMeta):
+ """The principal modular building block of emote.
+
+ Callbacks are modular pieces of code that together build up the
+ training loop. They contain hooks that are executed at different
+ points during training. These can consume values from other
+ callbacks, and generate their own for others to consume. This allows
+ a very loosely coupled flow of data between different parts of the
+ code. The most important examples of callbacks in emote are the
+ Losses.
+
+ The concept has been borrowed from Keras and FastAI.
+ """
+
+ def __init__(self, cycle: int | None = None):
+ super().__init__()
+ self._order = 0
+ self.cycle = cycle
+
+ def restore_state(self, *args, **kwargs):
+ """Called before training starts to allow loader modules to import
+ state.
+
+ At this point, no assumptions can be made for other modules
+ state.
+ """
+ pass
+
+ def begin_training(self, *args, **kwargs):
+ """Called when training starts, both from scratch and when restoring
+ from a checkpoint."""
+ pass
+
+ def begin_cycle(self, *args, **kwargs):
+ """Called at the start of each cycle."""
+ pass
+
+ def begin_batch(self, *args, **kwargs):
+ """Called at the start of each batch, immediately after data has been
+ sampled."""
+ pass
+
+ def backward(self, *args, **kwargs):
+ """The main batch processing should happen here."""
+ pass
+
+ def end_batch(self, *args, **kwargs):
+ """Called when the backward pass has been completed."""
+ pass
+
+ def end_cycle(self, *args, **kwargs):
+ """Called when a callbacks cycle is completed."""
+ pass
+
+ def end_training(self, *args, **kwargs):
+ """Called right before shutdown, if possible."""
+ pass
+
+ def state_dict(self) -> Dict[str, Any]:
+ """Called by checkpointers primarily to capture state for on-disk
+ saving."""
+ return {}
+
+ def load_state_dict(
+ self,
+ state_dict: Dict[str, Any],
+ load_network: bool = True,
+ load_optimizer: bool = True,
+ load_hparams: bool = True,
+ ):
+ """Called from checkpoint-loaders during the `restore_state` phase,
+ primarily."""
+ pass
+
+
+class BatchCallback(Callback):
+ def __init__(self, cycle: int | None = None):
+ super().__init__(cycle)
+
+ @Callback.extend
+ def get_batch(self, *args, **kwargs):
+ pass
diff --git a/emote/callbacks/BUILD b/emote/callbacks/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/callbacks/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/callbacks/__init__.py b/emote/callbacks/__init__.py
new file mode 100644
index 00000000..4ad7d68f
--- /dev/null
+++ b/emote/callbacks/__init__.py
@@ -0,0 +1,17 @@
+""""""
+
+from emote.callbacks.checkpointing import Checkpointer, CheckpointLoader
+from emote.callbacks.generic import BackPropStepsTerminator
+from emote.callbacks.logging import TensorboardLogger
+from emote.callbacks.loss import LossCallback
+from emote.mixins.logging import LoggingMixin
+
+
+__all__ = [
+ "Checkpointer",
+ "CheckpointLoader",
+ "BackPropStepsTerminator",
+ "LoggingMixin",
+ "TensorboardLogger",
+ "LossCallback",
+]
diff --git a/emote/callbacks/checkpointing.py b/emote/callbacks/checkpointing.py
new file mode 100644
index 00000000..0154f52e
--- /dev/null
+++ b/emote/callbacks/checkpointing.py
@@ -0,0 +1,171 @@
+import logging
+import os
+import time
+import warnings
+
+from typing import List
+
+import torch
+
+from emote.callback import Callback
+
+
+class Checkpointer(Callback):
+ """Checkpointer writes out a checkpoint every n steps.
+
+ Exactly what is written to the checkpoint is determined by the
+ callbacks supplied in the constructor.
+
+ :param callbacks (List[Callback]): A list of callbacks that should
+ be saved.
+ :param run_root (str): The root path to where the run artifacts
+ should be stored.
+ :param checkpoint_interval (int): Number of backprops between
+ checkpoints.
+ :param storage_subdirectory (str): The subdirectory where the
+ checkpoints are stored.
+ """
+
+ def __init__(
+ self,
+ *,
+ callbacks: List[Callback],
+ run_root: str,
+ checkpoint_interval: int,
+ checkpoint_index: int = 0,
+ storage_subdirectory: str = "checkpoints",
+ ):
+ super().__init__(cycle=checkpoint_interval)
+ self._run_root = run_root
+ self._checkpoint_index = checkpoint_index
+ self._folder_path = os.path.join(run_root, storage_subdirectory)
+
+ self._cbs = []
+ names = []
+ for cb in callbacks:
+ if hasattr(cb, "name"):
+ self._cbs.append(cb)
+ names.append(cb.name)
+ else:
+ warnings.warn(
+ f"Checkpointer ignored {cb} because of not having the 'name' field.",
+ UserWarning,
+ )
+
+ if len(names) != len(set(names)):
+ raise ValueError(
+ "Checkpointer is given a list of callbacks with at least"
+ "two callbacks with identical names"
+ )
+
+ def begin_training(self):
+ os.makedirs(self._folder_path, exist_ok=True)
+
+ def end_cycle(self, bp_step, bp_samples):
+ name = f"checkpoint_{self._checkpoint_index}.tar"
+ final_path = os.path.join(self._folder_path, name)
+ state_dict = {
+ "callback_state_dicts": {cb.name: cb.state_dict() for cb in self._cbs},
+ "training_state": {
+ "latest_checkpoint": final_path,
+ "bp_step": bp_step,
+ "bp_samples": bp_samples,
+ "checkpoint_index": self._checkpoint_index,
+ },
+ }
+ torch.save(state_dict, final_path)
+ self._checkpoint_index += 1
+
+ return {
+ "latest_checkpoint": state_dict["training_state"]["latest_checkpoint"],
+ "checkpoint_index": state_dict["training_state"]["checkpoint_index"],
+ }
+
+
+class CheckpointLoader(Callback):
+ """CheckpointLoader loads a checkpoint like the one created by
+ Checkpointer.
+
+ This is intended for resuming training given a specific checkpoint
+ index. It also enables you to load network weights, optimizer, or
+ other callback hyper-params independently. If you want to do
+ something more specific, like only restore a specific network
+ (outside a callback), it is probably easier to just do it explicitly
+ when the network is constructed.
+
+ :param callbacks (List[Callback]): A list of callbacks that should
+ be restored.
+ :param run_root (str): The root path to where the run artifacts
+ should be stored.
+ :param checkpoint_index (int): Which checkpoint to load.
+ :param load_weights (bool): If True, it loads the network weights
+ :param load_optimizers (bool): If True, it loads the optimizer state
+ :param load_hparams (bool): If True, it loads other callback hyper-
+ params
+ :param storage_subdirectory (str): The subdirectory where the
+ checkpoints are stored.
+ """
+
+ def __init__(
+ self,
+ *,
+ callbacks: List[Callback],
+ run_root: str,
+ checkpoint_index: int,
+ load_weights: bool = True,
+ load_optimizers: bool = True,
+ load_hparams: bool = True,
+ storage_subdirectory: str = "checkpoints",
+ ):
+ super().__init__()
+ self._run_root = run_root
+ self._checkpoint_index = checkpoint_index
+ self._folder_path = os.path.join(run_root, storage_subdirectory)
+
+ self._load_weights = load_weights
+ self._load_optimizers = load_optimizers
+ self._load_hparams = load_hparams
+
+ self._cbs = []
+ names = []
+ for cb in callbacks:
+ if hasattr(cb, "name"):
+ self._cbs.append(cb)
+ names.append(cb.name)
+ else:
+ warnings.warn(
+ f"CheckpointLoader ignored {cb} because of not having the 'name' field.",
+ UserWarning,
+ )
+
+ if len(names) != len(set(names)):
+ raise ValueError(
+ "CheckpointLoader is given a list of callbacks with at least"
+ "two callbacks with identical names"
+ )
+
+ def restore_state(self):
+ start_time = time.time()
+ if not os.path.exists(self._folder_path):
+ raise InvalidCheckpointLocation(
+ f"Checkpoint folder {self._folder_path} was specified but does not exist."
+ )
+ name = f"checkpoint_{self._checkpoint_index}.tar"
+ final_path = os.path.join(self._folder_path, name)
+ logging.info(f"Loading checkpoints from {self._folder_path}")
+ state_dict: dict = torch.load(final_path)
+
+ for cb in self._cbs:
+ state = state_dict["callback_state_dicts"][cb.name]
+ cb.load_state_dict(state, self._load_weights, self._load_optimizers, self._load_hparams)
+
+ return_value = {}
+ if self._load_hparams:
+ return_value = state_dict.get("training_state", {})
+ duration = time.time() - start_time
+ logging.info(f"Loaded checkpoint from {final_path} in {duration:.2f}s")
+ return return_value
+
+
+class InvalidCheckpointLocation(ValueError):
+ pass
diff --git a/emote/callbacks/generic.py b/emote/callbacks/generic.py
new file mode 100644
index 00000000..37449242
--- /dev/null
+++ b/emote/callbacks/generic.py
@@ -0,0 +1,17 @@
+from emote.callback import Callback
+from emote.trainer import TrainingShutdownException
+
+
+class BackPropStepsTerminator(Callback):
+ """Terminates training after a given number of backprops.
+
+ :param bp_steps (int): The total number of backprops that the
+ trainer should run for.
+ """
+
+ def __init__(self, bp_steps: int):
+ assert bp_steps > 0, "Training steps must be above 0."
+ super().__init__(cycle=bp_steps)
+
+ def end_cycle(self):
+ raise TrainingShutdownException()
diff --git a/emote/callbacks/logging.py b/emote/callbacks/logging.py
new file mode 100644
index 00000000..0bc88aab
--- /dev/null
+++ b/emote/callbacks/logging.py
@@ -0,0 +1,119 @@
+import logging
+import time
+
+from typing import List
+
+import tensorboard # noqa
+
+from torch.utils.tensorboard import SummaryWriter
+
+from emote.callback import Callback
+from emote.mixins.logging import LoggingMixin
+
+
+class TensorboardLogger(Callback):
+ """Logs the provided loggable callbacks to tensorboard."""
+
+ def __init__(
+ self,
+ loggables: List[LoggingMixin],
+ writer: SummaryWriter,
+ log_interval: int,
+ log_by_samples: bool = False,
+ ):
+ super().__init__(cycle=log_interval)
+ self._logs = loggables
+ self._writer = writer
+ self._log_samples = log_by_samples
+
+ self._bp_samples_at_start = 0
+ self._bp_step_at_start = 0
+
+ def begin_training(self, bp_step, bp_samples):
+ self._start_time = time.monotonic()
+
+ self._bp_samples_at_start = bp_samples
+ self._bp_step_at_start = bp_step
+
+ def end_cycle(self, bp_step, bp_samples):
+ suffix = "bp_step"
+
+ for cb in self._logs:
+ for k, v in cb.scalar_logs.items():
+ k_split = k.split("/")
+ k_split[0] = k_split[0] + "_" + suffix
+ k = "/".join(k_split)
+
+ self._writer.add_scalar(k, v, bp_step)
+
+ for k, v in cb.windowed_scalar.items():
+ k_split = k.split("/")
+ k_split[0] = k_split[0] + "_" + suffix
+ k = "/".join(k_split)
+
+ self._writer.add_scalar(k, sum(v) / len(v), bp_step)
+
+ for k, v in cb.windowed_scalar_cumulative.items():
+ k_split = k.split("/")
+ k_split[0] = k_split[0] + "_" + suffix
+ k = "/".join(k_split)
+
+ self._writer.add_scalar(f"{k}/cumulative", v, bp_step)
+
+ for k, v in cb.image_logs.items():
+ k_split = k.split("/")
+ k_split[0] = k_split[0] + "_" + suffix
+ k = "/".join(k_split)
+
+ self._writer.add_image(k, v, bp_step, dataformats="HWC")
+
+ for k, (video_array, fps) in cb.video_logs.items():
+ k_split = k.split("/")
+ k_split[0] = k_split[0] + "_" + suffix
+ k = "/".join(k_split)
+
+ self._writer.add_video(k, video_array, bp_step, fps=fps, walltime=None)
+
+ time_since_start = time.monotonic() - self._start_time
+ self._writer.add_scalar(
+ "performance/bp_samples_per_sec",
+ (bp_samples - self._bp_samples_at_start) / time_since_start,
+ bp_step,
+ )
+
+ self._writer.add_scalar(
+ "performance/bp_steps_per_sec",
+ (bp_step - self._bp_step_at_start) / time_since_start,
+ bp_step,
+ )
+
+ self._writer.flush()
+
+
+class TerminalLogger(Callback):
+ """Logs the provided loggable callbacks to the python logger."""
+
+ def __init__(
+ self,
+ callbacks: List[LoggingMixin],
+ log_interval: int,
+ ):
+ super().__init__(cycle=log_interval)
+ self._logs = callbacks
+
+ def log_scalars(self, step, suffix=None):
+ """Logs scalar logs adding optional suffix on the first level.
+
+ **Example:** If k='training/loss' and suffix='bp_step', k will
+ be renamed to 'training_bp_step/loss'.
+ """
+ for log in self._logs:
+ for k, v in log.scalar_logs.items():
+ if suffix:
+ k_split = k.split("/")
+ k_split[0] = k_split[0] + "_" + suffix
+ k = "/".join(k_split)
+ logging.info("%s@%s:\t%.4f", k, step, v)
+
+ def end_cycle(self, bp_step):
+ self.log_scalars(bp_step)
diff --git a/emote/callbacks/loss.py b/emote/callbacks/loss.py
new file mode 100644
index 00000000..2c35da98
--- /dev/null
+++ b/emote/callbacks/loss.py
@@ -0,0 +1,119 @@
+from typing import Any, Dict, Optional
+
+import torch
+
+from torch import Tensor, nn, optim
+from torch.optim import lr_scheduler
+
+from emote.callback import Callback
+from emote.mixins.logging import LoggingMixin
+
+
+def _friendly_size_str(size: torch.Size):
+ return str(list(size)).replace("[", "").replace("]", "").replace(", ", "_")
+
+
class LossCallback(LoggingMixin, Callback):
    """Losses are callbacks that implement a *loss function*.

    On each `backward` call the callback zeroes gradients, computes its loss,
    backpropagates, clips the gradient norm, steps the optimizer and LR
    schedule, and logs loss, learning rate and gradient norm under
    `loss/{name}_*`.

    :param lr_schedule: optional LR scheduler; defaults to a constant LR.
    :param name: prefix used for all values logged by this callback.
    :param network: network being optimized; used for checkpointing and
        per-parameter logging. May be None.
    :param optimizer: optimizer stepped after each loss computation.
        NOTE(review): must not be None when `lr_schedule` is None, since a
        ConstantLR is built from it — confirm callers never pass both as None.
    :param max_grad_norm: gradient-norm clipping threshold.
    :param data_group: name of the data group this loss consumes.
    :param log_per_param_weights: if True, log per-parameter weight stats.
    :param log_per_param_grads: if True, log per-parameter gradient stats.
    """

    def __init__(
        self,
        lr_schedule: Optional[optim.lr_scheduler._LRScheduler] = None,
        *,
        name: str,
        network: Optional[nn.Module],
        optimizer: Optional[optim.Optimizer],
        max_grad_norm: float,
        data_group: str,
        log_per_param_weights=False,
        log_per_param_grads=False,
    ):
        super().__init__()
        self.data_group = data_group
        self.name = name
        self.network = network
        self.optimizer = optimizer
        # Default to a schedule that leaves the optimizer's LR unchanged.
        if lr_schedule is None:
            lr_schedule = lr_scheduler.ConstantLR(optimizer, factor=1.0)
        self.lr_schedule = lr_schedule
        # Flat list of all parameters across the optimizer's param groups.
        self.parameters = [
            p for param_group in optimizer.param_groups for p in param_group["params"]
        ]
        self._max_grad_norm = max_grad_norm
        self._log_per_param_weights = log_per_param_weights
        self._log_per_param_grads = log_per_param_grads
        # Cache parameters and parameter name for all parameters that we optimize.
        # We can use this when debugging per param values and gradients.
        self._named_parameters = (
            {
                n: p
                for n, p in network.named_parameters(recurse=True)
                if any(p is p_ for p_ in self.parameters)
            }
            if self.network is not None
            else {}
        )

    def backward(self, *args, **kwargs):
        """Runs one optimization step for this loss and logs its metrics."""
        self.optimizer.zero_grad()
        loss = self.loss(*args, **kwargs)
        loss.backward()
        # Clip before stepping; the pre-clip norm is what gets logged.
        grad_norm = nn.utils.clip_grad_norm_(self.parameters, self._max_grad_norm)
        self.optimizer.step()
        self.lr_schedule.step()
        self.log_scalar(f"loss/{self.name}_lr", self.lr_schedule.get_last_lr()[0])
        self.log_scalar(f"loss/{self.name}_loss", loss)
        self.log_scalar(f"loss/{self.name}_gradient_norm", grad_norm)

        if self._log_per_param_weights or self._log_per_param_grads:
            self.log_per_param_weights_and_grads()

    def log_per_param_weights_and_grads(self):
        """Logs histograms and L2 norms per optimized parameter (and its
        gradient, when one is present)."""
        for name, parameter in self._named_parameters.items():
            # e.g. "encoder.linear.weight" -> log_name "{self.name}_encoder_linear",
            # param_type "weight".
            split = name.split(".")
            log_name = self.name + "_" + "_".join(split[:-1])
            param_type = split[-1]

            if self._log_per_param_grads and parameter.grad is not None:
                g_shape = _friendly_size_str(parameter.grad.shape)
                self.log_histogram(f"{param_type}_grads/{log_name}_{g_shape}", parameter.grad)
                self.log_scalar(
                    f"{param_type}_grads_l2/{log_name}_{g_shape}",
                    torch.norm(parameter.grad, p=2),
                )

            if self._log_per_param_weights:
                p_shape = _friendly_size_str(parameter.shape)
                self.log_histogram(f"{param_type}/{log_name}_{p_shape}", parameter)
                self.log_scalar(f"{param_type}_l2/{log_name}_{p_shape}", torch.norm(parameter, p=2))

    def state_dict(self):
        """Extends the base callback state with optimizer and network weights."""
        state = super().state_dict()
        if self.optimizer:
            state["optimizer_state_dict"] = self.optimizer.state_dict()
        if self.network:
            state["network_state_dict"] = self.network.state_dict()
        return state

    def load_state_dict(
        self,
        state_dict: Dict[str, Any],
        load_weights: bool = True,
        load_optimizers: bool = True,
        load_hparams: bool = True,
    ):
        """Restores network/optimizer state.

        Entries are popped so the base class only sees the remaining keys.
        """
        if self.network and load_weights:
            self.network.load_state_dict(state_dict.pop("network_state_dict"))

        if self.optimizer and load_optimizers:
            self.optimizer.load_state_dict(state_dict.pop("optimizer_state_dict"))

        super().load_state_dict(state_dict, load_weights, load_optimizers, load_hparams)

    @Callback.extend
    def loss(self, *args, **kwargs) -> Tensor:
        """The loss method needs to be overwritten to implement a loss.

        :return: A PyTorch tensor of shape (batch,).
        """
        raise NotImplementedError
diff --git a/emote/callbacks/testing.py b/emote/callbacks/testing.py
new file mode 100644
index 00000000..08b4b85b
--- /dev/null
+++ b/emote/callbacks/testing.py
@@ -0,0 +1,55 @@
+from typing import List
+
+from emote.callback import Callback
+from emote.callbacks.loss import LossCallback
+from emote.mixins.logging import LoggingMixin
+from emote.trainer import TrainingShutdownException
+
+
class FinalLossTestCheck(Callback):
    """Checks that each tracked loss is below its cutoff at the end of a test
    run, then requests training shutdown."""

    def __init__(
        self,
        callbacks: List[LossCallback],
        cutoffs: List[float],
        test_length: int,
    ):
        super().__init__(cycle=test_length)
        self._cbs = callbacks
        self._cutoffs = cutoffs

    def end_cycle(self):
        for loss_cb, max_loss in zip(self._cbs, self._cutoffs):
            final_loss = loss_cb.scalar_logs[f"loss/{loss_cb.name}_loss"]
            if final_loss > max_loss:
                raise Exception(f"Loss for {loss_cb.name} too high: {final_loss}")
        # All checks passed: signal the trainer to stop cleanly.
        raise TrainingShutdownException()
+
+
class FinalRewardTestCheck(Callback):
    """Checks that a logged reward reaches a cutoff at the end of a test run,
    then requests training shutdown."""

    def __init__(
        self,
        callback: LoggingMixin,
        cutoff: float,
        test_length: int,
        key: str = "training/scaled_reward",
        use_windowed: bool = False,
    ):
        super().__init__(cycle=test_length)
        self._cb = callback
        self._cutoff = cutoff
        self._key = key
        self._use_windowed = use_windowed

    def end_cycle(self):
        if self._use_windowed:
            # Average over the window rather than taking the latest sample.
            window = self._cb.windowed_scalar[self._key]
            reward = sum(window) / len(window)
        else:
            reward = self._cb.scalar_logs[self._key]

        if reward < self._cutoff:
            raise Exception(f"Reward too low: {reward}")

        raise TrainingShutdownException()
diff --git a/emote/callbacks/wb_logger.py b/emote/callbacks/wb_logger.py
new file mode 100644
index 00000000..4748173f
--- /dev/null
+++ b/emote/callbacks/wb_logger.py
@@ -0,0 +1,97 @@
+import time
+
+from typing import Dict, List
+
+from emote.callback import Callback
+from emote.mixins.logging import LoggingMixin
+
+
+try:
+ import wandb
+except ImportError as root:
+ raise ImportError("enable the optional `wandb` feature to use the WBLogger") from root
+
+
class WBLogger(Callback):
    """Logs the provided loggable callbacks to Weights&Biases."""

    def __init__(
        self,
        callbacks: List[LoggingMixin],
        config: Dict,
        log_interval: int,
    ):
        super().__init__(cycle=log_interval)

        self._cbs = callbacks
        self._config = config

        # Only one wandb run may be active per process.
        assert wandb.run is None
        wandb.init(
            project=self._config["wandb_project"],
            name=self._config["wandb_run"],
            config=wandb.helper.parse_config(self._config, exclude=("wandb_project", "wandb_run")),
        )

        self._bp_samples_at_start = 0
        self._bp_step_at_start = 0

    def begin_training(self, bp_step, bp_samples):
        self._start_time = time.monotonic()

        self._bp_samples_at_start = bp_samples
        self._bp_step_at_start = bp_step

    @staticmethod
    def _suffixed(key: str, suffix: str) -> str:
        """Appends `_{suffix}` to the first path segment of `key`.

        E.g. 'training/loss' with suffix 'bp_step' -> 'training_bp_step/loss'.
        """
        parts = key.split("/")
        parts[0] = parts[0] + "_" + suffix
        return "/".join(parts)

    def end_cycle(self, bp_step, bp_samples):
        suffix = "bp_step"
        log_dict = {}

        for cb in self._cbs:
            for key, value in cb.scalar_logs.items():
                log_dict[self._suffixed(key, suffix)] = value

            for key, window in cb.windowed_scalar.items():
                log_dict[self._suffixed(key, suffix)] = sum(window) / len(window)

            for key, total in cb.windowed_scalar_cumulative.items():
                log_dict[f"{self._suffixed(key, suffix)}/cumulative"] = total

            for key, image in cb.image_logs.items():
                log_dict[self._suffixed(key, suffix)] = wandb.Image(image)

            for key, (video_array, fps) in cb.video_logs.items():
                log_dict[self._suffixed(key, suffix)] = wandb.Video(video_array, fps=fps)

        elapsed = time.monotonic() - self._start_time
        log_dict["performance/bp_samples_per_sec"] = (
            bp_samples - self._bp_samples_at_start
        ) / elapsed
        log_dict["performance/bp_steps_per_sec"] = (bp_step - self._bp_step_at_start) / elapsed

        log_dict["log/bp_step"] = bp_step
        wandb.log(log_dict)

    def end_training(self):
        wandb.finish()
        return super().end_training()
diff --git a/emote/env/BUILD b/emote/env/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/env/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/env/__init__.py b/emote/env/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/emote/env/box2d/BUILD b/emote/env/box2d/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/env/box2d/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/env/box2d/__init__.py b/emote/env/box2d/__init__.py
new file mode 100644
index 00000000..f49042c8
--- /dev/null
+++ b/emote/env/box2d/__init__.py
@@ -0,0 +1,33 @@
+import gymnasium
+
+from emote.env.wrappers import FrameStack, ScaledFloatFrame, WarpFrame
+
+
def make_vision_box2d_env(
    environment_id: str,
    rank: int,
    seed: int = 0,
    frame_stack: int = 3,
    use_float_scaling: bool = True,
):
    """Creates a factory for a grayscale, resized, frame-stacked box2d env.

    :param environment_id: (str) the environment ID
    :param rank: (int) an integer offset for the random seed
    :param seed: (int) the initial seed for RNG
    :param frame_stack: (int) Stacks this many frames.
    :param use_float_scaling: (bool) scaled the observations from char to normalised float
    :return: the env creator function
    """

    def _thunk():
        env = gymnasium.make(environment_id)
        # bugfix: gymnasium removed `Env.seed`; seeding is done through
        # `reset(seed=...)`, so the old `env.seed(seed + rank)` call would
        # raise AttributeError.
        env.reset(seed=seed + rank)
        env = WarpFrame(env)
        if use_float_scaling:
            env = ScaledFloatFrame(env)

        if frame_stack > 1:
            env = FrameStack(env, frame_stack)
        return env

    return _thunk
diff --git a/emote/env/wrappers.py b/emote/env/wrappers.py
new file mode 100755
index 00000000..e81eb03d
--- /dev/null
+++ b/emote/env/wrappers.py
@@ -0,0 +1,130 @@
+from collections import deque
+
+import cv2
+import gymnasium
+import numpy as np
+
+from gymnasium import spaces
+
+
+cv2.ocl.setUseOpenCL(False)
+
+# Note:
+#
+# Most of these wrappers are from the openai baselines repository:
+# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
+#
+# Copyright (c) 2017 OpenAI (http://openai.com), used under the MIT license
+
+
class WarpFrame(gymnasium.ObservationWrapper):
    # Converts frames to grayscale and resizes them to width x height.
    def __init__(self, env, width: int = 84, height: int = 84):
        """Warp frames to width x height.

        :param env: (Gym Environment) the environment
        :param width: (int) target frame width in pixels
        :param height: (int) target frame height in pixels
        """
        gymnasium.ObservationWrapper.__init__(self, env)
        self.width = width
        self.height = height
        # Output is single-channel: (height, width, 1).
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(self.height, self.width, 1),
            dtype=env.observation_space.dtype,
        )

    def observation(self, frame):
        """Returns the current observation from a frame.

        :param frame: ([int] or [float]) environment frame
        :return: ([int] or [float]) the observation
        """
        # NOTE(review): assumes the incoming frame is RGB in HWC layout — confirm
        # against the wrapped environment's observation format.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        return frame[:, :, None]
+
+
class FrameStack(gymnasium.Wrapper):
    def __init__(self, env, n_frames: int):
        """Stack n_frames last frames.

        Returns lazy array, which is much more memory efficient.

        See Also
        --------
        LazyFrames (Below)

        :param env: (Gym Environment) the environment
        :param n_frames: (int) the number of frames to stack
        """
        gymnasium.Wrapper.__init__(self, env)
        self.n_frames = n_frames
        # Rolling buffer of the most recent n_frames observations.
        self.frames = deque([], maxlen=n_frames)
        shp = env.observation_space.shape
        # Frames are stacked along the channel (last) axis.
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(shp[0], shp[1], shp[2] * n_frames),
            dtype=env.observation_space.dtype,
        )

    def reset(self):
        # Fill the buffer with copies of the first observation.
        # NOTE(review): uses the legacy gym API (reset() -> obs); gymnasium's
        # reset returns (obs, info) and accepts seed/options — confirm intended.
        obs = self.env.reset()
        for _ in range(self.n_frames):
            self.frames.append(obs)
        return self._get_ob()

    def step(self, action):
        # NOTE(review): legacy 4-tuple step API; gymnasium returns a 5-tuple
        # (obs, reward, terminated, truncated, info) — confirm intended.
        obs, reward, done, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        # Snapshot the deque so LazyFrames owns a stable list of references.
        assert len(self.frames) == self.n_frames
        return LazyFrames(list(self.frames))
+
+
class ScaledFloatFrame(gymnasium.ObservationWrapper):
    """Converts uint8 frames in [0, 255] to float32 values in [-1, 1]."""

    def __init__(self, env):
        gymnasium.ObservationWrapper.__init__(self, env)
        # bugfix: `observation` emits values in [-1, 1] (it subtracts 0.5 and
        # doubles), but the declared space previously claimed [0, 1].
        self.observation_space = spaces.Box(
            low=-1.0, high=1.0, shape=env.observation_space.shape, dtype=np.float32
        )

    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        return ((np.array(observation).astype(np.float32) / 255.0) - 0.5) * 2.0
+
+
class LazyFrames(object):
    """Holds a list of frames and concatenates them lazily on first access.

    This object ensures that common frames between the observations are
    only stored once. It exists purely to optimize memory usage which can
    be huge for DQN's 1M frames replay buffers.

    This object should only be converted to np.ndarray before being
    passed to the model.

    :param frames: ([int] or [float]) environment frames
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None

    def _force(self):
        # Concatenate once along the channel axis, then drop the per-frame
        # references so they can be garbage collected.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=2)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        stacked = self._force()
        return stacked if dtype is None else stacked.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]
diff --git a/emote/extra/BUILD b/emote/extra/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/extra/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/extra/__init__.py b/emote/extra/__init__.py
new file mode 100644
index 00000000..f3b0d32f
--- /dev/null
+++ b/emote/extra/__init__.py
@@ -0,0 +1 @@
+""""""
diff --git a/emote/extra/crud_storage.py b/emote/extra/crud_storage.py
new file mode 100644
index 00000000..6f8f35ba
--- /dev/null
+++ b/emote/extra/crud_storage.py
@@ -0,0 +1,212 @@
+"""Generic CRUD-based storage on disk."""
+
+import os
+
+from dataclasses import dataclass
+from datetime import datetime
+from typing import Callable, Generic, List, Optional, Sequence, TypeVar
+
+from emote.utils.threading import AtomicInt, LockedResource
+
+
+T = TypeVar("T")
+
+
@dataclass(frozen=True)
class StorageItemHandle(Generic[T]):
    """
    A handle that represents a storage item.
    Can be safely exposed to users.
    Not cryptographically safe: handles are guessable.

    You can convert this handle from and to strings using `str(handle)` and
    `StorageItemHandle.from_string(string)`.
    """

    handle: int

    @staticmethod
    def from_string(value: str) -> Optional["StorageItemHandle"]:
        """Parses a handle from its string representation.

        Returns None if the handle is invalid.
        """
        try:
            parsed = int(value)
        except ValueError:
            # Non-numeric input: not a valid handle.
            return None
        return StorageItemHandle(parsed)

    def __str__(self):
        return f"{self.handle}"
+
+
@dataclass(frozen=True)
class StorageItem(Generic[T]):
    """Immutable record describing a single file managed by a CRUDStorage."""

    # A handle that represents this item.
    # Can be safely exposed to users.
    # Not cryptographically safe: handles are guessable.
    handle: StorageItemHandle[T]
    # When the file was created (in UTC)
    timestamp: datetime
    # Path to the file in the filesystem
    filepath: str
+
+
class CRUDStorage(Generic[T]):
    """Manages a set of files on disk in a simple CRUD way. All files will be
    stored to a single directory with a name on the format
    `{prefix}{timestamp}_{index}.{extension}`.

    This class is thread-safe.
    """

    def __init__(self, directory: str, prefix: str = "", extension: str = "bin"):
        """
        :param directory: directory to store the files in; created if missing.
        :param prefix: prepended to every generated file name.
        :param extension: file extension, without the leading dot.
        """
        assert len(extension) > 0
        assert "." not in extension, "Extension should not contain a dot"

        directory = os.path.abspath(directory)
        if os.path.exists(directory) and not os.path.isdir(directory):
            # Path exists, but it is not a directory
            ENOTDIR = 20
            raise os.error(ENOTDIR, os.strerror(ENOTDIR), directory)

        os.makedirs(directory, exist_ok=True)

        self._directory = directory
        # Atomic counter used to generate unique file names without races.
        self._filename_counter = AtomicInt(0)
        # Lock-guarded list of StorageItem[T]; always access via
        # `with self._items as items:`. (Annotation fixed: this is a
        # LockedResource wrapping a list, not a plain List.)
        self._items: LockedResource = LockedResource([])
        self._extension = extension
        self._prefix = prefix

    def create_with_data(self, data: bytearray) -> StorageItem[T]:
        """Creates a new file with the given data."""

        def save(filepath):
            with open(filepath, "wb") as f:
                f.write(data)

        return self.create_with_saver(save)

    def create_from_filepath(self, filepath: str) -> StorageItem[T]:
        """Creates a new entry for an existing file.

        The file must already be in the directory that this storage
        manages. It does not need to conform to the naming convention
        that the CRUDStorage normally uses.
        """
        assert os.path.isfile(filepath), "File does not exist"
        if os.path.dirname(os.path.abspath(filepath)) != self._directory:
            raise Exception(
                f"Cannot add '{filepath}' to the storage because it "
                f" is not in the storage directory '{self._directory}'"
            )

        utcdate = datetime.utcnow()
        handle = StorageItemHandle(self._filename_counter.increment())
        item = StorageItem(timestamp=utcdate, filepath=filepath, handle=handle)

        with self._items as items:
            items.append(item)
        return item

    def create_with_saver(self, saver: Callable[[str], None]) -> StorageItem[T]:
        """Creates a new file by saving it via the provided function.

        The function will be called with the path at which the file
        should be saved.
        """
        if not os.path.isdir(self._directory):
            raise Exception(f"The storage directory ({self._directory}) has been deleted")

        # Get the local time
        date = datetime.now()
        # Get the time in UTC. Converting between timezones is a bit annoying without external
        # libraries in python.
        utcdate = datetime.utcnow()

        # Try to find a valid filename.
        # In rare cases where files exist that we didn't know about we may have
        # to increment _filename_counter multiple times to find a valid filename.
        while True:
            # We need to use an atomic int here to ensure there is no race condition when
            # generating file paths. We don't want it to be possible to call `create_with_saver`
            # from two threads at the same time and they both try to write to the same file path.
            handle = StorageItemHandle(self._filename_counter.increment())
            # The filename is formatted in local time for ease of use
            datestr = date.strftime(r"%Y-%m-%d_%H-%M")
            filename = f"{self._prefix}{datestr}_{handle}.{self._extension}"
            filepath = os.path.join(self._directory, filename)
            if not os.path.exists(filepath):
                break

        item = StorageItem(timestamp=utcdate, filepath=filepath, handle=handle)
        saver(item.filepath)
        assert os.path.isfile(
            item.filepath
        ), f"Saver did not save the data to the provided filepath {item.filepath}"
        with self._items as items:
            items.append(item)
        return item

    def update(self, handle: StorageItemHandle[T], data: bytearray):
        """Updates an existing file with the given contents."""
        item = self.get(handle)
        assert item is not None, "Invalid handle"
        with open(item.filepath, "wb") as f:
            f.write(data)

    def items(self) -> Sequence[StorageItem[T]]:
        """
        :returns: a sequence of all files owned by this storage.
        """
        with self._items as items:
            # Return a copy of the list to ensure it can be safely handed to different threads
            # without the list potentially being modified while they are using it.
            return items[:]

    def delete(self, handle: StorageItemHandle[T]) -> bool:
        """Deletes an existing file owned by this storage.

        :returns: True if a file was deleted, and false if the file was
            not owned by this storage.
        :raises: Exception if this storage contains an entry for the
            file, but it has been deleted on disk without going through
            the CRUDStorage.
        """
        with self._items as items:
            for item in items:
                if item.handle != handle:
                    continue

                items.remove(item)
                try:
                    os.remove(item.filepath)
                except FileNotFoundError as e:
                    raise Exception(
                        f"The file {item.filepath} has already been deleted "
                        " without going through the CRUDStorage"
                    ) from e
                return True

        return False

    def get(self, handle: StorageItemHandle[T]) -> Optional[StorageItem[T]]:
        """
        :returns: The storage item corresponding handle or None if it was not found
        """

        with self._items as items:
            # Slow, but this class is not expected to have to handle a large number of files
            return next((item for item in items if item.handle == handle), None)

    def latest(self) -> Optional[StorageItem[T]]:
        """The last storage item that was added to the storage.

        If items have been deleted, this is the last item of the ones
        that remain.
        """
        with self._items as items:
            if items:
                return items[-1]

        return None
diff --git a/emote/extra/onnx_exporter.py b/emote/extra/onnx_exporter.py
new file mode 100644
index 00000000..e29da4ce
--- /dev/null
+++ b/emote/extra/onnx_exporter.py
@@ -0,0 +1,313 @@
+from __future__ import annotations
+
+import copy
+import io
+import logging
+import time
+import warnings
+
+from queue import Empty, Queue
+from threading import Event
+from typing import Any, Mapping, Optional, Sequence
+
+import onnx
+import torch
+
+from google.protobuf import text_format
+
+from emote.callback import Callback
+from emote.extra.crud_storage import CRUDStorage, StorageItem, StorageItemHandle
+from emote.mixins.logging import LoggingMixin
+from emote.proxies import AgentProxy
+from emote.utils.spaces import MDPSpace
+from emote.utils.threading import LockedResource
+from emote.utils.timed_call import BlockTimers
+
+
class QueuedExport:
    """A single deferred ONNX-export request.

    The requesting thread enqueues this object and blocks on
    `block_until_complete`; the main thread later calls `process`, which
    performs the export and wakes the requester.
    """

    def __init__(self, metadata: Optional[Mapping[str, str]]):
        self.metadata = metadata
        self.return_value = None
        self._event = Event()

    def process(self, storage: "OnnxExporter"):
        """Performs the export on the calling thread and signals completion."""
        self.return_value = storage._export_onnx(self.metadata)
        self._event.set()

    def block_until_complete(self):
        """Waits until `process` has run, then returns the export result."""
        self._event.wait()
        return self.return_value
+
+
+def _get_version():
+ try:
+ from importlib.metadata import version
+
+ return version("emote-rl")
+ except: # noqa
+ return "unknown-version"
+
+
+def _save_protobuf(path, message, as_text: bool = False):
+ import os
+
+ dir_name = os.path.dirname(path)
+ if dir_name:
+ os.makedirs(dir_name, exist_ok=True)
+ if as_text:
+ with open(path, "w") as f:
+ f.write(text_format.MessageToString(message))
+ else:
+ with open(path, "wb") as f:
+ f.write(message.SerializeToString())
+
+
class OnnxExporter(LoggingMixin, Callback):
    """Handles onnx exports of a ML policy.

    Call `export` whenever you want to save an onnx version of the
    current model, or `export_threadsafe` if you're outside the
    training loop.

    Parameters:
    :param agent_proxy: the agent API to export
    :param spaces: The spaces describing the model inputs and outputs
    :param requires_epsilon: If true, the API should accept an input epsilon per action
    :param directory: path to the directory where the files should be created. If it does not exist
        it will be created.
    :param interval: if provided, will automatically export ONNX files at this cadence.
    :param prefix: all file names will have this prefix.
    :param device: if provided, will transfer the model inputs to this device before exporting.
    """

    __HAS_INSERTED_FILTER = False

    def __init__(
        self,
        agent_proxy: AgentProxy,
        spaces: MDPSpace,
        requires_epsilon: bool,
        directory: str,
        interval: int | None = None,
        prefix: str = "savedmodel_",
        device: torch.device | None = None,
    ):
        super().__init__(cycle=interval)

        if not OnnxExporter.__HAS_INSERTED_FILTER:
            OnnxExporter.__HAS_INSERTED_FILTER = True
            # This is caused by our old version of torch
            warnings.filterwarnings("ignore", "Skipping _decide_input_format.*")

            # https://github.com/pytorch/pytorch/issues/74799
            warnings.filterwarnings("ignore", "Model has no forward function")

        input_names = agent_proxy.input_names
        input_shapes = [(k, spaces.state.spaces[k].shape) for k in input_names]

        if requires_epsilon:
            # Epsilon is an extra exploration input shaped like the actions.
            input_names = (*input_names, "epsilon")
            input_shapes.append(
                (
                    "epsilon",
                    (*spaces.actions.shape,),
                )
            )

        output_shapes = [("actions", (*spaces.actions.shape,))]

        self.policy = agent_proxy.policy
        self.storage = CRUDStorage(directory, prefix, extension="onnx")
        self.queued_exports = Queue()
        self.export_counter = 0

        # Cache the version tag on startup.
        # It takes about 20ms to calculate
        self.version_tag = _get_version()
        self.scopes = BlockTimers()

        self.inputs = input_shapes
        self.outputs = output_shapes

        self._device = device

        self._metadata = LockedResource({})

    def add_metadata(self, key: str, value: Any):
        """Registers a metadata key/value pair attached to future exports.

        :param key: metadata key; must be a string.
        :param value: metadata value; converted with `str()` if necessary.
        :raises TypeError: if key is not a string, or value cannot be
            converted to a string.
        """
        if not isinstance(key, str):
            raise TypeError(f"key must be a string, but got {type(key)}")

        if not isinstance(value, str):
            try:
                value = str(value)
            except Exception as e:
                # bugfix: this previously used `{key:r}`, an invalid format
                # spec that raised ValueError while building the error message;
                # `!r` is the correct repr conversion.
                raise TypeError(
                    f"value must be a string, but got {type(value)} for key {key!r} which is"
                    " not convertible to a string."
                ) from e

        with self._metadata as m:
            m[key] = value

    def end_batch(self):
        self.process_pending_exports()

    def end_cycle(self):
        self.export()

        for name, (mean, var) in self.scopes.stats().items():
            self.log_scalar(f"onnx_export/{name}_ms", mean * 1000.0)
            self.log_scalar(f"onnx_export/{name}_var_ms", var * 1000.0)

    def process_pending_exports(self):
        """If you are using `export_threadsafe` the main thread must call this
        method regularly to make sure things are actually exported."""
        while self.queued_exports.qsize() > 0:
            try:
                item = self.queued_exports.get_nowait()
            except Empty:
                return
            item.process(self)

    def _trace(self):
        """Exports the current policy to an in-memory ONNX model proto."""
        with self.scopes.scope("policycopy"):
            # Deep-copy so tracing cannot disturb the live training policy.
            policy = copy.deepcopy(self.policy)

        policy.train(False)

        with self.scopes.scope("trace"):
            args = []

            for _, shape in self.inputs:
                arg = torch.randn(1, *shape).detach()
                if self._device is not None:
                    arg = arg.to(self._device)

                args.append(arg)

            # NOTE: This might seem like a good use case for torch.jit.trace,
            # but it unfortunately leaks a full copy of the neural network.
            # See: https://github.com/pytorch/pytorch/issues/82532

            with io.BytesIO() as f:
                torch.onnx.export(
                    model=policy,
                    args=tuple(args),
                    f=f,
                    input_names=list(map(lambda pair: pair[0], self.inputs)),
                    output_names=list(map(lambda pair: pair[0], self.outputs)),
                    dynamic_axes={
                        **{pair[0]: {0: "N"} for pair in self.inputs},
                        **{pair[0]: {0: "N"} for pair in self.outputs},
                    },
                    opset_version=13,
                )

                f.seek(0)
                model_proto = onnx.load_model(f)

        return model_proto

    def _export_onnx(self, metadata: Optional[Mapping[str, str]]) -> StorageItem:
        """Traces the policy, stamps it with metadata, and stores it on disk."""

        def save_inner(export_path: str):
            with self.scopes.scope("save"):
                model_proto = self._trace()
                model_version = self.export_counter
                self.export_counter += 1

                model_proto.producer_name = "emote"
                model_proto.domain = "dev.embark.ml"
                model_proto.producer_version = self.version_tag
                model_proto.model_version = model_version
                model_proto.doc_string = "exported via Emote checkpointer"

                if metadata is not None:
                    onnx.helper.set_model_props(model_proto, metadata)

                _save_protobuf(export_path, model_proto)

        with self.scopes.scope("create"):
            return self.storage.create_with_saver(save_inner)

    def _export(self, metadata: Optional[Mapping[str, str]], sync: bool) -> StorageItem:
        """Validates metadata, merges it with registered defaults, and queues the export."""
        cleaned_metadata = {}

        if metadata is not None:
            for k, v in metadata.items():
                if not isinstance(k, str):
                    raise TypeError(
                        f"metadata keys must be strings, but got {type(k)} for key {k!r}"
                    )

                if not isinstance(v, str):
                    try:
                        v = str(v)
                    except Exception as e:
                        raise TypeError(
                            f"metadata values must be strings, but got {type(v)} for key {k!r}"
                            " which is not convertible to a string."
                        ) from e

                cleaned_metadata[k] = v

            metadata = cleaned_metadata

        # nb: order matters here. We want to merge metadata coming in through the function call with
        # the metadata we have already set on the exporter, with priority given to the metadata
        # passed in through the function call.

        with self._metadata as m:
            if metadata is not None:
                metadata = m | metadata

            else:
                metadata = copy.copy(m)

        # The actual onnx export needs to be done on the main thread.
        item = QueuedExport(metadata)
        self.queued_exports.put(item)
        if sync:
            # This will cause block_until_complete to never block
            # because the work will have been completed already.
            self.process_pending_exports()

        return item.block_until_complete()

    def export_threadsafe(self, metadata=None) -> StorageItem:
        """Same as `export`, but it can be called in threads other than the
        main thread.

        This method relies on the main thread calling `process_pending_exports` from time to time.
        You cannot call this method from the main thread. It will block indefinitely.
        """
        return self._export(metadata, False)

    def export(self, metadata=None) -> StorageItem:
        """Serializes a model to onnx and saves it to disk.

        This must only be called from the main thread. That is, the
        thread which has ownership over the model and that modifies it.
        This is usually the thread that has the training loop.
        """
        logging.info("Starting ONNX export...")
        start_time = time.time()
        storage_item = self._export(metadata, True)
        elapsed_time = time.time() - start_time
        logging.info(
            f"ONNX Export completed in {elapsed_time} seconds. \n"
            f"ONNX timestamp: {storage_item.timestamp} \n"
            f"and filepath: {storage_item.filepath}"
        )
        return storage_item

    def delete(self, handle: StorageItemHandle) -> bool:
        """Deletes the stored export for `handle`; True if something was removed."""
        return self.storage.delete(handle)

    def get(self, handle: StorageItemHandle) -> Optional[StorageItem]:
        """Returns the storage item for `handle`, or None if not found.

        (Annotation fixed: this returns an Optional[StorageItem], not a bool.)
        """
        return self.storage.get(handle)

    def items(self) -> Sequence[StorageItem]:
        return self.storage.items()

    def latest(self) -> Optional[StorageItem]:
        return self.storage.latest()
diff --git a/emote/extra/schedules.py b/emote/extra/schedules.py
new file mode 100644
index 00000000..ef49c82e
--- /dev/null
+++ b/emote/extra/schedules.py
@@ -0,0 +1,187 @@
+import math
+
+from dataclasses import dataclass
+
+from emote.utils.math import truncated_linear
+
+
@dataclass
class BPStepScheduler:
    """Interpolates a value between value_min and value_max as the bp step
    moves from bp_step_begin to bp_step_end, via `truncated_linear`.

    NOTE(review): behavior outside [bp_step_begin, bp_step_end] depends on
    `truncated_linear` (presumably clamped) — confirm against its definition.
    """

    bp_step_begin: float
    bp_step_end: float
    value_min: float
    value_max: float

    def evaluate_at(self, bp):
        # Delegate the (truncated) linear interpolation to the shared helper.
        return truncated_linear(
            self.bp_step_begin, self.bp_step_end, self.value_min, self.value_max, bp
        )
+
+
class Schedule:
    """Base class for scheduled values.

    Args:
        initial (float): Starting value, returned until `step` changes it.
        final (float): Value that subclasses approach over `steps` steps.
        steps (int): Length of the schedule in steps.
    """

    def __init__(self, initial: float, final: float, steps: int):
        self.initial = initial
        self.final = final
        self.steps = steps

        self._current_value = initial
        self._step_count = 0

    @property
    def value(self):
        """The current scheduled value."""
        return self._current_value

    def step(self):
        """Advances the schedule; the base class keeps the value constant."""
        pass
+
+
class ConstantSchedule(Schedule):
    """Constant value that doesn't change over time.

    Args:
        value (float): Value of the schedule.
    """

    def __init__(self, value):
        # No final value or step count: the base class never changes the value.
        super().__init__(value, None, None)
+
+
class LinearSchedule(Schedule):
    """Linear interpolation between initial and final over steps timesteps.
    After this many timesteps, final is returned.

    Args:
        initial (float): Initial value.
        final (float): Final value.
        steps (int): Number of steps.
        use_staircase (bool, optional): Use step like decay. Defaults to False.
        staircase_steps (int, optional): The number of discrete steps. Defaults to 5.
    """

    def __init__(
        self,
        initial: float,
        final: float,
        steps: int,
        use_staircase: bool = False,
        staircase_steps: int = 5,
    ):
        super().__init__(initial, final, steps)
        self.use_staircase = use_staircase
        self.staircase_steps = staircase_steps

    def step(self):
        """Advances one step and recomputes the interpolated value."""
        progress = self._step_count / self.steps
        if self.use_staircase:
            # Quantize progress so the value moves in discrete plateaus.
            progress = math.floor(progress * self.staircase_steps) / self.staircase_steps
        # Clamp so the value settles at `final` once `steps` is exceeded.
        progress = min(progress, 1.0)

        self._current_value = self.initial + progress * (self.final - self.initial)

        self._step_count += 1
+
+
class CyclicSchedule(Schedule):
    """Cyclic schedule.

    Args:
        initial (float): Initial value.
        final (float): Final value.
        half_period_steps (int): Number of steps in one half of the cycle.
        mode (str, optional): One of {triangular, triangular2}. Defaults to "triangular".

    * triangular: A basic triangular cycle without amplitude scaling.
    * triangular2: A basic triangular cycle that scales initial amplitude by half each cycle.

    ** Note: for triangular2, the final value is the boundary that is scaled down
    at each cycle iteration,
    meaning that the value of the scheduled parameter will settle around initial.
    """

    def __init__(
        self,
        initial: float,
        final: float,
        half_period_steps: int,
        mode: str = "triangular",
    ):
        super().__init__(initial, final, half_period_steps)

        self.mode = mode

        # NOTE(review): an unrecognized mode leaves `scale_fn` unset and
        # `step` will raise AttributeError — confirm whether that is intended.
        if self.mode == "triangular":
            self.scale_fn = self._triangular_scale_fn
        elif self.mode == "triangular2":
            self.scale_fn = self._triangular2_scale_fn

    def _triangular_scale_fn(self, x: float) -> float:
        # Constant amplitude for every cycle.
        return 1

    def _triangular2_scale_fn(self, x: float) -> float:
        # Halve the amplitude on each successive cycle.
        return 1 / (2.0 ** (x - 1))

    def step(self):
        # `cycle` counts full periods (two half-periods each); `x` is the
        # distance from the nearest cycle peak, normalized to [0, 1].
        cycle = math.floor(1 + self._step_count / (2 * self.steps))
        x = math.fabs(self._step_count / self.steps - 2 * cycle + 1)

        self._current_value = self.initial + (self.final - self.initial) * max(
            0, (1 - x)
        ) * self.scale_fn(cycle)

        self._step_count += 1
+
+
class CosineAnnealing(Schedule):
    """Cosine annealing schedule.

    Args:
        initial (float): Initial value.
        final (float): Final value.
        steps (int): Number of steps.
    """

    def __init__(self, initial: float, final: float, steps: int):
        super().__init__(initial, final, steps)

    def step(self):
        # Incremental (recursive) form of cosine annealing: each call updates
        # the current value from the previous one, mirroring PyTorch's
        # CosineAnnealingLR recurrence rather than a closed-form evaluation.
        if self._step_count > 0:
            if (self._step_count - 1 - self.steps) % (2 * self.steps) == 0:
                # Restart boundary of the cosine wave: nudge the value back up.
                self._current_value += (
                    (self.initial - self.final) * (1 - math.cos(math.pi / self.steps)) / 2
                )
            else:
                # Regular step: scale the distance to `final` by the ratio of
                # consecutive cosine factors.
                self._current_value = (1 + math.cos(math.pi * self._step_count / self.steps)) / (
                    1 + math.cos(math.pi * (self._step_count - 1) / self.steps)
                ) * (self._current_value - self.final) + self.final

        self._step_count += 1
+
+
class CosineAnnealingWarmRestarts(Schedule):
    """Cosine annealing schedule with warm restarts.

    The value follows a half cosine wave from `initial` down to `final`
    over `steps` steps, then jumps back to `initial` and repeats.

    Args:
        initial (float): Initial value.
        final (float): Final value.
        steps (int): Number of steps.
    """

    def __init__(self, initial: float, final: float, steps: int):
        super().__init__(initial, final, steps)

    def step(self):
        # Warm restart: wrap the step counter at the period boundary so the
        # cosine wave restarts from its maximum.
        if self._step_count >= self.steps:
            self._step_count %= self.steps

        # Closed-form cosine interpolation between `initial` and `final`.
        self._current_value = (
            self.final
            + (self.initial - self.final)
            * (1 + math.cos(math.pi * self._step_count / self.steps))
            / 2
        )

        self._step_count += 1
diff --git a/emote/extra/system_logger.py b/emote/extra/system_logger.py
new file mode 100644
index 00000000..39911b9f
--- /dev/null
+++ b/emote/extra/system_logger.py
@@ -0,0 +1,39 @@
+"""Logger that logs the memory consumption and memory consumption growth
+rate."""
+
+import os
+
+import psutil
+
+from emote.callback import Callback
+from emote.mixins.logging import LoggingMixin
+
+
class SystemLogger(LoggingMixin, Callback):
    """Logs process RAM usage, RAM growth per backprop step, and CPU load.

    Metrics are emitted once per cycle (every 1000 backprop steps).
    """

    def __init__(self):
        super().__init__(cycle=1_000)
        self._proc = psutil.Process(os.getpid())
        self._previous_memory = self._proc.memory_info().rss / (1024 * 1024)
        self._previous_bp_step = 0

    def end_cycle(self, bp_step, bp_samples):
        """Emit system metrics at the end of each training cycle."""
        ram_mb = self._proc.memory_info().rss / (1024 * 1024)
        self.log_scalar("system/ram_usage_mb", ram_mb)

        # Growth rate is averaged over the backprop steps since last cycle.
        steps_elapsed = bp_step - self._previous_bp_step
        if steps_elapsed > 0:
            growth_mb = ram_mb - self._previous_memory
            self.log_scalar("system/ram_usage_growth_mb_step", growth_mb / steps_elapsed)

        self._previous_memory = ram_mb
        self._previous_bp_step = bp_step

        # unix-style summed load
        self.log_scalar("system/cpu_load", self._proc.cpu_percent())

        # Requires torch 1.11
        # import torch
        # if torch.cuda.is_available() and torch.cuda.is_initialized():
        #     gpu_load = torch.cuda.utilization()
        #     self.log_scalar("system/gpu_load", gpu_load)
diff --git a/emote/memory/BUILD b/emote/memory/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/memory/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/memory/README.md b/emote/memory/README.md
new file mode 100644
index 00000000..e69de29b
diff --git a/emote/memory/__init__.py b/emote/memory/__init__.py
new file mode 100644
index 00000000..5572874b
--- /dev/null
+++ b/emote/memory/__init__.py
@@ -0,0 +1,84 @@
+"""
+This module contains all the major building blocks for our memory
+implementation. The memory was developed in the same time period as
`DeepMind's Reverb <https://github.com/google-deepmind/reverb>`_, and
shares naming with it, which in turn borrows from databases. Unlike
Reverb, we do not have the RateSamplers (though they could be added). We
also do not share data between ArrayTables.
+
+The goal of the memory is to provide a unified interface for all types of
+machine learning tasks. This is achieved by focusing on configuration and
+pluggability over code-driven functionality.
+
+Currently, there are three main points of customization:
+
+* Shape and type of data
+* Insertion, sampling, and eviction
+* Data transformation and generation
+
+
+## High-level parts
+
+
+### ArrayTable
+
+
+A table is a datastructure containing a specific type of data that shares the same high-level structure.
+
+### Columns and Virtual Columns
+
+
+A column is a storage for a specific type of data where each item is
+the same shape and type. A virtual column is like a column, but it
+references another column and does data synthesization or modification
+w.r.t that. For example, dones and masks are synthetic data based only
+on indices.
+
+### Adaptors
+
+
Adaptors are another approach to virtual columns but are more suited for
transforming the whole batch, such as scaling or reshaping specific
data. Since this step occurs after the data has already been converted to
tensors, the full power of PyTorch is available here and gradients will be
correctly tracked.
+
+### Strategies, Samplers and Ejectors
+
+Strategies are based on the delegate pattern, where we can inject implementation
+details through objects instead of using inheritance. Strategies define the API
+for sampling and ejection from memories, and are queried from the table upon
+sampling and insertion.
+
+Samplers and Ejectors track the data (but do not own it!). They are used by the
+table for sampling and ejection based on the policy they implement. Currently we
+have Fifo and Uniform samplers and ejectors, but one could have prioritized
+samplers/ejectors, etc.
+
+### Proxy Wrappers
+
+Wrappers live around the memory proxy and extend functionality. This is a great point for data conversion, validation, and logging.
+"""
+
+from .callbacks import MemoryImporterCallback
+from .memory import (
+ JointMemoryLoader,
+ LoggingProxyWrapper,
+ MemoryExporterProxyWrapper,
+ MemoryLoader,
+ MemoryWarmup,
+ TableMemoryProxy,
+)
+from .table import Table
+
+
+__all__ = [
+ "Table",
+ "TableMemoryProxy",
+ "MemoryLoader",
+ "MemoryExporterProxyWrapper",
+ "MemoryImporterCallback",
+ "LoggingProxyWrapper",
+ "MemoryWarmup",
+ "JointMemoryLoader",
+]
diff --git a/emote/memory/adaptors.py b/emote/memory/adaptors.py
new file mode 100644
index 00000000..10ef2629
--- /dev/null
+++ b/emote/memory/adaptors.py
@@ -0,0 +1,121 @@
+""""""
+
+from typing import Callable, List, Optional
+
+import torch
+
+from emote.memory.core_types import SampleResult
+
+
+Adaptor = Callable[[SampleResult, int, int], SampleResult]
+
+
class DictObsAdaptor:
    """Converts multiple observation columns to a single dict observation.

    :param keys: The dictionary keys to extract
    :param output_keys: The output names for the extracted keys.
        Defaults to the same name.
    :param with_next: If True, also moves the "next_{key}" column of
        each key into the "next_observation" dict.
    """

    def __init__(
        self,
        keys: List[str],
        output_keys: Optional[List[str]] = None,
        with_next: bool = True,
    ):
        if output_keys is None:
            output_keys = keys
        else:
            assert len(keys) == len(output_keys)
        self.key_map = list(zip(keys, output_keys))
        self.with_next = with_next

    def __call__(self, result: SampleResult, count: int, sequence_length: int) -> SampleResult:
        """Group the configured keys into (next_)observation dicts."""
        observation = {}
        next_observation = {}
        for source_key, target_key in self.key_map:
            observation[target_key] = result.pop(source_key)
            if self.with_next:
                next_observation[target_key] = result.pop("next_" + source_key)

        result["observation"] = observation
        result["next_observation"] = next_observation
        return result
+
+
class KeyScaleAdaptor:
    """Multiplies the data sampled under a given key by a fixed factor.

    :param key: The key for which to scale data
    :param scale: The scale factor to apply
    """

    def __init__(self, scale, key):
        self.scale = torch.tensor(scale)
        self.key = key

    def __call__(self, result: SampleResult, count: int, sequence_length: int) -> SampleResult:
        """Scale ``result[self.key]`` and return the batch."""
        # In-place multiply: `*=` mutates the stored tensor rather than
        # rebinding, so any views of it observe the scaling too.
        result[self.key] *= self.scale
        return result
+
+
class KeyCastAdaptor:
    """An adaptor to cast a specified sampled key.

    :param key: The key for which to cast data
    :param dtype: The dtype to cast to.
    """

    def __init__(self, dtype, key):
        self.key = key
        self.dtype = dtype

    def __call__(self, result: SampleResult, count: int, sequence_length: int) -> SampleResult:
        """Cast ``result[self.key]`` to ``self.dtype`` and return the batch.

        Fix: ``Tensor.to`` is not in-place — the original discarded the
        converted tensor, so the cast silently never took effect. Store
        the converted tensor back into the batch.
        """
        result[self.key] = result[self.key].to(self.dtype)
        return result
+
+
class TerminalAdaptor:
    """An adaptor to apply tags from detailed terminal tagging.

    Overrides the sampled mask data (``target_key``) with each episode's
    stored terminal-mask value (``value_key``) at end-of-episode steps.

    :param value_key: the key containing the terminal mask value to
        apply
    :param target_key: the default mask data to override
    """

    def __init__(self, target_key: str, value_key: str) -> None:
        self.target_key = target_key
        self.value_key = value_key

    def __call__(self, result: SampleResult, count: int, sequence_length: int) -> SampleResult:
        # Note: The below code assumes that both terminal tags and the masks are
        # always 1.0 or 0.0.

        # reshapes the input data to [batch_size, time_dimension, ...] so we
        # can correctly overlay the terminal tags - otherwise we get a dimension
        # mismatch.
        target = result[self.target_key]
        value = result[self.value_key]

        result_shape = target.shape

        new_value_shape = (-1, sequence_length, *result_shape[1:])
        value_reshaped = torch.reshape(value, new_value_shape)

        # compute a selection mask which is true for every non-end-of-episode step
        indice_mask = target == 1.0
        # where it is true, simply use the existing value in
        # result[target_key], otherwise use the terminal-state tag value
        # from result[value_key]
        # NOTE(review): `value_reshaped[:, -1]` picks the last step of each
        # sequence and relies on broadcasting against `target`'s flat shape;
        # assumes tags are constant per episode window — confirm.
        result_reshaped = torch.where(
            indice_mask,
            target,
            value_reshaped[:, -1],
        )

        # reshape back
        result[self.target_key] = torch.reshape(result_reshaped, result_shape)
        return result
diff --git a/emote/memory/builder.py b/emote/memory/builder.py
new file mode 100644
index 00000000..a63466dd
--- /dev/null
+++ b/emote/memory/builder.py
@@ -0,0 +1,205 @@
+""""""
+
+from typing import List
+
+import numpy as np
+import torch
+
+from emote.memory.strategy import SampleStrategy
+
+from ..utils import MDPSpace
+from .adaptors import DictObsAdaptor, TerminalAdaptor
+from .column import Column, TagColumn, VirtualColumn
+from .fifo_strategy import FifoEjectionStrategy
+from .storage import NextElementMapper, SyntheticDones
+from .table import ArrayTable
+from .uniform_strategy import UniformSampleStrategy
+
+
class DictTable(ArrayTable):
    """An array table using dict observations, FIFO ejection, and an
    optional detailed terminal-mask column.

    :param use_terminal_column: when true, adds a "terminal" tag column
        and a TerminalAdaptor that overrides the sampled masks with it.
    :param obs_keys: observation keys grouped into the "observation" /
        "next_observation" dicts when sampling.
    :param columns: the data columns to store.
    :param maxlen: maximum number of transitions held.
    :param length_key: the column whose length defines episode length.
    :param sampler: sampling strategy; defaults to uniform sampling.
    :param device: torch device sampled tensors are placed on.
    """

    def __init__(
        self,
        *,
        use_terminal_column: bool,
        obs_keys: List[str],
        columns: List[Column],
        maxlen: int,
        length_key="actions",
        sampler: SampleStrategy = None,
        device: torch.device,
    ):
        if sampler is None:
            sampler = UniformSampleStrategy()

        # Fix: copy the caller's column list before extending it. The
        # original appended in place, mutating the argument the caller
        # passed in.
        columns = list(columns)

        adaptors = [DictObsAdaptor(obs_keys)]
        if use_terminal_column:
            columns.append(
                TagColumn(
                    name="terminal",
                    shape=tuple(),
                    dtype=np.float32,
                )
            )
            adaptors.append(TerminalAdaptor("terminal", "masks"))

        super().__init__(
            columns=columns,
            maxlen=maxlen,
            sampler=sampler,
            ejector=FifoEjectionStrategy(),
            length_key=length_key,
            adaptors=adaptors,
            device=device,
        )
+
+
class DictObsTable(DictTable):
    """Create a memory suited for Reinforcement Learning Tasks with 1-Step
    Bellman Backup with a single bootstrap value, and using dictionary
    observations as network inputs."""

    def __init__(
        self,
        *,
        spaces: MDPSpace,
        use_terminal_column: bool = False,
        maxlen: int = 1_000_000,
        device: torch.device,
        dones_dtype=bool,
        masks_dtype=np.float32,
        sampler: SampleStrategy = None,
    ):
        # Rewards fall back to a scalar float32 column when the space
        # does not declare them.
        if spaces.rewards is not None:
            rewards = Column(
                name="rewards",
                dtype=spaces.rewards.dtype,
                shape=spaces.rewards.shape,
            )
        else:
            rewards = Column(name="rewards", dtype=np.float32, shape=(1,))

        # "dones" and "masks" are synthesized from episode boundaries of
        # the "actions" column rather than stored explicitly.
        columns = [
            Column(
                name="actions",
                dtype=spaces.actions.dtype,
                shape=spaces.actions.shape,
            ),
            VirtualColumn(
                name="dones",
                dtype=dones_dtype,
                shape=(1,),
                target_name="actions",
                mapper=SyntheticDones,
            ),
            VirtualColumn(
                name="masks",
                dtype=masks_dtype,
                shape=(1,),
                target_name="actions",
                mapper=SyntheticDones.as_mask,
            ),
            rewards,
        ]

        # Each observation key also gets a virtual "next_" column reading
        # the following element of the same stored column.
        obs_keys = list(spaces.state.spaces.keys())
        for key, space in spaces.state.spaces.items():
            columns.append(Column(name=key, dtype=space.dtype, shape=space.shape))
            columns.append(
                VirtualColumn(
                    name="next_" + key,
                    dtype=space.dtype,
                    shape=space.shape,
                    target_name=key,
                    mapper=NextElementMapper,
                )
            )

        if sampler is None:
            sampler = UniformSampleStrategy()

        super().__init__(
            use_terminal_column=use_terminal_column,
            maxlen=maxlen,
            columns=columns,
            obs_keys=obs_keys,
            sampler=sampler,
            device=device,
        )
+
+
class DictObsNStepTable(DictTable):
    """Create a memory suited for Reinforcement Learning Tasks with N-Step
    Bellman Backup with a single bootstrap value, and using dictionary
    observations as network inputs."""

    def __init__(
        self,
        *,
        spaces: MDPSpace,
        use_terminal_column: bool,
        maxlen: int,
        sampler: SampleStrategy = None,
        device: torch.device,
    ):
        # Rewards fall back to a scalar float32 column when the space
        # does not declare them.
        if spaces.rewards is not None:
            rewards = Column(
                name="rewards",
                dtype=spaces.rewards.dtype,
                shape=spaces.rewards.shape,
            )
        else:
            rewards = Column(name="rewards", dtype=np.float32, shape=(1,))

        # "dones" and "masks" are synthesized from episode boundaries of
        # the "actions" column rather than stored explicitly.
        columns = [
            Column(
                name="actions",
                dtype=spaces.actions.dtype,
                shape=spaces.actions.shape,
            ),
            VirtualColumn(
                name="dones",
                dtype=bool,
                shape=(1,),
                target_name="actions",
                mapper=SyntheticDones,
            ),
            VirtualColumn(
                name="masks",
                dtype=np.float32,
                shape=(1,),
                target_name="actions",
                mapper=SyntheticDones.as_mask,
            ),
            rewards,
        ]

        # For N-step backups only the final element of each sampled window
        # is exposed as the "next" observation.
        obs_keys = list(spaces.state.spaces.keys())
        for key, space in spaces.state.spaces.items():
            columns.append(Column(name=key, dtype=space.dtype, shape=space.shape))
            columns.append(
                VirtualColumn(
                    name="next_" + key,
                    dtype=space.dtype,
                    shape=space.shape,
                    target_name=key,
                    mapper=NextElementMapper.with_only_last,
                )
            )

        if sampler is None:
            sampler = UniformSampleStrategy()

        super().__init__(
            use_terminal_column=use_terminal_column,
            maxlen=maxlen,
            columns=columns,
            obs_keys=obs_keys,
            sampler=sampler,
            device=device,
        )
diff --git a/emote/memory/callbacks.py b/emote/memory/callbacks.py
new file mode 100644
index 00000000..89433ae5
--- /dev/null
+++ b/emote/memory/callbacks.py
@@ -0,0 +1,35 @@
+import logging
+import os
+
+from emote.callback import Callback
+from emote.memory.table import Table
+
+
class MemoryImporterCallback(Callback):
    """Load and validate a previously exported memory."""

    def __init__(
        self,
        memory: Table,
        target_memory_name: str,
        experiment_load_dir: str,
        load_fname_override=None,
    ):
        super().__init__()
        self._order = -1  # this is to ensure that this callback is called before the others
        self.memory = memory
        self._target_memory_name = target_memory_name
        self._load_fname_override = load_fname_override
        self._load_dir = experiment_load_dir

    def restore_state(self):
        """Restore the memory from the exported dump on disk.

        :raises FileNotFoundError: if the expected zip archive is missing.
        """
        if self._load_fname_override in (None, ""):
            file_name = f"{self._target_memory_name}_export"
        else:
            file_name = self._load_fname_override
        restore_path = os.path.join(self._load_dir, file_name)

        if not os.path.exists(restore_path + ".zip"):
            raise FileNotFoundError(f"Failed to load memory dump: {restore_path} does not exist.")

        self.memory.restore(restore_path)
        logging.info(f"Loading memory dump {restore_path}")
diff --git a/emote/memory/column.py b/emote/memory/column.py
new file mode 100644
index 00000000..579022d6
--- /dev/null
+++ b/emote/memory/column.py
@@ -0,0 +1,69 @@
+""""""
+
+from dataclasses import dataclass
+from typing import Tuple, Type
+
+import numpy as np
+
+from emote.memory.storage import VirtualStorage
+
+
@dataclass
class Column:
    """A typed column for data storage."""

    # The name of the column.
    name: str
    # Per-element shape of the stored data.
    shape: Tuple[int]
    # Element type: a numpy dtype/scalar type or a builtin type.
    dtype: type

    def state(self):
        """Return a JSON-serializable description of this column."""
        # numpy dtype instances expose `.name`; numpy scalar types and
        # builtins only have `__name__`.
        if hasattr(self.dtype, "name"):
            dtype_name = self.dtype.name
        else:
            dtype_name = self.dtype.__name__

        return {
            "shape": self.shape,
            "dtype": dtype_name,
        }

    def load_state(self, config):
        """Restore shape and dtype from a ``state()`` dictionary."""
        self.shape = config["shape"]

        dtype_name = config["dtype"]
        if dtype_name == "bool":
            # Use the builtin bool rather than np.bool_, matching how
            # columns are typically declared.
            self.dtype = bool
        elif hasattr(np, dtype_name):
            self.dtype = getattr(np, dtype_name)
        else:
            self.dtype = np.dtype(dtype_name)
+
+
@dataclass
class TagColumn(Column):
    """A typed column for tag storage.

    Behaviorally identical to :class:`Column`; the distinct subclass lets
    consumers recognize tag data (e.g. the "terminal" column) by type.
    """

    pass
+
+
@dataclass
class VirtualColumn(Column):
    """A column providing fake or transformed data via Mapper."""

    # Name of the real column this virtual column derives its data from.
    target_name: str
    # VirtualStorage subclass used to synthesize the data.
    mapper: Type[VirtualStorage]

    def state(self):
        """Extend the base serialization with the target column name."""
        config = super().state()
        config["target_name"] = self.target_name
        # TODO: add mapper configuration
        return config

    def load_state(self, config):
        """Restore the base fields plus the target column name.

        NOTE(review): the mapper is not restored here (see TODO above),
        so it must already be set on the instance being loaded into.
        """
        super().load_state(config)
        self.target_name = config["target_name"]
diff --git a/emote/memory/core_types.py b/emote/memory/core_types.py
new file mode 100644
index 00000000..2986eb69
--- /dev/null
+++ b/emote/memory/core_types.py
@@ -0,0 +1,21 @@
+"""Supporting types used for working with the memory."""
+
+from typing import Dict, Generic, Tuple, TypeVar
+
+
# Number is *either* an int or a float, but *not* covariant.
# For example: Sequence[int | float] accepts int | float
#              Sequence[Number] only accept [int, int, ...] or
#              [float, float, ...]

Number = TypeVar("Number", int, float)


# Technically far too general, but there is no good support for
# multidimensional arrays. Matrix is a typing placeholder for an
# n-dimensional array of Number (in practice a numpy array or tensor).
class Matrix(Generic[Number]):
    pass


# A sampled batch: column name -> matrix of data.
SampleResult = Dict[str, Matrix]
# (episode identity, start offset, end offset) triple used by strategies.
SamplePoint = Tuple[int, int, int]
diff --git a/emote/memory/coverage_based_strategy.py b/emote/memory/coverage_based_strategy.py
new file mode 100644
index 00000000..5f3e14b4
--- /dev/null
+++ b/emote/memory/coverage_based_strategy.py
@@ -0,0 +1,92 @@
+""""""
+
+import random
+
+from typing import Sequence
+
+import numpy as np
+
+from .core_types import SamplePoint
+from .strategy import EjectionStrategy, SampleStrategy, Strategy
+
+
class CoverageBasedStrategy(Strategy):
    """A sampler intended to sample based on coverage of experiences,
    favoring less-visited states.

    This base class can be used for implementing various coverage-based
    sampling strategies.
    """

    def __init__(self, alpha=0.5):
        super().__init__()
        # identity -> sequence length
        self._identities = {}
        # identity -> number of times sampled so far
        self._sample_count = {}
        self._ids = []
        self._prios = []
        self._dirty = False
        self._alpha = alpha

    def track(self, identity: int, sequence_length: int):
        """Register (or update) an episode and start counting its samples."""
        self._dirty = True
        self._identities[identity] = sequence_length
        self._sample_count[identity] = self._sample_count.get(identity, 0)

    def forget(self, identity: int):
        """Drop an episode from tracking."""
        self._dirty = True
        del self._identities[identity]
        del self._sample_count[identity]

    def _rebalance(self):
        """Recompute sampling priorities from lengths and visit counts."""
        self._dirty = False
        total_length = sum(self._identities.values())
        length_prios = np.array(tuple(self._identities.values())) / total_length
        self._ids = np.array(tuple(self._identities.keys()), dtype=np.int64)

        # Down-weight identities that have been sampled often.
        visit_prios = np.array(
            [1 / (self._sample_count[id] + 1) ** self._alpha for id in self._ids]
        )
        combined = length_prios * visit_prios

        # Renormalize so the priorities form a distribution.
        self._prios = combined / sum(combined)
+
+
class CoverageBasedSampleStrategy(CoverageBasedStrategy, SampleStrategy):
    """Samples sequence windows with probability favoring rarely-visited
    episodes."""

    def __init__(self, alpha=0.5):
        super().__init__(alpha=alpha)

    def sample(self, count: int, transition_count: int) -> Sequence[SamplePoint]:
        """Draw ``count`` windows of ``transition_count`` transitions."""
        if self._dirty:
            self._rebalance()

        chosen = np.random.choice(self._ids, size=count, p=self._prios)
        lengths = self._identities
        points = []
        last_window = transition_count - 1
        for identity in chosen:
            self._sample_count[identity] += 1
            # Uniform start offset within the valid range for this episode.
            start = int(random.random() * (lengths[identity] - last_window))
            points.append((identity, start, start + transition_count))

        return points
+
+
class CoverageBasedEjectionStrategy(CoverageBasedStrategy, EjectionStrategy):
    """Chooses episodes to evict, weighted by the coverage priorities."""

    def sample(self, count: int) -> Sequence[int]:
        """Pick distinct identities whose total length covers ``count``."""
        if self._dirty:
            self._rebalance()

        chosen = set()
        while count > 0:
            candidate = np.random.choice(self._ids, size=1, p=self._prios)[0]

            # Re-draw until we hit an identity we have not picked yet.
            if candidate in chosen:
                continue

            count -= self._identities[candidate]
            chosen.add(candidate)

        return list(chosen)
diff --git a/emote/memory/fifo_strategy.py b/emote/memory/fifo_strategy.py
new file mode 100644
index 00000000..210e1586
--- /dev/null
+++ b/emote/memory/fifo_strategy.py
@@ -0,0 +1,132 @@
+""""""
+
+import random
+
+from collections import deque
+from typing import Sequence
+
+from .core_types import SamplePoint
+from .strategy import EjectionStrategy, SampleStrategy, Strategy
+
+
class FifoStrategyBase(Strategy):
    """A sampler intended to sample in a first-in-first-out style across the
    whole set of experiences.

    This base class is used by both the fifo sample and ejection
    strategies.
    """

    def __init__(self):
        """Create a FIFO-based strategy."""
        super().__init__()
        # identity -> number of sampleable transitions in that episode
        self._sequence_lengths = {}
        # insertion-ordered queue of tracked episode identities
        self._identities = deque()

    def track(self, identity: int, sequence_length: int):
        """Start tracking an episode, appended at the back of the FIFO.

        :param identity: unique id for the episode
        :param sequence_length: number of sampleable transitions
        """
        # N.b. this is most likely a client bug causing us to have a repeat ID,
        # but it can occur when stopping/starting a data generator

        # NOTE(review): `_in_simple_import` is not set anywhere in this
        # class; presumably defined by the `Strategy` base — confirm.
        if self._in_simple_import:
            return

        assert identity not in self._sequence_lengths
        self._identities.append(identity)
        self._sequence_lengths[identity] = sequence_length

    def forget(self, identity: int):
        """Stop tracking an episode."""
        self._identities.remove(identity)
        del self._sequence_lengths[identity]

    def post_import(self):
        """Remap every imported identity to a negative id after import."""
        original_ids = self._identities.copy()
        for id in original_ids:
            length = self._sequence_lengths[id]
            self.forget(id)
            if id >= 0:
                # This is a guard to prevent recursive memory imports/exports,
                # as that'd make it very hard to uphold variants over time.
                self.track(-abs(id) - 1, length)

    def state(self) -> dict:
        """Serialize the strategy to a JSON-serializable dictionary."""
        return {
            "identities": list(self._identities),
            "sequence_lengths": list(self._sequence_lengths.items()),
        }

    def load_state(self, state: dict):
        """Load the strategy from a dictionary."""
        self._identities = deque(state["identities"])
        self._sequence_lengths = dict(state["sequence_lengths"])
+
+
+################################################################################
+
+
class FifoSampleStrategy(FifoStrategyBase, SampleStrategy):
    def __init__(self, per_episode: bool = True, random_offset: bool = True):
        """Create a FIFO-based sample strategy.

        :param per_episode: if true, will only sample each episode once
            in a single pass
        :param random_offset: if true will sample at a random offset in
            each episode. Will be assumed true when sampling per episode
        """
        super().__init__()
        self._per_episode = per_episode
        self._random_offset = random_offset

    def sample(self, count: int, transition_count: int) -> Sequence[SamplePoint]:
        """Sample ``count`` windows of length ``transition_count``."""
        episode_count = len(self._identities)
        points = []

        if self._per_episode:
            # One randomly-offset window per episode, round-robin in FIFO order.
            for episode_index in range(count):
                episode_id = self._identities[episode_index % episode_count]
                start = random.randint(
                    0, self._sequence_lengths[episode_id] - transition_count
                )
                points.append((episode_id, start, start + transition_count))
        else:
            episode_index = 0
            window_start = 0

            while len(points) < count:
                episode_id = self._identities[episode_index % episode_count]
                if self._random_offset:
                    start = random.randint(
                        0, self._sequence_lengths[episode_id] - transition_count
                    )
                else:
                    start = window_start

                points.append((episode_id, start, start + transition_count))
                window_start += transition_count

                # Advance to the next episode once this one is exhausted.
                if window_start + transition_count > self._sequence_lengths[episode_id]:
                    episode_index += 1
                    window_start = 0

        return points
+
+
+################################################################################
+
+
class FifoEjectionStrategy(FifoStrategyBase, EjectionStrategy):
    """Evicts the oldest episodes first."""

    def sample(self, count: int) -> Sequence[int]:
        """Return the oldest identities whose lengths sum to at least
        ``count`` transitions."""
        evicted = []

        position = 0
        while count > 0:
            episode_id = self._identities[position]

            evicted.append(episode_id)
            count -= self._sequence_lengths[episode_id]

            position += 1

        return evicted
diff --git a/emote/memory/loading.py b/emote/memory/loading.py
new file mode 100644
index 00000000..f37c3fcf
--- /dev/null
+++ b/emote/memory/loading.py
@@ -0,0 +1,69 @@
+"""Utilities for loading files into memories."""
+
+import pickle
+
+import numpy as np
+
+from .table import ArrayTable
+
+
def fill_table_from_legacy_file(
    table: ArrayTable,
    path: str,
    *,
    read_obs: bool = False,
    read_actions: bool = False,
    read_rewards: bool = False,
):
    """Load a legacy memory dump into a new-style table memory.

    :param table: The table to fill. Must contain 'obs', 'rewards', and
        'actions' columns
    :param path: The path to load from. Must be a pickle file. Extension
        is optional
    :throws: OSError if file does not exist. KeyError if
        table or file do not match the legacy format.
    """

    if not path.endswith(".pickle"):
        path += ".pickle"

    with open(path, "rb") as file_:
        state = pickle.load(file_)  # nosec B301

    # Collapse the legacy nested layout into flat per-step arrays.
    # NOTE(review): assumes the first three axes can be merged — confirm
    # against the actual legacy dump format.
    for k in ["dones", "actions", "rewards", "next_obs", "obs"]:
        array = np.array(state[k])
        state[k] = array.reshape(-1, *array.shape[3:])

    # Episode boundaries are wherever a done flag is set.
    done_indices = [i for i, d in enumerate(state["dones"]) if d]
    previous_idx = 0
    agent_idx = -1
    for done_idx in done_indices:
        # Skip episodes shorter than 10 transitions, but still consume an
        # agent id so the negative id numbering stays consistent.
        if (done_idx - previous_idx) < 10:
            previous_idx = done_idx + 1
            agent_idx -= 1
            continue

        rewards = state["rewards"][previous_idx : done_idx + 1]
        # NOTE(review): actions are sliced at twice the step indices and
        # reshaped to (-1, 2) below — the legacy format appears to store
        # 2-dimensional actions interleaved; confirm.
        actions = state["actions"][previous_idx * 2 : (done_idx + 1) * 2]
        dones = state["dones"][previous_idx : done_idx + 1]

        # Sanity: the done flag only appears at the end of the slice.
        assert not dones[0] or len(dones) == 1
        assert dones[len(dones) - 1]
        obs = [o for o in state["obs"][previous_idx : done_idx + 1]]

        # The bootstrap observation is taken from next_obs past the end.
        obs.append(state["next_obs"][done_idx + 1])

        outs = {}
        if read_obs:
            outs["obs"] = obs

        if read_actions:
            outs["actions"] = actions.reshape(-1, 2)

        if read_rewards:
            outs["rewards"] = rewards

        # Negative, decreasing agent ids mark imported data.
        table.add_sequence(agent_idx, outs)

        previous_idx = done_idx + 1
        agent_idx -= 1
diff --git a/emote/memory/memory.py b/emote/memory/memory.py
new file mode 100644
index 00000000..b77b4d94
--- /dev/null
+++ b/emote/memory/memory.py
@@ -0,0 +1,582 @@
+"""Sequence builder collates observations into sequences stored in the memory.
+
+The sequence builder is the API between "instant" based APIs such as the
+agent proxy and the episode-based functionality of the memory
+implementation. The goal of the sequence builder is to consume
+individual timesteps per agent and collate them into episodes before
+submission into the memory.
+"""
+
+from __future__ import annotations
+
+import collections
+import inspect
+import logging
+import os
+import time
+import warnings
+
+from collections import defaultdict, deque
+from dataclasses import dataclass, field
+from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
+
+import numpy as np
+import torch
+
+from torch.utils.tensorboard import SummaryWriter
+
+from emote.callback import Callback
+from emote.extra.onnx_exporter import OnnxExporter
+from emote.mixins.logging import LoggingMixin
+from emote.proxies import MemoryProxy
+from emote.trainer import TrainingShutdownException
+
+from ..typing import AgentId, DictObservation, DictResponse, EpisodeState
+from ..utils import BlockTimers, TimedBlock
+from .core_types import Matrix
+from .table import Table
+
+
@dataclass
class Episode:
    """An episode of data being constructed."""

    # Column name -> list of per-step values accumulated so far.
    data: Dict[str, List[Matrix]] = field(default_factory=lambda: defaultdict(list))

    def append(self, observation: Mapping[str, Matrix]) -> None:
        """Append one timestep's values to every column.

        Fix: the original annotated the return type as ``Tuple``
        although nothing is returned.
        """
        for k, v in observation.items():
            self.data[k].append(v)

    def complete(self, observation: Mapping[str, Matrix]) -> Mapping[str, Matrix]:
        """Append the final observation and return the collected episode."""
        self.append(observation)
        return self.data

    @staticmethod
    def from_initial(observation: Mapping[str, Matrix]) -> "Episode":
        """Create a new episode seeded with the initial observation."""
        episode = Episode()
        episode.append(observation)
        return episode
+
+
+################################################################################
+
+
class TableMemoryProxy:
    """The sequence builder wraps a sequence-based memory to build full
    episodes from [identity, observation] data.

    Not thread safe.
    """

    def __init__(
        self,
        table: Table,
        minimum_length_threshold: Optional[int] = None,
        use_terminal: bool = False,
        *,
        name: str = "default",
    ):
        # Partial episodes currently being collated, keyed by agent id.
        self._store: Dict[AgentId, Episode] = {}
        self._table = table
        if minimum_length_threshold is None:
            self._min_length_filter = lambda _: True
        else:
            key = table._length_key
            self._min_length_filter = lambda ep: len(ep[key]) >= minimum_length_threshold

        # Agent ids whose episodes already finished; only used to choose
        # the right warning when a terminal arrives with no pending data.
        self._completed_episodes: set[AgentId] = set()
        self._term_states = [EpisodeState.TERMINAL, EpisodeState.INTERRUPTED]
        self._use_terminal = use_terminal
        self._name = name

    @property
    def name(self):
        return self._name

    def size(self):
        """Number of transitions currently held by the wrapped table."""
        return self._table.size()

    def resize(self, new_size: int):
        self._table.resize(new_size)

    def store(self, path: str):
        """Persist the wrapped table to ``path``."""
        return self._table.store(path)

    def is_initial(self, identity: int):
        """Returns true if identity is not already used in a partial sequence.

        Does not validate if the identity is associated with a complete
        episode.
        """
        return identity not in self._store

    def add(
        self,
        observations: Dict[AgentId, DictObservation],
        responses: Dict[AgentId, DictResponse],
    ):
        """Collate one timestep of per-agent observations and responses.

        Non-initial steps contribute a reward; terminal/interrupted steps
        close the episode and, if long enough, push it into the table.
        """
        completed_episodes = {}
        for agent_id, observation in observations.items():
            data = {space: feature for space, feature in observation.array_data.items()}
            if observation.episode_state != EpisodeState.INITIAL:
                data["rewards"] = observation.rewards["reward"]

            else:
                assert (
                    agent_id not in self._store
                ), f"Agent {agent_id} already has an ongoing episode"

            if observation.episode_state in self._term_states:
                if self._use_terminal:
                    # The terminal value assigned here is the terminal _mask_ value,
                    # not whether it is terminal. In this case, our legacy code
                    # treated all terminals as fatal, i.e., truncated bootstrap.
                    # Since this is the terminal mask value, an interrupted
                    # episode should be 1.0 or "infinite bootstrap horizon"
                    data["terminal"] = float(observation.episode_state == EpisodeState.INTERRUPTED)

                if agent_id not in self._store:
                    # First warn that this is a new agent id:
                    if agent_id in self._completed_episodes:
                        logging.warning("agent_id has already been completed: %d", agent_id)
                    else:
                        logging.warning(
                            "agent_id completed with no previous sequence: %d", agent_id
                        )

                self._completed_episodes.add(agent_id)

                if agent_id not in self._store:
                    # Then continue without sending an empty episode to the table.
                    continue

                ep = self._store.pop(agent_id).complete(data)
                if self._min_length_filter(ep):  # else discard
                    completed_episodes[agent_id] = ep

            else:
                # Ongoing step: merge the agent's response into the episode.
                assert agent_id in responses, "Mismatch between observations and responses!"
                response = responses[agent_id]
                data.update(response.list_data)
                data.update(response.scalar_data)

                if agent_id not in self._store:
                    self._store[agent_id] = Episode.from_initial(data)

                else:
                    self._store[agent_id].append(data)

        # Flush finished episodes into the table after the whole batch is
        # processed.
        for agent_id, sequence in completed_episodes.items():
            self._table.add_sequence(agent_id, sequence)

    def timers(self):
        return self._table._timers
+
+
class MemoryProxyWrapper:
    """Base class for memory proxy wrappers.

    This class forwards non-existing method accessess to the inner
    MemoryProxy or MemoryProxyWrapper.
    """

    def __init__(self, inner: "MemoryProxyWrapper" | MemoryProxy, **kwargs):
        super().__init__(**kwargs)
        self._inner = inner

    def _lookup_class_attr(self, name):
        """Find ``name`` on the inner object's class, descending through
        nested wrappers; returns None when no class defines it."""
        class_attr = getattr(self._inner.__class__, name, None)
        if class_attr is not None:
            return class_attr

        if isinstance(self._inner, MemoryProxyWrapper):
            return self._inner._lookup_class_attr(name)

        return None

    def __getattr__(self, name):
        # Only reached when normal lookup on the wrapper itself fails;
        # fall through to the inner proxy. getattr raises if it is
        # missing there too.
        #
        # We look up the class attr to decide whether this is a property:
        # instance lookup on a property only yields its value, which could
        # be a plain string for example.
        class_attr = self._lookup_class_attr(name)
        forwarded = getattr(self._inner, name)

        # For some safety, restrict forwarding to methods and properties.
        if not inspect.ismethod(forwarded) and not isinstance(class_attr, property):
            # NOTE: In python >= 3.10 we should specify
            # 'obj' and 'name' on the AttributeError so Python can provide hints to the user.
            raise AttributeError(
                f"Accessing non-method inner attribute {name} is not allowed.",
            )

        return forwarded
+
+
class TableMemoryProxyWrapper(MemoryProxyWrapper):
    """Wrapper that explicitly exposes ``store`` from the wrapped proxy."""

    def __init__(self, *, inner: TableMemoryProxy, **kwargs):
        super().__init__(inner=inner, **kwargs)

    def store(self, path: str):
        """Persist the wrapped table to ``path``."""
        return self._inner.store(path)
+
+
class LoggingProxyWrapper(TableMemoryProxyWrapper, LoggingMixin):
    """Table memory proxy that tracks inference/episode counters and
    periodically flushes logged metrics to a tensorboard-style writer.

    :param inner: the table memory proxy to wrap.
    :param writer: SummaryWriter receiving scalars/histograms/images/videos.
    :param log_interval: number of `add` calls between writer flushes.
    """

    def __init__(
        self,
        inner: TableMemoryProxy,
        writer: SummaryWriter,
        log_interval: int,
    ):
        super().__init__(inner=inner, default_window_length=1000)

        self.completed_inferences = 0
        self.completed_episodes = 0

        self._writer = writer
        self._log_interval = log_interval
        self._counter = 0
        self._start_time = time.monotonic()
        self._cycle_start_infs = self.completed_inferences
        self._cycle_start_time = time.perf_counter()

        self._infs_at_start = 0
        # Fix: `_total_infs` is read by `state_dict`/`load_state_dict` but was
        # never initialized, so calling `state_dict()` before restoring a
        # checkpoint raised AttributeError. Guarded in case a mixin already
        # defines it.
        if not hasattr(self, "_total_infs"):
            self._total_infs = 0

    def state_dict(self) -> dict[str, Any]:
        """Serialize the logging counters for checkpointing."""
        return {
            "completed_inferences": self.completed_inferences,
            "completed_episodes": self.completed_episodes,
            "inference_steps": self._total_infs,
        }

    def load_state_dict(
        self,
        state_dict: Dict[str, Any],
        load_network: bool = True,
        load_optimizer: bool = True,
        load_hparams: bool = True,
    ) -> None:
        """Restore logging counters from a checkpoint.

        Only `load_hparams` is honored here; the other flags mirror the wider
        checkpoint interface. (Return annotation fixed: this returns None.)
        """
        if load_hparams:
            self.completed_inferences = state_dict.get(
                "completed_inferences", self.completed_inferences
            )
            self.completed_episodes = state_dict.get("completed_episodes", self.completed_episodes)
            self._total_infs = state_dict.get("inference_steps", self._total_infs)
            self._infs_at_start = self.completed_inferences

    def add(
        self,
        observations: Dict[AgentId, DictObservation],
        responses: Dict[AgentId, DictResponse],
    ):
        """Record metrics for the batch, then forward it to the inner proxy.

        Observations without a matching response are counted as completed
        episodes (`len(observations) - len(responses)`).
        """
        self._counter += 1

        self.completed_inferences += len(observations)
        self.completed_episodes += len(observations) - len(responses)

        for obs in observations.values():
            if obs.metadata is None:
                continue

            self.report(obs.metadata.info, obs.metadata.info_lists)

        # flush to the writer every `_log_interval` calls
        if (self._counter % self._log_interval) == 0:
            self._end_cycle()
            self._counter = 0

        return self._inner.add(observations, responses)

    def report(
        self,
        metrics: dict[str, float],
        metrics_lists: dict[str, list[float]],
    ):
        """Log reported metrics; a "histogram:" key prefix routes the value to
        the histogram log instead of the windowed-scalar log."""
        for key, value in metrics.items():
            if key.startswith("histogram:"):
                self.log_histogram(key[10:], value)  # strip "histogram:" prefix
            else:
                self.log_windowed_scalar(key, value)

        for key, value in metrics_lists.items():
            if key.startswith("histogram:"):
                self.log_histogram(key[10:], value)
            else:
                self.log_windowed_scalar(key, value)

    def get_report(
        self, keys: List[str]
    ) -> Tuple[dict[str, int | float | list[float]], dict[str, list[float]]]:
        """Fetch current values for the requested keys.

        :return: `(out, out_lists)` where `out` maps keys to window means (and
            `<key>/cumulative` totals) and `out_lists` holds raw windows.
        """
        keys = set(keys)  # deduplicate requested keys
        out = {}
        out_lists = {}

        for key in keys:
            if key.startswith("histogram:") and key[10:] in self.hist_logs:
                window = self.hist_logs[key[10:]]
                out[key] = sum(window) / len(window)
            elif key in self.windowed_scalar:
                window = self.windowed_scalar[key]
                out_lists[key] = list(window)
                out[key] = sum(window) / len(window)
                out[f"{key}/cumulative"] = self.windowed_scalar_cumulative[key]
            elif key == "inf_step":
                out[key] = self.completed_inferences

        return out, out_lists

    def _end_cycle(self):
        """Flush all accumulated logs to the writer and restart the cycle."""
        now_time = time.perf_counter()
        cycle_time = now_time - self._cycle_start_time
        cycle_infs = self.completed_inferences - self._cycle_start_infs
        inf_step = self.completed_inferences
        self.log_scalar("training/inf_per_sec", cycle_infs / cycle_time)
        self.log_scalar("episode/completed", self.completed_episodes)

        for name, (mean, var) in self.timers().stats().items():
            self.log_scalar(f"memory/{self.name}/{name}/timing/mean", mean)
            self.log_scalar(f"memory/{self.name}/{name}/timing/var", var)

        if "episode/reward" in self.windowed_scalar:
            rewards = self.windowed_scalar["episode/reward"]
            average_reward = sum(rewards) / len(rewards)
            rewards_tensor = torch.Tensor(rewards)

            self._writer.add_scalar(
                "env_vs_episode/reward", average_reward, self.completed_episodes
            )
            self._writer.add_histogram("episode/reward_distribution", rewards_tensor, inf_step)
            self._writer.add_histogram(
                "env_vs_episode/reward_distribution",
                rewards_tensor,
                self.completed_episodes,
            )

        for k, v in self.scalar_logs.items():
            self._writer.add_scalar(k, v, inf_step)

        # NOTE(review): keys appear to be formatted "windowed[n]:key"; the
        # split keeps only the segment after the first ':' (would truncate a
        # key that itself contains ':') — confirm key format with LoggingMixin.
        for k, v in self.windowed_scalar.items():
            k = k.split(":")[1] if k.startswith("windowed[") else k

            self._writer.add_scalar(k, sum(v) / len(v), inf_step)

        for k, v in self.windowed_scalar_cumulative.items():
            k = k.split(":")[1] if k.startswith("windowed[") else k

            self._writer.add_scalar(f"{k}/cumulative", v, inf_step)

        for k, v in self.image_logs.items():
            self._writer.add_image(k, v, inf_step, dataformats="HWC")

        for k, (video_array, fps) in self.video_logs.items():
            self._writer.add_video(k, video_array, inf_step, fps=fps, walltime=None)

        for k, v in self.hist_logs.items():
            if isinstance(v, deque):
                v = np.array(v)

            self._writer.add_histogram(k, v, inf_step)

        time_since_start = time.monotonic() - self._start_time

        self._writer.add_scalar(
            "performance/inf_steps_per_sec",
            (inf_step - self._infs_at_start) / time_since_start,
            inf_step,
        )

        self._writer.flush()

        # restart the measurement cycle
        self._cycle_start_infs = self.completed_inferences
        self._cycle_start_time = now_time
+
+
class MemoryExporterProxyWrapper(TableMemoryProxyWrapper, LoggingMixin):
    """Export the memory at regular intervals.

    An export is triggered only when both gates are open: at least
    `inf_steps_per_memory_export` steps since the last export AND at least
    `min_time_per_export` seconds of wall-clock time.
    """

    def __init__(
        self,
        memory: TableMemoryProxy | TableMemoryProxyWrapper,
        target_memory_name,
        inf_steps_per_memory_export,
        experiment_root_path: str,
        min_time_per_export: int = 600,
    ):
        super().__init__(inner=memory)

        # Exports are expensive; warn when configured too aggressively.
        recommended_min_inf_steps = 10_000
        if inf_steps_per_memory_export < recommended_min_inf_steps:
            warnings.warn(
                f"Exporting a memory is a slow operation "
                f"and should not be done too often. "
                f"Current inf_step is {inf_steps_per_memory_export}, "
                f"while the recommended minimum is {recommended_min_inf_steps}.",
                UserWarning,
            )

        self._inf_step = 0
        self.experiment_root_path = experiment_root_path
        self._target_memory_name = target_memory_name
        self._inf_steps_per_memory_export = inf_steps_per_memory_export
        self._min_time_per_export = min_time_per_export

        self._next_export = inf_steps_per_memory_export
        self._next_export_time = time.monotonic() + min_time_per_export
        self._scopes = BlockTimers()

    def add(
        self,
        observations: Dict[AgentId, DictObservation],
        responses: Dict[AgentId, DictResponse],
    ):
        """Forward the batch to the wrapped memory, then export the table to
        disk once both the step and wall-clock thresholds have passed."""
        self._inner.add(observations, responses)

        now = time.monotonic()
        step_gate_open = self._inf_step > self._next_export
        time_gate_open = now > self._next_export_time

        self._inf_step += 1

        if not (step_gate_open and time_gate_open):
            return

        logging.info("Starting Memory export...")
        export_started = time.time()
        self._next_export = self._inf_step + self._inf_steps_per_memory_export
        self._next_export_time = now + self._min_time_per_export

        export_path = os.path.join(
            self.experiment_root_path, f"{self._target_memory_name}_export"
        )
        with self._scopes.scope("export"):
            self._inner.store(export_path)

        elapsed_time = time.time() - export_started
        logging.info(f"Memory export completed in {elapsed_time} seconds")

        for name, (mean, var) in self._scopes.stats().items():
            self.log_scalar(f"memory/{self._target_memory_name}/{name}/timing/mean", mean)
            self.log_scalar(f"memory/{self._target_memory_name}/{name}/timing/var", var)
+
+
class MemoryLoader:
    """Infinite iterator yielding fixed-size rollout batches sampled from a
    table.

    Each yielded item is `{data_group: <sampled batch>, size_key: <count>}`
    where the count is `rollout_count * rollout_length`.
    """

    def __init__(
        self,
        table: Table,
        rollout_count: int,
        rollout_length: int,
        size_key: str,
        data_group: str = "default",
    ):
        self.data_group = data_group
        self.table = table
        self.rollout_count = rollout_count
        self.rollout_length = rollout_length
        self.size_key = size_key
        self.timer = TimedBlock()

    def is_ready(self):
        """True if the data loader has enough data to start providing data."""
        return self.table.size() >= (self.rollout_count * self.rollout_length)

    def __iter__(self):
        if not self.is_ready():
            raise Exception(
                "Data loader does not have enough data.\
                Check `is_ready()` before trying to iterate over data."
            )

        while True:
            # time each sampling pass
            with self.timer:
                batch = self.table.sample(self.rollout_count, self.rollout_length)

            batch[self.size_key] = self.rollout_count * self.rollout_length
            yield {self.data_group: batch, self.size_key: batch[self.size_key]}
+
+
class JointMemoryLoader:
    """A memory loader capable of loading data from multiple
    `MemoryLoader`s.

    Each yielded batch contains every loader's data under its own data group,
    plus the summed batch size under `size_key`.
    """

    def __init__(self, loaders: list[MemoryLoader], size_key: str = "batch_size"):
        self._loaders = loaders
        self._size_key = size_key

        # Duplicate data groups would overwrite each other in the output dict.
        group_counts = collections.Counter(loader.data_group for loader in loaders)
        duplicates = {group: n for group, n in group_counts.items() if n > 1}
        if duplicates:
            raise ValueError(
                f"""JointMemoryLoader was provided MemoryLoaders that share the same datagroup. This will clobber the joint output data and is not allowed.
            Here is a dict of each datagroup encountered more than once, and its occurance count: {duplicates}"""
            )

    def is_ready(self):
        """True only when every underlying loader is ready."""
        return all(loader.is_ready() for loader in self._loaders)

    def __iter__(self):
        if not self.is_ready():
            raise RuntimeError(
                """memory loader(s) in JointMemoryLoader does not have enough data. Check `is_ready()`
            before trying to iterate over data."""
            )

        while True:
            joint = {self._size_key: 0}

            for loader in self._loaders:
                chunk = next(iter(loader))
                joint[loader.data_group] = chunk[loader.data_group]
                # for joint memory loaders we sum up all individual loader sizes
                joint[self._size_key] += chunk[loader.size_key]

            yield joint
+
+
class JointMemoryLoaderWithDataGroup(JointMemoryLoader):
    """A JointMemoryLoader that places its data inside of a user-specified
    datagroup."""

    def __init__(self, loaders: list[MemoryLoader], data_group: str, size_key: str = "batch_size"):
        super().__init__(loaders, size_key)
        self._data_group = data_group

    def __iter__(self):
        """Yield joint batches forever, nested under `data_group`.

        Bug fix: this generator previously yielded exactly once and then
        terminated, while every sibling loader (`MemoryLoader`,
        `JointMemoryLoader`) yields indefinitely — consumers iterating the
        loader would hit StopIteration after the first batch.
        """
        inner = super().__iter__()
        while True:
            data = next(inner)
            total_size = data.pop(self._size_key)

            yield {self._data_group: data, self._size_key: total_size}
+
+
class MemoryWarmup(Callback):
    """A blocker to ensure memory has data.

    This ensures the memory has enough data when training starts, as the
    memory will panic otherwise. This is useful if you use an async data
    generator.

    If you do not use an async data generator this can deadlock your
    training loop and prevent progress.
    """

    def __init__(
        self,
        loader: MemoryLoader,
        exporter: Optional[OnnxExporter],
        shutdown_signal: Optional[Callable[[], bool]] = None,
    ):
        """
        :param loader: the loader whose readiness gates training start.
        :param exporter: optional exporter whose pending exports are serviced
            while waiting.
        :param shutdown_signal: optional callable polled while waiting; when
            it returns True a TrainingShutdownException is raised.
        """
        super().__init__()
        self._order = 100  # run late relative to other callbacks
        self._loader = loader
        self._exporter = exporter
        self._shutdown_signal = shutdown_signal or (lambda: False)

    def begin_training(self):
        """Busy-wait (0.1 s polls) until the loader has enough data.

        Cleanup: removed a redundant function-local `import time`; the module
        already uses `time` at module scope.
        """
        while not self._loader.is_ready():
            time.sleep(0.1)
            if self._exporter:
                self._exporter.process_pending_exports()

            if self._shutdown_signal():
                raise TrainingShutdownException
diff --git a/emote/memory/segment_tree.py b/emote/memory/segment_tree.py
new file mode 100644
index 00000000..0fef1467
--- /dev/null
+++ b/emote/memory/segment_tree.py
@@ -0,0 +1,128 @@
+import operator
+
+
class SegmentTree:
    """Array-like container supporting O(log n) reductions over contiguous
    ranges (https://en.wikipedia.org/wiki/Segment_tree).

    Compared to a plain array, writes cost O(log capacity) instead of O(1),
    but `reduce` evaluates `operation` over any contiguous subsequence in
    O(log capacity).

    :param capacity: (int) total size of the array — must be a power of two.
    :param operation: (lambda (Any, Any): Any) associative combining operation
        (e.g. sum, max); must form a mathematical group with the element set.
    :param neutral_element: (Any) identity element for `operation`, e.g. 0 for
        sum and float('-inf') for max.
    """

    def __init__(self, capacity, operation, neutral_element):
        assert (
            capacity > 0 and capacity & (capacity - 1) == 0
        ), "capacity must be positive and a power of 2."
        self._capacity = capacity
        # 1-indexed binary heap layout: node i has children 2i and 2i+1;
        # leaves occupy indices [capacity, 2*capacity).
        self._value = [neutral_element] * (2 * capacity)
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        # Recursively combine the parts of the inclusive query range
        # [start, end] covered by `node`, which spans [node_start, node_end].
        if start == node_start and end == node_end:
            return self._value[node]

        mid = (node_start + node_end) // 2
        left, right = 2 * node, 2 * node + 1

        if end <= mid:
            # query lies entirely within the left child
            return self._reduce_helper(start, end, left, node_start, mid)
        if start > mid:
            # query lies entirely within the right child
            return self._reduce_helper(start, end, right, mid + 1, node_end)
        # query straddles both children
        return self._operation(
            self._reduce_helper(start, mid, left, node_start, mid),
            self._reduce_helper(mid + 1, end, right, mid + 1, node_end),
        )

    def reduce(self, start=0, end=None):
        """Return the result of applying `self.operation` to the contiguous
        subsequence arr[start], ..., arr[end - 1].

        :param start: (int) beginning of the subsequence
        :param end: (int) exclusive end of the subsequence; None means the
            full capacity, negative values count from the end
        :return: (Any) reduction of self.operation over the range
        """
        if end is None:
            end = self._capacity
        elif end < 0:
            end += self._capacity
        return self._reduce_helper(start, end - 1, 1, 0, self._capacity - 1)

    def __setitem__(self, idx, val):
        # write the leaf, then recompute every ancestor up to the root
        node = idx + self._capacity
        self._value[node] = val
        node //= 2
        while node:
            self._value[node] = self._operation(self._value[2 * node], self._value[2 * node + 1])
            node //= 2

    def __getitem__(self, idx):
        assert 0 <= idx < self._capacity
        return self._value[idx + self._capacity]
+
+
class SumSegmentTree(SegmentTree):
    """Segment tree specialized to addition, with prefix-sum search support."""

    def __init__(self, capacity):
        super().__init__(capacity=capacity, operation=operator.add, neutral_element=0.0)

    def sum(self, start=0, end=None):
        """Return arr[start] + ... + arr[end - 1].

        :param start: (int) start position of the reduction (must be >= 0)
        :param end: (int) end position of the reduction (must be < len(arr),
            can be None for the full array)
        :return: (Any) reduction of SumSegmentTree
        """
        return super().reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Find the highest index `i` in the array such that
        sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum.

        If array values are probabilities, this allows sampling indices
        according to the discrete probability efficiently.

        :param prefixsum: (float) upperbound on the sum of array prefix
        :return: (int) highest index satisfying the prefixsum constraint
        """
        assert 0 <= prefixsum <= self.sum() + 1e-5
        node = 1
        while node < self._capacity:  # descend until we reach a leaf
            left = 2 * node
            if self._value[left] > prefixsum:
                node = left
            else:
                prefixsum -= self._value[left]
                node = left + 1
        return node - self._capacity
+
+
class MinSegmentTree(SegmentTree):
    """Segment tree specialized to the minimum operation."""

    def __init__(self, capacity):
        super().__init__(capacity=capacity, operation=min, neutral_element=float("inf"))

    def min(self, start=0, end=None):
        """Return min(arr[start], ..., arr[end - 1]).

        :param start: (int) start position of the reduction (must be >= 0)
        :param end: (int) end position of the reduction (must be < len(arr),
            can be None for the full array)
        :return: (Any) reduction of MinSegmentTree
        """
        return super().reduce(start, end)
diff --git a/emote/memory/storage.py b/emote/memory/storage.py
new file mode 100644
index 00000000..cef208e8
--- /dev/null
+++ b/emote/memory/storage.py
@@ -0,0 +1,256 @@
"""Storage implementations backing memory table columns."""
+
+from typing import Sequence, Tuple
+
+import numpy as np
+
+from .core_types import Number
+
+
class BaseStorage(dict):
    """A simple dictionary-based storage with support for a temporary
    workspace for sampled data.

    Keys are episode identities; values are per-episode arrays.
    """

    def __init__(self, shape, dtype):
        # shape/dtype describe a single element; the workspace is allocated
        # lazily on first use.
        self._shape = shape
        self._dtype = dtype
        self._temp_storage = None

    def get_empty_storage(self, count, length):
        """Return a reusable workspace with room for count * length elements,
        skipping reallocation of the same numpy buffer on every sample.

        Will *not* work if the memory is sampled from multiple threads.
        """
        needed = count * length
        workspace = self._temp_storage
        if workspace is None or workspace.shape[0] < needed:
            workspace = np.empty((needed, *self._shape), self._dtype)
            self._temp_storage = workspace

        return workspace[:needed]

    def sequence_length_transform(self, length):
        """Plain storage keeps full sequences: the length is unchanged."""
        return length

    def post_import(self):
        """Drop imported negative ids and re-key remaining episodes to fresh
        negative ids."""
        # materialize the key list up front: we mutate while walking
        stale = [identity for identity in self if identity < 0]

        # delete all imported negative ids
        for identity in stale:
            del self[identity]

        # make remaining ids negative
        for identity in list(self.keys()):
            self[-abs(identity) - 1] = self[identity]
            del self[identity]
+
+
+class TagStorage(dict):
+ class TagProxy:
+ __slots__ = ["value"]
+
+ def __getitem__(self, key):
+ return self.value[0]
+
+ @property
+ def shape(self):
+ return (-1,)
+
+ def __init__(self, shape, dtype):
+ self._shape = shape
+ self._dtype = dtype
+ self._temp_storage = None
+
+ def get_empty_storage(self, count, length):
+ """A workspace that can be reused to skip reallocating the same numpy
+ buffer each time the memory is sampled.
+
+ Will *not* work if the memory is sampled from multiple threads.
+ """
+ total_size = count * length
+ if self._temp_storage is None or self._temp_storage.shape[0] < total_size:
+ d = np.empty((total_size, *self._shape), self._dtype)
+ self._temp_storage = d
+
+ return self._temp_storage[:total_size]
+
+ def sequence_length_transform(self, length):
+ return 1
+
+ def post_import(self):
+ # have to coalesce the list explicitly since we'll otherwise suffer from iterator invalidation
+ invalid_ids = list(filter(lambda v: v < 0, self.keys()))
+
+ # delete all imported negative ids
+ for invalid_id in invalid_ids:
+ del self[invalid_id]
+
+ # make remaining ids negative
+ remaining_ids = list(self.keys())
+ for valid_id in remaining_ids:
+ self[-abs(valid_id) - 1] = self[valid_id]
+ del self[valid_id]
+
+ def __getitem__(self, key: int | Tuple[int, ...] | slice):
+ episode = super().__getitem__(key)
+ r = TagStorage.TagProxy()
+ r.value = episode
+ return r
+
+ @property
+ def shape(self):
+ return (0,)
+
+
+class VirtualStorage:
+ """A virtual storage uses a simple storage to generate data."""
+
+ def __init__(self, storage, shape, dtype):
+ self._storage = storage
+ self._shape = shape
+ self._dtype = dtype
+ self._temp_storage = None
+
+ @property
+ def shape(self):
+ return self._storage.shape
+
+ def __getitem__(self, key: int | Tuple[int, ...] | slice):
+ pass
+
+ def __setitem__(self, key: int | Tuple[int, ...] | slice, value: Sequence[Number]):
+ pass
+
+ def __delitem__(self, key: int | Tuple[int, ...] | slice):
+ pass
+
+ def sequence_length_transform(self, length):
+ return length
+
+ def get_empty_storage(self, count, length):
+ total_size = count * length
+ if self._temp_storage is None or self._temp_storage.shape[0] < total_size:
+ d = np.empty((total_size, *self._shape), self._dtype)
+ self._temp_storage = d
+
+ return self._temp_storage[:total_size]
+
+ def post_import(self):
+ pass
+
+
class NextElementMapper(VirtualStorage):
    """Virtual storage whose reads are shifted one step forward, which is
    useful to sample 'next state' data for RL transitions."""

    class Wrapper:
        """Shifts every lookup one element ahead of the requested key."""

        def __init__(self, item):
            self._item = item

        def __getitem__(self, key):
            if isinstance(key, int):
                key = key + 1

            elif isinstance(key, tuple):
                key = tuple(index + 1 for index in key)

            elif isinstance(key, slice):
                # NOTE(review): assumes start/stop are not None — confirm
                # callers always pass explicit bounds.
                key = slice(key.start + 1, key.stop + 1, key.step)

            return self._item[key]

        @property
        def shape(self):
            return self._item.shape

    class LastWrapper:
        """Resolves any lookup to the single element following the end of the
        requested range."""

        def __init__(self, item):
            self._item = item

        def __getitem__(self, key):
            if isinstance(key, int):
                key = key + 1

            elif isinstance(key, tuple):
                # keep only the final index, shifted by one
                key = tuple(index + 1 for index in key[-1:])

            elif isinstance(key, slice):
                step = key.step or 1
                key = slice(key.stop, key.stop + step, step)

            return self._item[key]

        @property
        def shape(self):
            return self._item.shape

    def __init__(self, storage, shape, dtype, only_last: bool = False):
        super().__init__(storage, shape, dtype)
        self._only_last = only_last
        self._wrapper = NextElementMapper.LastWrapper if only_last else NextElementMapper.Wrapper

    def __getitem__(self, key: int | Tuple[int, ...] | slice):
        return self._wrapper(self._storage[key])

    def sequence_length_transform(self, length):
        # only-last mode always yields a single element per sequence
        return 1 if self._only_last else length

    @staticmethod
    def with_only_last(storage, shape, dtype):
        """Factory for the only-last variant."""
        return NextElementMapper(storage, shape, dtype, only_last=True)
+
+
class SyntheticDones(VirtualStorage):
    """Generates done flags or masks purely from sequence length: only the
    final index of an episode is 'done'."""

    class Wrapper:
        """Yields 1 at the last index of the episode and 0 elsewhere."""

        def __init__(self, length, shape, dtype):
            self._max_idx = length - 1
            self._shape = shape
            self._dtype = dtype

        def __getitem__(self, key):
            if isinstance(key, int):
                return self._dtype(key == self._max_idx)

            elif isinstance(key, tuple):
                return tuple(self._dtype(index == self._max_idx) for index in key)

            elif isinstance(key, slice):
                # vectorized comparison, reshaped to (steps, *shape)
                return (
                    (np.arange(key.start, key.stop) == self._max_idx)
                    .reshape(-1, *self._shape)
                    .astype(self._dtype)
                )

        @property
        def shape(self):
            return (-1,)

    class MaskWrapper(Wrapper):
        """Inverse of Wrapper: 0 at the last index, 1 elsewhere."""

        def __getitem__(self, key):
            if isinstance(key, int):
                return self._dtype(1.0 - (key == self._max_idx))

            elif isinstance(key, tuple):
                return tuple(self._dtype(1.0 - (index == self._max_idx)) for index in key)

            elif isinstance(key, slice):
                # invert the regular done flags
                flags = SyntheticDones.Wrapper.__getitem__(self, key)
                return 1.0 - flags

    def __init__(self, storage, shape, dtype, mask: bool = False):
        super().__init__(storage, shape, dtype)
        self._mask = mask

    def __getitem__(self, key: int | Tuple[int, ...] | slice):
        episode_length = len(self._storage[key])
        if self._mask:
            return SyntheticDones.MaskWrapper(episode_length, self._shape, self._dtype)

        return SyntheticDones.Wrapper(episode_length, self._shape, self._dtype)

    @staticmethod
    def as_mask(storage, shape, dtype):
        """Factory for the mask (inverted) variant."""
        return SyntheticDones(storage, shape, dtype, mask=True)
diff --git a/emote/memory/strategy.py b/emote/memory/strategy.py
new file mode 100644
index 00000000..66792e8f
--- /dev/null
+++ b/emote/memory/strategy.py
@@ -0,0 +1,96 @@
"""Sampling and ejection strategy interfaces for the memory buffer."""
+
+from abc import ABC, abstractmethod
+from typing import Optional, Sequence
+
+from .core_types import Matrix, SamplePoint
+
+
class Strategy(ABC):
    """A generalized strategy that may be specialized for sampling or ejection
    from a memory buffer.

    Subclasses must implement `track` and `forget`; the remaining hooks are
    optional no-ops by default.
    """

    def __init__(self):
        # set while a "simple import" is in progress; see
        # begin_simple_import/end_simple_import
        self._in_simple_import = False

    @abstractmethod
    def track(self, identity: int, sequence_length: int):
        """Track a sequence given by identity and sequence_length that exists
        in the memory.

        :param identity: an identity that is globally unique
        :param sequence_length: the number of transitions in the
            sequence identified by identity
        """
        ...

    @abstractmethod
    def forget(self, identity: int):
        """Forget the sequence of transitions given by identity."""
        ...

    def on_sample(
        self,
        ids_and_offsets: Sequence[SamplePoint],
        transition_count: int,
        advantages: Optional[Matrix] = None,
    ):
        """Called after a sampling strategy has been invoked, to give the
        strategy a chance to update sampling weights in case it uses
        prioritized sampling.

        Default implementation is a no-op.
        """
        ...

    def post_import(self):
        """Post-import validation of invariants and cleanup.

        This *has* to forget any imported negative ids, anything else is
        implementation-defined. Default implementation is a no-op.
        """
        ...

    def state(self) -> dict:
        """Serialize the strategy state to a dictionary.

        NOTE(review): the default body returns None despite the `dict`
        annotation — subclasses are expected to override this.
        """
        ...

    def load_state(self, state: dict):
        """Load the strategy state from a dictionary."""

    def clear(self):
        """Clear the strategy's internal state. Default is a no-op."""
        ...

    def begin_simple_import(self):
        """Called before a simple import, to allow the strategy to prepare
        itself."""
        self._in_simple_import = True

    def end_simple_import(self):
        """Called after a simple import, to allow the strategy to cleanup."""
        self._in_simple_import = False
+
+
+################################################################################
+
+
class SampleStrategy(Strategy):
    """A strategy specialized for sampling."""

    @abstractmethod
    def sample(self, count: int, transition_count: int) -> Sequence[SamplePoint]:
        """Apply the sampling strategy to the memory metadata, returning
        `count` identities and offsets to use when sampling from the
        memory.

        :param count: number of sequences to sample
        :param transition_count: number of transitions per sampled sequence
        """
        ...
+
+
+################################################################################
+
+
class EjectionStrategy(Strategy):
    """A strategy specialized for ejection sampling."""

    @abstractmethod
    def sample(self, count: int) -> Sequence[int]:
        """Apply the sampling strategy to the memory metadata, returning a
        list of identities that shall be ejected from the memory to remove at
        least "count" transitions.

        :param count: minimum number of transitions the returned sequences
            must cover in total
        """
        ...
diff --git a/emote/memory/table.py b/emote/memory/table.py
new file mode 100644
index 00000000..1f5f9c1f
--- /dev/null
+++ b/emote/memory/table.py
@@ -0,0 +1,578 @@
+from __future__ import annotations
+
+import enum
+import json
+import logging
+import os
+import stat
+import zipfile
+
+from threading import Lock
+from typing import List, Optional, Protocol, Sequence, Tuple
+
+import numpy as np
+import torch
+
+from ..utils.deprecated import deprecated
+from ..utils.timed_call import BlockTimers
+from .adaptors import Adaptor
+from .column import Column, TagColumn, VirtualColumn
+from .core_types import SampleResult
+from .storage import BaseStorage, TagStorage, VirtualStorage
+from .strategy import EjectionStrategy, SampleStrategy
+
+
+logger = logging.getLogger(__name__)
+
+
class TableSerializationVersion(enum.Enum):
    """The version of the memory serialization format."""

    Legacy = 0
    """The legacy memory table format using pickling, which leads to
    portability issues and risks when refactoring."""

    V1 = 1
    """Memory table format using a zip file with a JSON metadata file and raw
    numpy data files.

    Note that this version only restores data, but will not affect the
    types of ejectors, adaptors, and so on.
    """

    # Alias for the newest format: enum members with equal values are
    # aliases, so TableSerializationVersion.LATEST is
    # TableSerializationVersion.V1.
    LATEST = V1
+
+
class Table(Protocol):
    """Structural interface implemented by replay-memory tables."""

    # post-processing adaptors applied to each sampled batch
    adaptors: List[Adaptor]

    def sample(self, count: int, sequence_length: int) -> SampleResult:
        """Sample COUNT traces from the memory, each consisting of
        SEQUENCE_LENGTH frames.

        The data is transposed in a SoA fashion (since this is both
        easier to store and easier to consume).
        """
        ...

    def size(self) -> int:
        """Query the number of elements currently in the memory."""
        ...

    def full(self) -> bool:
        """Query whether the memory is filled."""
        ...

    def add_sequence(self, identity: int, sequence):
        """Add a fully terminated sequence to the memory."""
        ...

    def store(
        self,
        path: str,
        version: TableSerializationVersion = TableSerializationVersion.LATEST,
    ) -> bool:
        """Persist the whole table and all metadata into the designated
        name."""
        ...

    def restore(self, path: str, override_version: TableSerializationVersion | None = None) -> bool:
        """Restore the data table from the provided path.

        This also clears the data stores.
        """
        ...
+
+
+class ArrayTable:
    def __init__(
        self,
        *,
        columns: Sequence[Column],
        maxlen: int,
        sampler: SampleStrategy,
        ejector: EjectionStrategy,
        length_key="actions",
        adaptors: Optional[List[Adaptor]] = None,
        device: torch.device,
    ):
        """Create the table with the specified configuration.

        :param columns: column specifications; one storage is created per
            column (see `_clear`).
        :param maxlen: maximum total number of transitions to keep.
        :param sampler: strategy used to pick sample points.
        :param ejector: strategy used to pick sequences to evict.
        :param length_key: column whose per-sequence length defines the
            sequence length.
        :param adaptors: optional post-processing applied to sampled batches.
            NOTE: annotation fixed from `Optional[Adaptor]` — a list is
            expected, as `sample` iterates over it.
        :param device: torch device sampled tensors are moved to.
        """
        self._sampler = sampler
        self._ejector = ejector
        self._length_key = length_key
        self._maxlen = maxlen
        self._columns = {column.name: column for column in columns}
        # guards all mutable table state
        self._lock = Lock()
        self.adaptors = adaptors if adaptors else []
        self._device = device

        self.clear()
+
+ def resize(self, new_size):
+ with self._lock:
+ if new_size < self._maxlen:
+ raise ValueError(
+ f"The new memory size {new_size} is smaller than the current size of the memory "
+ f"({self._maxlen}). Shrinking the memory is not supported"
+ )
+ self._maxlen = new_size
+
    def clear(self):
        """Clear and reset all data (thread-safe entry point)."""
        with self._lock:
            self._clear()
+
+ def _clear(self):
+ """Clear and reset all data."""
+
+ self._data = {}
+
+ self._lengths = {}
+ self._total_length = 0
+ self._filled = False
+ self._timers = BlockTimers()
+
+ for column in self._columns.values():
+ if isinstance(column, VirtualColumn):
+ self._data[column.name] = column.mapper(
+ self._data[column.target_name], column.shape, column.dtype
+ )
+
+ elif isinstance(column, TagColumn):
+ self._data[column.name] = TagStorage(column.shape, column.dtype)
+
+ else:
+ self._data[column.name] = BaseStorage(column.shape, column.dtype)
+
+ ################################################################################
+
+ def _diagnostic_broadcast_error(
+ self,
+ err: Exception,
+ key: str,
+ episode_id: int,
+ slice_begin: int,
+ slice_end: int,
+ ):
+ """Assumptions: This is called while holding all the data lock"""
+
+ lines = [f"Caught ValueError ({err}) when sampling memory for key {key}"]
+ lines.append(f"\nFor episode id {episode_id}, the following data was found: ")
+ for key, store in self._data.items():
+ lines.append(f"\t{key} -> {store[episode_id].shape}")
+
+ lines.append(f"and an error occurred when slicing the range {slice_begin}..{slice_end}")
+
+ raise ValueError("\n".join(lines))
+
+ ################################################################################
+
    def _execute_gather(self, count: int, sequence_length: int, sample_points: List[Tuple[int]]):
        """Gather the sampled slices from every column store into one tensor
        per column.

        :param count: number of sequences sampled.
        :param sequence_length: transitions per sampled sequence.
        :param sample_points: (identity, start, end) triples to slice out.
        :return: dict mapping column name to a tensor on `self._device`.
        :raises ValueError: via `_diagnostic_broadcast_error` when a slice
            cannot be broadcast into the output buffer or an id is missing.
        """
        with self._timers.scope("gather"):
            out = {}
            for key, store in self._data.items():
                # virtual stores may compress a whole slice to one element
                local_seq_length = store.sequence_length_transform(sequence_length)
                output_store = store.get_empty_storage(count, local_seq_length)
                idx = 0
                next_idx = idx + local_seq_length

                for identity, start, end in sample_points:
                    try:
                        output_store[idx:next_idx] = store[identity][start:end]
                        # only advance the write cursor on success
                        idx = next_idx

                    except ValueError as err:
                        # re-raised as a detailed ValueError
                        self._diagnostic_broadcast_error(err, key, identity, start, end)

                    except KeyError as err:
                        self._diagnostic_broadcast_error(err, key, identity, start, end)

                    next_idx += local_seq_length

                # torch.tensor copies the scratch buffer, so it is safe to
                # reuse it on the next sample
                out[key] = torch.tensor(output_store).to(self._device)

        return out
+
+ def sample(self, count: int, sequence_length: int) -> SampleResult:
+ """Sample COUNT traces from the memory, each consisting of
+ SEQUENCE_LENGTH transitions.
+
+ The transitions are returned in a SoA fashion (since this is
+ both easier to store and easier to consume)
+ """
+
+ with self._lock:
+ with self._timers.scope("points"):
+ sample_points = self._sampler.sample(count, sequence_length)
+
+ result = self._execute_gather(count, sequence_length, sample_points)
+ for adaptor in self.adaptors:
+ result = adaptor(result, count, sequence_length)
+ return result
+
    def size(self) -> int:
        """Query the number of elements currently in the memory
        (thread-safe)."""
        with self._lock:
            return self._internal_size()
+
    def _internal_size(self) -> int:
        # lock-free variant for callers that already hold self._lock
        return self._total_length
+
    def full(self) -> bool:
        """Returns true if the memory has reached saturation, e.g., where new
        adds may cause ejection.

        .. warning:: This does not necessarily mean that `size() == maxlen`, as
            we store and eject full sequences. The memory only guarantees we
            will have *fewer* samples than maxlen.
        """
        with self._lock:
            return self._filled
+
    def add_sequence(self, identity: int, sequence: dict):
        """Thread-safe, timed entry point for adding a fully terminated
        sequence."""
        with (
            self._timers.scope("add_sequence"),
            self._lock,
        ):
            self._add_sequence_internal(identity, sequence)
+
    def _add_sequence_internal(self, identity: int, sequence: dict):
        """Add a fully terminated sequence to the memory.

        Caller must hold self._lock.
        """
        # the designated length column defines the sequence's transition count
        sequence_length = len(sequence[self._length_key])

        # unsigned extend: all Ids that are added as sequences must be positive int64 values
        if identity < 0:
            identity += 2**64

        # Shrink before writing to make sure we don't overflow the storages

        with self._timers.scope("add_sequence_inner"):
            size_after_add = self._internal_size() + sequence_length
            if size_after_add > self._maxlen:
                self._eject_count(size_after_add - self._maxlen)

            # coerce each value to the column dtype, shaped (time, *col shape)
            for name, value in sequence.items():
                self._data[name][identity] = np.array(
                    value, dtype=self._columns[name].dtype
                ).reshape(-1, *self._columns[name].shape)

            # register the new sequence with bookkeeping and both strategies
            self._total_length += sequence_length
            self._lengths[identity] = sequence_length
            self._sampler.track(identity, sequence_length)
            self._ejector.track(identity, sequence_length)
+
    def _eject_count(self, count: int):
        """Request ejection of *at least* the specified number of
        transitions.

        Caller must hold ``self._lock``.
        """
        # Latch saturation: once an add has forced an ejection, the table
        # reports full() == True.
        self._filled = True

        # Assumes the ejection strategy returns ids whose combined lengths
        # cover at least `count` transitions — TODO confirm against the
        # EjectionStrategy contract.
        identities = self._ejector.sample(count)

        for to_eject in identities:
            # Drop the sequence from every column storage...
            for storage in self._data.values():
                del storage[to_eject]

            # ...then update bookkeeping and notify both strategies.
            self._total_length -= self._lengths[to_eject]
            del self._lengths[to_eject]
            self._sampler.forget(to_eject)
            self._ejector.forget(to_eject)
+
    def _serialize(self, path: str) -> bool:
        """Write the table to ``{path}.zip`` using the V1 zip-based format.

        Archive layout: a ``version`` entry holding the serialization version
        number, ``configuration.json`` with metadata (ejector/sampler types
        and state, column configs, sequence ids), and one ``{key}.npy`` plus
        ``{key}.ranges.npy`` pair per concrete (non-virtual) data column.

        :param path: destination path; ``.zip`` is appended.

        .. note:: declared ``-> bool`` but no value is returned, so callers
            receive ``None`` — TODO(review) confirm intended.
        """
        from atomicwrites import atomic_write

        with self._lock:
            # atomic_write guarantees a crash mid-serialization never leaves
            # a truncated archive at the destination path.
            with (
                atomic_write(f"{path}.zip", overwrite=True, mode="wb") as tmp,
                zipfile.ZipFile(tmp, "a") as zip_,
            ):
                with zip_.open("version", "w") as version:
                    version_int = TableSerializationVersion.V1.value
                    version.write(str(version_int).encode("utf-8"))

                # Metadata describing the table configuration at save time;
                # _deserialize compares it against the live config.
                parts = {
                    "ejector_type": self._ejector.__class__.__name__,
                    "sampler_type": self._sampler.__class__.__name__,
                    "length_key": self._length_key,
                    "maxlen": self._maxlen,
                    "ids": list(self._lengths.keys()),
                }

                ejector_state = self._ejector.state()
                if ejector_state is not None:
                    parts["ejector_state"] = ejector_state

                sampler_state = self._sampler.state()
                if sampler_state is not None:
                    parts["sampler_state"] = sampler_state

                parts["columns"] = [
                    (name, column.__class__.__name__, column.state())
                    for name, column in self._columns.items()
                ]

                output_ranges = {}
                output_data = {}

                # Flatten each concrete storage into one contiguous array,
                # recording (identity, start, length) per sequence so it can
                # be sliced back out on restore.
                for key, store in self._data.items():
                    ranges = []
                    merged_data = []

                    # Virtual storages are derived from other columns and are
                    # rebuilt on restore, so they are not persisted.
                    if isinstance(store, VirtualStorage):
                        continue

                    for identity, data in store.items():
                        ranges.append(
                            (
                                identity,
                                len(merged_data),
                                len(data),
                            )
                        )
                        merged_data.extend(data)

                    output_data[key] = np.stack(merged_data)
                    output_ranges[key] = ranges

                parts["part_keys"] = list(output_data.keys())

                with zip_.open("configuration.json", "w", force_zip64=True) as f:
                    json_data = json.dumps(parts)
                    f.write(json_data.encode("utf-8"))

                # allow_pickle=False keeps the archive loadable without
                # executing arbitrary pickled code.
                for key, data in output_data.items():
                    with zip_.open(f"{key}.ranges.npy", "w", force_zip64=True) as f:
                        np.save(f, output_ranges[key], allow_pickle=False)

                    with zip_.open(f"{key}.npy", "w", force_zip64=True) as npz:
                        np.save(npz, data, allow_pickle=False)

            os.chmod(f"{path}.zip", stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+
    def _deserialize(self, zip_: "zipfile.ZipFile") -> bool:
        """Restore the data table from the provided path.

        This currently implies a "clear" of the data stores.

        Configuration mismatches (ejector/sampler/column types, maxlen,
        length key) are tolerated with a warning rather than rejected.

        .. note:: declared ``-> bool`` but no value is returned — TODO(review)
            confirm intended.
        """

        with self._lock:
            self._clear()

            # Tell both strategies a bulk import is starting so they can
            # defer their per-sequence bookkeeping until end_simple_import().
            self._ejector.begin_simple_import()
            self._sampler.begin_simple_import()

            with zip_.open("configuration.json", "r") as f:
                config = json.load(f)

            ejector_type = config["ejector_type"]

            if ejector_type != self._ejector.__class__.__name__:
                logger.warning(
                    f"Deserializing memory with ejector type {ejector_type}, but "
                    f"memory is configured with ejector type "
                    f"{self._ejector.__class__.__name__}. This may lead to "
                    f"unexpected behavior."
                )

            if "ejector_state" in config:
                self._ejector.load_state(config["ejector_state"])

            sampler_type = config["sampler_type"]

            if sampler_type != self._sampler.__class__.__name__:
                logger.warning(
                    f"Deserializing memory with sampler type {sampler_type}, but "
                    f"memory is configured with sampler type "
                    f"{self._sampler.__class__.__name__}. This may lead to "
                    f"unexpected behavior."
                )

            if "sampler_state" in config:
                self._sampler.load_state(config["sampler_state"])

            if self._length_key != config["length_key"]:
                logger.warning(
                    f"Deserializing memory with length key {config['length_key']}, "
                    f"but memory is configured with length key "
                    f"{self._length_key}. This may lead to unexpected behavior."
                )

            if self._maxlen != config["maxlen"]:
                logger.warning(
                    f"Deserializing memory with maxlen {config['maxlen']}, "
                    f"but memory is configured with maxlen "
                    f"{self._maxlen}. This may lead to unexpected behavior."
                )

            # Column states are only loaded for columns that exist locally
            # with a matching type; anything else is skipped with a warning.
            for name, column_type, column_config in config["columns"]:
                if name not in self._columns:
                    logger.warning(
                        f"Deserializing memory with column {name}, "
                        f"but memory is configured without column "
                        f"{name}. This may lead to unexpected behavior."
                    )
                    continue

                if column_type != self._columns[name].__class__.__name__:
                    logger.warning(
                        f"Deserializing memory with column {name} of type "
                        f"{column_type}, but memory is configured with column "
                        f"{name} of type {self._columns[name].__class__.__name__}. "
                        f"This may lead to unexpected behavior."
                    )
                    continue

                self._columns[name].load_state(column_config)

            # Load each column's flattened array plus the (identity, start,
            # size) ranges needed to slice sequences back out of it.
            loaded_data = {}
            ranges = {}
            for key in config["part_keys"]:
                with zip_.open(f"{key}.ranges.npy", "r") as f:
                    tuplized = np.load(f, allow_pickle=False)

                ranges[key] = {identity: (start, size) for identity, start, size in tuplized}

                with zip_.open(f"{key}.npy", "r") as npz:
                    loaded_data[key] = np.load(npz, allow_pickle=False)

            # we reassemble sequences and store them in the memory
            for identity in config["ids"]:
                reassembled = {}

                for key, data in ranges.items():
                    (start, size) = data[identity]
                    end = start + size
                    reassembled[key] = loaded_data[key][start:end]

                self._add_sequence_internal(identity, reassembled)

            self._sampler.end_simple_import()
            self._ejector.end_simple_import()
+
    @deprecated(
        reason="Legacy memory export use pickling which add security and stability risks.",
        version="23.1.0",
    )
    def _store_legacy(self, path: str) -> bool:
        """Persist the whole table and all metadata into the designated
        name.

        Legacy format: a zip archive containing a cloudpickle'd metadata blob
        (``data.pickle``) plus one pickled ``.npy`` per concrete column.

        .. note:: declared ``-> bool`` but no value is returned — callers
            receive ``None``.
        """

        # Imported lazily so the deprecated path only pays for these
        # dependencies when actually used.
        import cloudpickle

        from atomicwrites import atomic_write

        with self._lock:
            # atomic_write ensures a crash mid-export never leaves a
            # truncated archive at the destination path.
            with (
                atomic_write(f"{path}.zip", overwrite=True, mode="wb") as tmp,
                zipfile.ZipFile(tmp, "a") as zip_,
            ):
                with zip_.open("data.pickle", "w", force_zip64=True) as data_file:
                    parts = {
                        "ejector": self._ejector,
                        "sampler": self._sampler,
                        "length_key": self._length_key,
                        "maxlen": self._maxlen,
                        "columns": self._columns,
                        "lengths": self._lengths,
                        "filled": self._filled,
                    }

                    cloudpickle.dump(parts, data_file, protocol=4)

                # Virtual storages are derived data, rebuilt on restore, so
                # only concrete storages are written.
                for key, data in self._data.items():
                    if isinstance(data, VirtualStorage):
                        continue

                    with zip_.open(f"{key}.npy", "w", force_zip64=True) as npz:
                        np.save(npz, data)

            os.chmod(f"{path}.zip", stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+
    @deprecated(
        reason="Legacy memory export uses pickling which add security and stability risks.",
        version="23.1.0",
    )
    def _restore_legacy(self, zip_: zipfile.ZipFile) -> bool:
        """Restore the data table from the provided path.

        This currently implies a "clear" of the data stores.

        .. warning:: this path unpickles arbitrary objects from the archive
            (cloudpickle and ``allow_pickle=True``); only load trusted files.
        """

        import cloudpickle

        with self._lock:
            # The pickled blob replaces the live strategies, columns and
            # bookkeeping wholesale.
            with zip_.open("data.pickle", "r") as data_file:
                parts = cloudpickle.load(data_file)
                self._ejector = parts["ejector"]
                self._sampler = parts["sampler"]
                self._length_key = parts["length_key"]
                self._maxlen = parts["maxlen"]
                self._columns = parts["columns"]
                self._lengths = parts["lengths"]
                self._filled = parts["filled"]

            for key, data in self._data.items():
                if isinstance(data, VirtualStorage):
                    continue

                # Legacy files store each column as a pickled dict wrapped in
                # a 0-d object array; .item(0) unwraps it.
                with zip_.open(f"{key}.npy", "r") as npz:
                    loaded = np.load(npz, allow_pickle=True).item(0)
                    for d, v in loaded.items():
                        data[d] = v

            # Rebuild virtual columns from their target storages.
            for column in self._columns.values():
                if isinstance(column, VirtualColumn):
                    self._data[column.name] = column.mapper(
                        self._data[column.target_name], column.shape, column.dtype
                    )

            # Remap imported ids into the negative space so they cannot
            # collide with ids assigned after the import. Entries whose keys
            # are already negative are dropped — presumably remapped by an
            # earlier import; TODO(review) confirm this is intended.
            self._lengths = {-abs(k) - 1: v for (k, v) in self._lengths.items() if k >= 0}
            self._total_length = sum(self._lengths.values())
            self._sampler.post_import()
            self._ejector.post_import()
            for column_store in self._data.values():
                column_store.post_import()
+
+ def store(
+ self,
+ path: str,
+ version: TableSerializationVersion = TableSerializationVersion.LATEST,
+ ) -> bool:
+ """Persist the whole table and all metadata into the designated name.
+
+ :param path: The path to store the data to.
+ :param version: The serialization version to use.
+ """
+
+ if version is None:
+ version = TableSerializationVersion.LATEST
+
+ if version == TableSerializationVersion.Legacy:
+ return self._store_legacy(path)
+
+ elif version == TableSerializationVersion.V1:
+ return self._serialize(path)
+
+ else:
+ raise ValueError(f"Unknown serialization version {version}")
+
+ def restore(self, path: str, override_version: TableSerializationVersion | None = None) -> bool:
+ with zipfile.ZipFile(f"{path}.zip", "r") as zip_:
+ version = TableSerializationVersion.LATEST
+ if override_version is not None:
+ version = override_version
+ elif "version" in zip_.namelist():
+ with zip_.open("version", "r") as version_file:
+ version_int = int(version_file.read())
+ version = TableSerializationVersion(version_int)
+
+ else:
+ version = TableSerializationVersion.Legacy
+
+ if version == TableSerializationVersion.Legacy:
+ return self._restore_legacy(zip_)
+
+ elif version == TableSerializationVersion.V1:
+ return self._deserialize(zip_)
+
+ else:
+ raise ValueError(f"Unknown serialization version {version}")
diff --git a/emote/memory/uniform_strategy.py b/emote/memory/uniform_strategy.py
new file mode 100644
index 00000000..8c0757cb
--- /dev/null
+++ b/emote/memory/uniform_strategy.py
@@ -0,0 +1,95 @@
"""Uniform sampling and ejection strategies for the sequence memory."""
+
+import random
+
+from typing import Sequence
+
+import numpy as np
+
+from .core_types import SamplePoint
+from .strategy import EjectionStrategy, SampleStrategy, Strategy
+
+
class UniformStrategyBase(Strategy):
    """Shared bookkeeping for uniform sampling over the stored experiences.

    Keeps a mapping of sequence identity -> length and lazily maintains a
    length-proportional probability vector. Both the uniform sample and
    ejection strategies build on this base.
    """

    def __init__(self):
        super().__init__()
        self._identities = {}
        self._ids = []
        self._prios = []
        self._dirty = False

    def track(self, identity: int, sequence_length: int):
        """Start tracking a sequence; probability tables rebuild lazily."""
        self._dirty = True
        self._identities[identity] = sequence_length

    def forget(self, identity: int):
        """Stop tracking a sequence; probability tables rebuild lazily."""
        self._dirty = True
        del self._identities[identity]

    def _rebalance(self):
        """Rebuild the id array and the length-proportional probabilities."""
        self._dirty = False
        lengths = tuple(self._identities.values())
        self._prios = np.array(lengths) / sum(lengths)
        self._ids = np.array(tuple(self._identities.keys()), dtype=np.int64)

    def post_import(self):
        """Remap imported ids into the negative id space.

        Every non-negative id is re-tracked as ``-abs(id) - 1`` so imported
        sequences cannot collide with ids created after the import; ids that
        are already negative are dropped.
        """
        for identity, length in list(self._identities.items()):
            self.forget(identity)
            if identity >= 0:
                self.track(-abs(identity) - 1, length)

        # rebalance here so we don't have to start by rebalancing all imported
        # memory on the first sample. Not required and the rebalance should be
        # cheap, but this makes the initial state of memory be clean.
        if self._dirty:
            self._rebalance()
+
+
+################################################################################
+
+
class UniformSampleStrategy(UniformStrategyBase, SampleStrategy):
    """Draws sample windows uniformly across all stored transitions by
    weighting each sequence with its length."""

    def sample(self, count: int, transition_count: int) -> Sequence[SamplePoint]:
        if self._dirty:
            self._rebalance()

        chosen = np.random.choice(self._ids, size=count, p=self._prios)
        lengths = self._identities
        window_excess = transition_count - 1

        points = []
        for identity in chosen:
            # Uniform start offset such that a full window fits in the sequence.
            offset = int(random.random() * (lengths[identity] - window_excess))
            points.append((identity, offset, offset + transition_count))

        return points
+
+
+################################################################################
+
+
class UniformEjectionStrategy(UniformStrategyBase, EjectionStrategy):
    """Selects sequences for ejection uniformly (length-weighted) until at
    least ``count`` transitions are covered."""

    def sample(self, count: int) -> Sequence[int]:
        """Pick distinct sequence ids whose combined length covers at least
        ``count`` transitions.

        :param count: minimum number of transitions to free.
        :returns: list of sequence identities to eject. May cover fewer than
            ``count`` transitions if the strategy runs out of sequences.
        """
        if self._dirty:
            self._rebalance()

        identities = set()
        while count > 0:
            # Termination guard: once every tracked sequence has been
            # selected no further draw can make progress — without this the
            # rejection loop below would spin forever (and an empty table
            # would crash np.random.choice).
            if len(identities) >= len(self._ids):
                break

            identity = np.random.choice(self._ids, size=1, p=self._prios)[0]

            # Rejection-sample until we hit an id we have not chosen yet.
            if identity in identities:
                continue

            length = self._identities[identity]
            count -= length
            identities.add(identity)

        return list(identities)
diff --git a/emote/mixins/BUILD b/emote/mixins/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/mixins/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/mixins/__init__.py b/emote/mixins/__init__.py
new file mode 100644
index 00000000..b6734582
--- /dev/null
+++ b/emote/mixins/__init__.py
@@ -0,0 +1,7 @@
+"""Mixins for emote.
+
+Mixins are used to add functionality to other classes just like regular
+inheritance. The difference is that mixins are designed to work well
+with multiple inheritance, which requires extra care to avoid issues in
+initialization order.
+"""
diff --git a/emote/mixins/logging.py b/emote/mixins/logging.py
new file mode 100644
index 00000000..4e619fcd
--- /dev/null
+++ b/emote/mixins/logging.py
@@ -0,0 +1,120 @@
+from collections import deque
+from collections.abc import Iterable
+from typing import Any, Dict, Tuple
+
+import numpy as np
+import torch
+
+
+class LoggingMixin:
+ """A Mixin that accepts logging calls.
+
+ Logged data is saved on this object and gets written by a Logger.
+ This therefore doesn't care how the data is logged, it only provides
+ a standard interface for storing the data to be handled by a Logger.
+ """
+
+ def __init__(self, *, default_window_length: int = 250, **kwargs):
+ super().__init__(**kwargs)
+
+ self.scalar_logs: Dict[str, float | torch.Tensor] = {}
+ self.windowed_scalar: Dict[str, deque[float | torch.Tensor]] = {}
+ self.windowed_scalar_cumulative: Dict[str, int] = {}
+ self.image_logs: Dict[str, torch.Tensor] = {}
+ self.hist_logs: Dict[str, float | torch.Tensor] = {}
+ self.video_logs: Dict[str, Tuple[np.ndarray, int]] = {}
+
+ self._default_window_length = default_window_length
+
+ def log_scalar(self, key: str, value: float | torch.Tensor):
+ """Use log_scalar to periodically log scalar data."""
+ if isinstance(value, torch.Tensor):
+ self.scalar_logs[key] = value.item()
+ else:
+ self.scalar_logs[key] = value
+
+ def log_windowed_scalar(
+ self,
+ key: str,
+ value: float | torch.Tensor | Iterable[torch.Tensor | float],
+ ):
+ """Log scalars using a moving window average.
+
+ By default this will use `default_window_length` from the constructor as the window
+ length. It can also be overridden on a per-key basis using the format
+ windowed[LENGTH]:foo/bar. Note that this cannot be changed between multiple invocations -
+ whichever length is found first will be permanent.
+ """
+
+ if key not in self.windowed_scalar:
+ # we allow windowed[100]:some_key/foobar to override window size
+ if "windowed[" in key:
+ p = key.split(":")[0]
+ length = int(p.split("[")[1][:-1])
+ else:
+ length = self._default_window_length
+
+ self.windowed_scalar[key] = deque(maxlen=length)
+ self.windowed_scalar_cumulative[key] = 0
+
+ if isinstance(value, Iterable):
+ val = value.numpy() if isinstance(value, torch.Tensor) else value
+ self.windowed_scalar[key].extend(val)
+ self.windowed_scalar_cumulative[key] += sum(val)
+ else:
+ val = value.item() if isinstance(value, torch.Tensor) else value
+ self.windowed_scalar[key].append(val)
+ self.windowed_scalar_cumulative[key] += val
+
+ def log_image(self, key: str, value: torch.Tensor):
+ """Use log_image to periodically log image data."""
+ if len(value.shape) == 3:
+ self.image_logs[key] = value.detach()
+
+ def log_video(self, key: str, value: Tuple[np.ndarray, int]):
+ """Use log_scalar to periodically log scalar data."""
+ self.video_logs[key] = value
+
+ def log_histogram(
+ self,
+ key: str,
+ value: torch.Tensor | float | Iterable[torch.Tensor | float],
+ ):
+ if isinstance(value, Iterable):
+ self.hist_logs[key] = value.detach() if isinstance(value, torch.Tensor) else value
+ else:
+ if key not in self.hist_logs:
+ self.hist_logs[key] = deque(maxlen=self._default_window_length)
+
+ self.hist_logs[key].append(value)
+
+ def state_dict(self):
+ state_dict = super().state_dict()
+ state_dict["scalar_logs"] = self.scalar_logs
+ state_dict["hist_logs"] = self.hist_logs
+ state_dict["image_logs"] = self.image_logs
+ state_dict["video_logs"] = self.video_logs
+ state_dict["windowed_scalar"] = {
+ k: (list(v), v.maxlen) for (k, v) in self.windowed_scalar.items()
+ }
+ state_dict["windowed_scalar_cumulative"] = self.windowed_scalar_cumulative
+ return state_dict
+
+ def load_state_dict(
+ self,
+ state_dict: Dict[str, Any],
+ load_network: bool = True,
+ load_optimizer: bool = True,
+ load_hparams: bool = True,
+ ):
+ if load_hparams:
+ self.scalar_logs = state_dict.pop("scalar_logs")
+ self.hist_logs = state_dict.pop("hist_logs")
+ self.video_logs = state_dict.pop("video_logs")
+ self.image_logs = state_dict.pop("image_logs")
+ self.windowed_scalar = {
+ k: deque(v[0], maxlen=v[1]) for (k, v) in self.windowed_scalar.items()
+ }
+ self.windowed_scalar_cumulative = state_dict.pop("windowed_scalar_cumulative")
+
+ super().load_state_dict(state_dict, load_network, load_optimizer, load_hparams)
diff --git a/emote/models/BUILD b/emote/models/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/models/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/models/__init__.py b/emote/models/__init__.py
new file mode 100644
index 00000000..5d1782af
--- /dev/null
+++ b/emote/models/__init__.py
@@ -0,0 +1,16 @@
+from .callbacks import BatchSampler, LossProgressCheck, ModelBasedCollector, ModelLoss
+from .ensemble import EnsembleOfGaussian
+from .model import DeterministicModel, DynamicModel
+from .model_env import ModelEnv
+
+
+__all__ = [
+ "DynamicModel",
+ "ModelLoss",
+ "ModelEnv",
+ "EnsembleOfGaussian",
+ "ModelBasedCollector",
+ "BatchSampler",
+ "LossProgressCheck",
+ "DeterministicModel",
+]
diff --git a/emote/models/callbacks.py b/emote/models/callbacks.py
new file mode 100644
index 00000000..44d84efe
--- /dev/null
+++ b/emote/models/callbacks.py
@@ -0,0 +1,271 @@
+from collections import deque
+from typing import Optional
+
+import numpy as np
+import torch
+
+from torch import optim
+
+from emote.callback import BatchCallback
+from emote.callbacks.loss import LossCallback
+from emote.extra.schedules import BPStepScheduler
+from emote.memory import MemoryLoader
+from emote.mixins.logging import LoggingMixin
+from emote.models.model import DynamicModel
+from emote.models.model_env import ModelEnv
+from emote.proxies import AgentProxy, MemoryProxy
+from emote.trainer import TrainingShutdownException
+from emote.typing import AgentId, DictObservation
+
+
class ModelLoss(LossCallback):
    """Trains a dynamic model by minimizing its prediction loss.

    Arguments:
        model (DynamicModel): A dynamic model
        opt (torch.optim.Optimizer): An optimizer.
        lr_schedule (lr_scheduler, optional): A learning rate scheduler
        max_grad_norm (float): Clip the norm of the gradient during backprop using this value.
        name (str): The name of the module. Used e.g. while logging.
        data_group (str): The name of the data group from which this Loss takes its data.
        input_key (str): The observation key fed to the model.
    """

    def __init__(
        self,
        *,
        model: DynamicModel,
        opt: optim.Optimizer,
        lr_schedule: Optional[optim.lr_scheduler._LRScheduler] = None,
        max_grad_norm: float = 10.0,
        name: str = "dynamic_model",
        data_group: str = "default",
        input_key: str = "obs",
    ):
        super().__init__(
            name=name,
            optimizer=opt,
            lr_schedule=lr_schedule,
            network=model,
            max_grad_norm=max_grad_norm,
            data_group=data_group,
        )
        self.model = model
        self._input_key = input_key

    def loss(self, observation, next_observation, actions, rewards):
        """Delegate to the wrapped model's loss; the returned metadata is
        discarded."""
        model_loss, _metadata = self.model.loss(
            obs=observation[self._input_key],
            next_obs=next_observation[self._input_key],
            action=actions,
            reward=rewards,
        )
        return model_loss
+
+
class LossProgressCheck(LoggingMixin, BatchCallback):
    """Monitors that the dynamic model's prediction error decreases during
    training, and terminates training at the end of the cycle.

    Per-batch mean absolute errors (observation and reward) are averaged
    over windows of ``num_bp // 10`` batches; ``end_cycle`` raises if any
    window's error exceeds the window four positions before it, and
    otherwise raises ``TrainingShutdownException`` to stop training.
    """

    def __init__(
        self,
        model: DynamicModel,
        num_bp: int,
        data_group: str = "default",
        input_key: str = "obs",
    ):
        super().__init__()
        self.data_group = data_group
        self.model = model
        self.cycle = num_bp
        # Generator placed on the model's device so sampling stays device-side.
        self.rng = torch.Generator(device=self.model.device)
        self.prediction_err = []  # per-batch [obs_err, reward_err] pairs
        self.prediction_average_err = []  # windowed means of the above
        self.len_averaging_window = num_bp // 10
        self._input_key = input_key

    def begin_batch(self, *args, **kwargs):
        """Record mean absolute prediction error for this batch and fold
        completed windows into the running averages."""
        obs, next_obs, action, reward = self.get_batch(*args, **kwargs)
        predicted_obs, predicted_reward = self.model.sample(
            observation=obs, action=action, rng=self.rng
        )
        obs_prediction_err = (predicted_obs - next_obs).detach().to("cpu").numpy()
        reward_prediction_err = (predicted_reward - reward).detach().to("cpu").numpy()

        obs_prediction_err = np.mean(np.abs(obs_prediction_err))
        reward_prediction_err = np.mean(np.abs(reward_prediction_err))

        self.log_scalar("mbrl/obs_pred_err", obs_prediction_err)
        self.log_scalar("mbrl/rew_pred_err", reward_prediction_err)

        self.prediction_err.append([obs_prediction_err, reward_prediction_err])

        # Close out a window once enough batches have accumulated.
        if len(self.prediction_err) >= self.len_averaging_window:
            self.prediction_average_err.append(np.mean(np.array(self.prediction_err), axis=0))
            self.prediction_err = []

    def end_cycle(self):
        """Raise if the loss has not decreased; always end training after.

        :raises Exception: when a window's error (obs at j=0, reward at j=1)
            is larger than the window four positions earlier.
        :raises TrainingShutdownException: unconditionally after the check,
            to end the (fixed-length) training run.
        """
        for i in range(len(self.prediction_average_err) - 4):
            for j in range(2):
                if self.prediction_average_err[i + 4][j] > self.prediction_average_err[i][j]:
                    raise Exception(
                        f"The loss is not decreasing: \n"
                        f"Loss at {i}: {self.prediction_average_err[i]}"
                        f"Loss at {i+4}: {self.prediction_average_err[i+4]}"
                    )
        raise TrainingShutdownException()

    def get_batch(self, observation, next_observation, actions, rewards):
        # Unpack the data group's batch into the tensors the model consumes.
        return (
            observation[self._input_key],
            next_observation[self._input_key],
            actions,
            rewards,
        )
+
+
class BatchSampler(LoggingMixin, BatchCallback):
    """BatchSampler class is used to provide batches of data for the RL
    training callbacks. In every BP step, it samples one batch from either the
    gym buffer or the model buffer based on a Bernoulli probability
    distribution. It outputs the batch to a separate data-group which will be
    used by other RL training callbacks.

    Arguments:
        dataloader (MemoryLoader): the dataloader to load data from the model buffer
        prob_scheduler (BPStepScheduler): the scheduler to update the prob of data
            samples to come from the model vs. the Gym buffer
        data_group (str): the data_group to receive data
        rl_data_group (str): the data_group to upload data for RL training
        generator (torch.Generator (optional)): an optional random generator
    """

    def __init__(
        self,
        dataloader: MemoryLoader,
        prob_scheduler: BPStepScheduler,
        data_group: str = "default",
        rl_data_group: str = "rl_buffer",
        generator: Optional[torch.Generator] = None,
    ):
        # BUG FIX: LoggingMixin added as a base class — begin_batch() logs
        # via self.log_scalar, which only the mixin provides (matching the
        # sibling callbacks LossProgressCheck and ModelBasedCollector).
        super().__init__()
        self.dataloader = dataloader
        # There are two data_groups in this class: self.data_group receives
        # real (Gym) samples in begin_batch, while self.rl_data_group is the
        # destination group for the produced batch.
        self.data_group = data_group
        self.rl_data_group = rl_data_group
        self.iter = iter(self.dataloader)
        self.scheduler = prob_scheduler
        self.prob_of_sampling_model_data = self.scheduler.value_min
        self.rng = generator if generator else torch.Generator()
        self.bp_counter = 0

    def begin_batch(self, *args, **kwargs):
        """Generates a batch of data either by sampling from the model buffer
        or by cloning the input batch.

        Returns:
            (dict): the batch of data, keyed by ``rl_data_group``
        """
        self.log_scalar("training/prob_sampling_from_model", self.prob_of_sampling_model_data)
        if self.use_model_batch():
            return self.sample_model_batch()

        return {self.rl_data_group: kwargs[self.data_group]}

    def sample_model_batch(self):
        """Samples a batch of data from the model buffer.

        Returns:
            (dict): batch samples
        """
        try:
            batch = next(self.iter)
        except StopIteration:
            # Loader exhausted: restart it and draw from the top.
            self.iter = iter(self.dataloader)
            batch = next(self.iter)
        return batch

    def use_model_batch(self):
        """Decides if the batch should come from the model-generated buffer.

        Returns:
            (bool): True if model samples should be used, False otherwise.
        """
        self.bp_counter += 1
        self.prob_of_sampling_model_data = self.scheduler.evaluate_at(self.bp_counter)
        rnd = torch.rand(size=(1,), generator=self.rng)[0]
        return bool(rnd < self.prob_of_sampling_model_data)
+
+
class ModelBasedCollector(LoggingMixin, BatchCallback):
    """ModelBasedCollector class is used to sample rollouts from the trained
    dynamic model. The rollouts are stored in a replay buffer memory.

    Arguments:
        model_env: The Gym-like dynamic model
        agent: The policy used to sample actions
        memory: The memory to store the new synthetic samples
        rollout_scheduler: A scheduler used to set the rollout-length when unrolling the dynamic model
        num_bp_to_retain_buffer: The number of BP steps to keep samples. Samples will be over-written (first in
        first out) for bp steps larger than this.
        data_group: The data group to receive data from. This must be set to get real (Gym) samples
        input_key: The observation key read from the incoming batch.
    """

    def __init__(
        self,
        model_env: ModelEnv,
        agent: AgentProxy,
        memory: MemoryProxy,
        rollout_scheduler: BPStepScheduler,
        num_bp_to_retain_buffer=1000000,
        data_group: str = "default",
        input_key: str = "obs",
    ):
        super().__init__()
        """The data group is used to receive correct observation when
        collect_multiple is called.

        The data group must be set such that real Gym samples (not model
        data) are given to the function.
        """
        self.data_group = data_group
        self._input_key = input_key
        self.agent = agent
        self.memory = memory
        self.model_env = model_env
        self.last_environment_rewards = deque(maxlen=1000)

        # Rollout length starts at the scheduler's minimum and is advanced
        # by update_rollout_size() on every BP step.
        self.len_rollout = int(rollout_scheduler.value_min)
        self.rollout_scheduler = rollout_scheduler
        self.num_bp_to_retain_buffer = num_bp_to_retain_buffer
        # Current per-agent observations; populated on the first begin_batch.
        self.obs: Optional[dict[AgentId, DictObservation]] = None
        self.prob_of_sampling_model_data = 0.0
        self.bp_counter = 0

    def begin_batch(self, *args, **kwargs):
        """Reset the model env from the batch's observations, then unroll it
        for ``len_rollout + 1`` steps, storing each transition."""
        self.update_rollout_size()
        self.log_scalar("training/model_rollout_length", self.len_rollout)
        observation = self.get_batch(*args, **kwargs)

        self.obs = self.model_env.dict_reset(observation, self.len_rollout)
        for _ in range(self.len_rollout + 1):
            self.collect_sample()

    def get_batch(self, observation):
        # Extract the model input from the data group's batch.
        return observation[self._input_key]

    def collect_sample(self):
        """Collect a single step: act, step the model env, store the
        transition, and advance the cached observations."""
        actions = self.agent(self.obs)
        next_obs, ep_info = self.model_env.dict_step(actions)

        self.memory.add(self.obs, actions)
        self.obs = next_obs

        if "reward" in ep_info:
            self.log_scalar("episode/model_reward", ep_info["reward"])

    def update_rollout_size(self):
        """Advance the BP counter and, when the scheduled rollout length
        changes, resize the synthetic-sample memory to match."""
        self.bp_counter += 1
        len_rollout = int(self.rollout_scheduler.evaluate_at(self.bp_counter))
        if self.len_rollout != len_rollout:
            self.len_rollout = len_rollout
            # Capacity = steps per rollout x parallel envs x BP retention.
            new_memory_size = (
                self.len_rollout * self.model_env.num_envs * self.num_bp_to_retain_buffer
            )
            self.memory.resize(new_memory_size)
diff --git a/emote/models/ensemble.py b/emote/models/ensemble.py
new file mode 100644
index 00000000..86bc9cd0
--- /dev/null
+++ b/emote/models/ensemble.py
@@ -0,0 +1,188 @@
+# This file contains codes and texts that are copied from
+# https://github.com/facebookresearch/mbrl-lib
+from typing import Optional
+
+import numpy as np
+import torch
+
+from torch import nn as nn
+from torch.nn import GaussianNLLLoss, functional as F
+
+from emote.utils.math import truncated_normal_
+
+
def truncated_normal_init(m: nn.Module):
    """Initializes the weights of the given module using a truncated normal
    distribution.

    Weights get std ``1 / (2 * sqrt(input_dim))``; biases are zeroed.
    Modules of other types are left untouched.
    """
    if isinstance(m, nn.Linear):
        # NOTE(review): nn.Linear.weight is (out_features, in_features), so
        # shape[0] is the *output* dim, not fan-in. This matches the
        # mbrl-lib code this is copied from — confirm whether fan-in
        # scaling was intended before changing it.
        input_dim = m.weight.data.shape[0]
        stddev = 1 / (2 * np.sqrt(input_dim))
        truncated_normal_(m.weight.data, std=stddev)
        m.bias.data.fill_(0.0)
    if isinstance(m, EnsembleLinearLayer):
        # Ensemble weights are (num_members, in_size, out_size); each member
        # is initialized independently with fan-in-scaled std.
        num_members, input_dim, _ = m.weight.data.shape
        stddev = 1 / (2 * np.sqrt(input_dim))
        for i in range(num_members):
            truncated_normal_(m.weight.data[i], std=stddev)
        m.bias.data.fill_(0.0)
+
+
class EnsembleLinearLayer(nn.Module):
    """A batched linear layer holding one weight matrix and bias per
    ensemble member.

    Arguments:
        num_members (int): the ensemble size
        in_size (int): the input size of the model
        out_size (int): the output size of the model
    """

    def __init__(self, num_members: int, in_size: int, out_size: int):
        super().__init__()
        self.num_members = num_members
        self.in_size = in_size
        self.out_size = out_size
        # One (in_size, out_size) matrix and one (1, out_size) bias row per
        # member, uniformly initialized in [0, 1).
        self.weight = nn.Parameter(torch.rand(num_members, in_size, out_size))
        self.bias = nn.Parameter(torch.rand(num_members, 1, out_size))

    def forward(self, x):
        # Batched matmul over the member dimension, then a per-member bias.
        projected = x.matmul(self.weight)
        return projected + self.bias
+
+
class EnsembleOfGaussian(nn.Module):
    """An ensemble of MLPs that each predict a Gaussian (mean and log
    variance) over the output.

    The ensemble dimension is carried by ``EnsembleLinearLayer``; member
    predictions are averaged in ``forward``. Log variances are soft-clamped
    between learnable (optionally trainable) min/max bounds.
    """

    def __init__(
        self,
        *,
        in_size: int,
        out_size: int,
        device: str | torch.device,
        num_layers: int = 4,
        ensemble_size: int = 1,
        hidden_size: int = 256,
        learn_logvar_bounds: bool = False,
        deterministic: bool = False,
    ):
        super().__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.num_members = ensemble_size
        self.device = torch.device(device)
        # When deterministic, sample() returns the predicted mean instead of
        # drawing from the predicted Gaussian.
        self.deterministic = deterministic
        self.nll_loss = GaussianNLLLoss(reduction="none")

        activation_func = nn.ReLU()

        # num_layers hidden blocks, each an ensemble linear + activation.
        hidden_layers = [
            nn.Sequential(
                EnsembleLinearLayer(ensemble_size, in_size, hidden_size),
                activation_func,
            )
        ]
        for i in range(num_layers - 1):
            hidden_layers.append(
                nn.Sequential(
                    EnsembleLinearLayer(ensemble_size, hidden_size, hidden_size),
                    activation_func,
                )
            )
        self.hidden_layers = nn.Sequential(*hidden_layers)
        # Single head predicting mean and logvar, concatenated.
        self.mean_and_logvar = EnsembleLinearLayer(ensemble_size, hidden_size, 2 * out_size)
        # Soft-clamp bounds for the predicted log variance; trainable only
        # when learn_logvar_bounds is set.
        self.min_logvar = nn.Parameter(
            -10 * torch.ones(1, out_size), requires_grad=learn_logvar_bounds
        )
        self.max_logvar = nn.Parameter(
            0.5 * torch.ones(1, out_size), requires_grad=learn_logvar_bounds
        )
        # Weight of the logvar-bound regularizer added to the NLL loss.
        self.logvar_loss_weight = 0.01

        self.apply(truncated_normal_init)
        self.to(self.device)

    def default_forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Runs the ensemble and splits the head output into per-member mean
        and soft-clamped log variance."""
        x = self.hidden_layers(x)
        mean_and_logvar = self.mean_and_logvar(x)
        mean = mean_and_logvar[..., : self.out_size]
        logvar = mean_and_logvar[..., self.out_size :]
        # Soft clamp via softplus: logvar stays within (min_logvar, max_logvar)
        # while remaining differentiable.
        logvar = self.max_logvar - F.softplus(self.max_logvar - logvar)
        logvar = self.min_logvar + F.softplus(logvar - self.min_logvar)
        return mean, logvar

    def forward(
        self,
        x: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Computes mean and logvar predictions for the given input.

        Arguments:
            x (tensor): the input to the model, shape (batch, in_size).

        Returns:
            (tuple of two tensors): the predicted mean and log variance of
            the output, each averaged over the ensemble dimension.
        """
        assert x.ndim == 2
        x = x.unsqueeze(0)
        mean, logvar = self.default_forward(x)
        return mean.mean(dim=0), logvar.mean(dim=0)

    def loss(
        self,
        model_in: torch.Tensor,
        target: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, dict[str, any]]:
        """Computes Gaussian NLL loss.

        Arguments:
            model_in (tensor): input tensor.
            target (tensor): target tensor. Despite the Optional annotation,
                a non-None target is required (asserted below).

        Returns:
            (a tuple of tensor and dict): a loss tensor and a dict which includes
            extra info (currently always empty).
        """
        assert model_in.ndim == target.ndim
        if model_in.ndim == 2:  # add ensemble dimension
            model_in = model_in.unsqueeze(0)
            target = target.unsqueeze(0)
        pred_mean, pred_logvar = self.default_forward(model_in)
        if target.shape[0] != self.num_members:
            target = target.repeat(self.num_members, 1, 1)
        nll = (
            self.nll_loss(pred_mean, target, torch.exp(pred_logvar))
            .mean((1, 2))  # average over batch and target dimension
            .sum()  # sum over ensemble dimension
        )

        # Regularizer keeping the learnable logvar bounds tight.
        nll += self.logvar_loss_weight * (self.max_logvar.sum() - self.min_logvar.sum())
        return nll, {}

    def sample(
        self,
        model_input: torch.Tensor,
        rng: torch.Generator,
    ) -> torch.Tensor:
        """Samples an output from the model's predicted Gaussian.

        Args:
            model_input (tensor): the model input (e.g. concatenated
                observation and action).
            rng (torch.Generator): a random number generator.

        Returns:
            (tensor): the predicted mean when ``deterministic`` is set,
            otherwise a sample drawn from N(mean, std).
        """
        if self.deterministic:
            return self.forward(model_input)[0]
        means, logvars = self.forward(model_input)
        variances = logvars.exp()
        stds = torch.sqrt(variances)
        return torch.normal(means, stds, generator=rng)

    def save(self, save_dir: str):
        """Saves the model to the given directory."""
        model_dict = {"state_dict": self.state_dict()}
        torch.save(model_dict, save_dir)

    def load(self, load_dir: str):
        """Loads the model from the given path.

        NOTE(review): torch.load unpickles the file — only load trusted
        checkpoints (consider map_location/weights_only options).
        """
        model_dict = torch.load(load_dir)
        self.load_state_dict(model_dict["state_dict"])
diff --git a/emote/models/model.py b/emote/models/model.py
new file mode 100644
index 00000000..d12d7b2f
--- /dev/null
+++ b/emote/models/model.py
@@ -0,0 +1,317 @@
+# This file contains codes and texts that are copied from
+# https://github.com/facebookresearch/mbrl-lib
+
+from typing import Optional
+
+import torch
+import torch.nn.functional as F
+
+from torch import nn
+
+from emote.utils.model import normal_init
+
+
class DynamicModel(nn.Module):
    """Wrapper class for model. DynamicModel class functions as a wrapper for
    models including ensembles. It also provides data manipulations that are
    common when using dynamics models with observations and actions (e.g.,
    predicting delta observations, input normalization).

    Arguments:
        model: the model to wrap.
        learned_rewards (bool): if True, the wrapper considers the last output of the model
            to correspond to reward predictions.
        obs_process_fn (callable, optional): if provided, observations will be passed through
            this function before being given to the model.
        no_delta_list (list(int), optional): if provided, represents a list of dimensions over
            which the model predicts the actual observation and not just a delta.
    """

    def __init__(
        self,
        *,
        model: nn.Module,
        learned_rewards: bool = True,
        obs_process_fn: Optional[nn.Module] = None,
        no_delta_list: Optional[list[int]] = None,
    ):
        super().__init__()
        self.model = model
        # The wrapped model decides which device the wrapper operates on.
        self.device = self.model.device
        self.learned_rewards = learned_rewards
        self.no_delta_list = no_delta_list if no_delta_list else []
        self.obs_process_fn = obs_process_fn
        # Running statistics for model inputs and training targets; both are
        # updated in process_batch() and reused in sample().
        self.input_normalizer = Normalizer()
        self.target_normalizer = Normalizer()

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, ...]:
        """Computes the output of the dynamics model.

        Args:
            x (tensor): input

        Returns:
            (tuple of tensors): predicted tensors
        """
        return self.model.forward(x)

    # NOTE(review): "any" in the return annotation below is the builtin
    # function, not typing.Any -- harmless at runtime but probably unintended.
    def loss(
        self,
        obs: torch.Tensor,
        next_obs: torch.Tensor,
        action: torch.Tensor,
        reward: torch.Tensor,
    ) -> tuple[torch.Tensor, dict[str, any]]:
        """Computes the model loss over a batch of transitions.

        Arguments:
            obs (tensor): current observations
            next_obs (tensor): next observations
            action (tensor): actions
            reward (tensor): rewards

        Returns:
            (tensor and optional dict): the loss tensor and optional info
        """
        model_in, target = self.process_batch(
            obs=obs, next_obs=next_obs, action=action, reward=reward
        )
        return self.model.loss(model_in, target=target)

    def sample(
        self,
        action: torch.Tensor,
        observation: torch.Tensor,
        rng: torch.Generator,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Samples a simulated transition from the dynamics model. The function
        first normalizes the inputs to the model, and then denormalize the
        model output as the final output.

        Arguments:
            action (tensor): the action at.
            observation (tensor): the observation/state st.
            rng (torch.Generator): a random number generator.

        Returns:
            (tuple): predicted observation and rewards.
        """
        model_in = self.get_model_input(observation, action)

        # The model was trained on normalized inputs/targets (see
        # process_batch), so normalize before and denormalize after.
        model_in = self.input_normalizer.normalize(model_in)
        preds = self.model.sample(model_in, rng)
        preds = self.target_normalizer.denormalize(preds)

        assert len(preds.shape) == 2, (
            f"Prediction shape is: {preds.shape} Predictions must be 'batch_size x "
            f"length_of_prediction. Have you forgotten to run propagation on the ensemble?"
        )
        next_observs = preds[:, :-1] if self.learned_rewards else preds

        # The model predicts deltas; add the current observation back, except
        # for the dimensions that were trained as absolute values.
        tmp_ = next_observs + observation
        for dim in self.no_delta_list:
            tmp_[:, dim] = next_observs[:, dim]
        next_observs = tmp_
        rewards = preds[:, -1:] if self.learned_rewards else None

        return next_observs, rewards

    def get_model_input(
        self,
        obs: torch.Tensor,
        action: torch.Tensor,
    ) -> torch.Tensor:
        """The function prepares the input to the neural network model by
        concatenating observations and actions. In case, obs_process_fn is
        given, the observations are processed by the function prior to the
        concatenation.

        Arguments:
            obs (torch.Tensor): observation tensor
            action (torch.Tensor): action tensor

        Returns:
            (torch.Tensor): the concatenation of obs and actions
        """
        if self.obs_process_fn:
            obs = self.obs_process_fn(obs)
        # Concatenate along the last dimension.
        model_in = torch.cat([obs, action], dim=obs.ndim - 1)
        return model_in

    def process_batch(
        self,
        obs: torch.Tensor,
        next_obs: torch.Tensor,
        action: torch.Tensor,
        reward: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """The function processes the given batch, normalizes inputs and
        targets, and prepares them for the training.

        Arguments:
            obs (torch.Tensor): the observations tensor
            next_obs (torch.Tensor): the next observation tensor
            action (torch.Tensor): the actions tensor
            reward (torch.Tensor): the rewards tensor

        Returns:
            (tuple[torch.Tensor, torch.Tensor]): the training input and target tensors
        """

        # Train on observation deltas, except for the dimensions listed in
        # no_delta_list which are predicted as absolute values.
        target_obs = next_obs - obs
        for dim in self.no_delta_list:
            target_obs[..., dim] = next_obs[..., dim]

        model_in = self.get_model_input(obs, action)
        if self.learned_rewards:
            target = torch.cat([target_obs, reward], dim=obs.ndim - 1)
        else:
            target = target_obs

        # update_state=True folds this batch into the running statistics.
        model_in_normalized = self.input_normalizer.normalize(model_in.float(), True)
        target_normalized = self.target_normalizer.normalize(target.float(), True)
        return model_in_normalized, target_normalized

    def save(self, save_dir: str) -> None:
        """Saving the model.

        Arguments:
            save_dir (str): the directory to save the model
        """
        self.model.save(save_dir)

    def load(self, load_dir: str) -> None:
        """Loading the model.

        Arguments:
            load_dir (str): the directory to load the model
        """
        self.model.load(load_dir)
+
+
class DeterministicModel(nn.Module):
    """Deterministic MLP dynamics model.

    A stack of (Linear + BatchNorm1d + ReLU) blocks followed by a final
    Linear layer, trained with an MSE loss.

    Arguments:
        in_size (int): size of the input vector.
        out_size (int): size of the output vector.
        device (torch.device): device to place the network on.
        hidden_size (int): width of each hidden layer.
        num_hidden_layers (int): number of hidden (Linear+BN+ReLU) blocks.
    """

    def __init__(
        self,
        in_size: int,
        out_size: int,
        device: torch.device,
        hidden_size: int = 256,
        num_hidden_layers: int = 4,
    ):
        super().__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.device = torch.device(device)

        network = [
            nn.Sequential(
                nn.Linear(in_size, hidden_size),
                nn.BatchNorm1d(hidden_size),
                nn.ReLU(),
            )
        ]
        for _ in range(num_hidden_layers - 1):
            network.append(
                nn.Sequential(
                    nn.Linear(hidden_size, hidden_size),
                    nn.BatchNorm1d(hidden_size),
                    nn.ReLU(),
                )
            )
        # Final projection has no activation.
        network.append(nn.Sequential(nn.Linear(hidden_size, out_size)))
        self.network = nn.Sequential(*network).to(self.device)
        self.network.apply(normal_init)

    def forward(
        self,
        x: torch.Tensor,
    ) -> torch.Tensor:
        """Runs the network on the (batched) input tensor."""
        return self.network(x)

    def loss(
        self,
        model_in: torch.Tensor,
        target: torch.Tensor,
    ) -> tuple[torch.Tensor, dict[str, any]]:
        """Computes the MSE loss between the model prediction and the target.

        Arguments:
            model_in (torch.Tensor): input tensor.
            target (torch.Tensor): target tensor.

        Returns:
            (tuple): the loss tensor and an info dict (always {"loss_info": None}).
        """
        prediction = self.forward(model_in)
        loss = F.mse_loss(prediction, target)
        return loss, {"loss_info": None}

    def sample(
        self,
        model_input: torch.Tensor,
        rng: torch.Generator = None,
    ) -> torch.Tensor:
        """Returns the deterministic model prediction for the given input.

        Args:
            model_input (tensor): the observation and action.
            rng (torch.Generator): unused; accepted to match the sampling
                interface of the probabilistic models.

        Returns:
            (tensor): the deterministic prediction (forward pass output).
        """
        return self.forward(model_input)
+
+
class Normalizer:
    """Maintains an exponentially-blended running mean/std of observed data
    and uses those statistics to normalize and de-normalize tensors."""

    def __init__(self):
        # Statistics are lazily initialized by the first update_stats() call.
        self.mean = None
        self.std = None
        self.eps = 1e-5
        # Blend factor for new batches; decays by 0.01 per update down to 0.01.
        self.update_rate = 0.5
        self.bp_step = 0

    def update_stats(self, data: torch.Tensor):
        """Updates the stored statistics using the given data.

        Arguments:
            data (torch.Tensor): The data used to compute the statistics.
        """
        batch_mean = data.mean(0, keepdim=True)
        batch_std = data.std(0, keepdim=True)
        if self.mean is None:
            # First batch seeds the statistics directly.
            self.mean = batch_mean
            self.std = batch_std
        else:
            keep = 1.0 - self.update_rate
            self.mean = keep * self.mean + self.update_rate * batch_mean
            self.std = keep * self.std + self.update_rate * batch_std
        # Guard against division by (near-)zero std in normalize().
        self.std[self.std < self.eps] = self.eps
        self.update_rate = max(self.update_rate - 0.01, 0.01)

    def normalize(self, val: torch.Tensor, update_state: bool = False) -> torch.Tensor:
        """Normalizes the value according to the stored statistics.

        Arguments:
            val (torch.Tensor): The value to normalize.
            update_state (bool): if True, fold ``val`` into the running
                statistics before normalizing.

        Returns:
            (torch.Tensor): The normalized value (unchanged if no statistics
            have been collected yet).
        """
        if update_state:
            self.update_stats(val)
        if self.mean is None:
            return val
        return (val - self.mean) / self.std

    def denormalize(self, val: torch.Tensor) -> torch.Tensor:
        """De-normalizes the value according to the stored statistics.

        Arguments:
            val (torch.Tensor): The value to de-normalize.

        Returns:
            (torch.Tensor): The de-normalized value (unchanged if no
            statistics have been collected yet).
        """
        return val if self.mean is None else val * self.std + self.mean
diff --git a/emote/models/model_env.py b/emote/models/model_env.py
new file mode 100644
index 00000000..09e7226e
--- /dev/null
+++ b/emote/models/model_env.py
@@ -0,0 +1,195 @@
+# This file contains codes and texts that are copied from
+# https://github.com/facebookresearch/mbrl-lib
+
+from itertools import count
+from typing import Optional
+
+import numpy as np
+import torch
+
+from torch import Tensor
+
+from emote.models.model import DynamicModel
+from emote.typing import (
+ AgentId,
+ DictObservation,
+ DictResponse,
+ EpisodeState,
+ RewardFnType,
+ TermFnType,
+)
+from emote.utils.model import to_numpy
+
+
class ModelEnv:
    """Wraps a dynamics model into a gym-like environment.

    Arguments:
        num_envs (int): the number of envs to simulate in parallel (batch_size).
        model (DynamicModel): the dynamic model to wrap.
        termination_fn (callable): a function that receives observations, and
            returns a boolean flag indicating whether the episode should end or not.
        reward_fn (callable, optional): a function that receives actions and observations
            and returns the value of the resulting reward in the environment.
        generator (torch.Generator, optional): a torch random number generator
    """

    def __init__(
        self,
        *,
        num_envs: int,
        model: DynamicModel,
        termination_fn: TermFnType,
        reward_fn: Optional[RewardFnType] = None,
        generator: Optional[torch.Generator] = None,
        input_key: str = "obs",
    ):
        self.dynamic_model = model
        self.termination_fn = termination_fn
        # When reward_fn is None, rewards come from the model's prediction
        # (see step()).
        self.reward_fn = reward_fn
        self.device = model.device
        self._input_key = input_key
        self.num_envs = num_envs
        # Rollout state; populated by reset().
        self._current_obs: torch.Tensor = None
        self._init_obs: torch.Tensor = None
        self._propagation_method: Optional[str] = None
        self._model_indices = None
        self._timestep = 0
        self._len_rollout = 0

        self.rng = generator if generator else torch.Generator(device=self.device)
        # Monotonic counter providing unique agent ids across resets.
        self._next_agent = count()
        self._agent_ids: list[AgentId] = [next(self._next_agent) for i in range(self.num_envs)]

    def reset(
        self,
        initial_obs_batch: torch.Tensor,
        len_rollout: int,
    ):
        """Resets the model environment.

        Arguments:
            initial_obs_batch (torch.Tensor): a batch of initial observations.
            len_rollout (int): the max length of the model rollout
        """
        self._timestep = 0
        self._len_rollout = len_rollout
        assert len(initial_obs_batch.shape) == 2  # batch, obs_dim
        self._current_obs = torch.clone(initial_obs_batch)
        # Kept so that finished episodes can be restarted from their
        # initial observation in dict_step().
        self._init_obs = torch.clone(self._current_obs)

    def step(
        self,
        actions: np.ndarray,
    ) -> tuple[Tensor, Tensor, Tensor, dict[str, Tensor]]:
        """Steps the model environment with the given batch of actions.

        Arguments:
            actions (np.ndarray): the actions for each "episode" to rollout.
                Shape must be batch_size x dim_actions. If a np.ndarray is given, it's
                converted to a torch.Tensor and sent to the model device.

        Returns:
            (tuple | dict): contains the predicted next observation, reward, done flag.
            The done flag and rewards are computed using the termination_fn and
            reward_fn passed in the constructor. The rewards can also be predicted
            by the model.
        """
        assert len(actions.shape) == 2  # batch, action_dim
        with torch.no_grad():
            actions = torch.from_numpy(actions).to(self.device)
            (
                next_observs,
                pred_rewards,
            ) = self.dynamic_model.sample(
                action=actions,
                observation=self._current_obs,
                rng=self.rng,
            )
            rewards = (
                pred_rewards if self.reward_fn is None else self.reward_fn(actions, next_observs)
            )
            dones = self.termination_fn(next_observs)

            # Flag every env once the rollout horizon is reached.
            info = {"reached_max_len": torch.zeros(dones.shape)}
            self._timestep += 1
            if self._timestep >= self._len_rollout:
                info["reached_max_len"] += 1.0
            self._current_obs = torch.clone(next_observs)
            return next_observs, rewards, dones, info

    def dict_step(
        self,
        actions: dict[AgentId, DictResponse],
    ) -> tuple[dict[AgentId, DictObservation], dict[str, float]]:
        """The function to step the Gym-like model with dict_action.

        Arguments:
            actions (dict[AgentId, DictResponse]): the dict actions.

        Returns:
            (tuple[dict[AgentId, DictObservation], dict[str, float]]): the predicted next dict observation,
            reward, and done flag.
        """
        batched_actions = np.stack(
            [actions[agent].list_data["actions"] for agent in self._agent_ids]
        )
        next_obs, rewards, dones, info = self.step(batched_actions)
        new_agents = []
        results = {}
        reached_max_len = info["reached_max_len"]

        # NOTE(review): `done` and `timed_out` are per-env tensors here, so the
        # `if` relies on single-element tensor truthiness -- confirm that
        # termination_fn returns one value per env.
        for env_id, (done, timed_out) in enumerate(zip(dones, reached_max_len)):
            if done or timed_out:
                episode_state = EpisodeState.TERMINAL if done else EpisodeState.INTERRUPTED
                # Emit the final observation for the finished agent...
                results[self._agent_ids[env_id]] = DictObservation(
                    episode_state=episode_state,
                    array_data={self._input_key: to_numpy(next_obs[env_id])},
                    rewards={"reward": to_numpy(rewards[env_id])},
                )
                # ...and immediately restart the slot with a fresh agent id
                # from the stored initial observation.
                new_agent = next(self._next_agent)
                results[new_agent] = DictObservation(
                    episode_state=EpisodeState.INITIAL,
                    array_data={self._input_key: to_numpy(self._init_obs[env_id])},
                    rewards={"reward": None},
                )
                new_agents.append(new_agent)
                self._agent_ids[env_id] = new_agent
        results.update(
            {
                agent_id: DictObservation(
                    episode_state=EpisodeState.RUNNING,
                    array_data={self._input_key: to_numpy(next_obs[env_id])},
                    rewards={"reward": to_numpy(rewards[env_id])},
                )
                for env_id, agent_id in enumerate(self._agent_ids)
                if agent_id not in new_agents
            }
        )
        ep_info = {}
        return results, ep_info

    def dict_reset(
        self,
        obs: torch.Tensor,
        len_rollout: int,
    ) -> dict[AgentId, DictObservation]:
        """Resets the model env.

        Arguments:
            obs (torch.Tensor): the initial observations.
            len_rollout (int): the max rollout length

        Returns:
            (dict): the formatted initial observation.
        """
        self.reset(obs, len_rollout)
        self._agent_ids = [next(self._next_agent) for _ in range(self.num_envs)]
        return {
            agent_id: DictObservation(
                episode_state=EpisodeState.INITIAL,
                array_data={self._input_key: to_numpy(obs[i])},
                rewards={"reward": None},
            )
            for i, agent_id in enumerate(self._agent_ids)
        }
diff --git a/emote/nn/BUILD b/emote/nn/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/nn/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/nn/__init__.py b/emote/nn/__init__.py
new file mode 100644
index 00000000..916c43b4
--- /dev/null
+++ b/emote/nn/__init__.py
@@ -0,0 +1,11 @@
+from .action_value_mlp import ActionValueMlp
+from .gaussian_policy import GaussianMlpPolicy, GaussianPolicyHead
+from .initialization import ortho_init_
+
+
+__all__ = [
+ "ActionValueMlp",
+ "GaussianMlpPolicy",
+ "GaussianPolicyHead",
+ "ortho_init_",
+]
diff --git a/emote/nn/action_value_mlp.py b/emote/nn/action_value_mlp.py
new file mode 100644
index 00000000..e2a3ede7
--- /dev/null
+++ b/emote/nn/action_value_mlp.py
@@ -0,0 +1,54 @@
+from functools import partial
+from typing import List
+
+import torch
+
+from torch import Tensor, nn
+
+from emote.nn.initialization import ortho_init_
+
+
class ActionValueMlp(nn.Module):
    """MLP that maps an (observation, action) pair to a scalar Q-value.

    Arguments:
        observation_dim (int): size of the observation vector.
        action_dim (int): size of the action vector.
        hidden_dims (list[int]): sizes of the hidden (Linear + ReLU) layers.
    """

    def __init__(self, observation_dim, action_dim, hidden_dims):
        super().__init__()
        self.obs_d = observation_dim
        self.act_d = action_dim
        layer_inputs = [observation_dim + action_dim] + hidden_dims
        hidden_blocks = [
            nn.Sequential(nn.Linear(size_in, size_out), nn.ReLU())
            for size_in, size_out in zip(layer_inputs, hidden_dims)
        ]
        self.encoder = nn.Sequential(*hidden_blocks)
        self.encoder.apply(ortho_init_)

        # Scalar value head, initialized with unit gain.
        self.final_layer = nn.Linear(hidden_dims[-1], 1)
        self.final_layer.apply(partial(ortho_init_, gain=1))

    def forward(self, action: Tensor, obs: Tensor) -> Tensor:
        """Returns the Q-value, shape (batch, 1), for each (obs, action) row."""
        batch, obs_dim = obs.shape
        action_batch, act_dim = action.shape
        assert batch == action_batch
        assert obs_dim == self.obs_d
        assert act_dim == self.act_d
        joint_input = torch.cat([obs, action], dim=1)
        value = self.final_layer(self.encoder(joint_input))
        assert value.shape == (batch, 1)
        return value
+
+
class SharedEncoderActionValueNet(nn.Module):
    """Action-value network that runs observations through an externally
    shared encoder before the Q-value MLP.

    Arguments:
        shared_enc (nn.Module): encoder applied to observations; owned and
            initialized elsewhere (it is not re-initialized here).
        encoder_out_dim (int): dimensionality of the encoder output.
        action_dim (int): dimensionality of the action input.
        hidden_dims (List[int]): hidden layer sizes of the value MLP.
    """

    def __init__(
        self,
        shared_enc: nn.Module,
        encoder_out_dim: int,
        action_dim: int,
        hidden_dims: List[int],
    ):
        super().__init__()
        self.shared_enc = shared_enc
        self.action_value_mlp = ActionValueMlp(encoder_out_dim, action_dim, hidden_dims)

    def forward(self, action: torch.Tensor, obs: torch.Tensor):
        """Encodes ``obs`` and returns the Q-value for (encoded obs, action)."""
        x = self.shared_enc(obs)
        value = self.action_value_mlp(action, x)
        return value
diff --git a/emote/nn/curl.py b/emote/nn/curl.py
new file mode 100644
index 00000000..c6ada202
--- /dev/null
+++ b/emote/nn/curl.py
@@ -0,0 +1,295 @@
+from typing import List
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from torch.optim.lr_scheduler import LinearLR
+
+from emote.callbacks.loss import LossCallback
+from emote.nn.layers import Conv2dEncoder
+
+
def soft_update_from_to(source_params, target_params, tau):
    """Polyak-averages each target parameter toward its source counterpart,
    in place.

    Arguments:
        source_params: iterable of source tensors/parameters.
        target_params: iterable of target tensors/parameters; updated in place.
        tau (float): interpolation factor; 1.0 copies the source outright.
    """
    for src, dst in zip(source_params, target_params):
        blended = dst.data * (1.0 - tau) + src.data * tau
        dst.data.copy_(blended)
+
+
def rand_uniform(minval: float, maxval: float, shape: List[int]):
    """Samples a tensor of the given shape uniformly from [minval, maxval).

    Arguments:
        minval (float): inclusive lower bound of the interval.
        maxval (float): exclusive upper bound of the interval.
        shape (List[int]): shape of the returned tensor.

    Returns:
        (torch.Tensor): the uniformly sampled tensor.
    """
    # Scale and shift a standard U[0, 1) sample into [minval, maxval).
    # Named `span` to avoid shadowing the builtin `range`.
    span = maxval - minval
    return span * torch.rand(shape) + minval
+
+
@torch.jit.script
class ImageAugmentor:
    """TorchScript-compiled random cutout (and optional noise) augmentation
    for image batches.

    Images are indexed as (batch, height, width, channels) by the cutout
    helpers, which zero a rectangular region in place; callers in this file
    pass clones of the original batch.
    """

    def __init__(
        self,
        device: torch.device,
        use_fast_augment: bool = True,
        use_noise_aug: bool = True,
        use_per_image_mask_size: bool = False,
        min_mask_relative_size: float = 0.2,  # min size of the mask relative to the size of the image
        max_mask_relative_size: float = 0.4,  # max size of the mask relative to the size of the image
    ):
        self._use_noise_aug = use_noise_aug
        self._use_fast_augment = use_fast_augment
        self._use_per_image_mask_size = use_per_image_mask_size
        self._min_mask_size = min_mask_relative_size
        self._max_mask_size = max_mask_relative_size
        self._device = device

    def __call__(self, image: torch.Tensor):
        # Augmentation is never part of the autograd graph.
        with torch.no_grad():
            if self._use_noise_aug:
                image = self._maybe_add_noise(image, noise_std=0.015, noise_prob=0.25)
            # Fast path: one cutout position and size shared by the batch.
            if self._use_fast_augment:
                image = self._cutout_per_batch_pos_and_mask_size(image)
            else:
                if self._use_per_image_mask_size:
                    image = self._cutout_per_image_mask_size(image)
                else:
                    image = self._cutout_per_batch_mask_size(image)
        return image

    def _get_mask_indices(
        self,
        image_size_x: int,
        image_size_y: int,
        num_slices: List[int],  # the number of unique index slices to return
    ):
        # One random relative size per call, shared by all returned slices.
        size = rand_uniform(minval=self._min_mask_size, maxval=self._max_mask_size, shape=[1])[0]
        mask_size: List[int] = [int(image_size_x * size), int(image_size_y * size)]
        start_i = torch.randint(low=0, high=image_size_x - mask_size[0], size=num_slices)
        start_j = torch.randint(low=0, high=image_size_y - mask_size[1], size=num_slices)
        end_i = start_i + mask_size[0]
        end_j = start_j + mask_size[1]
        return start_i, start_j, end_i, end_j

    def _maybe_add_noise(self, image: torch.Tensor, noise_std: float, noise_prob: float):
        prob_sample = rand_uniform(minval=0.0, maxval=1.0, shape=[1])[0]
        # Add noise to the image from a normal distribution.
        if prob_sample < noise_prob:
            image = image + torch.normal(
                mean=0.0, std=noise_std, size=image.shape, device=self._device
            )
        return image

    def _cutout_per_image_mask_size(self, images: torch.Tensor):
        # This is slightly slower than per batch version but in principle it should also be slightly better.
        batch_size, im_x, im_y, _ = images.shape

        for i in range(batch_size):
            start_i, start_j, end_i, end_j = self._get_mask_indices(im_x, im_y, num_slices=[1])
            images[i, start_i:end_i, start_j:end_j, :] = 0
        return images

    def _cutout_per_batch_mask_size(self, images: torch.Tensor):
        # One mask size for the batch, but an independent position per image.
        batch_size, im_x, im_y, _ = images.shape

        start_i, start_j, end_i, end_j = self._get_mask_indices(im_x, im_y, num_slices=[batch_size])

        for i in range(batch_size):
            images[i, start_i[i] : end_i[i], start_j[i] : end_j[i], :] = 0
        return images

    def _cutout_per_batch_pos_and_mask_size(self, images: torch.Tensor):
        # Same mask position and size for every image in the batch.
        _, im_x, im_y, _ = images.shape
        start_i, start_j, end_i, end_j = self._get_mask_indices(im_x, im_y, num_slices=[1])
        images[:, start_i:end_i, start_j:end_j, :] = 0
        return images
+
+
class CurlLoss(LossCallback):
    """Contrastive Unsupervised Representations for Reinforcement Learning
    (CURL).

    paper: https://arxiv.org/abs/2004.04136

    :param encoder_model: (Conv2dEncoder) The image encoder that will be trained using CURL.
    :param target_encoder_model: (Conv2dEncoder) The target image encoder.
    :param device: (torch.device) The device to use for computation.
    :param learning_rate: (float)
    :param learning_rate_start_frac: (float) The start fraction for LR schedule.
    :param learning_rate_end_frac: (float) The end fraction for LR schedule.
    :param learning_rate_steps: (int) The number of step to decay the LR over.
    :param max_grad_norm: (float) The maximum gradient norm, use for gradient clipping.
    :param desired_zdim: (int) The size of the latent. If the projection layer is not used this will
        default to the encoder output size.
    :param tau: (float) The tau value that is used for updating the target encoder.
    :param use_noise_aug: (bool) Add noise during image augmentation.
    :param temperature: (float) The value used for the temperature scaled cross-entropy calculation.
    :param use_temperature_variant: (bool) Use normalised temperature scaled cross-entropy variant.
    :param use_per_image_mask_size: (bool) Use different mask sizes for every image in the batch.
    :param use_fast_augment: (bool) A gpu compatible image augmentation that uses a fixed cutout
        position and size per batch.
    :param use_projection_layer: (bool) Add an additional dense layer to the encoder that projects
        to zdim size.
    :param augment_anchor_and_pos: (bool) Augment both the anchor and positive images.
    :param log_images: (bool) Logs the augmented images.
    """

    def __init__(
        self,
        encoder_model: Conv2dEncoder,
        target_encoder_model: Conv2dEncoder,
        device: torch.DeviceObjType,
        learning_rate: float,
        learning_rate_start_frac: float = 1.0,
        learning_rate_end_frac: float = 1.0,
        learning_rate_steps: float = 1,
        max_grad_norm: float = 1.0,
        data_group: str = "default",
        desired_zdim: int = 128,  # This will be ignored if use_projection_layer = False
        tau: float = 0.005,
        use_noise_aug: bool = False,
        temperature: float = 0.1,
        use_temperature_variant: bool = True,
        use_per_image_mask_size: bool = False,
        use_fast_augment: bool = False,
        use_projection_layer: bool = True,
        augment_anchor_and_pos: bool = True,  # disabling this saves some computation and doesn't seem to have any adverse effects.
        log_images: bool = True,
    ):
        self._max_grad_norm = max_grad_norm
        self.data_group = data_group
        self._device = device

        self._use_projection_layer = use_projection_layer
        self._log_images = log_images
        self._use_temperature_variant = use_temperature_variant
        self._augment_anchor_and_pos = augment_anchor_and_pos
        self._tau = tau

        encoder_output_size = encoder_model.get_encoder_output_size()

        if not encoder_model.flatten:
            encoder_output_size = (
                encoder_output_size[0] * encoder_output_size[1] * encoder_output_size[2]
            )
            encoder_model = nn.Sequential(encoder_model, nn.Flatten())

        if not target_encoder_model.flatten:
            target_encoder_model = nn.Sequential(target_encoder_model, nn.Flatten())

        if self._use_projection_layer:
            # Add a layer to reduce the encoder output size to the size of zdim.
            # This differs from the original paper.
            self._zdim = desired_zdim

            # Add projection layer to the encoder.
            encoder_proj_layer = nn.Linear(encoder_output_size, desired_zdim, device=device)
            # Materialize the parameter lists: .parameters() returns a one-shot
            # generator which would be exhausted by the first soft update,
            # silently turning every later update in end_batch() into a no-op.
            self._proj_layer_source_vars = list(encoder_proj_layer.parameters())
            encoder_model = nn.Sequential(encoder_model, encoder_proj_layer, nn.ReLU())

            # Add projection layer to the target encoder.
            target_proj_layer = nn.Linear(encoder_output_size, desired_zdim, device=device)
            self._proj_layer_target_vars = list(target_proj_layer.parameters())
            target_encoder_model = nn.Sequential(target_encoder_model, target_proj_layer, nn.ReLU())

            # Update the projection layers on the target to match the source
            soft_update_from_to(self._proj_layer_source_vars, self._proj_layer_target_vars, tau=1.0)
        else:
            self._zdim = encoder_output_size

        self._encoder = encoder_model
        self._target_encoder = target_encoder_model

        # Bilinear similarity matrix. Sized from the actual latent dim so it
        # also matches when the projection layer is disabled (desired_zdim is
        # ignored in that case).
        self._W = torch.rand(size=[self._zdim, self._zdim], device=device, requires_grad=True)

        if self._use_temperature_variant:
            self._temperature = torch.tensor(
                temperature, requires_grad=False, dtype=torch.float32, device=device
            )

        self._augment = ImageAugmentor(
            use_fast_augment=use_fast_augment,
            use_noise_aug=use_noise_aug,
            use_per_image_mask_size=use_per_image_mask_size,
            device=device,
        )

        optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)

        lr_schedule = LinearLR(
            optimizer,
            learning_rate_start_frac,
            learning_rate_end_frac,
            learning_rate_steps,
        )
        super().__init__(
            name="curl",
            optimizer=optimizer,
            lr_schedule=lr_schedule,
            network=None,
            max_grad_norm=max_grad_norm,
            data_group=data_group,
        )

    def parameters(self):
        """Parameters trained by this loss: the (projected) encoder and the
        bilinear similarity matrix W."""
        return list(self._encoder.parameters()) + [self._W]

    def backward(self, observation):
        """Runs one CURL optimization step on a batch of images."""
        images = observation["images"]
        image_aug1 = self._augment(images.clone())
        image_aug2 = (
            self._augment(images.clone()) if self._augment_anchor_and_pos else images.clone()
        )

        self.optimizer.zero_grad()
        loss = self._loss(image_aug1, image_aug2)
        loss.backward()
        # parameters() must be *called* here; passing the bound method itself
        # would not clip anything.
        grad_norm = nn.utils.clip_grad_norm_(self.parameters(), self._max_grad_norm)
        self.optimizer.step()
        self.lr_schedule.step()

        self.log_scalar(f"loss/{self.name}_lr", self.lr_schedule.get_last_lr()[0])
        self.log_scalar(f"loss/{self.name}_loss", loss)
        self.log_scalar(f"loss/{self.name}_gradient_norm", grad_norm)

        if self._log_images:
            self.log_image("augmentations/base_allch", images[0, :, :, :])
            self.log_image("augmentations/image1_allch", image_aug1[0, :, :, :])
            self.log_image("augmentations/image2_allch", image_aug2[0, :, :, :])

    @torch.jit.export
    def _loss(self, image1: torch.Tensor, image2: torch.Tensor):
        """Computes the contrastive loss between an anchor and positive batch."""
        batch_size = image1.shape[0]

        # ENCODE
        z_a = self._encoder(image1)
        with torch.no_grad():
            # Positives go through the (frozen-for-this-step) target encoder.
            z_pos = self._target_encoder(image2)

        # PROJECTION
        Wz = self._W @ z_pos.T  # (z,B)

        # LOGITS
        logits = z_a @ Wz  # (B,B)

        if self._use_temperature_variant:
            # Use normalised temperature scaled cross-entropy. This differs from the orig
            # CURL paper but it seems to give better results. This technique is also used
            # in SimCLR v2.
            logits = logits / self._temperature
        else:
            # remove max for numerical stability
            logits = logits - torch.amax(logits, dim=1)

        # LOSS
        # One neat trick!: Diags are positive examples, off-diag are negative examples!
        labels = F.one_hot(torch.arange(batch_size, device=self._device), batch_size).float()
        loss = (-labels * F.log_softmax(logits, dim=-1)).sum(dim=-1)
        return torch.mean(loss)

    def end_batch(self):
        """Soft-updates the target projection layer toward the source one."""
        if self._use_projection_layer:
            soft_update_from_to(
                self._proj_layer_source_vars,
                self._proj_layer_target_vars,
                tau=self._tau,
            )
diff --git a/emote/nn/gaussian_policy.py b/emote/nn/gaussian_policy.py
new file mode 100644
index 00000000..02cf702a
--- /dev/null
+++ b/emote/nn/gaussian_policy.py
@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+from functools import partial
+from typing import Tuple
+
+import torch
+import torch.distributions as dists
+import torch.distributions.transforms as transforms
+import torch.nn as nn
+
+from torch import Tensor
+
+from emote.nn.initialization import ortho_init_, xavier_uniform_init_
+
+
class BasePolicy(nn.Module):
    """Base class for policies, providing default pre/post-action handling."""

    def __init__(self):
        super().__init__()

    def post_process(self, actions):
        """Post-process a pre-action into a post-action.

        Defaults to the identity transformation.
        """
        return actions

    def infer(self, x: Tensor):
        """Samples pre-actions and associated post-actions (actual decisions)
        from the policy given the encoder input.

        Only for use at inference time; defaults to identity
        transformation. Crucial to reimplement for discrete
        reparametrized policies.
        """
        # The subclass forward is expected to return (sample, ...); only the
        # sample is post-processed.
        p_samp, _ = self(x)
        return p_samp, self.post_process(p_samp)
+
+
class GaussianPolicyHead(nn.Module):
    """Policy head producing tanh-squashed Gaussian actions.

    In training mode the forward pass draws a reparameterized sample from a
    tanh-transformed normal distribution and returns it with its
    log-probability. Outside training mode it deterministically maps the
    caller-provided noise through ``tanh(mean + std * epsilon)``.
    """

    def __init__(
        self,
        hidden_dim: int,
        action_dim: int,
    ):
        super().__init__()
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim
        self.mean = nn.Linear(hidden_dim, action_dim)
        self.log_std = nn.Linear(hidden_dim, action_dim)

    def forward(self, x: Tensor, epsilon: Tensor | None = None) -> Tensor | Tuple[Tensor]:
        """Sample pre-actions and associated log-probabilities.

        :return: Direct samples (pre-actions) from the policy log-
            probabilities associated to those samples
        """
        batch_size, _ = x.shape

        # The mean clamp equates to +/-0.99991 after tanh; the log-std clamp
        # keeps the std numerically safe.
        mean = torch.clamp(self.mean(x), min=-5, max=5)
        log_std = torch.clamp(self.log_std(x), min=-20, max=2)
        std = torch.exp(log_std)

        if not self.training:
            # Deterministic inference path driven by external noise.
            return torch.tanh(mean + std * epsilon)

        base_dist = dists.Independent(dists.Normal(mean, std), 1)
        squashed = dists.TransformedDistribution(
            base_dist,
            transforms.TanhTransform(cache_size=1),
        )
        sample = squashed.rsample()
        log_prob = squashed.log_prob(sample).view(batch_size, 1)

        assert sample.shape == (batch_size, self.action_dim)
        assert log_prob.shape == (batch_size, 1)

        return sample, log_prob
+
+
class GaussianMlpPolicy(nn.Module):
    """MLP feature encoder followed by a Gaussian policy head.

    Arguments:
        observation_dim (int): size of the observation vector.
        action_dim (int): size of the action vector.
        hidden_dims (list[int]): sizes of the hidden (Linear + ReLU) layers.
    """

    def __init__(self, observation_dim: int, action_dim: int, hidden_dims: list[int]):
        super().__init__()
        self.encoder = nn.Sequential(
            *[
                nn.Sequential(nn.Linear(n_in, n_out), nn.ReLU())
                for n_in, n_out in zip([observation_dim] + hidden_dims, hidden_dims)
            ],
        )
        self.policy = GaussianPolicyHead(hidden_dims[-1], action_dim)

        # Orthogonal init for the trunk; small-gain Xavier for the head.
        self.encoder.apply(ortho_init_)
        self.policy.apply(partial(xavier_uniform_init_, gain=0.01))

    def forward(self, obs: Tensor, epsilon: Tensor | None = None) -> Tensor | Tuple[Tensor]:
        """Encodes ``obs`` and delegates to the policy head; see
        ``GaussianPolicyHead.forward`` for the return convention."""
        return self.policy(self.encoder(obs), epsilon)
diff --git a/emote/nn/initialization.py b/emote/nn/initialization.py
new file mode 100644
index 00000000..96096e4a
--- /dev/null
+++ b/emote/nn/initialization.py
@@ -0,0 +1,34 @@
+import numpy as np
+import torch
+
+from torch import nn
+
+
def ortho_init_(m, gain=np.sqrt(2)):
    """Applies orthogonal initialization to Linear/Conv2d weights and zeroes
    their biases when present. Other module types are left untouched.

    Meant to be used with ``nn.Module.apply``.

    Arguments:
        m (nn.Module): module visited by ``apply``.
        gain (float): scaling factor for the orthogonal init.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.orthogonal_(m.weight, gain)
        # Modules constructed with bias=False have bias None; skip them
        # instead of crashing.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
+
+
def xavier_uniform_init_(m, gain):
    """Xavier-uniform weight init with zero bias, applied to Linear modules
    only; other module types are ignored. For use with ``nn.Module.apply``.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.xavier_uniform_(m.weight, gain)
    nn.init.constant_(m.bias, 0.0)
+
+
def normal_init_(m: nn.Module):
    """Normal-distribution initialization for Conv1d/Linear weights (std 0.01
    and 1e-3 respectively, zero bias) and constant init for BatchNorm1d
    (unit weight, zero bias). For use with ``nn.Module.apply``.
    """
    if isinstance(m, nn.BatchNorm1d):
        torch.nn.init.constant_(m.weight, 1)
        torch.nn.init.constant_(m.bias, 0)
        return
    if isinstance(m, (nn.Conv1d, nn.Linear)):
        # Conv layers get a wider distribution than Linear layers.
        std = 0.01 if isinstance(m, nn.Conv1d) else 1e-3
        torch.nn.init.normal_(m.weight, std=std)
        if m.bias is not None:
            torch.nn.init.constant_(m.bias, 0)
diff --git a/emote/nn/layers.py b/emote/nn/layers.py
new file mode 100644
index 00000000..38b765ea
--- /dev/null
+++ b/emote/nn/layers.py
@@ -0,0 +1,178 @@
+from __future__ import annotations
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from emote.nn.initialization import ortho_init_
+
+
class Conv2dEncoder(nn.Module):
    """Multi-layer 2D convolutional encoder.

    :param input_shape: (tuple[int, int, int]) The input image shape,
        this should be consistent with channels_last.
    :param channels: (list[int]) The number of channels for each conv
        layer.
    :param kernels: (list[int]) The kernel size for each conv layer.
    :param strides: (list[int]) The strides for each conv layer.
    :param padding: (list[int]) The padding.
    :param channels_last: (bool) Whether the input image has channels as
        the last dim, else first.
    :param activation: (torch.nn.Module) The activation function.
    :param flatten: (bool) Flattens the output into a vector.
    """

    def __init__(
        self,
        input_shape: tuple[int, int, int],
        channels: list[int],
        kernels: list[int],
        strides: list[int],
        padding: list[int],
        channels_last: bool = True,
        activation: torch.nn.Module = torch.nn.ReLU,
        flatten: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self._channels_last = channels_last
        # Internally the shape is always stored channels-first.
        if channels_last:
            self._img_shape_cwh = [input_shape[2], input_shape[0], input_shape[1]]
        else:
            self._img_shape_cwh = input_shape

        self._channels = channels
        self._kernels = kernels
        self._strides = strides
        self._padding = padding
        self.flatten = flatten

        num_layers = len(channels)
        # Prepend the input channel count so layer i maps
        # channels[i] -> channels[i + 1].
        channels = [self._img_shape_cwh[0]] + channels

        self._layers = torch.nn.ModuleList()
        for i in range(num_layers):
            self._layers.append(
                torch.nn.Conv2d(
                    channels[i],
                    channels[i + 1],
                    kernels[i],
                    stride=strides[i],
                    padding=padding[i],
                )
            )
            self._layers.append(activation())

        if self.flatten:
            self._layers.append(nn.Flatten())

        self.apply(ortho_init_)

    def forward(self, obs: torch.Tensor):
        x = obs
        if self._channels_last:
            # Convert channels-last input to the channels-first layout
            # that Conv2d expects.
            x = x.permute(0, 3, 1, 2)
        for layer in self._layers:
            x = layer(x)
        return x

    def get_encoder_output_size(self):
        """Calculate the output size of the conv encoder.

        :returns: the flat element count when ``flatten`` is set, else
            the (channels, dim1, dim2) shape of the final feature map.
        """
        # Fix: this docstring previously sat AFTER the first statement,
        # where it was a no-op string expression rather than documentation.
        curr_size_x, curr_size_y = self._img_shape_cwh[1], self._img_shape_cwh[2]
        for k, s, p in zip(self._kernels, self._strides, self._padding):
            # Standard conv output-size formula: ((in - k + 2p) // s) + 1.
            curr_size_x = ((curr_size_x - k + 2 * p) // s) + 1
            curr_size_y = ((curr_size_y - k + 2 * p) // s) + 1

        out_size = (self._channels[-1], curr_size_x, curr_size_y)

        if self.flatten:
            out_size = np.prod(out_size)

        return out_size
+
+
class Conv1dEncoder(nn.Module):
    """Multi-layer 1D convolutional encoder.

    :param input_shape: (tuple[int, int]) The input shape
    :param channels: (list[int]) The number of channels for each conv
        layer.
    :param kernels: (list[int]) The kernel size for each conv layer.
    :param strides: (list[int]) The strides for each conv layer.
    :param padding: (list[int]) The padding.
    :param activation: (torch.nn.Module) The activation function.
    :param flatten: (bool) Flattens the output into a vector.
    :param name: (str) Name of the encoder (default: "conv1d")
    :param channels_last: (bool) Whether the input has channels as the
        last dim, else first.
    """

    def __init__(
        self,
        input_shape: tuple[int, int],
        channels: list[int],
        kernels: list[int],
        strides: list[int],
        padding: list[int],
        activation: torch.nn.Module = torch.nn.ReLU,
        flatten: bool = True,
        name: str = "conv1d",
        channels_last: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self._channels_last = channels_last
        # Internally the shape is always stored channels-first.
        if channels_last:
            self._input_shape = [input_shape[1], input_shape[0]]
        else:
            self._input_shape = input_shape

        self._channels = channels
        self._kernels = kernels
        self._strides = strides
        self._padding = padding
        self.name = name
        self.flatten = flatten

        num_layers = len(channels)
        # Prepend the input channel count so layer i maps
        # channels[i] -> channels[i + 1].
        channels = [self._input_shape[0]] + channels

        self._layers = torch.nn.ModuleList()
        for i in range(num_layers):
            self._layers.append(
                torch.nn.Conv1d(
                    channels[i],
                    channels[i + 1],
                    kernels[i],
                    strides[i],
                    padding=padding[i],
                )
            )
            self._layers.append(activation())

        if self.flatten:
            self._layers.append(nn.Flatten())

        self.apply(ortho_init_)

    def forward(self, obs: torch.Tensor):
        x = obs
        if self._channels_last:
            # Convert channels-last input to the channels-first layout
            # that Conv1d expects.
            x = x.permute(0, 2, 1)
        for layer in self._layers:
            x = layer(x)
        return x

    def get_encoder_output_size(self):
        """Calculate the output size of the conv encoder.

        :returns: the flat element count when ``flatten`` is set, else
            the (channels, length) shape of the final feature map.
        """
        curr_size = self._input_shape[1]

        # Standard conv output-size formula: ((in - k + 2p) // s) + 1.
        for k, s, p in zip(self._kernels, self._strides, self._padding):
            curr_size = ((curr_size - k + 2 * p) // s) + 1

        out_size = (self._channels[-1], curr_size)

        if self.flatten:
            out_size = np.prod(out_size)

        return out_size
diff --git a/emote/optimizers.py b/emote/optimizers.py
new file mode 100644
index 00000000..0e06df83
--- /dev/null
+++ b/emote/optimizers.py
@@ -0,0 +1,100 @@
+# Adapted from
+# https://github.com/karpathy/minGPT/blob/3ed14b2cec0dfdad3f4b2831f2b4a86d11aef150/mingpt/model.py#L136
+# (MIT license)
+
+from __future__ import annotations
+
+from typing import Type
+
+import torch
+
+
def separate_modules_for_weight_decay(
    network: torch.nn.Module,
    whitelist_weight_modules: tuple[Type[torch.nn.Module], ...],
    blacklist_weight_modules: tuple[Type[torch.nn.Module], ...],
    layers_to_exclude: set[str] | None = None,
) -> tuple[set[str], set[str]]:
    """Partition the parameter names of ``network`` into two sets: parameters
    that should receive weight decay and parameters that should not.

    Biases never decay; weights of blacklisted module types do not decay;
    weights of whitelisted module types do; all parameters of modules named
    in ``layers_to_exclude`` are exempt. Parameters matching none of these
    rules appear in neither set.

    Args:
        network (torch.nn.Module): Network whose modules we want to separate.
        whitelist_weight_modules (tuple[Type[torch.nn.Module], ...]): Modules that should have weight decay applied to the weights.
        blacklist_weight_modules (tuple[Type[torch.nn.Module], ...]): Modules that should not have weight decay applied to the weights.
        layers_to_exclude (set[str] | None, optional): Names of layers that should be excluded. Defaults to None.

    Returns:
        tuple[set[str], set[str]]: Sets of parameter names with and without weight decay.
    """
    overlap = set(whitelist_weight_modules) & set(blacklist_weight_modules)
    assert not overlap, "Some modules are both whitelisted and blacklisted!"

    excluded = layers_to_exclude or set()
    decay: set[str] = set()
    no_decay: set[str] = set()

    for module_name, module in network.named_modules():
        for param_name, _ in module.named_parameters():
            full_name = f"{module_name}.{param_name}" if module_name else param_name

            if module_name in excluded or param_name.endswith("bias"):
                # Excluded layers and all biases are exempt from decay.
                no_decay.add(full_name)
            elif param_name.endswith("weight"):
                if isinstance(module, blacklist_weight_modules):
                    no_decay.add(full_name)
                elif isinstance(module, whitelist_weight_modules):
                    decay.add(full_name)

    return decay, no_decay
+
+
class ModifiedAdamW(torch.optim.AdamW):
    """Modifies AdamW (Adam with weight decay) to not apply weight decay on the
    bias and layer normalization weights, and optionally additional modules.

    .. note::
        Parameters whose module type appears in neither the whitelist nor
        the blacklist (and that are not biases or in an excluded layer)
        land in neither parameter group and are therefore NOT passed to
        the optimizer at all — make sure the two lists together cover
        every module type in ``network``.

    Args:
        network (torch.nn.Module): network
        lr (float): learning rate
        weight_decay (float): weight decay coefficient
        whitelist_weight_modules (tuple[Type[torch.nn.Module], ...], optional): params to get weight decay. Defaults to (torch.nn.Linear, ).
        blacklist_weight_modules (tuple[Type[torch.nn.Module], ...], optional): params to not get weight decay. Defaults to (torch.nn.LayerNorm, ).
        layers_to_exclude (set[str] | None, optional): set of names of additional layers to exclude, e.g. last layer of Q-network. Defaults to None.
    """

    def __init__(
        self,
        network: torch.nn.Module,
        lr: float,
        weight_decay: float,
        whitelist_weight_modules: tuple[Type[torch.nn.Module], ...] = (torch.nn.Linear,),
        blacklist_weight_modules: tuple[Type[torch.nn.Module], ...] = (torch.nn.LayerNorm,),
        layers_to_exclude: set[str] | None = None,
    ):
        # Split parameter names into decayed / non-decayed sets.
        decay, no_decay = separate_modules_for_weight_decay(
            network,
            whitelist_weight_modules,
            blacklist_weight_modules,
            layers_to_exclude,
        )

        # Map names back to the actual Parameter objects.
        param_dict = dict(network.named_parameters())

        # Two param groups: one with the configured decay, one with none.
        optim_groups = [
            {
                "params": [param_dict[pn] for pn in decay],
                "weight_decay": weight_decay,
            },
            {
                "params": [param_dict[pn] for pn in no_decay],
                "weight_decay": 0.0,
            },
        ]

        super().__init__(optim_groups, lr)
diff --git a/emote/proxies.py b/emote/proxies.py
new file mode 100644
index 00000000..09ffc605
--- /dev/null
+++ b/emote/proxies.py
@@ -0,0 +1,151 @@
+"""Proxies are bridges between the world the agent acts in and the algorithm
+training loop."""
+
+from __future__ import annotations
+
+from typing import Dict, Protocol
+
+import numpy as np
+import torch
+
+from torch import nn
+
+from emote.typing import AgentId, DictObservation, DictResponse, EpisodeState
+from emote.utils.spaces import MDPSpace
+
+
class AgentProxy(Protocol):
    """The interface between the agent in the game and the network used during
    training."""

    def __call__(
        self,
        observations: Dict[AgentId, DictObservation],
    ) -> Dict[AgentId, DictResponse]:
        """Take observations for the active agents and returns the relevant
        network output.

        :param observations: Observations for each currently active agent.
        :returns: Per-agent network responses.
        """
        # Fix: the parameter was previously misspelled "obserations".
        ...

    @property
    def policy(self) -> nn.Module:
        """The underlying policy network."""
        ...

    @property
    def input_names(self) -> tuple[str, ...]:
        """Names of the observation fields the proxy consumes."""
        ...

    @property
    def output_names(self) -> tuple[str, ...]:
        """Names of the response fields the proxy produces."""
        ...
+
+
class MemoryProxy(Protocol):
    """The interface between the agent in the game and the memory buffer the
    network trains from."""

    def add(
        self,
        observations: Dict[AgentId, DictObservation],
        responses: Dict[AgentId, DictResponse],
    ):
        """Store episodes in the memory buffer used for training.

        This is useful e.g. if the data collection is running from a
        checkpointed model running on another machine.

        :param observations: Per-agent observations to record.
        :param responses: The responses produced for those observations.
        """
        ...
+
+
class GenericAgentProxy(AgentProxy):
    """Observations are dicts that contain multiple input and output keys.

    For example, we might have a policy that takes in both "obs" and
    "goal" and outputs "actions". In order to be able to properly invoke
    the network it is the responsibility of this proxy to collate the
    inputs and decollate the outputs per agent.
    """

    def __init__(
        self,
        policy: nn.Module,
        device: torch.device,
        input_keys: tuple,
        output_keys: tuple,
        uses_logprobs: bool = True,
        spaces: MDPSpace | None = None,
    ):
        r"""Handle multi-input multi-output policy networks.

        Parameters:
            policy (nn.Module): The neural network policy that takes observations and returns actions.
            device (torch.device): The device to run the policy on.
            input_keys (tuple): Keys specifying what fields from the observation to pass to the policy.
            output_keys (tuple): Keys for the fields in the output dictionary that the policy is responsible for.
            uses_logprobs (bool): When True, the policy is expected to return a tuple whose
                element 1 is log-probabilities; that element is discarded before decollating.
            spaces (MDPSpace, optional): A utility for managing observation and action spaces, for validation.
        """
        self._policy = policy
        # Agents in these states have finished their episode and receive no action.
        self._end_states = [EpisodeState.TERMINAL, EpisodeState.INTERRUPTED]
        self.device = device
        self.input_keys = input_keys
        self.output_keys = output_keys
        self._spaces = spaces
        self._uses_logprobs = uses_logprobs

    def __call__(self, observations: dict[AgentId, DictObservation]) -> dict[AgentId, DictResponse]:
        """Runs the policy and returns the actions."""
        # The network takes observations of size batch x obs for each observation space.
        assert len(observations) > 0, "Observations must not be empty."

        # Only agents whose episode is still running are batched and evaluated.
        active_agents = [
            agent_id
            for agent_id, obs in observations.items()
            if obs.episode_state not in self._end_states
        ]

        # Collate: build one batched tensor per input key, ordered like input_keys.
        tensor_obs_list = [None] * len(self.input_keys)
        for input_key in self.input_keys:
            np_obs = np.array(
                [observations[agent_id].array_data[input_key] for agent_id in active_agents]
            )

            if self._spaces is not None:
                # Reshape flat per-agent data to the shape declared by the space.
                shape = (np_obs.shape[0],) + self._spaces.state.spaces[input_key].shape
                if shape != np_obs.shape:
                    np_obs = np.reshape(np_obs, shape)

            tensor_obs = torch.tensor(np_obs).to(self.device)
            index = self.input_keys.index(input_key)
            tensor_obs_list[index] = tensor_obs

        if self._uses_logprobs:
            outputs: tuple[any, ...] = self._policy(*tensor_obs_list)
            # we remove element 1 as we don't need the logprobs here
            outputs = outputs[0:1] + outputs[2:]
            outputs = {
                key: outputs[i].detach().cpu().numpy() for i, key in enumerate(self.output_keys)
            }
        else:
            # NOTE(review): this branch maps the SAME policy output tensor to
            # every output key — confirm that is intended for multi-output use.
            outputs = self._policy(*tensor_obs_list)
            outputs = {key: outputs.detach().cpu().numpy() for key in self.output_keys}

        # Decollate: row i of each batched output belongs to active_agents[i].
        agent_data = [
            (agent_id, DictResponse(list_data={}, scalar_data={})) for agent_id in active_agents
        ]

        for i, (_, response) in enumerate(agent_data):
            for k, data in outputs.items():
                response.list_data[k] = data[i]

        return dict(agent_data)

    @property
    def input_names(self):
        return self.input_keys

    @property
    def output_names(self):
        return self.output_keys

    @property
    def policy(self):
        return self._policy
diff --git a/emote/trainer.py b/emote/trainer.py
new file mode 100644
index 00000000..98fa1858
--- /dev/null
+++ b/emote/trainer.py
@@ -0,0 +1,145 @@
+import logging
+
+from itertools import count
+from typing import Any, Callable, Iterable, List, MutableMapping
+from weakref import ref
+
+from .callback import Callback
+from .utils import WeakReference
+
+
class StateDict(
    dict, MutableMapping[str, Any]
):  # TODO(singhblom) Split state dict into two - one persistable and one transient.
    """Wrapped around a dict allowing usage in a weakref.

    Plain ``dict`` instances cannot be weakly referenced; this subclass can.
    """

    def get_handle(self) -> WeakReference["StateDict"]:
        """Retrieve a weak handle to this state dict, with no promise of
        ownership or lifetime."""
        return ref(self)
+
+
class TrainingShutdownException(Exception):
    """Raised to signal an orderly end of the training loop."""

    pass
+
+
class Trainer:
    """The Trainer class manages the main training loop in emote.

    It does so by invoking a bunch of callbacks in a number of different
    places.
    """

    state: StateDict
    callbacks: List[Callback]
    dataloader: Iterable
    cycle_length: int

    def __init__(
        self,
        callbacks: List[Callback],
        dataloader: Iterable,
        batch_size_key: str = "batch_size",
    ):
        """
        :param callbacks: The callbacks to invoke, ordered by their ``_order``.
        :param dataloader: Yields the batches to train on.
        :param batch_size_key: Key under which each batch reports its size.
        """
        for cb in callbacks:
            assert isinstance(cb, Callback), f"{cb} is not a Callback"

        self.callbacks = sorted(callbacks, key=lambda cb: cb._order)
        # Only callbacks with a positive cycle receive begin_cycle/end_cycle.
        self._cyclic_callbacks = [
            cb for cb in self.callbacks if cb.cycle is not None and cb.cycle > 0
        ]
        self.dataloader = dataloader
        self.state = StateDict()
        self._batch_size_key = batch_size_key

    def train(self, shutdown_signal: Callable = None):
        """The main training loop.

        This method will wait until the memory is full enough to start
        sampling, and then start running cycles of backprops on batches
        sampled from the memory.

        :param shutdown_signal: A function that returns True if training
            should end, False otherwise.
        """
        shutdown_signal = shutdown_signal or (lambda: False)

        self.state["bp_samples"] = 0
        self.state["bp_step"] = 1

        try:
            # Callbacks may overwrite bp_step/bp_samples from a checkpoint.
            self._restore_state()
            self._begin_training()

        except TrainingShutdownException:
            logging.info("Training shutdown requested before training began")
            return

        except Exception as ex:
            raise Exception("Error in begin_training, aborting") from ex

        try:
            # Resume counting from the (possibly restored) bp_step.
            counter = count(self.state["bp_step"])
            for bp_step, batch in zip(counter, self.dataloader):
                self.state.update(batch)
                self.state["bp_step"] = bp_step
                self.state["bp_samples"] += self.state[self._batch_size_key]

                if shutdown_signal():
                    raise TrainingShutdownException

                self._begin_cycle(bp_step)
                self._begin_batch()
                self._backward()
                self._end_batch()
                self._end_cycle(bp_step)

        except TrainingShutdownException as ex:
            # Orderly shutdown: notify the callbacks, then return normally.
            self._end_training(ex)

        except Exception as ex:
            self._end_training(ex)
            raise ex

    def _restore_state(self):
        """Let callbacks merge checkpointed values into the state."""
        for cb in self.callbacks:
            if updated_state := cb.restore_state(**self.state):
                self.state.update(updated_state)

    def _begin_training(self):
        """Notify all callbacks that training is about to start."""
        for cb in self.callbacks:
            if updated_state := cb.begin_training(**self.state):
                self.state.update(updated_state)

    def _begin_cycle(self, bp_step):
        """Invoke begin_cycle on cyclic callbacks whose cycle starts now."""
        for cb in self._cyclic_callbacks:
            # Start cycles on 1st step of new cycle
            if (bp_step - 1) % cb.cycle == 0:
                if updated_state := cb.begin_cycle(**self.state):
                    self.state.update(updated_state)

    def _begin_batch(self):
        """Invoke begin_batch on all callbacks, folding results into state."""
        for cb in self.callbacks:
            if updated_state := cb.begin_batch(**self.state):
                self.state.update(updated_state)

    def _backward(self):
        """Invoke backward on all callbacks, folding results into state."""
        for cb in self.callbacks:
            if updated_state := cb.backward(**self.state):
                self.state.update(updated_state)

    def _end_batch(self):
        """Invoke end_batch on all callbacks, folding results into state."""
        for cb in self.callbacks:
            if updated_state := cb.end_batch(**self.state):
                self.state.update(updated_state)

    def _end_cycle(self, bp_step):
        """Invoke end_cycle on cyclic callbacks whose cycle ends now."""
        for cb in self._cyclic_callbacks:
            if bp_step % cb.cycle == 0:
                if updated_state := cb.end_cycle(**self.state):
                    self.state.update(updated_state)

    def _end_training(self, exception: Exception):
        """Notify all callbacks that training ended, passing the cause."""
        for cb in self.callbacks:
            if updated_state := cb.end_training(exception, **self.state):
                self.state.update(updated_state)
diff --git a/emote/typing.py b/emote/typing.py
new file mode 100644
index 00000000..dc124163
--- /dev/null
+++ b/emote/typing.py
@@ -0,0 +1,82 @@
+"""
+emote.typing
+============
+"""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from enum import Enum
+from typing import Callable, Dict, List, TypeAlias
+
+import torch
+
+from numpy.typing import ArrayLike
+
+
+RewardFnType: TypeAlias = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
+TermFnType = Callable[[torch.Tensor], torch.Tensor]
+
+# The AgentId is an application-defined integer
+AgentId = int
+# SingleAgentData is a single ndarray containing correlated data for one agent.
+SingleAgentData = ArrayLike
+# BatchedData is a concatenated set of arrays from multiple agents.
+# The shape of BatchedData will be [Number of Agents, *(shape of SingleAgentData)]
+BatchedData = ArrayLike
+
+# Input is a set of named inputs from one agent. We mainly use this for observations.
+InputSpace = str
+Input = Dict[InputSpace, SingleAgentData]
+# Input gathers inputs from multiple agents
+InputGroup = Dict[AgentId, Input]
+# InputBatch is the result of merging an InputGroup based on input name.
+InputBatch = Dict[InputSpace, BatchedData]
+
+# Output is a set of named outputs for one agent
+OutputSpace = str
+Output = Dict[OutputSpace, SingleAgentData]
# OutputGroup gathers outputs from multiple agents
+OutputGroup = Dict[AgentId, Output]
+# OutputBatch is the result of evaluating the neural network on an input batch, before unmerging.
+OutputBatch = Dict[OutputSpace, BatchedData]
+
+
class EpisodeState(Enum):
    """Lifecycle state of an agent's episode, reported with each
    observation."""

    # The agent expects an action back and can continue to at least one more state
    RUNNING = 0

    # The episode has ended due to external factors the agent has no ability to
    # affect - for example, the agent timing out or the game round ending.
    INTERRUPTED = 1

    # The episode has ended due to events the agent could have affected, and
    # should learn to understand.
    TERMINAL = 2

    # This is the first step of an agent's lifetime. Sending this multiple
    # times for one agent is an error.
    INITIAL = 3
+
+
+# In the future we might switch to supporting flat np.arrays here.
+FloatList = List[float]
+
+
@dataclass
class MetaData:
    """Auxiliary per-agent information carried alongside observations."""

    info: Dict[str, float]
    info_lists: Dict[str, FloatList]


@dataclass
class DictObservation:
    """A single agent's observation: rewards, episode state and named
    array data (plus optional metadata)."""

    rewards: Dict[str, float]
    episode_state: EpisodeState
    array_data: Dict[str, SingleAgentData]
    metadata: MetaData | None = None


@dataclass
class DictResponse:
    """The network's response for a single agent, as named lists and
    scalars."""

    list_data: Dict[str, FloatList]
    scalar_data: Dict[str, float]
diff --git a/emote/utils/BUILD b/emote/utils/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/emote/utils/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/emote/utils/__init__.py b/emote/utils/__init__.py
new file mode 100644
index 00000000..f27cdcf7
--- /dev/null
+++ b/emote/utils/__init__.py
@@ -0,0 +1,15 @@
+from .spaces import MDPSpace
+from .threading import AtomicContainer, AtomicInt, LockedResource
+from .timed_call import BlockTimers, TimedBlock
+from .weak_reference import WeakReference
+
+
+__all__ = [
+ "WeakReference",
+ "LockedResource",
+ "AtomicContainer",
+ "AtomicInt",
+ "TimedBlock",
+ "BlockTimers",
+ "MDPSpace",
+]
diff --git a/emote/utils/deprecated.py b/emote/utils/deprecated.py
new file mode 100644
index 00000000..44f48477
--- /dev/null
+++ b/emote/utils/deprecated.py
@@ -0,0 +1,55 @@
+""""""
+
+import functools
+import warnings
+
+from typing import Callable
+
+
def deprecated(
    original_function: Callable = None,
    *,
    reason: str = "",
    max_warn_count: int = 10,
    version: str = None,
) -> Callable:
    """Function decorator to deprecate an annotated function. Can be used both
    as a bare decorator, or with parameters to customize the display of the
    message. Emits a ``DeprecationWarning`` via ``warnings.warn``.

    :param original_function: Function to decorate. Automatically
        passed when used as a bare decorator.
    :param reason: Message to show. Function name is automatically
        added.
    :param max_warn_count: How many times we will warn for the same
        function
    :param version: Version since which the function is deprecated.
    :returns: the wrapped function
    """
    # Fix: `reason` now defaults to "" so the documented bare-decorator
    # usage (`@deprecated` without arguments) actually works.
    reason = f": {reason}" if reason else ""
    version = f" -- deprecated since version {version}" if version else ""

    def _decorate(function):
        warn_count = 0

        name = getattr(function, "__qualname__", function.__name__)
        message = f"Call to deprecated function '{name}'{reason}{version}."

        @functools.wraps(function)
        def _wrapper(*args, **kwargs):
            nonlocal warn_count
            if warn_count < max_warn_count:
                warnings.warn(
                    message,
                    DeprecationWarning,
                    stacklevel=2,
                )
                # Fix: previously incremented by zero, so max_warn_count
                # never took effect and the warning repeated forever.
                warn_count += 1

            return function(*args, **kwargs)

        return _wrapper

    if original_function:
        return _decorate(original_function)

    return _decorate
diff --git a/emote/utils/gamma_matrix.py b/emote/utils/gamma_matrix.py
new file mode 100644
index 00000000..c5262f3a
--- /dev/null
+++ b/emote/utils/gamma_matrix.py
@@ -0,0 +1,52 @@
+import torch
+
+
+# Taken from https://github.com/jackharmer/agency (MIT License)
+
+
+# Construct a gamma matrix for optimised discount calculations.
+# Using this in combination with the discount() function below
+# provides up to 100x speedup over a non gamma matrix variant.
+#
+# Gamma Matrix Form [roll_length+1, roll_length]:
+# [0.99^0, 0.0, 0.0 ]
+# [0.99^1, 0.99^0, 0.0 ]
+# [0.99^2, 0.99^1, 0.99^0]
+# [0.99^3, 0.99^2, 0.99^1]
+#
+#
+# This allow the discount to be calculated as a dot product of the
+# reward matrix and the gammaMatrix in one calculation across the whole
+# batch.
+#
+# Reward Matrix: [num_rolls, roll_length+1]
def make_gamma_matrix(gamma: float, roll_length: int):
    """Build the [roll_length + 1, roll_length] discount matrix.

    Column ``c`` contains the powers gamma^0 ... gamma^(roll_length - c)
    starting at row ``c``; entries above the diagonal are zero. Multiplying
    a reward matrix by this gives discounted returns in one matmul.
    """
    # Powers of gamma, computed in float32 to match downstream tensors.
    powers = torch.tensor(gamma, dtype=torch.float32) ** torch.arange(roll_length + 1)
    matrix = torch.zeros((roll_length + 1, roll_length), dtype=torch.float32)
    for col in range(roll_length):
        matrix[col:, col] = powers[: roll_length + 1 - col]
    return matrix
+
+
+# Calculate the discounted return using a gamma matrix, see above.
+#
+# Reward Matrix * Gamma Matrix = Discount Matrix
+# [num_rolls, roll_length+1] [roll_length+1, roll_length] [num_rolls, roll_length]
+#
+# [ r0, r1, ..., v] [0.99^0, 0.0 ]
+# [ r0, r1, ..., v] * [0.99^1, 0.99^0]
+# [ r0, r1, ..., v] [0.99^2, 0.99^1]
def discount(rewards: torch.tensor, values: torch.tensor, gamma_matrix: torch.tensor):
    """Compute discounted returns via one matmul with a gamma matrix.

    ``rewards`` is [num_rolls, roll_length], ``values`` the bootstrap
    column [num_rolls, 1]; see ``make_gamma_matrix`` for the matrix shape.
    Returns a column vector of shape [num_rolls * roll_length, 1].
    """
    # [num_rolls, roll_length + 1]: rewards with the bootstrap value appended.
    reward_matrix = torch.cat((rewards, values), dim=1)
    # [num_rolls, roll_length]: each column is a discounted return.
    discounted = reward_matrix @ gamma_matrix
    # Flatten to a single column vector.
    return discounted.reshape(-1, 1)
+
+
def split_rollouts(data: torch.tensor, rollout_len: int):
    """Reshape flat [N, ...] data into [N // rollout_len, rollout_len, ...]."""
    num_rolls = data.shape[0] // rollout_len
    return data.view(num_rolls, rollout_len, *data.shape[1:])
diff --git a/emote/utils/math.py b/emote/utils/math.py
new file mode 100644
index 00000000..540ddb16
--- /dev/null
+++ b/emote/utils/math.py
@@ -0,0 +1,50 @@
+# This file contains codes and texts that are copied from
+# https://github.com/facebookresearch/mbrl-lib
+
+import torch
+
+
def truncated_linear(min_x: float, max_x: float, min_y: float, max_y: float, x: float) -> float:
    """Truncated linear function.

    Linearly interpolates from ``min_y`` at ``x = min_x`` to ``max_y`` at
    ``x = max_x`` and clamps outside that range:

    \\[
    \\begin{cases}
    f_1(x) = min_y + \\frac{x - min_x}{max_x - min_x} \\, (max_y - min_y) \\\\
    f(x) = \\min(max_y, \\max(min_y, f_1(x)))
    \\end{cases}
    \\]
    If max_x - min_x < 1e-10, then it behaves as the constant \\(f(x) = max_y\\)
    """
    if max_x - min_x < 1e-10:
        return max_y
    if x <= min_x:
        y: float = min_y
    else:
        # Fractional position of x within [min_x, max_x], capped at 1.
        dx = (x - min_x) / (max_x - min_x)
        dx = min(dx, 1.0)
        y = dx * (max_y - min_y) + min_y
    return y
+
+
def truncated_normal_(tensor: torch.Tensor, mean: float = 0, std: float = 1) -> torch.Tensor:
    """Samples from a truncated normal distribution in-place.

    Fills ``tensor`` with normal samples, then repeatedly redraws any
    entries falling outside mean ± 2*std until none remain.

    Arguments:
        tensor (tensor): the tensor in which sampled values will be stored.
        mean (float): the desired mean (default = 0).
        std (float): the desired standard deviation (default = 1).

    Returns:
        (tensor): the tensor with the stored values. Note that this modifies the input tensor
        in place, so this is just a pointer to the same object.
    """
    torch.nn.init.normal_(tensor, mean=mean, std=std)
    lower, upper = mean - 2 * std, mean + 2 * std
    while True:
        out_of_bounds = torch.logical_or(tensor < lower, tensor > upper)
        num_bad = int(torch.sum(out_of_bounds).item())
        if num_bad == 0:
            return tensor
        # Redraw only the offending entries.
        tensor[out_of_bounds] = torch.normal(
            mean, std, size=(num_bad,), device=tensor.device
        )
diff --git a/emote/utils/model.py b/emote/utils/model.py
new file mode 100644
index 00000000..44a3024b
--- /dev/null
+++ b/emote/utils/model.py
@@ -0,0 +1,21 @@
+import torch
+
+from torch import nn
+
+
def to_numpy(x: torch.Tensor):
    """Convert a tensor to a NumPy array: detached and moved to the CPU."""
    return x.detach().to("cpu").numpy()
+
+
def normal_init(m: nn.Module):
    """Small-std normal initialization, for use with ``module.apply``.

    Conv1d weights use std=0.01, Linear weights std=1e-3 (biases zeroed
    when present); BatchNorm1d is reset to identity (weight=1, bias=0).

    NOTE(review): duplicates ``emote.nn.initialization.normal_init_`` —
    consider consolidating to a single definition.
    """
    if isinstance(m, nn.Conv1d):
        torch.nn.init.normal_(m.weight, std=0.01)
        if m.bias is not None:
            torch.nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.BatchNorm1d):
        torch.nn.init.constant_(m.weight, 1)
        torch.nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.Linear):
        torch.nn.init.normal_(m.weight, std=1e-3)
        if m.bias is not None:
            torch.nn.init.constant_(m.bias, 0)
diff --git a/emote/utils/spaces.py b/emote/utils/spaces.py
new file mode 100644
index 00000000..7b360496
--- /dev/null
+++ b/emote/utils/spaces.py
@@ -0,0 +1,23 @@
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+import numpy as np
+import torch
+
+
@dataclass
class BoxSpace:
    """A fixed-shape numeric space described by a dtype and a shape."""

    dtype: torch.dtype | np.dtype
    shape: Tuple[int]


@dataclass
class DictSpace:
    """A named collection of box spaces."""

    spaces: Dict[str, BoxSpace]


@dataclass
class MDPSpace:
    """The spaces defining an MDP: rewards, actions and the (dict) state."""

    rewards: BoxSpace
    actions: BoxSpace
    state: DictSpace
diff --git a/emote/utils/threading.py b/emote/utils/threading.py
new file mode 100644
index 00000000..99fd8640
--- /dev/null
+++ b/emote/utils/threading.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+
+"""Thread-related utilities and tools."""
+
+import threading
+
+from time import perf_counter
+from typing import Any, Generic, TypeVar
+
+
+T = TypeVar("T")
+
+
class LockedResource(Generic[T]):
    """Context manager pairing a lock with the resource it guards.

    Access to the resource is only granted while the lock is held. Works
    well when paired with [`empyc.types.Ref`] for primitive types as well.

    *Usage:*
    ```python
    resource = LockedResource([])
    with resource as inner_list:
        inner_list.append(1)
    ```
    """

    def __init__(self, data: T):
        """Create a new LockedResource, with the provided data.

        :param data: The data to lock
        """
        self._mutex = threading.Lock()
        self._resource = data

    def __enter__(self) -> T:
        """Acquire the lock and hand out the guarded data.

        :returns: The underlying data object
        """
        self._mutex.acquire()
        return self._resource

    def __exit__(self, _exc_type, _exc_value, _traceback):
        """Release the lock; exceptions raised inside the scope propagate."""
        self._mutex.release()

    def swap(self, new_resource: T) -> T:
        """Replace the contained resource with the provided new resource,
        returning the previous resource. This operation is atomic.

        :param new_resource: The resource to guard from now on
        :returns: The previously guarded data
        """
        with self._mutex:
            previous, self._resource = self._resource, new_resource
        return previous
+
+
class AtomicContainer:
    """Container that allows atomic ``set``, ``get``, ``take`` operations."""

    def __init__(self, initial_data: Any = None):
        self._lock = threading.Lock()
        self._data = initial_data

    def take(self) -> Any:
        """Atomically remove and return the value, leaving ``None`` behind."""
        with self._lock:
            value = self._data
            self._data = None
        return value

    def read(self) -> Any:
        """Return the current value without removing it."""
        with self._lock:
            return self._data

    def set(self, value: Any):
        """Atomically replace the stored value."""
        with self._lock:
            self._data = value
+
+
class AtomicInt:
    """Integer wrapper whose mutations are serialized by a lock."""

    def __init__(self, value: int = 0):
        self._lock = threading.Lock()
        self._value = value

    def __iadd__(self, value: int):
        """Atomic in-place addition (``a += n``)."""
        with self._lock:
            self._value += value
        return self

    def __isub__(self, value: int):
        """Atomic in-place subtraction (``a -= n``)."""
        with self._lock:
            self._value -= value
        return self

    def swap(self, value: int):
        """Atomically store ``value`` and return the previous value."""
        with self._lock:
            previous = self._value
            self._value = value
        return previous

    def increment(self, value: int = 1):
        """Increments the integer and returns the previous value."""
        with self._lock:
            previous = self._value
            self._value += value
        return previous
+
+
class TracedLock:
    """Lock wrapper that accumulates how long callers waited to acquire the
    lock and how long they spent holding it.

    Use as a context manager in place of a plain ``threading.Lock``.
    """

    def __init__(self, lock_class=threading.Lock):
        self._lock = lock_class()
        # Timestamp of the most recent successful acquisition.
        self._current_scope_exec_start = None
        # Total seconds spent waiting to acquire the lock.
        self._accumulated_block_time = 0.0
        # Total seconds spent inside locked scopes.
        self._accumulated_exec_time = 0.0

    def __enter__(self):
        try_lock_start = perf_counter()
        self._lock.acquire()
        now = perf_counter()
        # Time between requesting the lock and it being granted.
        self._accumulated_block_time += now - try_lock_start
        self._current_scope_exec_start = now
        return self

    def __exit__(self, *args, **kwargs):
        # Release first so waiters are unblocked before we do bookkeeping.
        self._lock.release()
        self._accumulated_exec_time += perf_counter() - self._current_scope_exec_start
diff --git a/emote/utils/timed_call.py b/emote/utils/timed_call.py
new file mode 100644
index 00000000..3ef18aa7
--- /dev/null
+++ b/emote/utils/timed_call.py
@@ -0,0 +1,142 @@
+"""Simple block-based timers using Welford's Online Algorithm to approximate
+mean and variance.
+
+Usage:
+```python
+
+timer = TimedBlock()
+
+for _ in range(10):
+ with timer():
+ sleep(1)
+
+print(time.sleep())
+
+# (1.000013, 1.3e-5)
+"""
+
+import time
+
+from abc import ABC, abstractmethod
+from collections import defaultdict, deque
+from dataclasses import dataclass, field
+from typing import Dict, Tuple, Type
+
+import numpy as np
+
+
class StatisticsAccumulator(ABC):
    """Interface for a statistics integrator.

    Implementations fold a stream of samples into a running estimate of
    mean and variance.
    """

    @abstractmethod
    def add(self, value: float):
        """Add the `value` to the running statistics.

        :param value: the sample to integrate
        """
        ...

    @abstractmethod
    def current(self) -> Tuple[float, float]:
        """Returns the statistics of the observed samples so far.

        :returns: a tuple (mean, variance)
        """
        ...
+
+
@dataclass
class WelfordAccumulator(StatisticsAccumulator):
    """Implements Welford's Online Algorithm for single-pass variance and
    mean."""

    count: int = 0
    mean: float = 0.0
    differences: float = 0.0

    def add(self, value: float):
        """Add the `value` to the running statistics.

        :param value: the sample to integrate
        """
        self.count += 1
        step = value - self.mean
        self.mean = self.mean + step / self.count
        # Accumulate the sum of squared differences (Welford's M2 term).
        self.differences += step * (value - self.mean)

    def current(self) -> Tuple[float, float]:
        """Returns the current values of the Welford algorithm.

        :returns: a tuple (mean, population variance); NaNs when empty
        """
        if not self.count:
            nan = float("nan")
            return nan, nan
        return self.mean, self.differences / self.count
+
+
@dataclass
class MovingWindowAccumulator(StatisticsAccumulator):
    """Statistics over a sliding window of the most recent 100 samples."""

    values: deque = field(default_factory=lambda: deque(maxlen=100))

    def add(self, value: float):
        """Add the `value` to the running statistics.

        :param value: the sample to integrate
        """
        self.values.append(value)

    def current(self) -> Tuple[float, float]:
        """Returns the current statistics.

        :returns: a tuple (mean, variance); NaNs when no samples exist
        """
        if not self.values:
            return float("nan"), float("nan")
        return np.mean(self.values), np.var(self.values)
+
+
class TimedBlock:
    """Used to track the performance statistics of a block of code, in terms of
    execution time.

    Use as a context manager: each ``with`` entry/exit records one sample.
    """

    def __init__(self, tracker_type: Type[StatisticsAccumulator] = MovingWindowAccumulator):
        """Create a new timed block instance.

        :param tracker_type: The statistics integrator to use. Defaults
            to MovingWindowAccumulator.
        """
        self._tracker = tracker_type()
        self._start = None

    def __enter__(self):
        self._start = time.perf_counter()

    def __exit__(self, *args):
        # Record the elapsed wall-clock time for this scope.
        self._tracker.add(time.perf_counter() - self._start)

    def mean(self) -> float:
        """Retrieve the mean execution time."""
        return self._tracker.current()[0]

    def var(self):
        """Retrieve the variance of the execution time."""
        return self._tracker.current()[1]

    def stats(self):
        """Retrieve the mean and the variance of execution time."""
        return self._tracker.current()
+
+
class BlockTimers:
    """A named collection of ``TimedBlock``s, created lazily on first use."""

    def __init__(self, tracker_type: Type[StatisticsAccumulator] = MovingWindowAccumulator):
        """
        :param tracker_type: Statistics integrator used by each new timer.
        """
        self._timers: Dict[str, TimedBlock] = defaultdict(lambda: TimedBlock(tracker_type))

    def scope(self, name: str) -> TimedBlock:
        """Return (creating if needed) the timer registered under ``name``."""
        return self._timers[name]

    def stats(self):
        """Return ``{name: (mean, variance)}`` for all registered timers."""
        return {name: timer.stats() for name, timer in self._timers.items()}
diff --git a/emote/utils/weak_reference.py b/emote/utils/weak_reference.py
new file mode 100644
index 00000000..5a6d3b55
--- /dev/null
+++ b/emote/utils/weak_reference.py
@@ -0,0 +1,11 @@
+"""A class that contains a typed weak reference."""
+
+from typing import Generic, TypeVar
+from weakref import ReferenceType
+
+
+T = TypeVar("T")
+
+
+class WeakReference(ReferenceType, Generic[T]):
+    """A typed weak reference.
+
+    Behaves exactly like `weakref.ReferenceType`; the `Generic[T]` base only
+    adds static typing so `WeakReference[Foo]` can be used in annotations.
+    """
diff --git a/experiments/gym/BUILD b/experiments/gym/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/experiments/gym/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/experiments/gym/__init__.py b/experiments/gym/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/experiments/gym/train_carracing.py b/experiments/gym/train_carracing.py
new file mode 100644
index 00000000..23babd6e
--- /dev/null
+++ b/experiments/gym/train_carracing.py
@@ -0,0 +1,212 @@
+import argparse
+import time
+
+from dataclasses import dataclass
+from functools import partial
+from typing import Tuple
+
+import numpy as np
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper
+from tests.gym.collector import ThreadedGymCollector
+from torch import nn
+from torch.optim import Adam
+from torch.utils.tensorboard import SummaryWriter
+
+from emote import Trainer
+from emote.algorithms.sac import AlphaLoss, PolicyLoss, QLoss, QTarget, VisionAgentProxy
+from emote.callback import Callback
+from emote.callbacks.logging import TensorboardLogger
+from emote.env.box2d import make_vision_box2d_env
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsNStepTable
+from emote.mixins.logging import LoggingMixin
+from emote.nn import GaussianPolicyHead
+from emote.nn.action_value_mlp import SharedEncoderActionValueNet
+from emote.nn.initialization import ortho_init_, xavier_uniform_init_
+from emote.nn.layers import Conv2dEncoder
+
+
+class Policy(nn.Module):
+    """Gaussian policy head on top of a (shared) vision encoder.
+
+    The encoder is shared with the Q-networks; only the MLP stack and the
+    policy head are exposed via `non_shared_parameters` so the policy loss
+    does not train the shared encoder.
+    """
+
+    def __init__(self, shared_enc, num_obs, num_actions, hidden_dims):
+        """
+        :param shared_enc: encoder module shared with the critics.
+        :param num_obs: flattened size of the encoder output.
+        :param num_actions: dimensionality of the action space.
+        :param hidden_dims: widths of the MLP layers between encoder and head.
+        """
+        super().__init__()
+        self.shared_enc = shared_enc
+        # MLP stack: one Linear+ReLU pair per consecutive (n_in, n_out) pair.
+        self.mlp_encoder = nn.Sequential(
+            *[
+                nn.Sequential(nn.Linear(n_in, n_out), nn.ReLU())
+                for n_in, n_out in zip([num_obs] + hidden_dims, hidden_dims)
+            ],
+        )
+        self.policy = GaussianPolicyHead(
+            hidden_dims[-1],
+            num_actions,
+        )
+
+        self.mlp_encoder.apply(ortho_init_)
+        # Small gain keeps the initial policy outputs close to zero.
+        self.policy.apply(partial(xavier_uniform_init_, gain=0.01))
+
+    def forward(self, obs):
+        """Return (action sample, clamped log-probability) for `obs`."""
+        x = self.shared_enc(obs)
+        x = self.mlp_encoder(x)
+        sample, log_prob = self.policy(x)
+        # Clamp mirrors the stability fix used in the lunar-lander experiment.
+        log_prob = log_prob.clamp(min=-2)
+        return sample, log_prob
+
+    def non_shared_parameters(self):
+        # ** This is critical for training! **
+        # Prevent the policy loss from training the shared encoder.
+        return list(self.mlp_encoder.parameters()) + list(self.policy.parameters())
+
+
+class ImageLoggerCallback(LoggingMixin, Callback):
+    """Callback that logs the first observation image of every batch."""
+
+    def __init__(self):
+        super().__init__()
+        # Receive batches from the default data group.
+        self.data_group = "default"
+
+    def begin_batch(self, observation):
+        # Only the first image of the batch is logged, keeping logging cheap.
+        self.log_image("images/obs", observation["obs"][0])
+
+
+@dataclass
+class Config:
+    """Hyper-parameters for the CarRacing SAC experiment.
+
+    NOTE: the un-annotated attributes (hidden_dims, channels, kernels,
+    strides, padding) are plain class attributes, not dataclass fields --
+    they are shared between instances and are not settable via __init__.
+    """
+
+    device: str = "cuda"
+    env_name: str = "CarRacing-v1"
+    hidden_dims = [512, 512]
+    batch_size: int = 1000
+    rollout_len: int = 40
+    learning_rate: float = 2e-3
+    n_env: int = 10
+    max_grad_norm: float = 1.0
+    init_alpha: float = 1.0
+    max_memory_size: int = 100_000
+    max_alpha: float = 10.0
+    # Conv encoder configuration (one entry per conv layer).
+    input_shape: Tuple[int, int, int] = (84, 84, 3)
+    channels = [16, 16, 32, 32]
+    kernels = [3, 3, 3, 3]
+    strides = [2, 2, 2, 2]
+    padding = [1, 1, 1, 1]
+
+
+def train_carracing(args):
+    """Train SAC with a shared vision encoder on the CarRacing environment.
+
+    :param args: parsed CLI arguments; `log_dir` and `name` are used to
+        build the tensorboard log path.
+    """
+    cfg = Config()
+
+    device = torch.device(cfg.device)
+
+    # Create box2d vector env environment wrapper.
+    env = DictGymWrapper(
+        AsyncVectorEnv([make_vision_box2d_env(cfg.env_name, rank) for rank in range(cfg.n_env)])
+    )
+    num_actions = env.dict_space.actions.shape[0]
+
+    # Build the networks.
+    shared_conv_enc = Conv2dEncoder(
+        input_shape=cfg.input_shape,
+        channels=cfg.channels,
+        kernels=cfg.kernels,
+        strides=cfg.strides,
+        padding=cfg.padding,
+    )
+    flat_enc_out_size = shared_conv_enc.get_encoder_output_size(flatten=True)
+
+    # The same (flattened) conv encoder instance is shared by both critics
+    # and the policy.
+    flat_shared_conv_enc = nn.Sequential(shared_conv_enc, nn.Flatten())
+
+    q1 = SharedEncoderActionValueNet(
+        flat_shared_conv_enc, flat_enc_out_size, num_actions, cfg.hidden_dims
+    )
+    q2 = SharedEncoderActionValueNet(
+        flat_shared_conv_enc, flat_enc_out_size, num_actions, cfg.hidden_dims
+    )
+
+    policy = Policy(flat_shared_conv_enc, flat_enc_out_size, num_actions, cfg.hidden_dims)
+
+    # Log of the SAC entropy temperature; trained by AlphaLoss below.
+    ln_alpha = torch.tensor(np.log(cfg.init_alpha), requires_grad=True, device=device)
+
+    q1 = q1.to(device)
+    q2 = q2.to(device)
+    policy = policy.to(device)
+
+    # Create the loss callbacks.
+    logged_cbs = [
+        QLoss(
+            name="q1",
+            q=q1,
+            opt=Adam(q1.parameters(), lr=cfg.learning_rate),
+            max_grad_norm=cfg.max_grad_norm,
+        ),
+        QLoss(
+            name="q2",
+            q=q2,
+            opt=Adam(q2.parameters(), lr=cfg.learning_rate),
+            max_grad_norm=cfg.max_grad_norm,
+        ),
+        PolicyLoss(
+            pi=policy,
+            ln_alpha=ln_alpha,
+            q=q1,
+            # Only non-shared parameters are optimized here, so the shared
+            # conv encoder is trained by the critic losses alone.
+            opt=Adam(policy.non_shared_parameters(), lr=cfg.learning_rate),
+            max_grad_norm=cfg.max_grad_norm,
+        ),
+        AlphaLoss(
+            pi=policy,
+            ln_alpha=ln_alpha,
+            opt=Adam([ln_alpha], lr=cfg.learning_rate),
+            n_actions=num_actions,
+            max_grad_norm=cfg.max_grad_norm,
+            max_alpha=cfg.max_alpha,
+        ),
+        QTarget(
+            pi=policy,
+            ln_alpha=ln_alpha,
+            q1=q1,
+            q2=q2,
+            roll_length=cfg.rollout_len,
+            reward_scale=0.1,
+        ),
+        ImageLoggerCallback(),
+    ]
+
+    memory_table = DictObsNStepTable(
+        spaces=env.dict_space,
+        use_terminal_column=False,
+        maxlen=cfg.max_memory_size,
+        device=device,
+    )
+
+    # Add a gym collector callback
+    logged_cbs.append(
+        ThreadedGymCollector(
+            env,
+            VisionAgentProxy(policy, device=device),
+            TableMemoryProxy(memory_table, use_terminal=False),
+            warmup_steps=cfg.batch_size * 3,
+            render=False,
+        ),
+    )
+
+    # Add a tensorboard logger callback.
+    callbacks = logged_cbs + [
+        TensorboardLogger(
+            logged_cbs,
+            SummaryWriter(log_dir=args.log_dir + "/" + args.name + "_{}".format(time.time())),
+            100,
+        ),
+    ]
+
+    # Create the memory loader and then train.
+    dataloader = MemoryLoader(
+        memory_table, cfg.batch_size // cfg.rollout_len, cfg.rollout_len, "batch_size"
+    )
+    trainer = Trainer(callbacks, dataloader)
+    trainer.train()
+
+
+if __name__ == "__main__":
+    # CLI entry point: only the experiment name and log directory are
+    # configurable; all other hyper-parameters live in Config.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--name", type=str, default="carracing-v1")
+    parser.add_argument("--log_dir", type=str, default="/mnt/mllogs/emote/carracing")
+    args = parser.parse_args()
+
+    train_carracing(args)
diff --git a/experiments/gym/train_dqn_cartpole.py b/experiments/gym/train_dqn_cartpole.py
new file mode 100644
index 00000000..8e6b56c0
--- /dev/null
+++ b/experiments/gym/train_dqn_cartpole.py
@@ -0,0 +1,381 @@
+import argparse
+import math
+import random
+import time
+
+import gymnasium as gym
+import numpy as np
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper
+from tests.gym.collector import ThreadedGymCollector
+from torch import nn
+from torch.optim import Adam
+from torch.utils.tensorboard import SummaryWriter
+
+from emote import Trainer
+from emote.algorithms.dqn import QLoss, QTarget
+from emote.callbacks.checkpointing import Checkpointer
+from emote.callbacks.generic import BackPropStepsTerminator
+from emote.callbacks.logging import TensorboardLogger
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsNStepTable
+from emote.mixins.logging import LoggingMixin
+from emote.proxies import GenericAgentProxy
+from emote.utils.spaces import BoxSpace, DictSpace, MDPSpace
+
+
+def _make_env():
+    """Create the environment for the experiment, the environment is created in
+    a thunk to avoid creating multiple environments in the same process.
+
+    This is important for the vectorized environments.
+
+    Returns:
+        (Callable[[], gym.Env]): The thunk that creates the environment
+    """
+
+    def _thunk():
+        env = gym.make("CartPole-v1")
+        # Stack 3 frames and flatten them into a single observation vector.
+        env = gym.wrappers.FrameStack(env, 3)
+        env = gym.wrappers.FlattenObservation(env)
+        return env
+
+    return _thunk
+
+
+class QNet(nn.Module):
+    """Q-Network class for Q-Learning. It takes observations and returns
+    Q-values for actions.
+
+    Attributes:
+        network (nn.Sequential): Neural network for computing Q-values.
+
+    Args:
+        num_obs (int): Dimensionality of observations.
+        num_actions (int): Number of possible actions.
+        hidden_dims (list of int): Dimensions of hidden layers.
+    """
+
+    def __init__(self, num_obs, num_actions, hidden_dims):
+        super(QNet, self).__init__()
+
+        layers = []
+        input_dim = num_obs
+
+        # Hidden stack: one Linear + ReLU pair per entry in hidden_dims.
+        for hidden_dim in hidden_dims:
+            layers.append(nn.Linear(input_dim, hidden_dim))
+            layers.append(nn.ReLU())
+            input_dim = hidden_dim
+
+        # Output layer: one Q-value per action, no activation.
+        layers.append(nn.Linear(input_dim, num_actions))
+
+        self.network = nn.Sequential(*layers)
+
+    def forward(self, obs):
+        """Forward pass for the Q-Network.
+
+        Args:
+            obs (Tensor): Observations.
+
+        Returns:
+            Tensor: Q-values for each action.
+        """
+        return self.network(obs)
+
+
+class DQNPolicy(nn.Module):
+    """DQN Policy class to handle action selection with epsilon-greedy
+    strategy.
+
+    Attributes:
+        q_net (QNet): Q-Network to evaluate Q-values.
+        initial_epsilon (float): Initial value of epsilon in epsilon-greedy.
+        target_epsilon (float): Target value of epsilon.
+        step_count (int): Counter for steps taken.
+        epsilon_decay_duration (int): Steps over which epsilon is decayed.
+        log_epsilon (bool): Flag to log epsilon values.
+
+    Args:
+        q_net (QNet): Q-Network.
+        epsilon_range (list of float): Initial and target epsilon for epsilon-greedy.
+        epsilon_decay_duration (int): Number of steps over which epsilon will decay.
+        log_epsilon (bool): Whether to log epsilon values or not.
+    """
+
+    def __init__(
+        self, q_net, epsilon_range=[0.9, 0.05], epsilon_decay_duration=10_000, log_epsilon=True
+    ):
+        super(DQNPolicy, self).__init__()
+        self.q_net = q_net
+
+        self.initial_epsilon = epsilon_range[0]
+        self.target_epsilon = epsilon_range[1]
+        # Incremented once per forward() call (one vectorized env step),
+        # not once per individual environment.
+        self.step_count = 0
+        self.epsilon_decay_duration = epsilon_decay_duration
+        self.log_epsilon = log_epsilon
+
+    # Returns the index of the chosen action
+    def forward(self, state):
+        """Forward pass for action selection.
+
+        Args:
+            state (Tensor): The state observations.
+
+        Returns:
+            Tensor: Indices of chosen actions for each environment.
+        """
+        with torch.no_grad():
+            # Exponentially decay epsilon from initial_epsilon towards
+            # target_epsilon with the number of forward() calls.
+            epsilon = self.target_epsilon + (self.initial_epsilon - self.target_epsilon) * math.exp(
+                -1.0 * self.step_count / self.epsilon_decay_duration
+            )
+
+            self.step_count += 1
+            # Periodically print epsilon while it is still meaningfully
+            # above its target value.
+            if (
+                self.step_count % 50_000 == 0
+                and self.log_epsilon
+                and epsilon > self.target_epsilon + 0.01
+            ):
+                print("Epsilon: ", epsilon)
+
+            q_values = self.q_net(state)  # Shape should be (num_envs, action_dim)
+            num_envs, action_dim = q_values.shape
+            actions = []
+
+            # Independent epsilon-greedy choice per environment.
+            for i in range(num_envs):
+                if np.random.rand() < epsilon:
+                    action_idx = random.randint(0, action_dim - 1)
+                else:
+                    action_idx = q_values[i].argmax().item()
+                actions.append(action_idx)
+        return torch.tensor(actions)
+
+
+def create_memory(
+    space: MDPSpace,
+    memory_size: int,
+    len_rollout: int,
+    batch_size: int,
+    data_group: str,
+    device: torch.device,
+):
+    """Creates memory and data_loader for the RL training.
+
+    Arguments:
+        space (MDPSpace): the MDP space
+        memory_size (int): the maximum length of memory
+        len_rollout (int): the rollout size for the NStepTable
+        batch_size (int): batch size (split into batch_size // len_rollout rollouts)
+        data_group (str): the data group for uploading the data
+        device (torch.device): the device to upload the data
+    Returns:
+        (tuple[TableMemoryProxy, MemoryLoader]): A proxy for the memory and a dataloader
+    """
+    # Create the memory
+    table = DictObsNStepTable(
+        spaces=space,
+        use_terminal_column=False,
+        maxlen=memory_size,
+        device=device,
+    )
+    # The memory proxy is used to upload the data to the memory
+    memory_proxy = TableMemoryProxy(table=table, use_terminal=False)
+    # The data loader is used to sample the data from the memory
+    data_loader = MemoryLoader(
+        table=table,
+        rollout_count=batch_size // len_rollout,
+        rollout_length=len_rollout,
+        size_key="batch_size",
+        data_group=data_group,
+    )
+    return memory_proxy, data_loader
+
+
+def create_complementary_callbacks(
+    args,
+    logged_cbs: list[LoggingMixin],
+    cbs_name_to_checkpoint: list[str] = None,
+):
+    """The function creates the supplementary callbacks for the training and
+    adds them to the callback lists and returns the list.
+
+    Arguments:
+        args: input args
+        logged_cbs (list[Callback]): the list of callbacks
+        cbs_name_to_checkpoint (list[str]): the names of callbacks to
+            checkpoint; when None or empty, no Checkpointer is added
+    Returns:
+        (list[Callback]): the full list of callbacks for the training
+    """
+    # The logger callback is used for logging the training progress
+    logger = TensorboardLogger(
+        logged_cbs,
+        SummaryWriter(log_dir=args.log_dir + "/" + args.name + "_{}".format(time.time())),
+        100,
+    )
+
+    # Terminates the training after a certain number of backprop steps
+    bp_step_terminator = BackPropStepsTerminator(bp_steps=args.bp_steps)
+    # Callbacks to be used during training
+    callbacks = logged_cbs + [logger, bp_step_terminator]
+
+    if cbs_name_to_checkpoint:
+        # The checkpointer exports the model weights to the checkpoint directory
+        checkpointer = Checkpointer(
+            callbacks=[
+                cb for cb in logged_cbs if hasattr(cb, "name") and cb.name in cbs_name_to_checkpoint
+            ],
+            run_root=args.checkpoint_dir,
+            checkpoint_interval=args.checkpoint_interval,
+        )
+        callbacks += [checkpointer]
+
+    return callbacks
+
+
+def main(args):
+    """Run DQN training on CartPole with the provided CLI arguments."""
+    # Create the environment
+    env = DictGymWrapper(AsyncVectorEnv([_make_env() for _ in range(args.num_envs)]))
+    device = torch.device(args.device)
+
+    # Define the space in order to create the memory
+    input_shapes = {k: v.shape for k, v in env.dict_space.state.spaces.items()}
+    output_shapes = {"actions": env.dict_space.actions.shape}
+    action_shape = output_shapes["actions"]
+    spaces = MDPSpace(
+        rewards=None,
+        actions=BoxSpace(dtype=np.float32, shape=action_shape),
+        state=DictSpace(
+            spaces={k: BoxSpace(dtype=np.float32, shape=tuple(v)) for k, v in input_shapes.items()}
+        ),
+    )
+    num_actions = spaces.actions.shape[0]
+    num_obs = list(spaces.state.spaces.values())[0].shape[0]
+
+    memory_proxy, dataloader = create_memory(
+        space=spaces,
+        memory_size=args.memory_size,
+        len_rollout=args.rollout_length,
+        batch_size=args.batch_size,
+        data_group="default",
+        device=device,
+    )
+    """Create a memory exporter if needed."""
+    if args.export_memory:
+        from emote.memory.memory import MemoryExporterProxyWrapper
+
+        memory_proxy = MemoryExporterProxyWrapper(
+            memory=memory_proxy,
+            target_memory_name=dataloader.data_group,
+            inf_steps_per_memory_export=10_000,
+            experiment_root_path=args.log_dir,
+            min_time_per_export=0,
+        )
+
+    # Discrete action count taken from the env; this overrides the value
+    # derived from the BoxSpace above.
+    num_actions = env.action_space.nvec[0]
+
+    # Create our two networks and the policy
+    online_q_net = QNet(num_obs, num_actions, args.hidden_dims)
+    target_q_net = QNet(num_obs, num_actions, args.hidden_dims)
+    policy = DQNPolicy(online_q_net)
+
+    # Move them to the device
+    online_q_net = online_q_net.to(device)
+    target_q_net = target_q_net.to(device)
+    policy = policy.to(device)
+
+    # The agent proxy is responsible for inference
+    agent_proxy = GenericAgentProxy(
+        policy,
+        device=device,
+        input_keys=tuple(input_shapes.keys()),
+        output_keys=tuple(output_shapes.keys()),
+        uses_logprobs=False,
+        spaces=spaces,
+    )
+
+    # Create an optimizer for the online network
+    optimizers = [
+        QLoss(
+            name="q1",
+            q=online_q_net,
+            opt=Adam(online_q_net.parameters(), lr=args.lr),
+            max_grad_norm=1,
+        ),
+    ]
+
+    train_callbacks = optimizers + [
+        # The QTarget callback is responsible for updating the target network
+        QTarget(
+            q_net=online_q_net,
+            target_q_net=target_q_net,
+            roll_length=args.rollout_length,
+        ),
+        # The collector is responsible for the interaction with the environment
+        ThreadedGymCollector(
+            env,
+            agent_proxy,
+            memory_proxy,
+            # NOTE(review): with the default batch_size=128 this is 256_000
+            # warmup steps, which exceeds the default bp_steps budget of
+            # 50_000 -- confirm this is intended.
+            warmup_steps=args.batch_size * 2000,
+            render=False,
+        ),
+    ]
+
+    all_callbacks = create_complementary_callbacks(
+        args,
+        train_callbacks,
+    )
+
+    # The trainer acts as the main callback, responsible for calling all other callbacks
+    trainer = Trainer(all_callbacks, dataloader)
+    trainer.train()
+
+
+if __name__ == "__main__":
+    # Command-line interface for the CartPole DQN experiment.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--name", type=str, default="cartpole", help="The name of the experiment")
+    parser.add_argument(
+        "--log-dir",
+        type=str,
+        default="./mllogs/emote/cartpole",
+        help="Directory where logs will be stored.",
+    )
+    parser.add_argument(
+        "--num-envs", type=int, default=4, help="Number of environments to run in parallel"
+    )
+    parser.add_argument(
+        "--rollout-length",
+        type=int,
+        default=1,
+        help="The length of each rollout. Refers to the number of steps or time-steps taken during a simulated trajectory or rollout when estimating the expected return of a policy.",
+    )
+    parser.add_argument("--batch-size", type=int, default=128, help="Size of each training batch")
+    # NOTE(review): argparse `type=list` splits a string argument into
+    # individual characters; only the default value behaves as intended --
+    # confirm before relying on this flag from the CLI.
+    parser.add_argument(
+        "--hidden-dims", type=list, default=[128, 128], help="The hidden dimensions of the network"
+    )
+    parser.add_argument("--lr", type=float, default=1e-3, help="Learning Rate")
+    parser.add_argument(
+        "--device", type=str, default="cpu", help="Device to run the model on, e.g. cpu or cuda:0"
+    )
+    parser.add_argument(
+        "--bp-steps",
+        type=int,
+        default=50_000,
+        help="Number of backpropagation steps until the training run is finished",
+    )
+    parser.add_argument(
+        "--memory-size",
+        type=int,
+        default=50_000,
+        help="The size of the replay buffer. More complex environments require larger replay buffers, as they need more data to learn. Given that cartpole is a simple environment, a replay buffer of size 50_000 is sufficient.",
+    )
+    parser.add_argument(
+        "--export-memory", action="store_true", default=False, help="Whether to export the memory"
+    )
+    args = parser.parse_args()
+    main(args)
+
+
+# pdm run python experiments/gym/train_dqn_cartpole.py
diff --git a/experiments/gym/train_genrl.py b/experiments/gym/train_genrl.py
new file mode 100644
index 00000000..bfa310f7
--- /dev/null
+++ b/experiments/gym/train_genrl.py
@@ -0,0 +1,210 @@
+"""This is an example training with GenRL algorithm. GenRL training requires a
+generative model as an input. The generative model should be trained prior to
+the GenRL training using VAE training as an example. One can use train_vae.py
+as an example of how to train a generative model. Please follow the instructions
+given in 'train_vae.py' to train a VAE model for the lunar lander environment.
+
+Policy training with GenRL can be done using the following:
+
+ python experiments/gym/train_genrl.py --vae-checkpoint-dir checkpoints/vae_ll/checkpoint --vae-checkpoint-index 1
+ --vae-latent-size 1 --condition-size 24 --num-hidden-layer 4 --bp-steps 10000
+
+The above example assumes a pre-trained generative model exists in the directory defined by '--vae-checkpoint-dir' at
+the index defined by '--vae-checkpoint-index'.
+"""
+
+import argparse
+
+import numpy as np
+import torch
+
+from experiments.gym.train_lunar_lander import (
+ Policy,
+ QNet,
+ _make_env,
+ create_complementary_callbacks,
+ create_train_callbacks,
+)
+from experiments.gym.train_vae import get_conditioning_fn
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper
+from tests.test_genrl import FullyConnectedDecoder, FullyConnectedEncoder
+
+from emote import Trainer
+from emote.algorithms.genrl.proxies import MemoryProxyWithEncoder
+from emote.algorithms.genrl.wrappers import DecoderWrapper, EncoderWrapper, PolicyWrapper
+from emote.algorithms.sac import FeatureAgentProxy
+from emote.memory import MemoryLoader
+from emote.memory.builder import DictObsNStepTable
+from emote.utils.spaces import BoxSpace, MDPSpace
+
+
+def create_memory(
+    encoder: EncoderWrapper,
+    space: MDPSpace,
+    memory_size: int,
+    len_rollout: int,
+    batch_size: int,
+    data_group: str,
+    device: torch.device,
+    observation_key: str = "obs",
+):
+    """Create the replay memory and data loader for GenRL training.
+
+    Arguments:
+        encoder (EncoderWrapper): encoder applied to samples before they are
+            written to the memory table
+        space (MDPSpace): the MDP space
+        memory_size (int): maximum number of samples kept in the table
+        len_rollout (int): rollout length for the N-step table
+        batch_size (int): total batch size (split into batch_size // len_rollout rollouts)
+        data_group (str): data group the loader publishes batches under
+        device (torch.device): device the sampled data is uploaded to
+        observation_key (str): key of the observation in the input dict
+    Returns:
+        (tuple[MemoryProxyWithEncoder, MemoryLoader]): memory proxy and data loader
+    """
+    use_terminal_masking = True
+
+    table = DictObsNStepTable(
+        spaces=space,
+        use_terminal_column=use_terminal_masking,
+        maxlen=memory_size,
+        device=device,
+    )
+
+    memory_proxy = MemoryProxyWithEncoder(
+        table=table,
+        encoder=encoder,
+        minimum_length_threshold=len_rollout,
+        use_terminal=use_terminal_masking,
+        input_key=observation_key,
+    )
+
+    data_loader = MemoryLoader(
+        table=table,
+        rollout_count=batch_size // len_rollout,
+        rollout_length=len_rollout,
+        size_key="batch_size",
+        data_group=data_group,
+    )
+
+    return memory_proxy, data_loader
+
+
+def create_actor_critic_agents(
+ args,
+ num_obs: int,
+ num_latent: int,
+ decoder_wrapper: DecoderWrapper,
+ init_alpha: float = 0.01,
+):
+ device = args.device
+
+ hidden_dims = [args.hidden_layer_size] * arg.num_hidden_layer
+ q1 = QNet(num_obs, num_latent, hidden_dims).to(device)
+ q2 = QNet(num_obs, num_latent, hidden_dims).to(device)
+ policy = Policy(num_obs, num_latent, hidden_dims).to(device)
+ policy_wrapper = PolicyWrapper(decoder_wrapper, policy)
+
+ policy_proxy = FeatureAgentProxy(policy_wrapper, device=device)
+
+ log_alpha = torch.tensor(np.log(init_alpha), requires_grad=True, device=device)
+
+ return q1, q2, policy, policy_proxy, log_alpha
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--name", type=str, default="genrl")
+    parser.add_argument("--observation-key", type=str, default="obs")
+    parser.add_argument("--log-dir", type=str, default="logs/")
+    parser.add_argument("--vae-checkpoint-dir", type=str, default="checkpoints/training")
+    parser.add_argument("--vae-checkpoint-index", type=int, default=0)
+    parser.add_argument("--vae-latent-size", type=int, default=3)
+    parser.add_argument("--condition-size", type=int, default=0)
+    parser.add_argument("--num-envs", type=int, default=10)
+    parser.add_argument("--rollout-length", type=int, default=5)
+    parser.add_argument("--batch-size", type=int, default=200)
+    parser.add_argument("--hidden-layer-size", type=int, default=256)
+    parser.add_argument("--num-hidden-layer", type=int, default=2)
+    parser.add_argument("--actor-lr", type=float, default=8e-3, help="Policy lr")
+    parser.add_argument("--critic-lr", type=float, default=8e-3, help="Q-function lr")
+    parser.add_argument("--device", type=str, default="cuda:0")
+    parser.add_argument("--bp-steps", type=int, default=100000)
+    parser.add_argument("--use-wandb", action="store_true")
+    parser.add_argument("--wandb-run", type=str, default=None, help="wandb run name")
+
+    arg = parser.parse_args()
+
+    training_device = torch.device(arg.device)
+    """Creating a vector of Gym environments."""
+    gym_wrapper = DictGymWrapper(AsyncVectorEnv([_make_env() for _ in range(arg.num_envs)]))
+
+    number_of_actions = gym_wrapper.dict_space.actions.shape[0]
+    number_of_obs = list(gym_wrapper.dict_space.state.spaces.values())[0].shape[0]
+
+    condition_func = get_conditioning_fn(arg.condition_size)
+    """Create the decoder wrapper."""
+    action_latent_size = arg.vae_latent_size
+
+    decoder = FullyConnectedDecoder(
+        action_latent_size,
+        number_of_actions,
+        training_device,
+        arg.condition_size,
+        [arg.hidden_layer_size] * arg.num_hidden_layer,
+    )
+    decoder_wrapper = DecoderWrapper(decoder, condition_func)
+    encoder = FullyConnectedEncoder(
+        number_of_actions,
+        action_latent_size,
+        training_device,
+        arg.condition_size,
+        [arg.hidden_layer_size] * arg.num_hidden_layer,
+    )
+    encoder_wrapper = EncoderWrapper(encoder, condition_func)
+
+    checkpoint_filename = f"{arg.vae_checkpoint_dir}_{arg.vae_checkpoint_index}.tar"
+
+    # Load the pre-trained VAE weights into the encoder wrapper.
+    state_dict = torch.load(checkpoint_filename, map_location=training_device)
+    state = state_dict["callback_state_dicts"]["vae"]
+    encoder_wrapper.load_state_dict(state.pop("network_state_dict"))
+
+    # NOTE(review): the checkpoint is loaded a second time here, without
+    # map_location=training_device -- the first state_dict could likely be
+    # reused; confirm before changing.
+    state_dict = torch.load(checkpoint_filename)
+    state = state_dict["callback_state_dicts"]["vae"]
+    decoder_wrapper.load_state_dict(state.pop("network_state_dict"))
+
+    # The generative model stays frozen during policy training.
+    for model in [encoder, decoder]:
+        for param in model.parameters():
+            param.requires_grad = False
+
+    """Creating the MDP space"""
+    spaces = MDPSpace(
+        rewards=gym_wrapper.dict_space.rewards,
+        actions=BoxSpace(dtype=np.float32, shape=(action_latent_size,)),
+        state=gym_wrapper.dict_space.state,
+    )
+    """Creating agent and the Q-functions."""
+    qnet1, qnet2, policy, agent_proxy, ln_alpha = create_actor_critic_agents(
+        decoder_wrapper=decoder_wrapper,
+        args=arg,
+        num_latent=action_latent_size,
+        num_obs=number_of_obs,
+    )
+    """Creating the memory."""
+    gym_memory, dataloader = create_memory(
+        space=spaces,
+        encoder=encoder_wrapper,
+        memory_size=4_000_000,
+        len_rollout=arg.rollout_length,
+        batch_size=arg.batch_size,
+        data_group="rl_buffer",
+        device=training_device,
+        observation_key=arg.observation_key,
+    )
+    """Creating the train callbacks."""
+    train_callbacks = create_train_callbacks(
+        args=arg,
+        q1=qnet1,
+        q2=qnet2,
+        policy=policy,
+        policy_proxy=agent_proxy,
+        ln_alpha=ln_alpha,
+        env=gym_wrapper,
+        memory_proxy=gym_memory,
+        data_group="rl_buffer",
+    )
+    """Creating the complementary callbacks."""
+    callbacks = create_complementary_callbacks(
+        args=arg,
+        logged_cbs=train_callbacks,
+    )
+    """Start the training."""
+    trainer = Trainer(callbacks, dataloader)
+    trainer.train()
diff --git a/experiments/gym/train_lunar_lander.py b/experiments/gym/train_lunar_lander.py
new file mode 100644
index 00000000..570b925c
--- /dev/null
+++ b/experiments/gym/train_lunar_lander.py
@@ -0,0 +1,374 @@
+import argparse
+import time
+
+from functools import partial
+
+import gymnasium as gym
+import numpy as np
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper
+from tests.gym.collector import ThreadedGymCollector
+from torch import nn
+from torch.optim import Adam
+from torch.utils.tensorboard import SummaryWriter
+
+from emote import Trainer
+from emote.algorithms.sac import AlphaLoss, FeatureAgentProxy, PolicyLoss, QLoss, QTarget
+from emote.callbacks import Checkpointer
+from emote.callbacks.generic import BackPropStepsTerminator
+from emote.callbacks.logging import TensorboardLogger
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsNStepTable
+from emote.mixins.logging import LoggingMixin
+from emote.nn import GaussianPolicyHead
+from emote.nn.initialization import ortho_init_, xavier_uniform_init_
+from emote.utils.spaces import MDPSpace
+
+
+def _make_env():
+    """Making a Lunar Lander Gym environment.
+
+    Returns:
+        (Callable[[], gym.Env]): a thunk that creates one Lunar Lander Gym
+        environment (frame-stacked and flattened)
+    """
+
+    def _thunk():
+        env = gym.make("LunarLander-v2", continuous=True)
+        # Stack 3 frames and flatten them into a single observation vector.
+        env = gym.wrappers.FrameStack(env, 3)
+        env = gym.wrappers.FlattenObservation(env)
+        return env
+
+    return _thunk
+
+
+class QNet(nn.Module):
+    """State-action value network: Q(obs, action) -> scalar.
+
+    Args:
+        num_obs (int): dimension of the observation vector.
+        num_actions (int): dimension of the action vector.
+        hidden_dims (list of int): widths of the hidden layers.
+    """
+
+    def __init__(self, num_obs, num_actions, hidden_dims):
+        super().__init__()
+        # The observation and the action are concatenated at the input.
+        all_dims = [num_obs + num_actions] + hidden_dims
+
+        self.encoder = nn.Sequential(
+            *[
+                nn.Sequential(nn.Linear(n_in, n_out), nn.ReLU())
+                for n_in, n_out in zip(all_dims, hidden_dims)
+            ],
+        )
+        self.encoder.apply(ortho_init_)
+
+        # Single scalar Q-value output.
+        self.final_layer = nn.Linear(hidden_dims[-1], 1)
+        self.final_layer.apply(partial(ortho_init_, gain=1))
+
+    def forward(self, action, obs):
+        """Return Q-values for the given batch of (action, obs) pairs."""
+        x = torch.cat([obs, action], dim=1)
+        return self.final_layer(self.encoder(x))
+
+
+class Policy(nn.Module):
+    """Gaussian policy network mapping observations to action samples.
+
+    Args:
+        num_obs (int): dimension of the observation vector.
+        num_actions (int): dimension of the action vector.
+        hidden_dims (list of int): widths of the hidden layers.
+    """
+
+    def __init__(self, num_obs, num_actions, hidden_dims):
+        super().__init__()
+        self.encoder = nn.Sequential(
+            *[
+                nn.Sequential(nn.Linear(n_in, n_out), nn.ReLU())
+                for n_in, n_out in zip([num_obs] + hidden_dims, hidden_dims)
+            ],
+        )
+        self.policy = GaussianPolicyHead(
+            hidden_dims[-1],
+            num_actions,
+        )
+
+        self.encoder.apply(ortho_init_)
+        # Small gain keeps the initial policy output close to zero.
+        self.policy.apply(partial(xavier_uniform_init_, gain=0.01))
+
+    def forward(self, obs, epsilon=None):
+        """Return (action sample, clamped log-probability) for `obs`.
+
+        `epsilon` is accepted for interface compatibility and unused here.
+        """
+        sample, log_prob = self.policy(self.encoder(obs))
+        # TODO: Investigate the log_prob() logic of the pytorch distribution code.
+        # The change below shouldn't be needed but significantly improves training
+        # stability when training lunar lander.
+        log_prob = log_prob.clamp(min=-2)
+        return sample, log_prob
+
+
+def create_memory(
+    space: MDPSpace,
+    memory_size: int,
+    len_rollout: int,
+    batch_size: int,
+    data_group: str,
+    device: torch.device,
+    preload_buffer: bool = False,
+    buffer_filename: str = None,
+):
+    """Creates memory and data_loader for the RL training.
+
+    Arguments:
+        space (MDPSpace): the MDP space
+        memory_size (int): the maximum length of memory
+        len_rollout (int): the rollout size for the NStepTable
+        batch_size (int): batch size (split into batch_size // len_rollout rollouts)
+        data_group (str): the data group for uploading the data
+        device (torch.device): the device to upload the data
+        preload_buffer (bool): preload the buffer with some existing data
+        buffer_filename (str): the path to the replay buffer if preload_buffer is set to True
+    Returns:
+        (tuple[TableMemoryProxy, MemoryLoader]): A proxy for the memory and a dataloader
+    """
+    table = DictObsNStepTable(
+        spaces=space,
+        use_terminal_column=False,
+        maxlen=memory_size,
+        device=device,
+    )
+    if preload_buffer:
+        # Seed the replay buffer with previously collected (offline) samples.
+        table.restore(buffer_filename)
+        print(f"memory populated with offline samples - size: {table.size()}")
+    memory_proxy = TableMemoryProxy(table=table, use_terminal=False)
+    data_loader = MemoryLoader(
+        table=table,
+        rollout_count=batch_size // len_rollout,
+        rollout_length=len_rollout,
+        size_key="batch_size",
+        data_group=data_group,
+    )
+    return memory_proxy, data_loader
+
+
+def create_actor_critic_agents(
+    args,
+    num_obs: int,
+    num_actions: int,
+    init_alpha: float = 0.01,
+):
+    """The function to create the actor (policy) and the critics (two
+    Q-functions)
+
+    Arguments:
+        args: the input arguments given by argparser
+        num_obs (int): the dimension of the state (observation) space
+        num_actions (int): the dimension of the action space
+        init_alpha (float): the initial value of the alpha parameters
+    Returns:
+        (tuple[nn.Module, nn.Module, FeatureAgentProxy, torch.Tensor, nn.Module]):
+        the two Q-functions, the policy proxy (which wraps the policy
+        nn.Module), the log-alpha tensor, and the raw policy network.
+    """
+    device = args.device
+    hidden_dims = [args.hidden_layer_size, args.hidden_layer_size]
+    q1 = QNet(num_obs, num_actions, hidden_dims)
+    q2 = QNet(num_obs, num_actions, hidden_dims)
+    policy = Policy(num_obs, num_actions, hidden_dims)
+    q1 = q1.to(device)
+    q2 = q2.to(device)
+    policy = policy.to(device)
+    policy_proxy = FeatureAgentProxy(policy, device=device)
+    # Log of the (trainable) SAC entropy temperature.
+    ln_alpha = torch.tensor(np.log(init_alpha), requires_grad=True, device=device)
+    return q1, q2, policy_proxy, ln_alpha, policy
+
+
+def create_train_callbacks(
+    args,
+    q1: nn.Module,
+    q2: nn.Module,
+    policy: nn.Module,
+    policy_proxy: FeatureAgentProxy,
+    ln_alpha: torch.Tensor,
+    env: DictGymWrapper,
+    memory_proxy: TableMemoryProxy,
+    data_group: str,
+):
+    """The function creates the callbacks required for model-free SAC training.
+
+    Arguments:
+        args: the input arguments given by argparser
+        q1 (nn.Module): the first Q-network (used for double Q-learning)
+        q2 (nn.Module): the second Q-network (used for double Q-learning)
+        policy (nn.Module): the high-level policy
+        policy_proxy (FeatureAgentProxy): the wrapper for the policy network
+        ln_alpha (Tensor): the log of alpha parameters (trainable)
+        env (DictGymWrapper): the Gym wrapper
+        memory_proxy (TableMemoryProxy): the proxy for the memory
+        data_group (str): the data_group to receive data batches
+    Returns:
+        (list[Callback]): the callbacks for the SAC RL training
+    """
+    batch_size = args.batch_size
+    max_grad_norm = 1
+    len_rollout = args.rollout_length
+    num_actions = env.dict_space.actions.shape[0]
+
+    training_cbs = [
+        # Critic losses: one per Q-network (double Q-learning).
+        QLoss(
+            name="q1",
+            q=q1,
+            opt=Adam(q1.parameters(), lr=args.critic_lr),
+            max_grad_norm=max_grad_norm,
+            data_group=data_group,
+        ),
+        QLoss(
+            name="q2",
+            q=q2,
+            opt=Adam(q2.parameters(), lr=args.critic_lr),
+            max_grad_norm=max_grad_norm,
+            data_group=data_group,
+        ),
+        PolicyLoss(
+            pi=policy,
+            ln_alpha=ln_alpha,
+            q=q1,
+            opt=Adam(policy.parameters(), lr=args.actor_lr),
+            max_grad_norm=max_grad_norm,
+            data_group=data_group,
+        ),
+        # Entropy temperature loss; ln_alpha is the only trainable parameter.
+        AlphaLoss(
+            pi=policy,
+            ln_alpha=ln_alpha,
+            opt=Adam([ln_alpha], lr=args.actor_lr),
+            n_actions=num_actions,
+            max_grad_norm=max_grad_norm,
+            max_alpha=10.0,
+            data_group=data_group,
+        ),
+        QTarget(
+            pi=policy,
+            ln_alpha=ln_alpha,
+            q1=q1,
+            q2=q2,
+            roll_length=len_rollout,
+            reward_scale=0.1,
+            data_group=data_group,
+        ),
+        # Environment interaction: fills the replay memory via the proxy.
+        ThreadedGymCollector(
+            env,
+            policy_proxy,
+            memory_proxy,
+            warmup_steps=batch_size,
+            render=False,
+        ),
+    ]
+    return training_cbs
+
+
def create_complementary_callbacks(
    args,
    logged_cbs: list[LoggingMixin],
    cbs_name_to_checkpoint: list[str] = None,
):
    """Extend the training callbacks with logging, termination and, when
    requested, checkpointing, and return the full list.

    Arguments:
        args: the parsed command-line arguments
        logged_cbs (list[Callback]): callbacks whose metrics should be logged
        cbs_name_to_checkpoint (list[str]): names of the callbacks to checkpoint
    Returns:
        (list[Callback]): the full list of callbacks for the training
    """
    if args.use_wandb:
        from emote.callbacks.wb_logger import WBLogger

        wandb_config = {
            "wandb_project": args.name,
            "wandb_run": args.wandb_run,
            "hidden_dims": args.hidden_layer_size,
            "batch_size": args.batch_size,
            "learning_rate": args.actor_lr,
            "rollout_len": args.rollout_length,
        }
        logger = WBLogger(
            callbacks=logged_cbs,
            config=wandb_config,
            log_interval=100,
        )
    else:
        # Each run gets a unique Tensorboard directory via the current timestamp.
        run_dir = f"{args.log_dir}/{args.name}_{time.time()}"
        logger = TensorboardLogger(
            logged_cbs,
            SummaryWriter(log_dir=run_dir),
            100,
        )

    callbacks = logged_cbs + [logger, BackPropStepsTerminator(bp_steps=args.bp_steps)]

    if cbs_name_to_checkpoint:
        # Only callbacks that expose a matching ``name`` attribute are checkpointed.
        to_checkpoint = [
            cb for cb in logged_cbs if hasattr(cb, "name") and cb.name in cbs_name_to_checkpoint
        ]
        callbacks.append(
            Checkpointer(
                callbacks=to_checkpoint,
                run_root=args.checkpoint_dir,
                checkpoint_interval=args.checkpoint_interval,
            )
        )

    return callbacks
+
+
if __name__ == "__main__":
    # Command-line interface for model-free SAC training on LunarLander.
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", type=str, default="ll")
    parser.add_argument("--log-dir", type=str, default="/mnt/mllogs/emote/lunar_lander")
    parser.add_argument("--num-envs", type=int, default=10)
    parser.add_argument("--rollout-length", type=int, default=5)
    parser.add_argument("--batch-size", type=int, default=200)
    parser.add_argument("--hidden-layer-size", type=int, default=256)
    parser.add_argument("--actor-lr", type=float, default=8e-3, help="The policy learning rate")
    parser.add_argument("--critic-lr", type=float, default=8e-3, help="Q-function learning rate")
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument("--bp-steps", type=int, default=10000)
    # --export-memory saves the replay buffer at the end of training (used
    # e.g. to provide expert data for the VAE training script).
    parser.add_argument("--export-memory", action="store_true")
    parser.add_argument("--use-wandb", action="store_true")
    parser.add_argument(
        "--wandb-run",
        type=str,
        default=None,
        help="Short display name of run for the W&B UI. Randomly generated by default.",
    )

    input_args = parser.parse_args()
    training_device = torch.device(input_args.device)
    """Creating a vector of Gym environments."""
    gym_wrapper = DictGymWrapper(AsyncVectorEnv([_make_env() for _ in range(input_args.num_envs)]))
    number_of_actions = gym_wrapper.dict_space.actions.shape[0]
    # Observation size is read from the first entry of the state-space dict.
    number_of_obs = list(gym_wrapper.dict_space.state.spaces.values())[0].shape[0]
    """Creating the memory and the dataloader."""
    gym_memory_proxy, dataloader = create_memory(
        space=gym_wrapper.dict_space,
        memory_size=4_000_000,
        len_rollout=input_args.rollout_length,
        batch_size=input_args.batch_size,
        data_group="default",
        device=training_device,
    )
    """Create a memory exporter if needed."""
    if input_args.export_memory:
        from emote.memory.memory import MemoryExporterProxyWrapper

        # Wrap the proxy so the buffer is periodically exported to log_dir.
        gym_memory_proxy = MemoryExporterProxyWrapper(
            memory=gym_memory_proxy,
            target_memory_name=dataloader.data_group,
            inf_steps_per_memory_export=10_000,
            experiment_root_path=input_args.log_dir,
            min_time_per_export=0,
        )

    """Creating the actor (policy) and critics (the two Q-functions) agents """
    qnet1, qnet2, agent_proxy, ln_alpha, policy = create_actor_critic_agents(
        args=input_args, num_actions=number_of_actions, num_obs=number_of_obs
    )
    """Creating the training callbacks."""
    train_callbacks = create_train_callbacks(
        args=input_args,
        q1=qnet1,
        q2=qnet2,
        policy=policy,
        policy_proxy=agent_proxy,
        ln_alpha=ln_alpha,
        env=gym_wrapper,
        memory_proxy=gym_memory_proxy,
        data_group="default",
    )
    """Creating the supplementary callbacks and adding them to the training
    callbacks."""
    all_callbacks = create_complementary_callbacks(args=input_args, logged_cbs=train_callbacks)
    """Training."""
    trainer = Trainer(all_callbacks, dataloader)
    trainer.train()
diff --git a/experiments/gym/train_lunar_lander_model_based.py b/experiments/gym/train_lunar_lander_model_based.py
new file mode 100644
index 00000000..02fed8ea
--- /dev/null
+++ b/experiments/gym/train_lunar_lander_model_based.py
@@ -0,0 +1,234 @@
+import argparse
+
+import torch
+
+from experiments.gym.train_lunar_lander import (
+ _make_env,
+ create_actor_critic_agents,
+ create_complementary_callbacks,
+ create_memory,
+ create_train_callbacks,
+)
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper
+from torch.optim import Adam
+
+from emote import Trainer
+from emote.extra.schedules import BPStepScheduler
+from emote.models.callbacks import BatchSampler, ModelBasedCollector, ModelLoss
+from emote.models.ensemble import EnsembleOfGaussian
+from emote.models.model import DynamicModel
+from emote.models.model_env import ModelEnv
+
+
def lunar_lander_term_func(
    states: torch.Tensor,
):
    """Termination function used by the gym-like dynamic model to label
    terminal states for the lunar lander environment.

    The current implementation always returns zeros, i.e. every state is
    labeled non-terminal. This could be improved by detecting actual terminal
    states, or by training a classifier to do so.

    Arguments:
        states (torch.Tensor): the state batch (batch_size x dim_state)
    Returns:
        (torch.Tensor): the terminal labels (batch_size), all zero
    """
    batch_dim = states.shape[0]
    return torch.zeros(batch_dim)
+
+
def create_dynamic_model_env(
    args,
    num_obs: int,
    num_actions: int,
):
    """Construct a gym-like environment backed by a learned dynamic model.

    Arguments:
        args: the parsed command-line arguments
        num_obs (int): dimensionality of the observation space
        num_actions (int): dimensionality of the action space
    Returns:
        (ModelEnv): the gym-like dynamic model environment
    """
    run_device = torch.device(args.device)
    # The ensemble maps (obs, action) to (next_obs, reward), hence the +1.
    gaussian_ensemble = EnsembleOfGaussian(
        in_size=num_obs + num_actions,
        out_size=num_obs + 1,
        device=run_device,
        ensemble_size=args.num_model_ensembles,
    )
    wrapped_model = DynamicModel(model=gaussian_ensemble)
    return ModelEnv(
        num_envs=args.batch_size,
        model=wrapped_model,
        termination_fn=lunar_lander_term_func,
    )
+
+
def create_model_based_callbacks(
    args,
    model_buffer,
    model_data_loader,
    model_env,
    policy_proxy,
):
    """Create the extra callbacks required for model-based RL (MBRL) training.

    Three callbacks are needed:
      1. ModelLoss: trains the dynamic model.
      2. BatchSampler: every BP step, samples a batch of transitions from
         either the gym buffer or the model buffer according to a scheduled
         probability; the batch is used only for the RL updates.
      3. ModelBasedCollector: creates synthetic transitions by unrolling the
         gym-like dynamic model and stores them in the model buffer.

    Arguments:
        args: the parsed command-line arguments
        model_buffer (DictTable): replay buffer for the synthetic transitions
        model_data_loader (MemoryLoader): dataloader sampling transition batches
        model_env (ModelEnv): the gym-like dynamic model
        policy_proxy (FeatureAgentProxy): the policy proxy
    Returns:
        (list[Callback]): the callbacks required for model-based RL training
    """
    model_loss = ModelLoss(
        model=model_env.dynamic_model,
        name="dynamic_model",
        opt=Adam(model_env.dynamic_model.model.parameters(), lr=args.model_lr),
        data_group="default",
    )
    batch_sampler = BatchSampler(
        dataloader=model_data_loader,
        prob_scheduler=BPStepScheduler(*args.data_scheduler),
        data_group="default",
        rl_data_group="rl_buffer",
    )
    mb_collector = ModelBasedCollector(
        model_env=model_env,
        agent=policy_proxy,
        memory=model_buffer,
        rollout_scheduler=BPStepScheduler(*args.rollout_scheduler),
        num_bp_to_retain_buffer=args.num_bp_to_retain_model_buffer,
    )
    return [model_loss, batch_sampler, mb_collector]
+
+
if __name__ == "__main__":
    # Command-line interface for model-based SAC training on LunarLander.
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", type=str, default="ll_mbrl_")
    parser.add_argument("--log-dir", type=str, default="/mnt/mllogs/emote/lunar_lander")
    parser.add_argument("--num-envs", type=int, default=10)
    parser.add_argument("--rollout-length", type=int, default=1)
    parser.add_argument("--batch-size", type=int, default=200)
    parser.add_argument("--hidden-layer-size", type=int, default=256)
    parser.add_argument("--actor-lr", type=float, default=8e-3, help="The policy learning rate")
    parser.add_argument("--critic-lr", type=float, default=8e-3, help="Q-function learning rate")
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument("--bp-steps", type=int, default=100000)
    parser.add_argument("--use-wandb", action="store_true")
    parser.add_argument(
        "--wandb-run",
        type=str,
        default=None,
        help="Short display name of run for the W&B UI. Randomly generated by default.",
    )
    """The extra arguments for the model-based RL training."""
    parser.add_argument(
        "--num-model-ensembles",
        type=int,
        default=5,
        help="The number of dynamic models in the ensemble",
    )
    parser.add_argument(
        "--rollout-scheduler",
        nargs="+",
        type=int,
        default=[5000, 100000, 1, 20],
        help="The scheduler which outputs the rollout size (the number of time-steps"
        "to unroll the dynamic model) given the BP step as input "
        "[bp_begin, bp_end, rollout_initial_size, rollout_final_size]).",
    )
    parser.add_argument(
        "--data-scheduler",
        nargs="+",
        type=float,
        default=[5000, 100000, 0.0, 0.9],
        help="The scheduler which outputs the probability of choosing synthetic samples"
        "(generated by the model) against real gym samples given the BP step as input "
        "[bp_begin, bp_end, prob_initial_value, prob_final_value]).",
    )
    parser.add_argument(
        "--num-bp-to-retain-model-buffer",
        type=int,
        default=5000,
        help="The number of BP steps before the model-buffer is completely overwritten",
    )
    parser.add_argument("--model-lr", type=float, default=1e-3, help="The model learning rate")

    input_args = parser.parse_args()

    training_device = torch.device(input_args.device)

    gym_wrapper = DictGymWrapper(AsyncVectorEnv([_make_env() for _ in range(input_args.num_envs)]))
    number_of_actions = gym_wrapper.dict_space.actions.shape[0]
    # Observation size is read from the first entry of the state-space dict.
    number_of_obs = list(gym_wrapper.dict_space.state.spaces.values())[0].shape[0]
    """Creating the models, memory, dataloader and callbacks (the same as
    model-free training)."""
    # BUG FIX: create_actor_critic_agents returns five values
    # (q1, q2, policy_proxy, ln_alpha, policy); the previous three-name unpack
    # raised "too many values to unpack" at startup.
    qnet1, qnet2, agent_proxy, ln_alpha, policy = create_actor_critic_agents(
        args=input_args, num_actions=number_of_actions, num_obs=number_of_obs
    )

    gym_memory, dataloader = create_memory(
        space=gym_wrapper.dict_space,
        memory_size=4_000_000,
        len_rollout=input_args.rollout_length,
        batch_size=input_args.batch_size,
        data_group="default",
        device=training_device,
    )

    # BUG FIX: create_train_callbacks requires the policy network and the
    # ln_alpha tensor; both were missing from the original call.
    train_callbacks = create_train_callbacks(
        args=input_args,
        q1=qnet1,
        q2=qnet2,
        policy=policy,
        policy_proxy=agent_proxy,
        ln_alpha=ln_alpha,
        env=gym_wrapper,
        memory_proxy=gym_memory,
        data_group="rl_buffer",
    )
    """The extra functions used only for model-based RL training."""
    # Size the model buffer so it holds exactly the synthetic transitions
    # generated while they are retained.
    memory_init_size = input_args.batch_size * input_args.num_bp_to_retain_model_buffer
    model_memory, model_dataloader = create_memory(
        space=gym_wrapper.dict_space,
        memory_size=memory_init_size,
        len_rollout=1,
        batch_size=input_args.batch_size,
        data_group="rl_buffer",
        device=training_device,
    )

    gym_like_env = create_dynamic_model_env(
        args=input_args,
        num_obs=number_of_obs,
        num_actions=number_of_actions,
    )

    mb_callbacks = create_model_based_callbacks(
        args=input_args,
        model_buffer=model_memory,
        model_data_loader=model_dataloader,
        model_env=gym_like_env,
        policy_proxy=agent_proxy,
    )
    """Creating the complementary callbacks and starting the training."""
    callbacks = create_complementary_callbacks(
        args=input_args, logged_cbs=(train_callbacks + mb_callbacks)
    )

    trainer = Trainer(callbacks, dataloader)
    trainer.train()
diff --git a/experiments/gym/train_vae.py b/experiments/gym/train_vae.py
new file mode 100644
index 00000000..be327a44
--- /dev/null
+++ b/experiments/gym/train_vae.py
@@ -0,0 +1,143 @@
+"""
+
+This is an example code to train VAE models to generate action data. In order to train a VAE model, we need to obtain a
+replay buffer of expert data. The replay buffer can come from previous RL training sessions. As an example, train a SAC
+policy and store the replay buffer:
+
+ python experiments/gym/train_lunar_lander.py --log-dir logs/ --bp-steps 50000 --export-memory
+
+The '--export-memory' flag is needed to save the replay buffer after the training is finished.
+Once the expert replay buffer is created, you can train the VAE model using the example below:
+
+ python experiments/gym/train_vae.py --beta 0.004 --action-size 2 --observation-size 24
+ --condition-size 24 --latent-size 1 --num-hidden-layer 4 --bp-steps 10000 --checkpoint-interval 9999
+ --buffer-dir logs/ --buffer-filename default_export
+
+The '--latent-size 1' learns a latent space to represent actions with only one dimension. Note that the original action
+space is 2-dimensional.
+
+"""
+
+import argparse
+import os
+
+import numpy as np
+import torch
+
+from experiments.gym.train_lunar_lander import create_complementary_callbacks, create_memory
+from tests.test_genrl import FullyConnectedDecoder, FullyConnectedEncoder
+from torch.optim import Adam
+
+from emote import Trainer
+from emote.algorithms.genrl.vae import VAELoss, VariationalAutoencoder
+from emote.utils.spaces import BoxSpace, DictSpace, MDPSpace
+
+
def get_conditioning_fn(len_cond: int = 0):
    """Build a function that extracts the first ``len_cond`` features of an
    array, used to condition the VAE on a prefix of the observation.

    Arguments:
        len_cond (int): number of leading features to keep (must be >= 0)
    Returns:
        A callable mapping a 1-D array to its first ``len_cond`` entries, or a
        2-D batch to its first ``len_cond`` columns.
    """
    assert len_cond >= 0

    def conditioning_fn(a):
        # 1-D input: slice directly; batched input: slice the feature axis.
        return a[:len_cond] if len(a.shape) == 1 else a[:, :len_cond]

    return conditioning_fn
+
+
if __name__ == "__main__":
    # Command-line interface for training a VAE over actions from an exported
    # replay buffer (see the module docstring for a usage example).
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", type=str, default="vae_training")

    parser.add_argument("--action-size", type=int)
    parser.add_argument("--observation-size", type=int)
    parser.add_argument("--observation-key", type=str, default="obs")
    parser.add_argument("--condition-size", type=int)
    parser.add_argument("--latent-size", type=int, default=3)
    parser.add_argument("--beta", type=float, default=0.01, help="VAE beta value")

    parser.add_argument("--batch-size", type=int, default=200)
    parser.add_argument("--hidden-layer-size", type=int, default=256)
    parser.add_argument("--num-hidden-layer", type=int, default=2)
    parser.add_argument("--lr", type=float, default=8e-3, help="The learning rate")
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument("--bp-steps", type=int, default=10000)

    parser.add_argument("--log-dir", type=str, default="logs/")

    parser.add_argument("--checkpoint-dir", type=str, default="checkpoints")
    parser.add_argument("--checkpoint-interval", type=int, default=100000)

    # Location of the pre-recorded (expert) replay buffer to train from.
    parser.add_argument("--buffer-dir", type=str, default="replay_buffers/")
    parser.add_argument("--buffer-filename", type=str, default="rl_buffer_export")

    parser.add_argument("--use-wandb", action="store_true")
    parser.add_argument(
        "--wandb-run", type=str, default=None, help="Display name of the run for wandb."
    )

    arg = parser.parse_args()

    training_device = torch.device(arg.device)

    # Build the MDP space description used by the memory; rewards are unused
    # for VAE training, hence None.
    spaces = MDPSpace(
        rewards=None,
        actions=BoxSpace(dtype=np.float32, shape=(arg.action_size,)),
        state=DictSpace(
            spaces={arg.observation_key: BoxSpace(dtype=np.float32, shape=(arg.observation_size,))}
        ),
    )
    condition_fn = get_conditioning_fn(arg.condition_size)
    """Create the memory and pre-load it with some expert policy
    trajectories."""
    _, dataloader = create_memory(
        space=spaces,
        memory_size=4_000_000,
        len_rollout=1,
        batch_size=arg.batch_size,
        data_group="offline_data",
        preload_buffer=True,
        buffer_filename=os.path.join(arg.buffer_dir, arg.buffer_filename),
        device=training_device,
    )
    """Create the vae model."""
    encoder = FullyConnectedEncoder(
        input_size=arg.action_size,
        output_size=arg.latent_size,
        condition_size=arg.condition_size,
        device=training_device,
        hidden_sizes=[arg.hidden_layer_size] * arg.num_hidden_layer,
    )
    decoder = FullyConnectedDecoder(
        latent_size=arg.latent_size,
        output_size=arg.action_size,
        condition_size=arg.condition_size,
        device=training_device,
        hidden_sizes=[arg.hidden_layer_size] * arg.num_hidden_layer,
    )

    vae = VariationalAutoencoder(
        encoder=encoder,
        decoder=decoder,
        device=training_device,
        beta=arg.beta,
    )

    # The only trained callback: the VAE loss over the offline data group.
    cbs = [
        VAELoss(
            vae=vae,
            opt=Adam(vae.parameters(), lr=arg.lr),
            data_group="offline_data",
            conditioning_func=condition_fn,
            input_key=arg.observation_key,
        )
    ]
    """Creating the supplementary callbacks and adding them to the training
    callbacks."""
    all_callbacks = create_complementary_callbacks(
        args=arg,
        logged_cbs=cbs,
        cbs_name_to_checkpoint=["vae"],
    )
    """Training."""
    trainer = Trainer(all_callbacks, dataloader)
    trainer.train()
diff --git a/experiments/gym/wandb_sweep_lunar_lander.py b/experiments/gym/wandb_sweep_lunar_lander.py
new file mode 100644
index 00000000..faf37589
--- /dev/null
+++ b/experiments/gym/wandb_sweep_lunar_lander.py
@@ -0,0 +1,209 @@
+import argparse
+
+from functools import partial
+
+import gymnasium as gym
+import numpy as np
+import torch
+import wandb
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper
+from tests.gym.collector import ThreadedGymCollector
+from torch import nn
+from torch.optim import Adam
+
+from emote import Trainer
+from emote.algorithms.sac import AlphaLoss, FeatureAgentProxy, PolicyLoss, QLoss, QTarget
+from emote.callbacks.generic import BackPropStepsTerminator
+from emote.callbacks.wb_logger import WBLogger
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsNStepTable
+from emote.nn import GaussianPolicyHead
+from emote.nn.initialization import ortho_init_, xavier_uniform_init_
+
+
def _make_env():
    """Return a thunk that builds one LunarLander environment with a
    3-frame stack flattened into a single observation vector."""

    def _thunk():
        wrapped = gym.make("LunarLander-v2", continuous=True)
        wrapped = gym.wrappers.FrameStack(wrapped, 3)
        return gym.wrappers.FlattenObservation(wrapped)

    return _thunk
+
+
class QNet(nn.Module):
    """Q-value network: an MLP over the concatenated (obs, action) pair
    producing a single scalar value."""

    def __init__(self, num_obs, num_actions, hidden_dims):
        super().__init__()
        layer_widths = [num_obs + num_actions] + hidden_dims

        mlp_blocks = [
            nn.Sequential(nn.Linear(width_in, width_out), nn.ReLU())
            for width_in, width_out in zip(layer_widths, hidden_dims)
        ]
        self.encoder = nn.Sequential(*mlp_blocks)
        self.encoder.apply(ortho_init_)

        # Output head uses gain=1 orthogonal init, unlike the hidden layers.
        self.final_layer = nn.Linear(hidden_dims[-1], 1)
        self.final_layer.apply(partial(ortho_init_, gain=1))

    def forward(self, action, obs):
        joint_input = torch.cat([obs, action], dim=1)
        return self.final_layer(self.encoder(joint_input))
+
+
class Policy(nn.Module):
    """Stochastic policy: an MLP encoder feeding a Gaussian policy head that
    returns an action sample and its log-probability."""

    def __init__(self, num_obs, num_actions, hidden_dims):
        super().__init__()
        layer_widths = [num_obs] + hidden_dims
        mlp_blocks = [
            nn.Sequential(nn.Linear(width_in, width_out), nn.ReLU())
            for width_in, width_out in zip(layer_widths, hidden_dims)
        ]
        self.encoder = nn.Sequential(*mlp_blocks)
        self.policy = GaussianPolicyHead(
            hidden_dims[-1],
            num_actions,
        )

        self.encoder.apply(ortho_init_)
        self.policy.apply(partial(xavier_uniform_init_, gain=0.01))

    def forward(self, obs):
        sample, log_prob = self.policy(self.encoder(obs))
        # TODO: Investigate the log_prob() logic of the pytorch distribution code.
        # The change below shouldn't be needed but significantly improves training
        # stability when training lunar lander.
        log_prob = log_prob.clamp(min=-2)
        return sample, log_prob
+
+
def train_lunar_lander(args):
    """Run one SAC training session on LunarLander; used as the W&B sweep target.

    Fixed hyperparameters are hard-coded below and logged as run metadata;
    swept hyperparameters (currently the learning rate) are read from
    ``wandb.config`` after ``wandb.init``.

    Arguments:
        args: the parsed command-line arguments (device, num_bp_steps, ...)
    """
    device = torch.device(args.device)

    # Fixed (non-swept) hyperparameters.
    hidden_dims = [256, 256]
    batch_size = 2000
    n_env = 10
    max_grad_norm = 1

    rollout_len = 20
    init_alpha = 1.0

    # any additional hyperparameters/metadata that we want to log
    config = {
        "hidden_dims": hidden_dims,
        "batch_size": batch_size,
        "rollout_len": rollout_len,
    }

    # parameters to search defined from wandb.config which are set by the sweep agent
    wandb.init(config=config)
    learning_rate = wandb.config.learning_rate

    env = DictGymWrapper(AsyncVectorEnv([_make_env() for _ in range(n_env)]))
    table = DictObsNStepTable(
        spaces=env.dict_space,
        use_terminal_column=False,
        maxlen=4_000_000,
        device=device,
    )
    memory_proxy = TableMemoryProxy(table, use_terminal=False)
    # Each sampled batch covers batch_size time-steps split into rollouts.
    dataloader = MemoryLoader(table, batch_size // rollout_len, rollout_len, "batch_size")

    num_actions = env.dict_space.actions.shape[0]
    # Observation size is read from the first entry of the state-space dict.
    num_obs = list(env.dict_space.state.spaces.values())[0].shape[0]

    q1 = QNet(num_obs, num_actions, hidden_dims)
    q2 = QNet(num_obs, num_actions, hidden_dims)
    policy = Policy(num_obs, num_actions, hidden_dims)

    # Trainable log of the entropy coefficient alpha.
    ln_alpha = torch.tensor(np.log(init_alpha), requires_grad=True, device=device)
    agent_proxy = FeatureAgentProxy(policy, device=device)

    q1 = q1.to(device)
    q2 = q2.to(device)
    policy = policy.to(device)

    # The standard set of SAC callbacks; all share the swept learning rate.
    logged_cbs = [
        QLoss(
            name="q1",
            q=q1,
            opt=Adam(q1.parameters(), lr=learning_rate),
            max_grad_norm=max_grad_norm,
        ),
        QLoss(
            name="q2",
            q=q2,
            opt=Adam(q2.parameters(), lr=learning_rate),
            max_grad_norm=max_grad_norm,
        ),
        PolicyLoss(
            pi=policy,
            ln_alpha=ln_alpha,
            q=q1,
            opt=Adam(policy.parameters(), lr=learning_rate),
            max_grad_norm=max_grad_norm,
        ),
        AlphaLoss(
            pi=policy,
            ln_alpha=ln_alpha,
            opt=Adam([ln_alpha], lr=learning_rate),
            n_actions=num_actions,
            max_grad_norm=max_grad_norm,
            max_alpha=10.0,
        ),
        QTarget(
            pi=policy,
            ln_alpha=ln_alpha,
            q1=q1,
            q2=q2,
            roll_length=rollout_len,
            reward_scale=0.1,
        ),
        ThreadedGymCollector(
            env,
            agent_proxy,
            memory_proxy,
            warmup_steps=batch_size,
            render=False,
        ),
    ]

    logger = WBLogger(
        callbacks=logged_cbs,
        config=config,
        log_interval=100,
    )

    callbacks = logged_cbs + [logger, BackPropStepsTerminator(args.num_bp_steps)]
    trainer = Trainer(callbacks, dataloader)
    trainer.train()
+
+
if __name__ == "__main__":
    # Command-line interface for launching a W&B hyperparameter sweep.
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", type=str, default="ll")
    parser.add_argument("--log-dir", type=str, default="/mnt/mllogs/emote/lunar_lander")
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument("--num_bp_steps", type=int, default=10000)
    parser.add_argument(
        "--wandb_run",
        type=str,
        default=None,
        help="Short display name of run for the W&B UI. Randomly generated by default.",
    )

    args = parser.parse_args()

    # Configuration dictionary of the W&B sweep
    sweep_config = {
        "method": "grid",
        "name": "sweep",
        "parameters": {
            "learning_rate": {"values": [8e-3, 1e-3]},
        },
    }
    # Register the sweep and run the agent; each trial calls
    # train_lunar_lander with the parsed args bound via partial.
    sweep_id = wandb.sweep(sweep_config, project=args.name)
    wandb.agent(sweep_id, function=partial(train_lunar_lander, args))
diff --git a/locks/base.lock b/locks/base.lock
new file mode 100644
index 00000000..90a4e575
--- /dev/null
+++ b/locks/base.lock
@@ -0,0 +1,3283 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=base
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "atomicwrites>=1.4.0",
+// "box2d-py>=2.3.5",
+// "cloudpickle~=3.0",
+// "gsutil>=4.66",
+// "gymnasium>=0.27.1",
+// "myst-parser~=2.0",
+// "numpy<1.24",
+// "onnx>=1.10",
+// "opencv-python>=3.0",
+// "protobuf>=4.0",
+// "psutil>=5.8.0",
+// "pygame>=2.1.0",
+// "pytest-benchmark==4.0.0",
+// "pytest-cov!=2.12.1,<3.1,>=2.12",
+// "pytest-platform-markers",
+// "pytest-rerunfailures",
+// "pytest-xdist<3,>=2.5",
+// "pytest~=8.0",
+// "setuptools==59.5",
+// "tensorboard>=2.8.0",
+// "torch!=1.12.0+cpu,!=1.12.0+cu116,==1.12.0",
+// "torch==1.12.0",
+// "wandb>=0.14.0"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308",
+ "url": "https://files.pythonhosted.org/packages/a2/ad/e0d3c824784ff121c03cc031f944bc7e139a8f1870ffd2845cc2dd76f6c4/absl_py-2.1.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff",
+ "url": "https://files.pythonhosted.org/packages/7a/8f/fc001b92ecc467cc32ab38398bd0bfb45df46e7523bf33c2ad22a505f06e/absl-py-2.1.0.tar.gz"
+ }
+ ],
+ "project_name": "absl-py",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.1.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52",
+ "url": "https://files.pythonhosted.org/packages/1f/41/0852b954464d853cf315e60f096d3ff6a74aff75ad5f3388c06695d5d37f/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54",
+ "url": "https://files.pythonhosted.org/packages/0c/03/2cac72f64b2853397dd697aa4957755b85bfd3acc0ffe898571060f1db83/aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747",
+ "url": "https://files.pythonhosted.org/packages/18/02/4156ed2edca212041c7a5334b9520ff5a39e40648177e2f0ef13cac2b555/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7",
+ "url": "https://files.pythonhosted.org/packages/18/93/1f005bbe044471a0444a82cdd7356f5120b9cf94fe2c50c0cdbf28f1258b/aiohttp-3.9.3.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5",
+ "url": "https://files.pythonhosted.org/packages/43/68/86874ff80e74c2e8308af3d80345fd624b5b26197a914aa9a85cfaf5b025/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf",
+ "url": "https://files.pythonhosted.org/packages/4b/a0/8b50667a858f3e4f3fec2d471aa9e618783c0450b980e7a5bf617c1cb1f3/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c",
+ "url": "https://files.pythonhosted.org/packages/63/56/c1d39b27114595beaea776e164dbb793cf64c16331ba00cd0dc7cf0542f2/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768",
+ "url": "https://files.pythonhosted.org/packages/6d/8a/46ba295c98b24779370580b4450f80f35a1ae9e4bc9f9783ea1043d33395/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5",
+ "url": "https://files.pythonhosted.org/packages/7e/6e/6c0486fdd8918f9818e82b30898cb77ff0debccc4b09db5d9a939ed7a075/aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec",
+ "url": "https://files.pythonhosted.org/packages/86/74/b506f01485dba1c4298700156b915f3ba475be823a7b31056d40a9ac0daa/aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29",
+ "url": "https://files.pythonhosted.org/packages/93/40/d3decda219ebd5410eba627601d537ec3782efbcadba308e9ce381cc0b71/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc",
+ "url": "https://files.pythonhosted.org/packages/9a/41/d6ce776c9c22f402ad0b0cfbdc70a630512229854b0043bd0dbe6566d75d/aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b",
+ "url": "https://files.pythonhosted.org/packages/9d/79/b34562b6cce04322023112f1984380359d78bd043b8ef822c2f356b7a047/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6",
+ "url": "https://files.pythonhosted.org/packages/f5/4e/41143834b3fd5b89b404c76b5a71496bca96fbd8587c1e42a8f2b2efb8b3/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl"
+ }
+ ],
+ "project_name": "aiohttp",
+ "requires_dists": [
+ "Brotli; platform_python_implementation == \"CPython\" and extra == \"speedups\"",
+ "aiodns; (sys_platform == \"linux\" or sys_platform == \"darwin\") and extra == \"speedups\"",
+ "aiosignal>=1.1.2",
+ "async-timeout<5.0,>=4.0; python_version < \"3.11\"",
+ "attrs>=17.3.0",
+ "brotlicffi; platform_python_implementation != \"CPython\" and extra == \"speedups\"",
+ "frozenlist>=1.1.1",
+ "multidict<7.0,>=4.5",
+ "yarl<2.0,>=1.0"
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.9.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17",
+ "url": "https://files.pythonhosted.org/packages/76/ac/a7305707cb852b7e16ff80eaf5692309bde30e2b1100a1fcacdc8f731d97/aiosignal-1.3.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc",
+ "url": "https://files.pythonhosted.org/packages/ae/67/0952ed97a9793b4958e5736f6d2b346b414a2cd63e82d05940032f45b32f/aiosignal-1.3.1.tar.gz"
+ }
+ ],
+ "project_name": "aiosignal",
+ "requires_dists": [
+ "frozenlist>=1.1.0"
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.3.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92",
+ "url": "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65",
+ "url": "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz"
+ }
+ ],
+ "project_name": "alabaster",
+ "requires_dists": [],
+ "requires_python": ">=3.9",
+ "version": "0.7.16"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128",
+ "url": "https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41",
+ "url": "https://files.pythonhosted.org/packages/d7/d8/05696357e0311f5b5c316d7b95f46c669dd9c15aaeecbb48c7d0aeb88c40/appdirs-1.4.4.tar.gz"
+ }
+ ],
+ "project_name": "appdirs",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.4.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c",
+ "url": "https://files.pythonhosted.org/packages/88/8c/61021c45428ad2ef6131c6068d14f7f0968767e972e427cd87bd25c9ea7b/argcomplete-3.2.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23",
+ "url": "https://files.pythonhosted.org/packages/3c/c0/031c507227ce3b715274c1cd1f3f9baf7a0f7cec075e22c7c8b5d4e468a9/argcomplete-3.2.3.tar.gz"
+ }
+ ],
+ "project_name": "argcomplete",
+ "requires_dists": [
+ "coverage; extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pexpect; extra == \"test\"",
+ "ruff; extra == \"test\"",
+ "wheel; extra == \"test\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.2.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028",
+ "url": "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f",
+ "url": "https://files.pythonhosted.org/packages/87/d6/21b30a550dafea84b1b8eee21b5e23fa16d010ae006011221f33dcd8d7f8/async-timeout-4.0.3.tar.gz"
+ }
+ ],
+ "project_name": "async-timeout",
+ "requires_dists": [
+ "typing-extensions>=3.6.5; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11",
+ "url": "https://files.pythonhosted.org/packages/87/c6/53da25344e3e3a9c01095a89f16dbcda021c609ddb42dd6d7c0528236fb2/atomicwrites-1.4.1.tar.gz"
+ }
+ ],
+ "project_name": "atomicwrites",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1",
+ "url": "https://files.pythonhosted.org/packages/e0/44/827b2a91a5816512fcaf3cc4ebc465ccd5d598c45cefa6703fcf4a79018f/attrs-23.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "url": "https://files.pythonhosted.org/packages/e3/fc/f800d51204003fa8ae392c4e8278f256206e7a919b708eef054f5f4b650d/attrs-23.2.0.tar.gz"
+ }
+ ],
+ "project_name": "attrs",
+ "requires_dists": [
+ "attrs[tests-mypy]; extra == \"tests-no-zope\"",
+ "attrs[tests-no-zope]; extra == \"tests\"",
+ "attrs[tests]; extra == \"cov\"",
+ "attrs[tests]; extra == \"dev\"",
+ "cloudpickle; platform_python_implementation == \"CPython\" and extra == \"tests-no-zope\"",
+ "coverage[toml]>=5.3; extra == \"cov\"",
+ "furo; extra == \"docs\"",
+ "hypothesis; extra == \"tests-no-zope\"",
+ "importlib-metadata; python_version < \"3.8\"",
+ "mypy>=1.6; (platform_python_implementation == \"CPython\" and python_version >= \"3.8\") and extra == \"tests-mypy\"",
+ "myst-parser; extra == \"docs\"",
+ "pre-commit; extra == \"dev\"",
+ "pympler; extra == \"tests-no-zope\"",
+ "pytest-mypy-plugins; (platform_python_implementation == \"CPython\" and python_version >= \"3.8\") and extra == \"tests-mypy\"",
+ "pytest-xdist[psutil]; extra == \"tests-no-zope\"",
+ "pytest>=4.3.0; extra == \"tests-no-zope\"",
+ "sphinx-notfound-page; extra == \"docs\"",
+ "sphinx; extra == \"docs\"",
+ "sphinxcontrib-towncrier; extra == \"docs\"",
+ "towncrier; extra == \"docs\"",
+ "zope-interface; extra == \"docs\"",
+ "zope-interface; extra == \"tests\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "23.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287",
+ "url": "https://files.pythonhosted.org/packages/0d/35/4196b21041e29a42dc4f05866d0c94fa26c9da88ce12c38c2265e42c82fb/Babel-2.14.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363",
+ "url": "https://files.pythonhosted.org/packages/e2/80/cfbe44a9085d112e983282ee7ca4c00429bc4d1ce86ee5f4e60259ddff7f/Babel-2.14.0.tar.gz"
+ }
+ ],
+ "project_name": "babel",
+ "requires_dists": [
+ "freezegun~=1.0; extra == \"dev\"",
+ "pytest-cov; extra == \"dev\"",
+ "pytest>=6.0; extra == \"dev\"",
+ "pytz>=2015.7; python_version < \"3.9\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.14.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8",
+ "url": "https://files.pythonhosted.org/packages/23/10/c0b78c27298029e4454a472a1919bde20cb182dab1662cec7f2ca1dcc523/boto-2.49.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a",
+ "url": "https://files.pythonhosted.org/packages/c8/af/54a920ff4255664f5d238b5aebd8eedf7a07c7a5e71e27afcfe840b82f51/boto-2.49.0.tar.gz"
+ }
+ ],
+ "project_name": "boto",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.49.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "bdacfbbc56079bb317548efe49d3d5a86646885cc27f4a2ee97e4b2960921ab7",
+ "url": "https://files.pythonhosted.org/packages/98/c2/ab05b5329dc4416b5ee5530f0625a79c394a3e3c10abe0812b9345256451/box2d-py-2.3.8.tar.gz"
+ }
+ ],
+ "project_name": "box2d-py",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.3.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945",
+ "url": "https://files.pythonhosted.org/packages/fb/2b/a64c2d25a37aeb921fddb929111413049fc5f8b9a4c1aefaffaafe768d54/cachetools-5.3.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105",
+ "url": "https://files.pythonhosted.org/packages/b3/4d/27a3e6dd09011649ad5210bdf963765bc8fa81a0827a4fc01bafd2705c5b/cachetools-5.3.3.tar.gz"
+ }
+ ],
+ "project_name": "cachetools",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "5.3.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1",
+ "url": "https://files.pythonhosted.org/packages/ba/06/a07f096c664aeb9f01624f858c3add0a4e913d6c96257acb4fce61e7de14/certifi-2024.2.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
+ "url": "https://files.pythonhosted.org/packages/71/da/e94e26401b62acd6d91df2b52954aceb7f561743aa5ccc32152886c76c96/certifi-2024.2.2.tar.gz"
+ }
+ ],
+ "project_name": "certifi",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "2024.2.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d",
+ "url": "https://files.pythonhosted.org/packages/ee/68/74a2b9f9432b70d97d1184cdabf32d7803124c228adef9481d280864a4a7/cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684",
+ "url": "https://files.pythonhosted.org/packages/22/05/43cfda378da7bb0aa19b3cf34fe54f8867b0d581294216339d87deefd69c/cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7",
+ "url": "https://files.pythonhosted.org/packages/54/49/b8875986beef2e74fc668b95f2df010e354f78e009d33d95b375912810c3/cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673",
+ "url": "https://files.pythonhosted.org/packages/57/3a/c263cf4d5b02880274866968fa2bf196a02c4486248bc164732319b4a4c0/cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0",
+ "url": "https://files.pythonhosted.org/packages/68/ce/95b0bae7968c65473e1298efb042e10cafc7bafc14d9e4f154008241c91d/cffi-1.16.0.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088",
+ "url": "https://files.pythonhosted.org/packages/aa/aa/1c43e48a6f361d1529f9e4602d6992659a0107b5f21cae567e2eddcf8d66/cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9",
+ "url": "https://files.pythonhosted.org/packages/c4/01/f5116266fe80c04d4d1cc96c3d355606943f9fb604a810e0b02228a0ce19/cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614",
+ "url": "https://files.pythonhosted.org/packages/c9/7c/43d81bdd5a915923c3bad5bb4bff401ea00ccc8e28433fb6083d2e3bf58e/cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743",
+ "url": "https://files.pythonhosted.org/packages/eb/de/4f644fc78a1144a897e1f908abfb2058f7be05a8e8e4fe90b7f41e9de36b/cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896",
+ "url": "https://files.pythonhosted.org/packages/f0/31/a6503a5c4874fb4d4c2053f73f09a957cb427b6943fab5a43b8e156df397/cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "cffi",
+ "requires_dists": [
+ "pycparser"
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "url": "https://files.pythonhosted.org/packages/28/76/e6222113b83e3622caa4bb41032d0b1bf785250607392e1b778aca0b8a7d/charset_normalizer-3.3.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "url": "https://files.pythonhosted.org/packages/05/8c/eb854996d5fef5e4f33ad56927ad053d04dc820e4a3d39023f35cad72617/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "url": "https://files.pythonhosted.org/packages/2b/61/095a0aa1a84d1481998b534177c8566fdc50bb1233ea9a0478cd3cc075bd/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "url": "https://files.pythonhosted.org/packages/33/c3/3b96a435c5109dd5b6adc8a59ba1d678b302a97938f032e3770cc84cd354/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "url": "https://files.pythonhosted.org/packages/3f/ba/3f5e7be00b215fa10e13d64b1f6237eb6ebea66676a41b2bcdd09fe74323/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "url": "https://files.pythonhosted.org/packages/43/05/3bf613e719efe68fb3a77f9c536a389f35b95d75424b96b426a47a45ef1d/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "url": "https://files.pythonhosted.org/packages/46/6a/d5c26c41c49b546860cc1acabdddf48b0b3fb2685f4f5617ac59261b44ae/charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "url": "https://files.pythonhosted.org/packages/58/78/a0bc646900994df12e07b4ae5c713f2b3e5998f58b9d3720cce2aa45652f/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "url": "https://files.pythonhosted.org/packages/63/09/c1bc53dab74b1816a00d8d030de5bf98f724c52c1635e07681d312f20be8/charset-normalizer-3.3.2.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "url": "https://files.pythonhosted.org/packages/a8/31/47d018ef89f95b8aded95c589a77c072c55e94b50a41aa99c0a2008a45a4/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "url": "https://files.pythonhosted.org/packages/b8/60/e2f67915a51be59d4539ed189eb0a2b0d292bf79270410746becb32bc2c3/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "url": "https://files.pythonhosted.org/packages/cc/94/f7cf5e5134175de79ad2059edf2adce18e0685ebdb9227ff0139975d0e93/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "url": "https://files.pythonhosted.org/packages/da/f1/3702ba2a7470666a62fd81c58a4c40be00670e5006a67f4d626e57f013ae/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "url": "https://files.pythonhosted.org/packages/eb/5c/97d97248af4920bc68687d9c3b3c0f47c910e21a8ff80af4565a576bd2f0/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "url": "https://files.pythonhosted.org/packages/f6/93/bb6cbeec3bf9da9b2eba458c15966658d1daa8b982c642f81c93ad9b40e1/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ }
+ ],
+ "project_name": "charset-normalizer",
+ "requires_dists": [],
+ "requires_python": ">=3.7.0",
+ "version": "3.3.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "url": "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de",
+ "url": "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz"
+ }
+ ],
+ "project_name": "click",
+ "requires_dists": [
+ "colorama; platform_system == \"Windows\"",
+ "importlib-metadata; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "8.1.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7",
+ "url": "https://files.pythonhosted.org/packages/96/43/dae06432d0c4b1dc9e9149ad37b4ca8384cf6eb7700cd9215b177b914f0a/cloudpickle-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882",
+ "url": "https://files.pythonhosted.org/packages/c8/72/42a6570fc61b1f8913529728ad314c7cf5961540728dcad22c33fb2db6b6/cloudpickle-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "cloudpickle",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677",
+ "url": "https://files.pythonhosted.org/packages/99/15/dbcb5d0a22bf5357cf456dfd16f9ceb89c54544d6201d53bc77c75077a8e/coverage-7.4.4-pp38.pp39.pp310-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8",
+ "url": "https://files.pythonhosted.org/packages/07/58/0e076ea3a59dbfb3e981577c4e5572b432345cedd921e83006a0215b9afe/coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf",
+ "url": "https://files.pythonhosted.org/packages/10/1e/f676e1655d10bf59a6cb8de0601b7ea3c252c764782a3c2263f6d6bbcf28/coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2",
+ "url": "https://files.pythonhosted.org/packages/45/f4/10bf725621aeec5cc2fa1bc73021f5ba1ac01bcbf2c7278d8d34e1df6457/coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87",
+ "url": "https://files.pythonhosted.org/packages/50/32/829d0e709fa699dc4e498fa77a561d25fc57954ba32466279952b98f0836/coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c",
+ "url": "https://files.pythonhosted.org/packages/7e/60/62a8c190d20bf605c89a000fd6d41e3563b5792e7275b12eeefe6803b473/coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7",
+ "url": "https://files.pythonhosted.org/packages/91/4e/feff6d115dcc239e5850570ca2ea27a243c8a69596e7f1dabe54a6102d89/coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2",
+ "url": "https://files.pythonhosted.org/packages/93/41/e6e9dbb322f3c93aba7bc519b9c62846d923d7b57398bdd7eda3f0acdd11/coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49",
+ "url": "https://files.pythonhosted.org/packages/bf/d5/f809d8b630cf4c11fe490e20037a343d12a74ec2783c6cdb5aee725e7137/coverage-7.4.4.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562",
+ "url": "https://files.pythonhosted.org/packages/d3/6d/72b9f5035c50a14bc5c5fda0c28ac16c426e957a7a3debe02906b614fc4f/coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ }
+ ],
+ "project_name": "coverage",
+ "requires_dists": [
+ "tomli; python_full_version <= \"3.11.0a6\" and extra == \"toml\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "7.4.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "dc7051a0db5f2bd48665a990d3ec1cc305a466a77358ca4492826f41f283601e",
+ "url": "https://files.pythonhosted.org/packages/6b/b0/e595ce2a2527e169c3bcd6c33d2473c1918e0b7f6826a043ca1245dd4e5b/crcmod-1.7.tar.gz"
+ }
+ ],
+ "project_name": "crcmod",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c",
+ "url": "https://files.pythonhosted.org/packages/6e/8d/6cce88bdeb26b4ec14b23ab9f0c2c7c0bf33ef4904bfa952c5db1749fd37/cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc",
+ "url": "https://files.pythonhosted.org/packages/0e/1d/62a2324882c0db89f64358dadfb95cae024ee3ba9fde3d5fd4d2f58af9f5/cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1",
+ "url": "https://files.pythonhosted.org/packages/13/9e/a55763a32d340d7b06d045753c186b690e7d88780cafce5f88cb931536be/cryptography-42.0.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da",
+ "url": "https://files.pythonhosted.org/packages/2c/9c/821ef6144daf80360cf6093520bf07eec7c793103ed4b1bf3fa17d2b55d8/cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a",
+ "url": "https://files.pythonhosted.org/packages/48/c8/c0962598c43d3cff2c9d6ac66d0c612bdfb1975be8d87b8889960cf8c81d/cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1",
+ "url": "https://files.pythonhosted.org/packages/50/26/248cd8b6809635ed412159791c0d3869d8ec9dfdc57d428d500a14d425b7/cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2",
+ "url": "https://files.pythonhosted.org/packages/59/48/519ecd6b65dc9ea7c8111dfde7c9ed61aeb90fe59c6b4454900bcd3e3286/cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1",
+ "url": "https://files.pythonhosted.org/packages/5b/3d/c3c21e3afaf43bacccc3ebf61d1a0d47cef6e2607dbba01662f6f9d8fc40/cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7",
+ "url": "https://files.pythonhosted.org/packages/64/f7/d3c83c79947cc6807e6acd3b2d9a1cbd312042777bc7eec50c869913df79/cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7",
+ "url": "https://files.pythonhosted.org/packages/69/f6/630eb71f246208103ffee754b8375b6b334eeedb28620b3ae57be815eeeb/cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8",
+ "url": "https://files.pythonhosted.org/packages/6d/4d/f7c14c7a49e35df829e04d451a57b843208be7442c8e087250c195775be1/cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922",
+ "url": "https://files.pythonhosted.org/packages/7d/bc/b6c691c960b5dcd54c5444e73af7f826e62af965ba59b6d7e9928b6489a2/cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278",
+ "url": "https://files.pythonhosted.org/packages/8c/50/9185cca136596448d9cc595ae22a9bd4412ad35d812550c37c1390d54673/cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8",
+ "url": "https://files.pythonhosted.org/packages/9f/c3/3d2d9bb2ff9e15b5ababc370ae85b377eacc8e3d54fcb03225471e41a1d8/cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc",
+ "url": "https://files.pythonhosted.org/packages/c2/40/c7cb9d6819b90640ffc3c4028b28f46edc525feaeaa0d98ea23e843d446d/cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30",
+ "url": "https://files.pythonhosted.org/packages/ca/2e/9f2c49bd6a18d46c05ec098b040e7d4599c61f50ced40a39adfae3f68306/cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16",
+ "url": "https://files.pythonhosted.org/packages/d1/f1/fd98e6e79242d9aeaf6a5d49639a7e85f05741575af14d3f4a1d477f572e/cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e",
+ "url": "https://files.pythonhosted.org/packages/d4/fa/057f9d7a5364c86ccb6a4bd4e5c58920dcb66532be0cc21da3f9c7617ec3/cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d",
+ "url": "https://files.pythonhosted.org/packages/d8/b1/127ecb373d02db85a7a7de5093d7ac7b7714b8907d631f0591e8f002998d/cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec",
+ "url": "https://files.pythonhosted.org/packages/d9/f9/27dda069a9f9bfda7c75305e222d904cc2445acf5eab5c696ade57d36f1b/cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb",
+ "url": "https://files.pythonhosted.org/packages/e2/59/61b2364f2a4d3668d933531bc30d012b9b2de1e534df4805678471287d57/cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee",
+ "url": "https://files.pythonhosted.org/packages/e5/61/67e090a41c70ee526bd5121b1ccabab85c727574332d03326baaedea962d/cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4",
+ "url": "https://files.pythonhosted.org/packages/fb/0b/14509319a1b49858425553d2fb3808579cfdfe98c1d71a3f046c1b4e0108/cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ }
+ ],
+ "project_name": "cryptography",
+ "requires_dists": [
+ "bcrypt>=3.1.5; extra == \"ssh\"",
+ "build; extra == \"sdist\"",
+ "certifi; extra == \"test\"",
+ "cffi>=1.12; platform_python_implementation != \"PyPy\"",
+ "check-sdist; extra == \"pep8test\"",
+ "click; extra == \"pep8test\"",
+ "mypy; extra == \"pep8test\"",
+ "nox; extra == \"nox\"",
+ "pretend; extra == \"test\"",
+ "pyenchant>=1.6.11; extra == \"docstest\"",
+ "pytest-benchmark; extra == \"test\"",
+ "pytest-cov; extra == \"test\"",
+ "pytest-randomly; extra == \"test-randomorder\"",
+ "pytest-xdist; extra == \"test\"",
+ "pytest>=6.2.0; extra == \"test\"",
+ "readme-renderer; extra == \"docstest\"",
+ "ruff; extra == \"pep8test\"",
+ "sphinx-rtd-theme>=1.1.1; extra == \"docs\"",
+ "sphinx>=5.3.0; extra == \"docs\"",
+ "sphinxcontrib-spelling>=4.0.1; extra == \"docstest\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "42.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49",
+ "url": "https://files.pythonhosted.org/packages/f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4",
+ "url": "https://files.pythonhosted.org/packages/c5/e6/d1f6c00b7221e2d7c4b470132c931325c8b22c51ca62417e300f5ce16009/docker-pycreds-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "docker-pycreds",
+ "requires_dists": [
+ "six>=1.4.0"
+ ],
+ "requires_python": null,
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6",
+ "url": "https://files.pythonhosted.org/packages/26/87/f238c0670b94533ac0353a4e2a1a771a0cc73277b88bff23d3ae35a256c1/docutils-0.20.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b",
+ "url": "https://files.pythonhosted.org/packages/1f/53/a5da4f2c5739cf66290fac1431ee52aff6851c7c8ffd8264f13affd7bcdd/docutils-0.20.1.tar.gz"
+ }
+ ],
+ "project_name": "docutils",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.20.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14",
+ "url": "https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68",
+ "url": "https://files.pythonhosted.org/packages/8e/1c/beef724eaf5b01bb44b6338c8c3494eff7cab376fab4904cfbbc3585dc79/exceptiongroup-1.2.0.tar.gz"
+ }
+ ],
+ "project_name": "exceptiongroup",
+ "requires_dists": [
+ "pytest>=6; extra == \"test\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41",
+ "url": "https://files.pythonhosted.org/packages/e8/9c/a079946da30fac4924d92dbc617e5367d454954494cf1e71567bcc4e00ee/execnet-2.0.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af",
+ "url": "https://files.pythonhosted.org/packages/e4/c8/d382dc7a1e68a165f4a4ab612a08b20d8534a7d20cc590630b734ca0c54b/execnet-2.0.2.tar.gz"
+ }
+ ],
+ "project_name": "execnet",
+ "requires_dists": [
+ "hatch; extra == \"testing\"",
+ "pre-commit; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"testing\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.0.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "14de931035a41961f7c056361dc7f980762a143d05791ef5794a751a2caf05ae",
+ "url": "https://files.pythonhosted.org/packages/05/2c/ffc08c54c05cdce6fbed2aeebc46348dbe180c6d2c541c7af7ba0aa5f5f8/Farama_Notifications-0.0.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "13fceff2d14314cf80703c8266462ebf3733c7d165336eee998fc58e545efd18",
+ "url": "https://files.pythonhosted.org/packages/2e/2c/8384832b7a6b1fd6ba95bbdcae26e7137bb3eedc955c42fd5cdcc086cfbf/Farama-Notifications-0.0.4.tar.gz"
+ }
+ ],
+ "project_name": "farama-notifications",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "0.0.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "758819cb5d94cdedf4e836988b74de396ceacb8e2794d21f82d131fd9ee77237",
+ "url": "https://files.pythonhosted.org/packages/61/bf/fd60001b3abc5222d8eaa4a204cd8c0ae78e75adc688f33ce4bf25b7fafa/fasteners-0.19-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b4f37c3ac52d8a445af3a66bce57b33b5e90b97c696b7b984f530cf8f0ded09c",
+ "url": "https://files.pythonhosted.org/packages/5f/d4/e834d929be54bfadb1f3e3b931c38e956aaa3b235a46a3c764c26c774902/fasteners-0.19.tar.gz"
+ }
+ ],
+ "project_name": "fasteners",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "0.19"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7",
+ "url": "https://files.pythonhosted.org/packages/83/10/466fe96dae1bff622021ee687f68e5524d6392b0a2f80d05001cd3a451ba/frozenlist-1.4.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c",
+ "url": "https://files.pythonhosted.org/packages/36/ce/dc6f29e0352fa34ebe45421960c8e7352ca63b31630a576e8ffb381e9c08/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe",
+ "url": "https://files.pythonhosted.org/packages/51/47/159ac53faf8a11ae5ee8bb9db10327575557504e549cfd76f447b969aa91/frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75",
+ "url": "https://files.pythonhosted.org/packages/53/82/274e19f122e124aee6d113188615f63b0736b4242a875f482a81f91e07e2/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950",
+ "url": "https://files.pythonhosted.org/packages/6e/4f/b8a5a2f10c4a58c52a52a40cf6cf1ffcdbf3a3b64f276f41dab989bf3ab5/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac",
+ "url": "https://files.pythonhosted.org/packages/7a/35/1328c7b0f780d34f8afc1d87ebdc2bb065a123b24766a0b475f0d67da637/frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98",
+ "url": "https://files.pythonhosted.org/packages/97/94/a1305fa4716726ae0abf3b1069c2d922fcfd442538cb850f1be543f58766/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776",
+ "url": "https://files.pythonhosted.org/packages/ae/83/bcdaa437a9bd693ba658a0310f8cdccff26bd78e45fccf8e49897904a5cd/frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc",
+ "url": "https://files.pythonhosted.org/packages/b0/2c/7be3bdc59dbae444864dbd9cde82790314390ec54636baf6b9ce212627ad/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5",
+ "url": "https://files.pythonhosted.org/packages/b8/28/899931015b8cffbe155392fe9ca663f981a17e1adc69589ee0e1e7cdc9a2/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b",
+ "url": "https://files.pythonhosted.org/packages/cf/3d/2102257e7acad73efc4a0c306ad3953f68c504c16982bbdfee3ad75d8085/frozenlist-1.4.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a",
+ "url": "https://files.pythonhosted.org/packages/d4/e9/759043ab7d169b74fe05ebfbfa9ee5c881c303ebc838e308346204309cd0/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a",
+ "url": "https://files.pythonhosted.org/packages/ec/25/0c87df2e53c0c5d90f7517ca0ff7aca78d050a8ec4d32c4278e8c0e52e51/frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868",
+ "url": "https://files.pythonhosted.org/packages/f4/d6/ca016b0adcf8327714ccef969740688808c86e0287bf3a639ff582f24e82/frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad",
+ "url": "https://files.pythonhosted.org/packages/f8/ce/b9de7dc61e753dc318cf0de862181b484178210c5361eae6eaf06792264d/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ }
+ ],
+ "project_name": "frozenlist",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f4120b08b7f8d32904674c98f07d4caf4083a58343c0c0fa0016e0f0254dfe31",
+ "url": "https://files.pythonhosted.org/packages/05/e5/3162be0abab32f152f331423426471935f286dd4ad70fa704f2a34ea3c1e/gcs-oauth2-boto-plugin-3.0.tar.gz"
+ }
+ ],
+ "project_name": "gcs-oauth2-boto-plugin",
+ "requires_dists": [
+ "boto>=2.29.1",
+ "freezegun; extra == \"dev\"",
+ "google-reauth>=0.1.0",
+ "httplib2>=0.18",
+ "mock; python_version < \"3.3\" and extra == \"dev\"",
+ "oauth2client>=2.2.0",
+ "pyOpenSSL>=0.13",
+ "retry-decorator>=1.0.0",
+ "rsa==4.7.2",
+ "six>=1.12.0"
+ ],
+ "requires_python": null,
+ "version": "3.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4",
+ "url": "https://files.pythonhosted.org/packages/fd/5b/8f0c4a5bb9fd491c277c21eff7ccae71b47d43c4446c9d0c6cff2fe8c2c4/gitdb-4.0.11-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b",
+ "url": "https://files.pythonhosted.org/packages/19/0d/bbb5b5ee188dec84647a4664f3e11b06ade2bde568dbd489d9d64adef8ed/gitdb-4.0.11.tar.gz"
+ }
+ ],
+ "project_name": "gitdb",
+ "requires_dists": [
+ "smmap<6,>=3.0.1"
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.11"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff",
+ "url": "https://files.pythonhosted.org/packages/e9/bd/cc3a402a6439c15c3d4294333e13042b915bbeab54edc457c723931fed3f/GitPython-3.1.43-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c",
+ "url": "https://files.pythonhosted.org/packages/b6/a1/106fd9fa2dd989b6fb36e5893961f82992cf676381707253e0bf93eb1662/GitPython-3.1.43.tar.gz"
+ }
+ ],
+ "project_name": "gitpython",
+ "requires_dists": [
+ "coverage[toml]; extra == \"test\"",
+ "ddt!=1.4.3,>=1.1.1; extra == \"test\"",
+ "gitdb<5,>=4.0.1",
+ "mock; python_version < \"3.8\" and extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pre-commit; extra == \"test\"",
+ "pytest-cov; extra == \"test\"",
+ "pytest-instafail; extra == \"test\"",
+ "pytest-mock; extra == \"test\"",
+ "pytest-sugar; extra == \"test\"",
+ "pytest>=7.3.1; extra == \"test\"",
+ "sphinx-autodoc-typehints; extra == \"doc\"",
+ "sphinx-rtd-theme; extra == \"doc\"",
+ "sphinx==4.3.2; extra == \"doc\"",
+ "sphinxcontrib-applehelp<=1.0.4,>=1.0.2; extra == \"doc\"",
+ "sphinxcontrib-devhelp==1.0.2; extra == \"doc\"",
+ "sphinxcontrib-htmlhelp<=2.0.1,>=2.0.0; extra == \"doc\"",
+ "sphinxcontrib-qthelp==1.0.3; extra == \"doc\"",
+ "sphinxcontrib-serializinghtml==1.1.5; extra == \"doc\"",
+ "typing-extensions; python_version < \"3.11\" and extra == \"test\"",
+ "typing-extensions>=3.7.4.3; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "3.1.43"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b78f74116558e0476e19501b5b4b2ac7c93261a69c5449c861ea95cbc853c688",
+ "url": "https://files.pythonhosted.org/packages/5e/cb/cb0311f2ec371c83d6510847476c665edc9cc97564a51923557bc8f0b680/google_apitools-0.5.32-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c3763e52289f61e21c41d5531e20fbda9cc8484a088b8686fd460770db8bad13",
+ "url": "https://files.pythonhosted.org/packages/dc/eb/c26c36463a769a3a9f08847b9bf218cb629ca91877a911bbd6dcf37d9e62/google-apitools-0.5.32.tar.gz"
+ }
+ ],
+ "project_name": "google-apitools",
+ "requires_dists": [
+ "fasteners>=0.14",
+ "httplib2>=0.8",
+ "mock>=1.0.1; extra == \"testing\"",
+ "oauth2client>=1.4.12",
+ "python-gflags>=3.0.6; extra == \"cli\"",
+ "six>=1.12.0"
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7",
+ "version": "0.5.32"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415",
+ "url": "https://files.pythonhosted.org/packages/9e/8d/ddbcf81ec751d8ee5fd18ac11ff38a0e110f39dfbf105e6d9db69d556dd0/google_auth-2.29.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360",
+ "url": "https://files.pythonhosted.org/packages/18/b2/f14129111cfd61793609643a07ecb03651a71dd65c6974f63b0310ff4b45/google-auth-2.29.0.tar.gz"
+ }
+ ],
+ "project_name": "google-auth",
+ "requires_dists": [
+ "aiohttp<4.0.0.dev0,>=3.6.2; extra == \"aiohttp\"",
+ "cachetools<6.0,>=2.0.0",
+ "cryptography==36.0.2; extra == \"enterprise-cert\"",
+ "cryptography>=38.0.3; extra == \"pyopenssl\"",
+ "pyasn1-modules>=0.2.1",
+ "pyopenssl==22.0.0; extra == \"enterprise-cert\"",
+ "pyopenssl>=20.0.0; extra == \"pyopenssl\"",
+ "pyu2f>=0.1.5; extra == \"reauth\"",
+ "requests<3.0.0.dev0,>=2.20.0; extra == \"aiohttp\"",
+ "requests<3.0.0.dev0,>=2.20.0; extra == \"requests\"",
+ "rsa<5,>=3.1.4"
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.29.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cb39074488d74c8853074dde47368bbf8f739d4a4338b89aab696c895b6d8368",
+ "url": "https://files.pythonhosted.org/packages/69/e1/67ffaa3a645b86318ce30717af7145070ebccec5eef5c623ae08b86129b8/google_reauth-0.1.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9f6852a55c2c5453d581cd01f3d1278e86147c03d008409800390a834235892",
+ "url": "https://files.pythonhosted.org/packages/7d/86/74242e08d24ec4c436b8325dabbd7c60422b4829dfb1ad6ec117bdebea76/google-reauth-0.1.1.tar.gz"
+ }
+ ],
+ "project_name": "google-reauth",
+ "requires_dists": [
+ "oauth2client>=2.0.0; extra == \"oauth2client\"",
+ "pyu2f"
+ ],
+ "requires_python": null,
+ "version": "0.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70",
+ "url": "https://files.pythonhosted.org/packages/f0/fa/c1a5aaa161aee2edce9491757fc394e29415c57b0a6be8e02e208fb8b7e2/grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3",
+ "url": "https://files.pythonhosted.org/packages/00/87/727d8f65646843623064f881ee4446276d049da8bd8da6ef45edc10e6e97/grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5",
+ "url": "https://files.pythonhosted.org/packages/02/71/2a68e19dfd1276524e618149c0e34e08ea39724de10690da23678096fd92/grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d",
+ "url": "https://files.pythonhosted.org/packages/68/19/2575ce3bb14736eb9ab4b2e5026886e119dfc521488d6a2c9ad2d8b1b6d2/grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947",
+ "url": "https://files.pythonhosted.org/packages/c7/bb/d01494037edee2d8e024cac8049b169b2723186b01cebb495ccf677bbba9/grpcio-1.62.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243",
+ "url": "https://files.pythonhosted.org/packages/c9/45/e9237e5fa69bdc2cf01e6ef2be3a421cb1c2c30dbb4e0859ad9ed3bcde0c/grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea",
+ "url": "https://files.pythonhosted.org/packages/cc/fb/09c2e42f37858f699b5f56e40f2c3a45fb24b1b7a9dbed3ae1ca7e5fbac9/grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e",
+ "url": "https://files.pythonhosted.org/packages/e1/5f/19a48b32dac6a5134afbcff4a5deca46b176c58f0b1c2663e11b18db2571/grpcio-1.62.1-cp310-cp310-linux_armv7l.whl"
+ }
+ ],
+ "project_name": "grpcio",
+ "requires_dists": [
+ "grpcio-tools>=1.62.1; extra == \"protobuf\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.62.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "681a2d844acdf05fac989da6dd406944ae11cb27a4cf3c9edef74d2585ab5f05",
+ "url": "https://files.pythonhosted.org/packages/00/ce/9c70a91e1a5fc709e6acf34682b8b2499179ddc27b18b0e3670ff9c257db/gsutil-5.27.tar.gz"
+ }
+ ],
+ "project_name": "gsutil",
+ "requires_dists": [
+ "argcomplete>=1.9.4",
+ "crcmod>=1.7",
+ "fasteners>=0.14.1",
+ "gcs-oauth2-boto-plugin>=3.0",
+ "google-apitools>=0.5.32",
+ "google-auth[aiohttp]>=2.5.0",
+ "google-reauth>=0.1.0",
+ "httplib2==0.20.4",
+ "mock<=3.0.5,>=2.0.0; python_version < \"3.3\"",
+ "monotonic>=1.4",
+ "pyOpenSSL>=0.13",
+ "retry-decorator>=1.0.0",
+ "six>=1.16.0"
+ ],
+ "requires_python": "!=2.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,<4",
+ "version": "5.27"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "61c3384b5575985bb7f85e43213bcb40f36fcdff388cae6bc229304c71f2843e",
+ "url": "https://files.pythonhosted.org/packages/a8/4d/3cbfd81ed84db450dbe73a89afcd8bc405273918415649ac6683356afe92/gymnasium-0.29.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1a532752efcb7590478b1cc7aa04f608eb7a2fdad5570cd217b66b6a35274bb1",
+ "url": "https://files.pythonhosted.org/packages/0d/f8/5699ddb3e1c4f6d97b8930e573074849b921da8374fccd141f0f3a9bd713/gymnasium-0.29.1.tar.gz"
+ }
+ ],
+ "project_name": "gymnasium",
+ "requires_dists": [
+ "autorom[accept-rom-license]~=0.4.2; extra == \"accept-rom-license\"",
+ "box2d-py==2.3.5; extra == \"all\"",
+ "box2d-py==2.3.5; extra == \"box2d\"",
+ "cloudpickle>=1.2.0",
+ "cython<3; extra == \"all\"",
+ "cython<3; extra == \"mujoco-py\"",
+ "cython<3; extra == \"mujoco-py\"",
+ "farama-notifications>=0.0.1",
+ "imageio>=2.14.1; extra == \"all\"",
+ "imageio>=2.14.1; extra == \"mujoco\"",
+ "importlib-metadata>=4.8.0; python_version < \"3.10\"",
+ "jax>=0.4.0; extra == \"all\"",
+ "jax>=0.4.0; extra == \"jax\"",
+ "jaxlib>=0.4.0; extra == \"all\"",
+ "jaxlib>=0.4.0; extra == \"jax\"",
+ "lz4>=3.1.0; extra == \"all\"",
+ "lz4>=3.1.0; extra == \"other\"",
+ "matplotlib>=3.0; extra == \"all\"",
+ "matplotlib>=3.0; extra == \"other\"",
+ "moviepy>=1.0.0; extra == \"all\"",
+ "moviepy>=1.0.0; extra == \"other\"",
+ "mujoco-py<2.2,>=2.1; extra == \"all\"",
+ "mujoco-py<2.2,>=2.1; extra == \"mujoco-py\"",
+ "mujoco-py<2.2,>=2.1; extra == \"mujoco-py\"",
+ "mujoco>=2.3.3; extra == \"all\"",
+ "mujoco>=2.3.3; extra == \"mujoco\"",
+ "numpy>=1.21.0",
+ "opencv-python>=3.0; extra == \"all\"",
+ "opencv-python>=3.0; extra == \"other\"",
+ "pygame>=2.1.3; extra == \"all\"",
+ "pygame>=2.1.3; extra == \"box2d\"",
+ "pygame>=2.1.3; extra == \"classic-control\"",
+ "pygame>=2.1.3; extra == \"classic-control\"",
+ "pygame>=2.1.3; extra == \"toy-text\"",
+ "pygame>=2.1.3; extra == \"toy-text\"",
+ "pytest==7.1.3; extra == \"testing\"",
+ "scipy>=1.7.3; extra == \"testing\"",
+ "shimmy[atari]<1.0,>=0.1.0; extra == \"all\"",
+ "shimmy[atari]<1.0,>=0.1.0; extra == \"atari\"",
+ "swig==4.*; extra == \"all\"",
+ "swig==4.*; extra == \"box2d\"",
+ "torch>=1.0.0; extra == \"all\"",
+ "torch>=1.0.0; extra == \"other\"",
+ "typing-extensions>=4.3.0"
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.29.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "8b6a905cb1c79eefd03f8669fd993c36dc341f7c558f056cb5a33b5c2f458543",
+ "url": "https://files.pythonhosted.org/packages/59/0f/29725a9caf4b2618f524e0f28e2bda91aca8f880123ec77426ede6ea1ea4/httplib2-0.20.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "58a98e45b4b1a48273073f905d2961666ecf0fbac4250ea5b47aef259eb5c585",
+ "url": "https://files.pythonhosted.org/packages/9c/65/57ad964eb8d45cc3d1316ce5ada2632f74e35863a0e57a52398416a182a1/httplib2-0.20.4.tar.gz"
+ }
+ ],
+ "project_name": "httplib2",
+ "requires_dists": [
+ "pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2; python_version > \"3.0\"",
+ "pyparsing<3,>=2.4.2; python_version < \"3.0\""
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "0.20.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f",
+ "url": "https://files.pythonhosted.org/packages/c2/e7/a82b05cf63a603df6e68d59ae6a68bf5064484a0718ea5033660af4b54a9/idna-3.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca",
+ "url": "https://files.pythonhosted.org/packages/bf/3f/ea4b9117521a1e9c50344b909be7886dd00a519552724809bb1f486986c2/idna-3.6.tar.gz"
+ }
+ ],
+ "project_name": "idna",
+ "requires_dists": [],
+ "requires_python": ">=3.5",
+ "version": "3.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b",
+ "url": "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a",
+ "url": "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz"
+ }
+ ],
+ "project_name": "imagesize",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374",
+ "url": "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
+ "url": "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz"
+ }
+ ],
+ "project_name": "iniconfig",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa",
+ "url": "https://files.pythonhosted.org/packages/30/6d/6de6be2d02603ab56e72997708809e8a5b0fbfee080735109b40a3564843/Jinja2-3.1.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90",
+ "url": "https://files.pythonhosted.org/packages/b2/5e/3a21abf3cd467d7876045335e681d276ac32492febe6d98ad89562d1a7e1/Jinja2-3.1.3.tar.gz"
+ }
+ ],
+ "project_name": "jinja2",
+ "requires_dists": [
+ "Babel>=2.7; extra == \"i18n\"",
+ "MarkupSafe>=2.0"
+ ],
+ "requires_python": ">=3.7",
+ "version": "3.1.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f",
+ "url": "https://files.pythonhosted.org/packages/fc/b3/0c0c994fe49cd661084f8d5dc06562af53818cc0abefaca35bdc894577c3/Markdown-3.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224",
+ "url": "https://files.pythonhosted.org/packages/22/02/4785861427848cc11e452cc62bb541006a1087cf04a1de83aedd5530b948/Markdown-3.6.tar.gz"
+ }
+ ],
+ "project_name": "markdown",
+ "requires_dists": [
+ "coverage; extra == \"testing\"",
+ "importlib-metadata>=4.4; python_version < \"3.10\"",
+ "mdx-gh-links>=0.2; extra == \"docs\"",
+ "mkdocs-gen-files; extra == \"docs\"",
+ "mkdocs-literate-nav; extra == \"docs\"",
+ "mkdocs-nature>=0.6; extra == \"docs\"",
+ "mkdocs-section-index; extra == \"docs\"",
+ "mkdocs>=1.5; extra == \"docs\"",
+ "mkdocstrings[python]; extra == \"docs\"",
+ "pyyaml; extra == \"testing\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
+ "url": "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb",
+ "url": "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "markdown-it-py",
+ "requires_dists": [
+ "commonmark~=0.9; extra == \"compare\"",
+ "coverage; extra == \"testing\"",
+ "gprof2dot; extra == \"profiling\"",
+ "jupyter_sphinx; extra == \"rtd\"",
+ "linkify-it-py<3,>=1; extra == \"linkify\"",
+ "markdown~=3.4; extra == \"compare\"",
+ "mdit-py-plugins; extra == \"plugins\"",
+ "mdit-py-plugins; extra == \"rtd\"",
+ "mdurl~=0.1",
+ "mistletoe~=1.0; extra == \"compare\"",
+ "mistune~=2.0; extra == \"compare\"",
+ "myst-parser; extra == \"rtd\"",
+ "panflute~=2.3; extra == \"compare\"",
+ "pre-commit~=3.0; extra == \"code-style\"",
+ "psutil; extra == \"benchmarking\"",
+ "pytest-benchmark; extra == \"benchmarking\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest; extra == \"benchmarking\"",
+ "pytest; extra == \"testing\"",
+ "pyyaml; extra == \"rtd\"",
+ "sphinx-copybutton; extra == \"rtd\"",
+ "sphinx-design; extra == \"rtd\"",
+ "sphinx; extra == \"rtd\"",
+ "sphinx_book_theme; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd",
+ "url": "https://files.pythonhosted.org/packages/30/39/8d845dd7d0b0613d86e0ef89549bfb5f61ed781f59af45fc96496e897f3a/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46",
+ "url": "https://files.pythonhosted.org/packages/0a/7b/85681ae3c33c385b10ac0f8dd025c30af83c78cec1c37a6aa3b55e67f5ec/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900",
+ "url": "https://files.pythonhosted.org/packages/29/fe/a36ba8c7ca55621620b2d7c585313efd10729e63ef81e4e61f52330da781/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff",
+ "url": "https://files.pythonhosted.org/packages/60/ae/9c60231cdfda003434e8bd27282b1f4e197ad5a710c14bee8bea8a9ca4f0/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad",
+ "url": "https://files.pythonhosted.org/packages/65/dc/1510be4d179869f5dafe071aecb3f1f41b45d37c02329dfba01ff59e5ac5/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5",
+ "url": "https://files.pythonhosted.org/packages/6a/4a/a4d49415e600bacae038c67f9fecc1d5433b9d3c71a4de6f33537b89654c/MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f",
+ "url": "https://files.pythonhosted.org/packages/7c/52/2b1b570f6b8b803cef5ac28fdf78c0da318916c7d2fe9402a84d591b394c/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b",
+ "url": "https://files.pythonhosted.org/packages/87/5b/aae44c6655f3801e81aa3eef09dbbf012431987ba564d7231722f68df02d/MarkupSafe-2.1.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc",
+ "url": "https://files.pythonhosted.org/packages/e4/54/ad5eb37bf9d51800010a74e4665425831a9db4e7c4e0fde4352e391e808e/MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl"
+ }
+ ],
+ "project_name": "markupsafe",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.1.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9",
+ "url": "https://files.pythonhosted.org/packages/e5/3c/fe85f19699a7b40c8f9ce8ecee7e269b9b3c94099306df6f9891bdefeedd/mdit_py_plugins-0.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b",
+ "url": "https://files.pythonhosted.org/packages/b4/db/61960d68d5c39ff0dd48cb799a39ae4e297f6e9b96bf2f8da29d897fba0c/mdit_py_plugins-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "mdit-py-plugins",
+ "requires_dists": [
+ "coverage; extra == \"testing\"",
+ "markdown-it-py<4.0.0,>=1.0.0",
+ "myst-parser; extra == \"rtd\"",
+ "pre-commit; extra == \"code-style\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "sphinx-book-theme; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8",
+ "url": "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba",
+ "url": "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz"
+ }
+ ],
+ "project_name": "mdurl",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.1.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c",
+ "url": "https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7",
+ "url": "https://files.pythonhosted.org/packages/ea/ca/8e91948b782ddfbd194f323e7e7d9ba12e5877addf04fb2bf8fca38e86ac/monotonic-1.6.tar.gz"
+ }
+ ],
+ "project_name": "monotonic",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7",
+ "url": "https://files.pythonhosted.org/packages/fa/a2/17e1e23c6be0a916219c5292f509360c345b5fa6beeb50d743203c27532c/multidict-6.0.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef",
+ "url": "https://files.pythonhosted.org/packages/11/b7/bef33e84e3722bc42531af020d7ae8c31235ce8846bacaa852b6484cf868/multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf",
+ "url": "https://files.pythonhosted.org/packages/12/4d/99dfc36872dcc53956879f5da80a6505bbd29214cce90ce792a86e15fddf/multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc",
+ "url": "https://files.pythonhosted.org/packages/26/ce/f745a2d6104e56f7fa0d7d0756bb9ed27b771dd7b8d9d7348cd7f0f7b9de/multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae",
+ "url": "https://files.pythonhosted.org/packages/33/62/2c9085e571318d51212a6914566fe41dd0e33d7f268f7e2f23dcd3f06c56/multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f",
+ "url": "https://files.pythonhosted.org/packages/36/6d/d2f982fb485175727a193b4900b5f929d461e7aa87d6fb5a91a377fcc9c0/multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5",
+ "url": "https://files.pythonhosted.org/packages/8d/ea/0230b6faa9a5bc10650fd50afcc4a86e6c37af2fe05bc679b74d79253732/multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600",
+ "url": "https://files.pythonhosted.org/packages/a4/eb/d8e7693c9064554a1585698d1902839440c6c695b0f53c9a8be5d9d4a3b8/multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9",
+ "url": "https://files.pythonhosted.org/packages/b7/36/48097b96135017ed1b806c5ea27b6cdc2ed3a6861c5372b793563206c586/multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a",
+ "url": "https://files.pythonhosted.org/packages/bc/84/9579004267e1cc5968ef2ef8718dab9d8950d99354d85b739dd67b09c273/multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442",
+ "url": "https://files.pythonhosted.org/packages/c2/5c/1e76b2c742cb9e0248d1e8c4ed420817879230c833fa27d890b5fd22290b/multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182",
+ "url": "https://files.pythonhosted.org/packages/ce/e2/88cdfeaf03eab3498f688a19b62ca704d371cd904cb74b682541ca7b20a7/multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604",
+ "url": "https://files.pythonhosted.org/packages/d9/48/037440edb5d4a1c65e002925b2f24071d6c27754e6f4734f63037e3169d6/multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c",
+ "url": "https://files.pythonhosted.org/packages/f3/7d/fe7648d4b2f200f8854066ce6e56bf51889abfaf859814c62160dd0e32a9/multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da",
+ "url": "https://files.pythonhosted.org/packages/f9/79/722ca999a3a09a63b35aac12ec27dfa8e5bb3a38b0f857f7a1a209a88836/multidict-6.0.5.tar.gz"
+ }
+ ],
+ "project_name": "multidict",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "6.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7c36344ae39c8e740dad7fdabf5aa6fc4897a813083c6cc9990044eb93656b14",
+ "url": "https://files.pythonhosted.org/packages/1d/f6/6d61a023d758f488e36638076e8a4ec4447a2cdf86938cf6c60cf1c860e6/myst_parser-2.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ea929a67a6a0b1683cdbe19b8d2e724cd7643f8aa3e7bb18dd65beac3483bead",
+ "url": "https://files.pythonhosted.org/packages/e8/c1/48ea47b78ade0bb0281f34c9e343e3ea0c681fbc81464dbfd134e983954f/myst_parser-2.0.0.tar.gz"
+ }
+ ],
+ "project_name": "myst-parser",
+ "requires_dists": [
+ "beautifulsoup4; extra == \"testing\"",
+ "coverage[toml]; extra == \"testing\"",
+ "docutils<0.21,>=0.16",
+ "ipython; extra == \"rtd\"",
+ "jinja2",
+ "linkify-it-py~=2.0; extra == \"linkify\"",
+ "markdown-it-py~=3.0",
+ "mdit-py-plugins~=0.4",
+ "pre-commit~=3.0; extra == \"code-style\"",
+ "pydata-sphinx-theme==v0.13.0rc4; extra == \"rtd\"",
+ "pygments; extra == \"testing-docutils\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-param-files~=0.3.4; extra == \"testing\"",
+ "pytest-param-files~=0.3.4; extra == \"testing-docutils\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest<8,>=7; extra == \"testing\"",
+ "pytest<8,>=7; extra == \"testing-docutils\"",
+ "pyyaml",
+ "sphinx-autodoc2~=0.4.2; extra == \"rtd\"",
+ "sphinx-book-theme==1.0.0rc2; extra == \"rtd\"",
+ "sphinx-copybutton; extra == \"rtd\"",
+ "sphinx-design2; extra == \"rtd\"",
+ "sphinx-pyscript; extra == \"rtd\"",
+ "sphinx-pytest; extra == \"testing\"",
+ "sphinx-tippy>=0.3.1; extra == \"rtd\"",
+ "sphinx-togglebutton; extra == \"rtd\"",
+ "sphinx<8,>=6",
+ "sphinxext-opengraph~=0.8.2; extra == \"rtd\"",
+ "sphinxext-rediraffe~=0.2.7; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "2.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1",
+ "url": "https://files.pythonhosted.org/packages/e4/f3/679b3a042a127de0d7c84874913c3e23bb84646eb3bc6ecab3f8c872edc9/numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63",
+ "url": "https://files.pythonhosted.org/packages/0f/ae/dad4b8e7c65494cbbd1c063de114efaf9acd0f5f6171f044f0d4b6299787/numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a",
+ "url": "https://files.pythonhosted.org/packages/42/38/775b43da55fa7473015eddc9a819571517d9a271a9f8134f68fb9be2f212/numpy-1.23.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d",
+ "url": "https://files.pythonhosted.org/packages/4d/39/d33202cc56c21123a50c6d5e160d00c18ff685ab864dbd4bf80dd40a7af9/numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43",
+ "url": "https://files.pythonhosted.org/packages/67/6b/d7c93d458d16464da9b3f560a20c363a19e242ebbb019bd1e1d797523851/numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "numpy",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "1.23.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac",
+ "url": "https://files.pythonhosted.org/packages/95/a9/4f25a14d23f0786b64875b91784607c2277eff25d48f915e39ff0cff505a/oauth2client-4.1.3-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6",
+ "url": "https://files.pythonhosted.org/packages/a6/7b/17244b1083e8e604bf154cf9b716aecd6388acd656dd01893d0d244c94d9/oauth2client-4.1.3.tar.gz"
+ }
+ ],
+ "project_name": "oauth2client",
+ "requires_dists": [
+ "httplib2>=0.9.1",
+ "pyasn1-modules>=0.0.5",
+ "pyasn1>=0.1.7",
+ "rsa>=3.1.4",
+ "six>=1.6.1"
+ ],
+ "requires_python": null,
+ "version": "4.1.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "298f28a2b5ac09145fa958513d3d1e6b349ccf86a877dbdcccad57713fe360b3",
+ "url": "https://files.pythonhosted.org/packages/49/5f/d8e1a24247f506a77cbe22341c72ca91bea3b468c5d6bca2047d885ea3c6/onnx-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "237c6987c6c59d9f44b6136f5819af79574f8d96a760a1fa843bede11f3822f7",
+ "url": "https://files.pythonhosted.org/packages/b3/fe/0978403c8d710ece2f34006367e78de80410743fe0e7680c8f33f2dab20d/onnx-1.16.0.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "034ae21a2aaa2e9c14119a840d2926d213c27aad29e5e3edaa30145a745048e1",
+ "url": "https://files.pythonhosted.org/packages/b8/1c/50310a559857951fc6e069cf5d89deebe34287997d1c5928bca435456f62/onnx-1.16.0-cp310-cp310-macosx_10_15_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9eadbdce25b19d6216f426d6d99b8bc877a65ed92cbef9707751c6669190ba4f",
+ "url": "https://files.pythonhosted.org/packages/c8/0b/f4705e4a3fa6fd0de971302fdae17ad176b024eca8c24360f0e37c00f9df/onnx-1.16.0-cp310-cp310-macosx_10_15_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ec22a43d74eb1f2303373e2fbe7fbcaa45fb225f4eb146edfed1356ada7a9aea",
+ "url": "https://files.pythonhosted.org/packages/ef/6e/96be6692ebcd8da568084d753f386ce08efa1f99b216f346ee281edd6cc3/onnx-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "onnx",
+ "requires_dists": [
+ "Pillow; extra == \"reference\"",
+ "google-re2; extra == \"reference\"",
+ "numpy>=1.20",
+ "protobuf>=3.20.2"
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e4088cab82b66a3b37ffc452976b14a3c599269c247895ae9ceb4066d8188a57",
+ "url": "https://files.pythonhosted.org/packages/d9/64/7fdfb9386511cd6805451e012c537073a79a958a58795c4e602e538c388c/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1a9f0e6267de3a1a1db0c54213d022c7c8b5b9ca4b580e80bdc58516c922c9e1",
+ "url": "https://files.pythonhosted.org/packages/25/72/da7c69a3542071bf1e8f65336721b8b2659194425438d988f79bc14ed9cc/opencv-python-4.9.0.80.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e5f7aa4486651a6ebfa8ed4b594b65bd2d2f41beeb4241a3e4b1b85acbbbadb",
+ "url": "https://files.pythonhosted.org/packages/35/69/b657974ddcbba54d59d7d62b01e60a8b815e35f415b996e4d355be0ac7b4/opencv_python-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7b34a52e9da36dda8c151c6394aed602e4b17fa041df0b9f5b93ae10b0fcca2a",
+ "url": "https://files.pythonhosted.org/packages/52/00/2adf376707c7965bb4569f28f73fafe303c404d01047b10e3b52761be086/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "71dfb9555ccccdd77305fc3dcca5897fbf0cf28b297c51ee55e079c065d812a3",
+ "url": "https://files.pythonhosted.org/packages/77/df/b56175c3fb5bc058774bdcf35f5a71cf9c3c5b909f98a1c688eb71cd3b1f/opencv_python-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl"
+ }
+ ],
+ "project_name": "opencv-python",
+ "requires_dists": [
+ "numpy>=1.13.3; python_version < \"3.7\"",
+ "numpy>=1.17.0; python_version >= \"3.7\"",
+ "numpy>=1.17.3; python_version >= \"3.8\"",
+ "numpy>=1.19.3; python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\"",
+ "numpy>=1.19.3; python_version >= \"3.9\"",
+ "numpy>=1.21.0; python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\"",
+ "numpy>=1.21.2; python_version >= \"3.10\"",
+ "numpy>=1.21.4; python_version >= \"3.10\" and platform_system == \"Darwin\"",
+ "numpy>=1.23.5; python_version >= \"3.11\"",
+ "numpy>=1.26.0; python_version >= \"3.12\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "4.9.0.80"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9",
+ "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz"
+ }
+ ],
+ "project_name": "packaging",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "24.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
+ "url": "https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be",
+ "url": "https://files.pythonhosted.org/packages/54/c6/43f9d44d92aed815e781ca25ba8c174257e27253a94630d21be8725a2b59/pluggy-1.4.0.tar.gz"
+ }
+ ],
+ "project_name": "pluggy",
+ "requires_dists": [
+ "pre-commit; extra == \"dev\"",
+ "pytest-benchmark; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"dev\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9",
+ "url": "https://files.pythonhosted.org/packages/f4/d5/db585a5e8d64af6b384c7b3a63da13df2ff86933e486ba78431736c67c25/protobuf-4.25.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d",
+ "url": "https://files.pythonhosted.org/packages/15/db/7f731524fe0e56c6b2eb57d05b55d3badd80ef7d1f1ed59db191b2fdd8ab/protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c",
+ "url": "https://files.pythonhosted.org/packages/5e/d8/65adb47d921ce828ba319d6587aa8758da022de509c3862a70177a958844/protobuf-4.25.3.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019",
+ "url": "https://files.pythonhosted.org/packages/d8/82/aefe901174b5a618daee511ddd00342193c1b545e3cd6a2cd6df9ba452b5/protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c",
+ "url": "https://files.pythonhosted.org/packages/f3/bf/26deba06a4c910a85f78245cac7698f67cedd7efe00d04f6b3e1b3506a59/protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl"
+ }
+ ],
+ "project_name": "protobuf",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "4.25.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8",
+ "url": "https://files.pythonhosted.org/packages/05/33/2d74d588408caedd065c2497bdb5ef83ce6082db01289a1e1147f6639802/psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c",
+ "url": "https://files.pythonhosted.org/packages/90/c7/6dc0a455d111f68ee43f27793971cf03fe29b6ef972042549db29eec39a2/psutil-5.9.8.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421",
+ "url": "https://files.pythonhosted.org/packages/b3/bd/28c5f553667116b2598b9cc55908ec435cb7f77a34f2bff3e3ca765b0f78/psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4",
+ "url": "https://files.pythonhosted.org/packages/c5/4f/0e22aaa246f96d6ac87fe5ebb9c5a693fbe8877f537a1022527c47ca43c5/psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81",
+ "url": "https://files.pythonhosted.org/packages/e7/e3/07ae864a636d70a8a6f58da27cb1179192f1140d5d1da10886ade9405797/psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl"
+ }
+ ],
+ "project_name": "psutil",
+ "requires_dists": [
+ "enum34; python_version <= \"3.4\" and extra == \"test\"",
+ "ipaddress; python_version < \"3.0\" and extra == \"test\"",
+ "mock; python_version < \"3.0\" and extra == \"test\"",
+ "pywin32; sys_platform == \"win32\" and extra == \"test\"",
+ "wmi; sys_platform == \"win32\" and extra == \"test\""
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7",
+ "version": "5.9.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378",
+ "url": "https://files.pythonhosted.org/packages/f6/f0/10642828a8dfb741e5f3fbaac830550a518a775c7fff6f04a007259b0548/py-1.11.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719",
+ "url": "https://files.pythonhosted.org/packages/98/ff/fec109ceb715d2a6b4c4a85a61af3b40c723a961e8828319fbcb15b868dc/py-1.11.0.tar.gz"
+ }
+ ],
+ "project_name": "py",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7",
+ "version": "1.11.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5",
+ "url": "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690",
+ "url": "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz"
+ }
+ ],
+ "project_name": "py-cpuinfo",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "9.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473",
+ "url": "https://files.pythonhosted.org/packages/23/7e/5f50d07d5e70a2addbccd90ac2950f81d1edd0783630651d9268d7f1db49/pyasn1-0.6.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c",
+ "url": "https://files.pythonhosted.org/packages/4a/a3/d2157f333900747f20984553aca98008b6dc843eb62f3a36030140ccec0d/pyasn1-0.6.0.tar.gz"
+ }
+ ],
+ "project_name": "pyasn1",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "0.6.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b",
+ "url": "https://files.pythonhosted.org/packages/13/68/8906226b15ef38e71dc926c321d2fe99de8048e9098b5dfd38343011c886/pyasn1_modules-0.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6",
+ "url": "https://files.pythonhosted.org/packages/f7/00/e7bd1dec10667e3f2be602686537969a7ac92b0a7c5165be2e5875dc3971/pyasn1_modules-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "pyasn1-modules",
+ "requires_dists": [
+ "pyasn1<0.7.0,>=0.4.6"
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc",
+ "url": "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6",
+ "url": "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz"
+ }
+ ],
+ "project_name": "pycparser",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "2.22"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "03879ec299c9f4ba23901b2649a96b2143f0a5d787f0b6c39469989e2320caf1",
+ "url": "https://files.pythonhosted.org/packages/c8/c7/0d77e0e327bf09c12f445f92f5bad0b447375d7b836c5bac5255ead8436f/pygame-2.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a0769eb628c818761755eb0a0ca8216b95270ea8cbcbc82227e39ac9644643da",
+ "url": "https://files.pythonhosted.org/packages/14/54/dc58f8b70e08b6706b158f0c70f86eb1594db6797cb89383f062ad6a304d/pygame-2.5.2-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f30d1618672a55e8c6669281ba264464b3ab563158e40d89e8c8b3faa0febebd",
+ "url": "https://files.pythonhosted.org/packages/5b/91/09f93d428b483c451eacee9ba1e04a1e9999751c80bf6236b2bdc8e19b1e/pygame-2.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ed9a3d98adafa0805ccbaaff5d2996a2b5795381285d8437a4a5d248dbd12b4a",
+ "url": "https://files.pythonhosted.org/packages/65/b6/67e33add85b0f7ac901c6fb89a57f97fdfd67c8834f425a97abaf4a60191/pygame-2.5.2-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c1b89eb5d539e7ac5cf75513125fb5f2f0a2d918b1fd6e981f23bf0ac1b1c24a",
+ "url": "https://files.pythonhosted.org/packages/c6/aa/2c0c867d6cff00966cfc2152b25f61599f87e88b239e4dcb8ad5357f0f69/pygame-2.5.2.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "39690e9be9baf58b7359d1f3b2336e1fd6f92fedbbce42987be5df27f8d30718",
+ "url": "https://files.pythonhosted.org/packages/e8/6e/31d7a068edbb029e5a35d8fe4572b67e00705cb8f6dad650397bc417b6b3/pygame-2.5.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl"
+ }
+ ],
+ "project_name": "pygame",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "2.5.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c",
+ "url": "https://files.pythonhosted.org/packages/97/9c/372fef8377a6e340b1704768d20daaded98bf13282b5327beb2e2fe2c7ef/pygments-2.17.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367",
+ "url": "https://files.pythonhosted.org/packages/55/59/8bccf4157baf25e4aa5a0bb7fa3ba8600907de105ebc22b0c78cfbf6f565/pygments-2.17.2.tar.gz"
+ }
+ ],
+ "project_name": "pygments",
+ "requires_dists": [
+ "colorama>=0.4.6; extra == \"windows-terminal\"",
+ "importlib-metadata; python_version < \"3.8\" and extra == \"plugins\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.17.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad",
+ "url": "https://files.pythonhosted.org/packages/54/a7/2104f674a5a6845b04c8ff01659becc6b8978ca410b82b94287e0b1e018b/pyOpenSSL-24.1.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f",
+ "url": "https://files.pythonhosted.org/packages/91/a8/cbeec652549e30103b9e6147ad433405fdd18807ac2d54e6dbb73184d8a1/pyOpenSSL-24.1.0.tar.gz"
+ }
+ ],
+ "project_name": "pyopenssl",
+ "requires_dists": [
+ "cryptography<43,>=41.0.5",
+ "pretend; extra == \"test\"",
+ "pytest-rerunfailures; extra == \"test\"",
+ "pytest>=3.0.1; extra == \"test\"",
+ "sphinx!=5.2.0,!=5.2.0.post0,!=7.2.5; extra == \"docs\"",
+ "sphinx-rtd-theme; extra == \"docs\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "24.1.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742",
+ "url": "https://files.pythonhosted.org/packages/9d/ea/6d76df31432a0e6fdf81681a895f009a4bb47b3c39036db3e1b528191d52/pyparsing-3.1.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
+ "url": "https://files.pythonhosted.org/packages/46/3a/31fd28064d016a2182584d579e033ec95b809d8e220e74c4af6f0f2e8842/pyparsing-3.1.2.tar.gz"
+ }
+ ],
+ "project_name": "pyparsing",
+ "requires_dists": [
+ "jinja2; extra == \"diagrams\"",
+ "railroad-diagrams; extra == \"diagrams\""
+ ],
+ "requires_python": ">=3.6.8",
+ "version": "3.1.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7",
+ "url": "https://files.pythonhosted.org/packages/4d/7e/c79cecfdb6aa85c6c2e3cf63afc56d0f165f24f5c66c03c695c4d9b84756/pytest-8.1.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044",
+ "url": "https://files.pythonhosted.org/packages/30/b7/7d44bbc04c531dcc753056920e0988032e5871ac674b5a84cb979de6e7af/pytest-8.1.1.tar.gz"
+ }
+ ],
+ "project_name": "pytest",
+ "requires_dists": [
+ "argcomplete; extra == \"testing\"",
+ "attrs>=19.2; extra == \"testing\"",
+ "colorama; sys_platform == \"win32\"",
+ "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"",
+ "hypothesis>=3.56; extra == \"testing\"",
+ "iniconfig",
+ "mock; extra == \"testing\"",
+ "packaging",
+ "pluggy<2.0,>=1.4",
+ "pygments>=2.7.2; extra == \"testing\"",
+ "requests; extra == \"testing\"",
+ "setuptools; extra == \"testing\"",
+ "tomli>=1; python_version < \"3.11\"",
+ "xmlschema; extra == \"testing\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "8.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6",
+ "url": "https://files.pythonhosted.org/packages/4d/a1/3b70862b5b3f830f0422844f25a823d0470739d994466be9dbbbb414d85a/pytest_benchmark-4.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1",
+ "url": "https://files.pythonhosted.org/packages/28/08/e6b0067efa9a1f2a1eb3043ecd8a0c48bfeb60d3255006dcc829d72d5da2/pytest-benchmark-4.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-benchmark",
+ "requires_dists": [
+ "aspectlib; extra == \"aspect\"",
+ "elasticsearch; extra == \"elasticsearch\"",
+ "pathlib2; python_version < \"3.4\"",
+ "py-cpuinfo",
+ "pygal; extra == \"histogram\"",
+ "pygaljs; extra == \"histogram\"",
+ "pytest>=3.8",
+ "statistics; python_version < \"3.4\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6",
+ "url": "https://files.pythonhosted.org/packages/20/49/b3e0edec68d81846f519c602ac38af9db86e1e71275528b3e814ae236063/pytest_cov-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470",
+ "url": "https://files.pythonhosted.org/packages/61/41/e046526849972555928a6d31c2068410e47a31fb5ab0a77f868596811329/pytest-cov-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-cov",
+ "requires_dists": [
+ "coverage[toml]>=5.2.1",
+ "fields; extra == \"testing\"",
+ "hunter; extra == \"testing\"",
+ "process-tests; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing\"",
+ "pytest>=4.6",
+ "six; extra == \"testing\"",
+ "virtualenv; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "810958f66a91afb1a1e2ae83089d8dc1cd2437ac96b12963042fbb9fb4d16af0",
+ "url": "https://files.pythonhosted.org/packages/f4/af/9c0bda43e486a3c9bf1e0f876d0f241bc3f229d7d65d09331a0868db9629/pytest_forked-1.6.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4dafd46a9a600f65d822b8f605133ecf5b3e1941ebb3588e943b4e3eb71a5a3f",
+ "url": "https://files.pythonhosted.org/packages/8c/c9/93ad2ba2413057ee694884b88cf7467a46c50c438977720aeac26e73fdb7/pytest-forked-1.6.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-forked",
+ "requires_dists": [
+ "py",
+ "pytest>=3.10"
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.6.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b05cb0bcd51a7cd0375bfbeeb3eaeb01fc85665e45b21fc9494a8a19137f4d32",
+ "url": "https://files.pythonhosted.org/packages/c5/d1/2ef73ee137add043df444fddf1c851b8ca70ab9c7b7f18e18c4c244fec6d/pytest_platform_markers-1.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "07ea92669114ba8083b6653995b5a9ab14d57ca16307fd2af22d6f7d295160e4",
+ "url": "https://files.pythonhosted.org/packages/b3/e7/174a22a8cb4cf4b64456cd799f472bb90206f1ce8d537edbc1d9659689a3/pytest-platform-markers-1.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-platform-markers",
+ "requires_dists": [
+ "pytest>=3.6.0"
+ ],
+ "requires_python": null,
+ "version": "1.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4197bdd2eaeffdbf50b5ea6e7236f47ff0e44d1def8dae08e409f536d84e7b32",
+ "url": "https://files.pythonhosted.org/packages/dc/e7/e75bd157331aecc190f5f8950d7ea3d2cf56c3c57fb44da70e60b221133f/pytest_rerunfailures-14.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4a400bcbcd3c7a4ad151ab8afac123d90eca3abe27f98725dc4d9702887d2e92",
+ "url": "https://files.pythonhosted.org/packages/cc/a4/6de45fe850759e94aa9a55cda807c76245af1941047294df26c851dfb4a9/pytest-rerunfailures-14.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-rerunfailures",
+ "requires_dists": [
+ "packaging>=17.1",
+ "pytest>=7.2"
+ ],
+ "requires_python": ">=3.8",
+ "version": "14.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65",
+ "url": "https://files.pythonhosted.org/packages/21/08/b1945d4b4986eb1aa10cf84efc5293bba39da80a2f95db3573dd90678408/pytest_xdist-2.5.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf",
+ "url": "https://files.pythonhosted.org/packages/5d/43/9dbc32d297d6eae85d6c05dc8e8d3371061bd6cbe56a2f645d9ea4b53d9b/pytest-xdist-2.5.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-xdist",
+ "requires_dists": [
+ "execnet>=1.1",
+ "filelock; extra == \"testing\"",
+ "psutil>=3.0; extra == \"psutil\"",
+ "pytest-forked",
+ "pytest>=6.2.0",
+ "setproctitle; extra == \"setproctitle\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "2.5.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a3caa3a11842fc7d5746376f37195e6af5f17c0a15737538bb1cebf656fb306b",
+ "url": "https://files.pythonhosted.org/packages/29/b5/c1209e6cb77647bc2c9a6a1a953355720f34f3b006b725e303c70f3c0786/pyu2f-0.1.5.tar.gz"
+ }
+ ],
+ "project_name": "pyu2f",
+ "requires_dists": [
+ "six"
+ ],
+ "requires_python": null,
+ "version": "0.1.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290",
+ "url": "https://files.pythonhosted.org/packages/07/91/45dfd0ef821a7f41d9d0136ea3608bb5b1653e42fd56a7970532cb5c003f/PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
+ "url": "https://files.pythonhosted.org/packages/29/61/bf33c6c85c55bc45a29eee3195848ff2d518d84735eb0e2d8cb42e0d285e/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f",
+ "url": "https://files.pythonhosted.org/packages/5b/07/10033a403b23405a8fc48975444463d3d10a5c2736b7eb2550b07b367429/PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
+ "url": "https://files.pythonhosted.org/packages/96/06/4beb652c0fe16834032e54f0956443d4cc797fe645527acee59e7deaa0a2/PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
+ "url": "https://files.pythonhosted.org/packages/ba/91/090818dfa62e85181f3ae23dd1e8b7ea7f09684864a900cab72d29c57346/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
+ "url": "https://files.pythonhosted.org/packages/cd/e5/af35f7ea75cf72f2cd079c95ee16797de7cd71f29ea7c68ae5ce7be1eda0/PyYAML-6.0.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
+ "url": "https://files.pythonhosted.org/packages/f1/26/55e4f21db1f72eaef092015d9017c11510e7e6301c62a6cfee91295d13c6/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "pyyaml",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "6.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "url": "https://files.pythonhosted.org/packages/70/8e/0e2d847013cb52cd35b38c009bb167a1a26b2ce6cd6965bf26b47bc0bf44/requests-2.31.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1",
+ "url": "https://files.pythonhosted.org/packages/9d/be/10918a2eac4ae9f02f6cfe6414b7a155ccd8f7f9d4380d62fd5b955065c3/requests-2.31.0.tar.gz"
+ }
+ ],
+ "project_name": "requests",
+ "requires_dists": [
+ "PySocks!=1.5.7,>=1.5.6; extra == \"socks\"",
+ "certifi>=2017.4.17",
+ "chardet<6,>=3.0.2; extra == \"use-chardet-on-py3\"",
+ "charset-normalizer<4,>=2",
+ "idna<4,>=2.5",
+ "urllib3<3,>=1.21.1"
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.31.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e1e8ad02e518fe11073f2ea7d80b6b8be19daa27a60a1838aff7c731ddcf2ebe",
+ "url": "https://files.pythonhosted.org/packages/6e/e6/bedc75b264cbcbf6e6d0e5071d96d739f540fc09be31744a7a8824c02a8e/retry_decorator-1.1.1.tar.gz"
+ }
+ ],
+ "project_name": "retry-decorator",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2",
+ "url": "https://files.pythonhosted.org/packages/e9/93/0c0f002031f18b53af7a6166103c02b9c0667be528944137cc954ec921b3/rsa-4.7.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9",
+ "url": "https://files.pythonhosted.org/packages/db/b5/475c45a58650b0580421746504b680cd2db4e81bc941e94ca53785250269/rsa-4.7.2.tar.gz"
+ }
+ ],
+ "project_name": "rsa",
+ "requires_dists": [
+ "pyasn1>=0.1.3"
+ ],
+ "requires_python": "<4,>=3.5",
+ "version": "4.7.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "5f75eb91d8ab6037c754a87b8501cc581b2827e923682f593bed3539ce5b3999",
+ "url": "https://files.pythonhosted.org/packages/b1/f8/2038661bc32579d0c11191fc1093e49db590bfb6e63d501d7995fb798d62/sentry_sdk-1.44.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "24e6a53eeabffd2f95d952aa35ca52f0f4201d17f820ac9d3ff7244c665aaf68",
+ "url": "https://files.pythonhosted.org/packages/fd/72/85a8bc961d9160ac8c9f0a6d39dbdad21795d55c7b02a433bd0ffb75c037/sentry-sdk-1.44.1.tar.gz"
+ }
+ ],
+ "project_name": "sentry-sdk",
+ "requires_dists": [
+ "aiohttp>=3.5; extra == \"aiohttp\"",
+ "apache-beam>=2.12; extra == \"beam\"",
+ "arq>=0.23; extra == \"arq\"",
+ "asttokens; extra == \"pure-eval\"",
+ "asyncpg>=0.23; extra == \"asyncpg\"",
+ "blinker>=1.1; extra == \"flask\"",
+ "blinker>=1.1; extra == \"quart\"",
+ "bottle>=0.12.13; extra == \"bottle\"",
+ "celery-redbeat>=2; extra == \"celery-redbeat\"",
+ "celery>=3; extra == \"celery\"",
+ "certifi",
+ "chalice>=1.16.0; extra == \"chalice\"",
+ "clickhouse-driver>=0.2.0; extra == \"clickhouse-driver\"",
+ "django>=1.8; extra == \"django\"",
+ "executing; extra == \"pure-eval\"",
+ "falcon>=1.4; extra == \"falcon\"",
+ "fastapi>=0.79.0; extra == \"fastapi\"",
+ "flask>=0.11; extra == \"flask\"",
+ "grpcio>=1.21.1; extra == \"grpcio\"",
+ "httpx>=0.16.0; extra == \"httpx\"",
+ "huey>=2; extra == \"huey\"",
+ "loguru>=0.5; extra == \"loguru\"",
+ "markupsafe; extra == \"flask\"",
+ "openai>=1.0.0; extra == \"openai\"",
+ "opentelemetry-distro>=0.35b0; extra == \"opentelemetry\"",
+ "opentelemetry-distro~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-aiohttp-client~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-django~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-fastapi~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-flask~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-requests~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-sqlite3~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-urllib~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "pure-eval; extra == \"pure-eval\"",
+ "pymongo>=3.1; extra == \"pymongo\"",
+ "pyspark>=2.4.4; extra == \"pyspark\"",
+ "quart>=0.16.1; extra == \"quart\"",
+ "rq>=0.6; extra == \"rq\"",
+ "sanic>=0.8; extra == \"sanic\"",
+ "sqlalchemy>=1.2; extra == \"sqlalchemy\"",
+ "starlette>=0.19.1; extra == \"starlette\"",
+ "starlite>=1.48; extra == \"starlite\"",
+ "tiktoken>=0.3.0; extra == \"openai\"",
+ "tornado>=5; extra == \"tornado\"",
+ "urllib3>=1.25.7; python_version <= \"3.4\"",
+ "urllib3>=1.26.11; python_version >= \"3.6\"",
+ "urllib3>=1.26.9; python_version == \"3.5\""
+ ],
+ "requires_python": null,
+ "version": "1.44.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5",
+ "url": "https://files.pythonhosted.org/packages/70/1d/3b2249c833c7d52b59ff0602d760df0543dc1e6c272f145b949750edeb01/setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0",
+ "url": "https://files.pythonhosted.org/packages/24/55/8b369b56007a5a2c7594cdb58cd4a09d7cca65b28483bb5582c6975663f1/setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9",
+ "url": "https://files.pythonhosted.org/packages/35/30/ac99ecae8458ba995f85aa3aa911004679b405922e1487b0fba6fe8f4d37/setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d",
+ "url": "https://files.pythonhosted.org/packages/3d/92/17168f4bb1a695094e93e73a1ef1f7b89953a6d91e8a7699a2c840ba712f/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754",
+ "url": "https://files.pythonhosted.org/packages/4f/cc/c51e6371f640a9adbe693ddb89d68596e5a8e4b5e05b4d3c65ec504e2f6d/setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85",
+ "url": "https://files.pythonhosted.org/packages/69/a7/2a77b68c11db87c22350381d6ce022011eb420076790e0e3697153e89458/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39",
+ "url": "https://files.pythonhosted.org/packages/79/e7/54b36be02aee8ad573be68f6f46fd62838735c2f007b22df50eb5e13a20d/setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f",
+ "url": "https://files.pythonhosted.org/packages/87/7b/69bdc791001250dff279a1a81904f3f563caece4fa1607a95b9fd5197d6e/setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5",
+ "url": "https://files.pythonhosted.org/packages/94/ad/4166381d79f6ae8138be9b49f05d193a8deb748debace9896dffad45a753/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3",
+ "url": "https://files.pythonhosted.org/packages/9c/56/6f4a4e80b2810eb7ea9ab355022c780ef80457de368ab5b6b21b795e4f05/setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0",
+ "url": "https://files.pythonhosted.org/packages/9d/09/bc108723bbfb7c50c22fdf22191f3e32abcb5d6f46610018030b25f601c5/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452",
+ "url": "https://files.pythonhosted.org/packages/c3/7d/d03f319e0f3b3a6e98731a56cd4d81478ed0c12531b822fd2c728b948edb/setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74",
+ "url": "https://files.pythonhosted.org/packages/d0/ae/010811bece9a59a8bba131d9e7acea9c2e3c3cbf544bf06d8b10b8c28ff5/setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae",
+ "url": "https://files.pythonhosted.org/packages/ff/e1/b16b16a1aa12174349d15b73fd4b87e641a8ae3fb1163e80938dbbf6ae98/setproctitle-1.3.3.tar.gz"
+ }
+ ],
+ "project_name": "setproctitle",
+ "requires_dists": [
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.3.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6d10741ff20b89cd8c6a536ee9dc90d3002dec0226c78fb98605bfb9ef8a7adf",
+ "url": "https://files.pythonhosted.org/packages/40/a9/7deac76c58fa47c95360116a06b53b9b62f6db11336fe61b6ab53784d98b/setuptools-59.5.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d144f85102f999444d06f9c0e8c737fd0194f10f2f7e5fdb77573f6e2fa4fad0",
+ "url": "https://files.pythonhosted.org/packages/e6/e2/f2bfdf364e016f7a464db709ea40d1101c4c5a463dd7019dae0a42dbd1c6/setuptools-59.5.0.tar.gz"
+ }
+ ],
+ "project_name": "setuptools",
+ "requires_dists": [
+ "flake8-2020; extra == \"testing\"",
+ "furo; extra == \"docs\"",
+ "jaraco.envs>=2.2; extra == \"testing\"",
+ "jaraco.packaging>=8.2; extra == \"docs\"",
+ "jaraco.path>=3.2.0; extra == \"testing\"",
+ "jaraco.tidelift>=1.4; extra == \"docs\"",
+ "mock; extra == \"testing\"",
+ "paver; extra == \"testing\"",
+ "pip>=19.1; extra == \"testing\"",
+ "pygments-github-lexers==0.0.5; extra == \"docs\"",
+ "pytest-black>=0.3.7; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-checkdocs>=2.4; extra == \"testing\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-enabler>=1.0.1; extra == \"testing\"",
+ "pytest-flake8; extra == \"testing\"",
+ "pytest-mypy; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-virtualenv>=1.2.7; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing\"",
+ "pytest>=6; extra == \"testing\"",
+ "rst.linker>=1.9; extra == \"docs\"",
+ "sphinx-inline-tabs; extra == \"docs\"",
+ "sphinx; extra == \"docs\"",
+ "sphinx; extra == \"testing\"",
+ "sphinxcontrib-towncrier; extra == \"docs\"",
+ "virtualenv>=13.0.0; extra == \"testing\"",
+ "wheel; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "59.5.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254",
+ "url": "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
+ "url": "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz"
+ }
+ ],
+ "project_name": "six",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da",
+ "url": "https://files.pythonhosted.org/packages/a7/a5/10f97f73544edcdef54409f1d839f6049a0d79df68adbc1ceb24d1aaca42/smmap-5.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62",
+ "url": "https://files.pythonhosted.org/packages/88/04/b5bf6d21dc4041000ccba7eb17dd3055feb237e7ffc2c20d3fae3af62baa/smmap-5.0.1.tar.gz"
+ }
+ ],
+ "project_name": "smmap",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "5.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a",
+ "url": "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1",
+ "url": "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz"
+ }
+ ],
+ "project_name": "snowballstemmer",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "1e09160a40b956dc623c910118fa636da93bd3ca0b9876a7b3df90f07d691560",
+ "url": "https://files.pythonhosted.org/packages/b2/b6/8ed35256aa530a9d3da15d20bdc0ba888d5364441bb50a5a83ee7827affe/sphinx-7.2.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9a5160e1ea90688d5963ba09a2dcd8bdd526620edbb65c328728f1b2228d5ab5",
+ "url": "https://files.pythonhosted.org/packages/73/8e/6e51da4b26665b4b92b1944ea18b2d9c825e753e19180cc5bdc818d0ed3b/sphinx-7.2.6.tar.gz"
+ }
+ ],
+ "project_name": "sphinx",
+ "requires_dists": [
+ "Jinja2>=3.0",
+ "Pygments>=2.14",
+ "alabaster<0.8,>=0.7",
+ "babel>=2.9",
+ "colorama>=0.4.5; sys_platform == \"win32\"",
+ "cython>=3.0; extra == \"test\"",
+ "docutils-stubs; extra == \"lint\"",
+ "docutils<0.21,>=0.18.1",
+ "filelock; extra == \"test\"",
+ "flake8-simplify; extra == \"lint\"",
+ "flake8>=3.5.0; extra == \"lint\"",
+ "html5lib; extra == \"test\"",
+ "imagesize>=1.3",
+ "importlib-metadata>=4.8; python_version < \"3.10\"",
+ "isort; extra == \"lint\"",
+ "mypy>=0.990; extra == \"lint\"",
+ "packaging>=21.0",
+ "pytest>=4.6; extra == \"test\"",
+ "requests>=2.25.0",
+ "ruff; extra == \"lint\"",
+ "setuptools>=67.0; extra == \"test\"",
+ "snowballstemmer>=2.0",
+ "sphinx-lint; extra == \"lint\"",
+ "sphinxcontrib-applehelp",
+ "sphinxcontrib-devhelp",
+ "sphinxcontrib-htmlhelp>=2.0.0",
+ "sphinxcontrib-jsmath",
+ "sphinxcontrib-qthelp",
+ "sphinxcontrib-serializinghtml>=1.1.9",
+ "sphinxcontrib-websupport; extra == \"docs\"",
+ "types-requests; extra == \"lint\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "7.2.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4",
+ "url": "https://files.pythonhosted.org/packages/56/89/fea3fbf6785b388e6cb8a1beaf62f96e80b37311bdeed6e133388a732426/sphinxcontrib_applehelp-1.0.8-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619",
+ "url": "https://files.pythonhosted.org/packages/26/6b/68f470fc337ed24043fec987b101f25b35010970bd958970c2ae5990859f/sphinxcontrib_applehelp-1.0.8.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-applehelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f",
+ "url": "https://files.pythonhosted.org/packages/a0/52/1049d918d1d1c72857d285c3f0c64c1cbe0be394ce1c93a3d2aa4f39fe3b/sphinxcontrib_devhelp-1.0.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3",
+ "url": "https://files.pythonhosted.org/packages/c7/a1/80b7e9f677abc673cb9320bf255ad4e08931ccbc2e66bde4b59bad3809ad/sphinxcontrib_devhelp-1.0.6.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-devhelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04",
+ "url": "https://files.pythonhosted.org/packages/c2/e9/74c4cda5b409af3222fda38f0774e616011bc935f639dbc0da5ca2d1be7d/sphinxcontrib_htmlhelp-2.0.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015",
+ "url": "https://files.pythonhosted.org/packages/8a/03/2f9d699fbfdf03ecb3b6d0e2a268a8998d009f2a9f699c2dcc936899257d/sphinxcontrib_htmlhelp-2.0.5.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-htmlhelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "html5lib; extra == \"test\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "2.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178",
+ "url": "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8",
+ "url": "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-jsmath",
+ "requires_dists": [
+ "flake8; extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.5",
+ "version": "1.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182",
+ "url": "https://files.pythonhosted.org/packages/80/b3/1beac14a88654d2e5120d0143b49be5ad450b86eb1963523d8dbdcc51eb2/sphinxcontrib_qthelp-1.0.7-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6",
+ "url": "https://files.pythonhosted.org/packages/ac/29/705cd4e93e98a8473d62b5c32288e6de3f0c9660d3c97d4e80d3dbbad82b/sphinxcontrib_qthelp-1.0.7.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-qthelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7",
+ "url": "https://files.pythonhosted.org/packages/38/24/228bb903ea87b9e08ab33470e6102402a644127108c7117ac9c00d849f82/sphinxcontrib_serializinghtml-1.1.10-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f",
+ "url": "https://files.pythonhosted.org/packages/54/13/8dd7a7ed9c58e16e20c7f4ce8e4cb6943eb580955236d0c0d00079a73c49/sphinxcontrib_serializinghtml-1.1.10.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-serializinghtml",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.1.10"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45",
+ "url": "https://files.pythonhosted.org/packages/3a/d0/b97889ffa769e2d1fdebb632084d5e8b53fc299d43a537acee7ec0c021a3/tensorboard-2.16.2-py3-none-any.whl"
+ }
+ ],
+ "project_name": "tensorboard",
+ "requires_dists": [
+ "absl-py>=0.4",
+ "grpcio>=1.48.2",
+ "markdown>=2.6.8",
+ "numpy>=1.12.0",
+ "protobuf!=4.24.0,>=3.19.6",
+ "setuptools>=41.0.0",
+ "six>1.9",
+ "tensorboard-data-server<0.8.0,>=0.7.0",
+ "werkzeug>=1.0.1"
+ ],
+ "requires_python": ">=3.9",
+ "version": "2.16.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530",
+ "url": "https://files.pythonhosted.org/packages/73/c6/825dab04195756cf8ff2e12698f22513b3db2f64925bdd41671bfb33aaa5/tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb",
+ "url": "https://files.pythonhosted.org/packages/7a/13/e503968fefabd4c6b2650af21e110aa8466fe21432cd7c43a84577a89438/tensorboard_data_server-0.7.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60",
+ "url": "https://files.pythonhosted.org/packages/b7/85/dabeaf902892922777492e1d253bb7e1264cadce3cea932f7ff599e53fea/tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl"
+ }
+ ],
+ "project_name": "tensorboard-data-server",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.7.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "url": "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f",
+ "url": "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz"
+ }
+ ],
+ "project_name": "tomli",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2568f011dddeb5990d8698cc375d237f14568ffa8489854e3b94113b4b6b7c8b",
+ "url": "https://download.pytorch.org/whl/torch-1.12.0-cp310-cp310-manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2568f011dddeb5990d8698cc375d237f14568ffa8489854e3b94113b4b6b7c8b",
+ "url": "https://download.pytorch.org/whl/cpu/torch-1.12.0-cp310-cp310-manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "74e3437f607a920f343665cd0c9713bfdd80c67b740dad7cc91b3f2e1edbf03e",
+ "url": "https://download.pytorch.org/whl/cpu/torch-1.12.0-cp310-none-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "13c7cca6b2ea3704d775444f02af53c5f072d145247e17b8cd7813ac57869f03",
+ "url": "https://download.pytorch.org/whl/cpu/torch-1.12.0-cp310-none-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3322d33a06e440d715bb214334bd41314c94632d9a2f07d22006bf21da3a2be4",
+ "url": "https://files.pythonhosted.org/packages/52/c2/323619638a1154da0a134297a4cc1c46df4142e9fd553370301937e21fcb/torch-1.12.0-cp310-cp310-manylinux1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "13c7cca6b2ea3704d775444f02af53c5f072d145247e17b8cd7813ac57869f03",
+ "url": "https://files.pythonhosted.org/packages/5a/e8/82c14c28360dafe02877b28c70218c8b6ca8a0f2fbb0515b2abd027ca251/torch-1.12.0-cp310-none-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "349ea3ba0c0e789e0507876c023181f13b35307aebc2e771efd0e045b8e03e84",
+ "url": "https://files.pythonhosted.org/packages/76/4b/0a527d9c8f8e1890591bf5addd9e20b52a0b09b2098e8b4fb556ccf5225b/torch-1.12.0-cp310-none-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2568f011dddeb5990d8698cc375d237f14568ffa8489854e3b94113b4b6b7c8b",
+ "url": "https://files.pythonhosted.org/packages/a8/54/af39450ee79c9bfe8db0754354cb490a6fd04a44f95c2622b00e019d1606/torch-1.12.0-cp310-cp310-manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "torch",
+ "requires_dists": [
+ "typing-extensions"
+ ],
+ "requires_python": ">=3.7.0",
+ "version": "1.12.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475",
+ "url": "https://files.pythonhosted.org/packages/f9/de/dc04a3ea60b22624b51c703a84bbe0184abcd1d0b9bc8074b5d6b7ab90bb/typing_extensions-4.10.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb",
+ "url": "https://files.pythonhosted.org/packages/16/3a/0d26ce356c7465a19c9ea8814b960f8a36c3b0d07c323176620b7b483e44/typing_extensions-4.10.0.tar.gz"
+ }
+ ],
+ "project_name": "typing-extensions",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "4.10.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "url": "https://files.pythonhosted.org/packages/a2/73/a68704750a7679d0b6d3ad7aa8d4da8e14e151ae82e6fee774e6e0d05ec8/urllib3-2.2.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19",
+ "url": "https://files.pythonhosted.org/packages/7a/50/7fd50a27caa0652cd4caf224aa87741ea41d3265ad13f010886167cfcc79/urllib3-2.2.1.tar.gz"
+ }
+ ],
+ "project_name": "urllib3",
+ "requires_dists": [
+ "brotli>=1.0.9; platform_python_implementation == \"CPython\" and extra == \"brotli\"",
+ "brotlicffi>=0.8.0; platform_python_implementation != \"CPython\" and extra == \"brotli\"",
+ "h2<5,>=4; extra == \"h2\"",
+ "pysocks!=1.5.7,<2.0,>=1.5.6; extra == \"socks\"",
+ "zstandard>=0.18.0; extra == \"zstd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "2.2.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "023b6c72a6ef13085c9a970f6714548eca64f56d3d8698e42372764950dfd004",
+ "url": "https://files.pythonhosted.org/packages/53/7c/f3656d1ce3b916ea35f454c6a32b56342168c08baf09a0683df240ca2dce/wandb-0.16.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c317d55af93a688f3eafcdfec897f7b72da1fe1525140e076ecdaab8b09aa46e",
+ "url": "https://files.pythonhosted.org/packages/e1/75/26d5e5923cb6a619215f6eeb6508b67651d0f4a3306169c4a1c5861a3b20/wandb-0.16.5.tar.gz"
+ }
+ ],
+ "project_name": "wandb",
+ "requires_dists": [
+ "Click!=8.0.0,>=7.1",
+ "GitPython!=3.1.29,>=1.0.0",
+ "PyYAML",
+ "PyYAML>=6.0.0; extra == \"launch\"",
+ "appdirs>=1.4.3",
+ "awscli; extra == \"launch\"",
+ "azure-containerregistry; extra == \"launch\"",
+ "azure-identity; extra == \"azure\"",
+ "azure-identity; extra == \"launch\"",
+ "azure-storage-blob; extra == \"azure\"",
+ "azure-storage-blob; extra == \"launch\"",
+ "bokeh; extra == \"media\"",
+ "boto3; extra == \"aws\"",
+ "boto3; extra == \"launch\"",
+ "botocore; extra == \"launch\"",
+ "chardet; extra == \"launch\"",
+ "cloudpickle; extra == \"models\"",
+ "docker-pycreds>=0.4.0",
+ "filelock; extra == \"importers\"",
+ "google-auth; extra == \"launch\"",
+ "google-cloud-aiplatform; extra == \"launch\"",
+ "google-cloud-artifact-registry; extra == \"launch\"",
+ "google-cloud-compute; extra == \"launch\"",
+ "google-cloud-storage; extra == \"gcp\"",
+ "google-cloud-storage; extra == \"kubeflow\"",
+ "google-cloud-storage; extra == \"launch\"",
+ "httpx>=0.23.0; extra == \"async\"",
+ "iso8601; extra == \"launch\"",
+ "kubernetes-asyncio; extra == \"launch\"",
+ "kubernetes; extra == \"kubeflow\"",
+ "kubernetes; extra == \"launch\"",
+ "minio; extra == \"kubeflow\"",
+ "mlflow; extra == \"importers\"",
+ "moviepy; extra == \"media\"",
+ "nbconvert; extra == \"launch\"",
+ "nbformat; extra == \"launch\"",
+ "numpy; extra == \"media\"",
+ "optuna; extra == \"launch\"",
+ "orjson; extra == \"perf\"",
+ "pillow; extra == \"media\"",
+ "plotly>=5.18.0; extra == \"media\"",
+ "polars; extra == \"importers\"",
+ "protobuf!=4.21.0,<5,>=3.12.0; python_version < \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.15.0; python_version == \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.19.0; python_version > \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.19.0; sys_platform != \"linux\"",
+ "psutil>=5.0.0",
+ "pydantic; extra == \"launch\"",
+ "pydantic>=2.0.0; extra == \"reports\"",
+ "rdkit-pypi; extra == \"media\"",
+ "requests<3,>=2.0.0",
+ "rich; extra == \"importers\"",
+ "sentry-sdk>=1.0.0",
+ "setproctitle",
+ "setuptools",
+ "sh; extra == \"kubeflow\"",
+ "soundfile; extra == \"media\"",
+ "sweeps>=0.2.0; extra == \"sweeps\"",
+ "tenacity; extra == \"importers\"",
+ "tomli; extra == \"launch\"",
+ "typing-extensions; extra == \"launch\"",
+ "typing-extensions; python_version < \"3.10\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "0.16.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3aac3f5da756f93030740bc235d3e09449efcf65f2f55e3602e1d851b8f48795",
+ "url": "https://files.pythonhosted.org/packages/e3/23/c9843d7550092ae7ad380611c238f44afef66f58f76c1dab7dcf313e4339/werkzeug-3.0.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e39b645a6ac92822588e7b39a692e7828724ceae0b0d702ef96701f90e70128d",
+ "url": "https://files.pythonhosted.org/packages/0f/84/00f7193d7bd88ced26cd5f868903e431054424610dc7c041bbe87d2a4d66/werkzeug-3.0.2.tar.gz"
+ }
+ ],
+ "project_name": "werkzeug",
+ "requires_dists": [
+ "MarkupSafe>=2.1.1",
+ "watchdog>=2.3; extra == \"watchdog\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.0.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad",
+ "url": "https://files.pythonhosted.org/packages/4d/05/4d79198ae568a92159de0f89e710a8d19e3fa267b719a236582eee921f4a/yarl-1.9.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551",
+ "url": "https://files.pythonhosted.org/packages/0b/58/dd3c69651381a57ac991dba54b20ae2da359eb4b03a661e71c451d6525c6/yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385",
+ "url": "https://files.pythonhosted.org/packages/0b/a3/7774786ec6e2dca0bb38b286f12a11af97957546e5fbcce71752a8d2cf07/yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234",
+ "url": "https://files.pythonhosted.org/packages/30/b5/215d586d5cb17ca9748d7a2d597c07147f210c0c0785257492094d083b65/yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b",
+ "url": "https://files.pythonhosted.org/packages/44/ae/fdbc9965ef69e650c3b5b04d60badef90ff0cde21a30770f0700e148b12f/yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e",
+ "url": "https://files.pythonhosted.org/packages/6c/27/cda5a927df3a894eddfee4efacdd230c2d8486e322fc672194fd651f82c5/yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c",
+ "url": "https://files.pythonhosted.org/packages/6d/a1/db0bdf8cc48515e9c02daf04ae2916fc27ce6498eca21432fc9ffa63f71b/yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863",
+ "url": "https://files.pythonhosted.org/packages/70/a9/ef6d69ce9a4e82080290bcb6db735bb8a6d6db92f2bbb92b6951bde97e7c/yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66",
+ "url": "https://files.pythonhosted.org/packages/81/c6/06938036ea48fa74521713499fba1459b0eb60af9b9afbe8e0e9e1a96c36/yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53",
+ "url": "https://files.pythonhosted.org/packages/b2/4f/796b0c73e9ff30a1047a7ee3390e157ab8424d4401b9f32a2624013a5b39/yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455",
+ "url": "https://files.pythonhosted.org/packages/c3/a0/0ade1409d184cbc9e85acd403a386a7c0563b92ff0f26d138ff9e86e48b4/yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541",
+ "url": "https://files.pythonhosted.org/packages/cc/2a/abbaf1460becba856e163f2a1274f5d34b1969d476da8e68a8fc2aeb5661/yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2",
+ "url": "https://files.pythonhosted.org/packages/d5/fc/40b85bea1f5686092ea37f472c94c023d6347266852ffd55baa01c40f596/yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392",
+ "url": "https://files.pythonhosted.org/packages/dd/90/2958ae9f2e12084d616eef95b6a48c8e6d96448add04367c20dc53a33ff2/yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf",
+ "url": "https://files.pythonhosted.org/packages/e0/ad/bedcdccbcbf91363fd425a948994f3340924145c2bc8ccb296f4a1e52c28/yarl-1.9.4.tar.gz"
+ }
+ ],
+ "project_name": "yarl",
+ "requires_dists": [
+ "idna>=2.0",
+ "multidict>=4.0",
+ "typing-extensions>=3.7.4; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.9.4"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "atomicwrites>=1.4.0",
+ "box2d-py>=2.3.5",
+ "cloudpickle~=3.0",
+ "gsutil>=4.66",
+ "gymnasium>=0.27.1",
+ "myst-parser~=2.0",
+ "numpy<1.24",
+ "onnx>=1.10",
+ "opencv-python>=3.0",
+ "protobuf>=4.0",
+ "psutil>=5.8.0",
+ "pygame>=2.1.0",
+ "pytest-benchmark==4.0.0",
+ "pytest-cov!=2.12.1,<3.1,>=2.12",
+ "pytest-platform-markers",
+ "pytest-rerunfailures",
+ "pytest-xdist<3,>=2.5",
+ "pytest~=8.0",
+ "setuptools==59.5",
+ "tensorboard>=2.8.0",
+ "torch!=1.12.0+cpu,!=1.12.0+cu116,==1.12.0",
+ "torch==1.12.0",
+ "wandb>=0.14.0"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/cpu.lock b/locks/cpu.lock
new file mode 100644
index 00000000..c3af40ac
--- /dev/null
+++ b/locks/cpu.lock
@@ -0,0 +1,3248 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=cpu
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "atomicwrites>=1.4.0",
+// "box2d-py>=2.3.5",
+// "cloudpickle~=3.0",
+// "gsutil>=4.66",
+// "gymnasium>=0.27.1",
+// "myst-parser~=2.0",
+// "numpy<1.24",
+// "onnx>=1.10",
+// "opencv-python>=3.0",
+// "protobuf>=4.0",
+// "psutil>=5.8.0",
+// "pygame>=2.1.0",
+// "pytest-benchmark==4.0.0",
+// "pytest-cov!=2.12.1,<3.1,>=2.12",
+// "pytest-platform-markers",
+// "pytest-rerunfailures",
+// "pytest-xdist<3,>=2.5",
+// "pytest~=8.0",
+// "setuptools==59.5",
+// "tensorboard>=2.8.0",
+// "torch!=1.12.0+cu116,==1.12.0+cpu",
+// "torch==1.12.0",
+// "wandb>=0.14.0"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308",
+ "url": "https://files.pythonhosted.org/packages/a2/ad/e0d3c824784ff121c03cc031f944bc7e139a8f1870ffd2845cc2dd76f6c4/absl_py-2.1.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff",
+ "url": "https://files.pythonhosted.org/packages/7a/8f/fc001b92ecc467cc32ab38398bd0bfb45df46e7523bf33c2ad22a505f06e/absl-py-2.1.0.tar.gz"
+ }
+ ],
+ "project_name": "absl-py",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.1.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52",
+ "url": "https://files.pythonhosted.org/packages/1f/41/0852b954464d853cf315e60f096d3ff6a74aff75ad5f3388c06695d5d37f/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54",
+ "url": "https://files.pythonhosted.org/packages/0c/03/2cac72f64b2853397dd697aa4957755b85bfd3acc0ffe898571060f1db83/aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747",
+ "url": "https://files.pythonhosted.org/packages/18/02/4156ed2edca212041c7a5334b9520ff5a39e40648177e2f0ef13cac2b555/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7",
+ "url": "https://files.pythonhosted.org/packages/18/93/1f005bbe044471a0444a82cdd7356f5120b9cf94fe2c50c0cdbf28f1258b/aiohttp-3.9.3.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5",
+ "url": "https://files.pythonhosted.org/packages/43/68/86874ff80e74c2e8308af3d80345fd624b5b26197a914aa9a85cfaf5b025/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf",
+ "url": "https://files.pythonhosted.org/packages/4b/a0/8b50667a858f3e4f3fec2d471aa9e618783c0450b980e7a5bf617c1cb1f3/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c",
+ "url": "https://files.pythonhosted.org/packages/63/56/c1d39b27114595beaea776e164dbb793cf64c16331ba00cd0dc7cf0542f2/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768",
+ "url": "https://files.pythonhosted.org/packages/6d/8a/46ba295c98b24779370580b4450f80f35a1ae9e4bc9f9783ea1043d33395/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5",
+ "url": "https://files.pythonhosted.org/packages/7e/6e/6c0486fdd8918f9818e82b30898cb77ff0debccc4b09db5d9a939ed7a075/aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec",
+ "url": "https://files.pythonhosted.org/packages/86/74/b506f01485dba1c4298700156b915f3ba475be823a7b31056d40a9ac0daa/aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29",
+ "url": "https://files.pythonhosted.org/packages/93/40/d3decda219ebd5410eba627601d537ec3782efbcadba308e9ce381cc0b71/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc",
+ "url": "https://files.pythonhosted.org/packages/9a/41/d6ce776c9c22f402ad0b0cfbdc70a630512229854b0043bd0dbe6566d75d/aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b",
+ "url": "https://files.pythonhosted.org/packages/9d/79/b34562b6cce04322023112f1984380359d78bd043b8ef822c2f356b7a047/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6",
+ "url": "https://files.pythonhosted.org/packages/f5/4e/41143834b3fd5b89b404c76b5a71496bca96fbd8587c1e42a8f2b2efb8b3/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl"
+ }
+ ],
+ "project_name": "aiohttp",
+ "requires_dists": [
+ "Brotli; platform_python_implementation == \"CPython\" and extra == \"speedups\"",
+ "aiodns; (sys_platform == \"linux\" or sys_platform == \"darwin\") and extra == \"speedups\"",
+ "aiosignal>=1.1.2",
+ "async-timeout<5.0,>=4.0; python_version < \"3.11\"",
+ "attrs>=17.3.0",
+ "brotlicffi; platform_python_implementation != \"CPython\" and extra == \"speedups\"",
+ "frozenlist>=1.1.1",
+ "multidict<7.0,>=4.5",
+ "yarl<2.0,>=1.0"
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.9.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17",
+ "url": "https://files.pythonhosted.org/packages/76/ac/a7305707cb852b7e16ff80eaf5692309bde30e2b1100a1fcacdc8f731d97/aiosignal-1.3.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc",
+ "url": "https://files.pythonhosted.org/packages/ae/67/0952ed97a9793b4958e5736f6d2b346b414a2cd63e82d05940032f45b32f/aiosignal-1.3.1.tar.gz"
+ }
+ ],
+ "project_name": "aiosignal",
+ "requires_dists": [
+ "frozenlist>=1.1.0"
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.3.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92",
+ "url": "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65",
+ "url": "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz"
+ }
+ ],
+ "project_name": "alabaster",
+ "requires_dists": [],
+ "requires_python": ">=3.9",
+ "version": "0.7.16"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128",
+ "url": "https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41",
+ "url": "https://files.pythonhosted.org/packages/d7/d8/05696357e0311f5b5c316d7b95f46c669dd9c15aaeecbb48c7d0aeb88c40/appdirs-1.4.4.tar.gz"
+ }
+ ],
+ "project_name": "appdirs",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.4.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c",
+ "url": "https://files.pythonhosted.org/packages/88/8c/61021c45428ad2ef6131c6068d14f7f0968767e972e427cd87bd25c9ea7b/argcomplete-3.2.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23",
+ "url": "https://files.pythonhosted.org/packages/3c/c0/031c507227ce3b715274c1cd1f3f9baf7a0f7cec075e22c7c8b5d4e468a9/argcomplete-3.2.3.tar.gz"
+ }
+ ],
+ "project_name": "argcomplete",
+ "requires_dists": [
+ "coverage; extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pexpect; extra == \"test\"",
+ "ruff; extra == \"test\"",
+ "wheel; extra == \"test\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.2.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028",
+ "url": "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f",
+ "url": "https://files.pythonhosted.org/packages/87/d6/21b30a550dafea84b1b8eee21b5e23fa16d010ae006011221f33dcd8d7f8/async-timeout-4.0.3.tar.gz"
+ }
+ ],
+ "project_name": "async-timeout",
+ "requires_dists": [
+ "typing-extensions>=3.6.5; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11",
+ "url": "https://files.pythonhosted.org/packages/87/c6/53da25344e3e3a9c01095a89f16dbcda021c609ddb42dd6d7c0528236fb2/atomicwrites-1.4.1.tar.gz"
+ }
+ ],
+ "project_name": "atomicwrites",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1",
+ "url": "https://files.pythonhosted.org/packages/e0/44/827b2a91a5816512fcaf3cc4ebc465ccd5d598c45cefa6703fcf4a79018f/attrs-23.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "url": "https://files.pythonhosted.org/packages/e3/fc/f800d51204003fa8ae392c4e8278f256206e7a919b708eef054f5f4b650d/attrs-23.2.0.tar.gz"
+ }
+ ],
+ "project_name": "attrs",
+ "requires_dists": [
+ "attrs[tests-mypy]; extra == \"tests-no-zope\"",
+ "attrs[tests-no-zope]; extra == \"tests\"",
+ "attrs[tests]; extra == \"cov\"",
+ "attrs[tests]; extra == \"dev\"",
+ "cloudpickle; platform_python_implementation == \"CPython\" and extra == \"tests-no-zope\"",
+ "coverage[toml]>=5.3; extra == \"cov\"",
+ "furo; extra == \"docs\"",
+ "hypothesis; extra == \"tests-no-zope\"",
+ "importlib-metadata; python_version < \"3.8\"",
+ "mypy>=1.6; (platform_python_implementation == \"CPython\" and python_version >= \"3.8\") and extra == \"tests-mypy\"",
+ "myst-parser; extra == \"docs\"",
+ "pre-commit; extra == \"dev\"",
+ "pympler; extra == \"tests-no-zope\"",
+ "pytest-mypy-plugins; (platform_python_implementation == \"CPython\" and python_version >= \"3.8\") and extra == \"tests-mypy\"",
+ "pytest-xdist[psutil]; extra == \"tests-no-zope\"",
+ "pytest>=4.3.0; extra == \"tests-no-zope\"",
+ "sphinx-notfound-page; extra == \"docs\"",
+ "sphinx; extra == \"docs\"",
+ "sphinxcontrib-towncrier; extra == \"docs\"",
+ "towncrier; extra == \"docs\"",
+ "zope-interface; extra == \"docs\"",
+ "zope-interface; extra == \"tests\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "23.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287",
+ "url": "https://files.pythonhosted.org/packages/0d/35/4196b21041e29a42dc4f05866d0c94fa26c9da88ce12c38c2265e42c82fb/Babel-2.14.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363",
+ "url": "https://files.pythonhosted.org/packages/e2/80/cfbe44a9085d112e983282ee7ca4c00429bc4d1ce86ee5f4e60259ddff7f/Babel-2.14.0.tar.gz"
+ }
+ ],
+ "project_name": "babel",
+ "requires_dists": [
+ "freezegun~=1.0; extra == \"dev\"",
+ "pytest-cov; extra == \"dev\"",
+ "pytest>=6.0; extra == \"dev\"",
+ "pytz>=2015.7; python_version < \"3.9\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.14.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8",
+ "url": "https://files.pythonhosted.org/packages/23/10/c0b78c27298029e4454a472a1919bde20cb182dab1662cec7f2ca1dcc523/boto-2.49.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a",
+ "url": "https://files.pythonhosted.org/packages/c8/af/54a920ff4255664f5d238b5aebd8eedf7a07c7a5e71e27afcfe840b82f51/boto-2.49.0.tar.gz"
+ }
+ ],
+ "project_name": "boto",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.49.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "bdacfbbc56079bb317548efe49d3d5a86646885cc27f4a2ee97e4b2960921ab7",
+ "url": "https://files.pythonhosted.org/packages/98/c2/ab05b5329dc4416b5ee5530f0625a79c394a3e3c10abe0812b9345256451/box2d-py-2.3.8.tar.gz"
+ }
+ ],
+ "project_name": "box2d-py",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.3.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945",
+ "url": "https://files.pythonhosted.org/packages/fb/2b/a64c2d25a37aeb921fddb929111413049fc5f8b9a4c1aefaffaafe768d54/cachetools-5.3.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105",
+ "url": "https://files.pythonhosted.org/packages/b3/4d/27a3e6dd09011649ad5210bdf963765bc8fa81a0827a4fc01bafd2705c5b/cachetools-5.3.3.tar.gz"
+ }
+ ],
+ "project_name": "cachetools",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "5.3.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1",
+ "url": "https://files.pythonhosted.org/packages/ba/06/a07f096c664aeb9f01624f858c3add0a4e913d6c96257acb4fce61e7de14/certifi-2024.2.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
+ "url": "https://files.pythonhosted.org/packages/71/da/e94e26401b62acd6d91df2b52954aceb7f561743aa5ccc32152886c76c96/certifi-2024.2.2.tar.gz"
+ }
+ ],
+ "project_name": "certifi",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "2024.2.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d",
+ "url": "https://files.pythonhosted.org/packages/ee/68/74a2b9f9432b70d97d1184cdabf32d7803124c228adef9481d280864a4a7/cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684",
+ "url": "https://files.pythonhosted.org/packages/22/05/43cfda378da7bb0aa19b3cf34fe54f8867b0d581294216339d87deefd69c/cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7",
+ "url": "https://files.pythonhosted.org/packages/54/49/b8875986beef2e74fc668b95f2df010e354f78e009d33d95b375912810c3/cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673",
+ "url": "https://files.pythonhosted.org/packages/57/3a/c263cf4d5b02880274866968fa2bf196a02c4486248bc164732319b4a4c0/cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0",
+ "url": "https://files.pythonhosted.org/packages/68/ce/95b0bae7968c65473e1298efb042e10cafc7bafc14d9e4f154008241c91d/cffi-1.16.0.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088",
+ "url": "https://files.pythonhosted.org/packages/aa/aa/1c43e48a6f361d1529f9e4602d6992659a0107b5f21cae567e2eddcf8d66/cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9",
+ "url": "https://files.pythonhosted.org/packages/c4/01/f5116266fe80c04d4d1cc96c3d355606943f9fb604a810e0b02228a0ce19/cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614",
+ "url": "https://files.pythonhosted.org/packages/c9/7c/43d81bdd5a915923c3bad5bb4bff401ea00ccc8e28433fb6083d2e3bf58e/cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743",
+ "url": "https://files.pythonhosted.org/packages/eb/de/4f644fc78a1144a897e1f908abfb2058f7be05a8e8e4fe90b7f41e9de36b/cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896",
+ "url": "https://files.pythonhosted.org/packages/f0/31/a6503a5c4874fb4d4c2053f73f09a957cb427b6943fab5a43b8e156df397/cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "cffi",
+ "requires_dists": [
+ "pycparser"
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "url": "https://files.pythonhosted.org/packages/28/76/e6222113b83e3622caa4bb41032d0b1bf785250607392e1b778aca0b8a7d/charset_normalizer-3.3.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "url": "https://files.pythonhosted.org/packages/05/8c/eb854996d5fef5e4f33ad56927ad053d04dc820e4a3d39023f35cad72617/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "url": "https://files.pythonhosted.org/packages/2b/61/095a0aa1a84d1481998b534177c8566fdc50bb1233ea9a0478cd3cc075bd/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "url": "https://files.pythonhosted.org/packages/33/c3/3b96a435c5109dd5b6adc8a59ba1d678b302a97938f032e3770cc84cd354/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "url": "https://files.pythonhosted.org/packages/3f/ba/3f5e7be00b215fa10e13d64b1f6237eb6ebea66676a41b2bcdd09fe74323/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "url": "https://files.pythonhosted.org/packages/43/05/3bf613e719efe68fb3a77f9c536a389f35b95d75424b96b426a47a45ef1d/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "url": "https://files.pythonhosted.org/packages/46/6a/d5c26c41c49b546860cc1acabdddf48b0b3fb2685f4f5617ac59261b44ae/charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "url": "https://files.pythonhosted.org/packages/58/78/a0bc646900994df12e07b4ae5c713f2b3e5998f58b9d3720cce2aa45652f/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "url": "https://files.pythonhosted.org/packages/63/09/c1bc53dab74b1816a00d8d030de5bf98f724c52c1635e07681d312f20be8/charset-normalizer-3.3.2.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "url": "https://files.pythonhosted.org/packages/a8/31/47d018ef89f95b8aded95c589a77c072c55e94b50a41aa99c0a2008a45a4/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "url": "https://files.pythonhosted.org/packages/b8/60/e2f67915a51be59d4539ed189eb0a2b0d292bf79270410746becb32bc2c3/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "url": "https://files.pythonhosted.org/packages/cc/94/f7cf5e5134175de79ad2059edf2adce18e0685ebdb9227ff0139975d0e93/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "url": "https://files.pythonhosted.org/packages/da/f1/3702ba2a7470666a62fd81c58a4c40be00670e5006a67f4d626e57f013ae/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "url": "https://files.pythonhosted.org/packages/eb/5c/97d97248af4920bc68687d9c3b3c0f47c910e21a8ff80af4565a576bd2f0/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "url": "https://files.pythonhosted.org/packages/f6/93/bb6cbeec3bf9da9b2eba458c15966658d1daa8b982c642f81c93ad9b40e1/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ }
+ ],
+ "project_name": "charset-normalizer",
+ "requires_dists": [],
+ "requires_python": ">=3.7.0",
+ "version": "3.3.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "url": "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de",
+ "url": "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz"
+ }
+ ],
+ "project_name": "click",
+ "requires_dists": [
+ "colorama; platform_system == \"Windows\"",
+ "importlib-metadata; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "8.1.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7",
+ "url": "https://files.pythonhosted.org/packages/96/43/dae06432d0c4b1dc9e9149ad37b4ca8384cf6eb7700cd9215b177b914f0a/cloudpickle-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882",
+ "url": "https://files.pythonhosted.org/packages/c8/72/42a6570fc61b1f8913529728ad314c7cf5961540728dcad22c33fb2db6b6/cloudpickle-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "cloudpickle",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677",
+ "url": "https://files.pythonhosted.org/packages/99/15/dbcb5d0a22bf5357cf456dfd16f9ceb89c54544d6201d53bc77c75077a8e/coverage-7.4.4-pp38.pp39.pp310-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8",
+ "url": "https://files.pythonhosted.org/packages/07/58/0e076ea3a59dbfb3e981577c4e5572b432345cedd921e83006a0215b9afe/coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf",
+ "url": "https://files.pythonhosted.org/packages/10/1e/f676e1655d10bf59a6cb8de0601b7ea3c252c764782a3c2263f6d6bbcf28/coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2",
+ "url": "https://files.pythonhosted.org/packages/45/f4/10bf725621aeec5cc2fa1bc73021f5ba1ac01bcbf2c7278d8d34e1df6457/coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87",
+ "url": "https://files.pythonhosted.org/packages/50/32/829d0e709fa699dc4e498fa77a561d25fc57954ba32466279952b98f0836/coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c",
+ "url": "https://files.pythonhosted.org/packages/7e/60/62a8c190d20bf605c89a000fd6d41e3563b5792e7275b12eeefe6803b473/coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7",
+ "url": "https://files.pythonhosted.org/packages/91/4e/feff6d115dcc239e5850570ca2ea27a243c8a69596e7f1dabe54a6102d89/coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2",
+ "url": "https://files.pythonhosted.org/packages/93/41/e6e9dbb322f3c93aba7bc519b9c62846d923d7b57398bdd7eda3f0acdd11/coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49",
+ "url": "https://files.pythonhosted.org/packages/bf/d5/f809d8b630cf4c11fe490e20037a343d12a74ec2783c6cdb5aee725e7137/coverage-7.4.4.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562",
+ "url": "https://files.pythonhosted.org/packages/d3/6d/72b9f5035c50a14bc5c5fda0c28ac16c426e957a7a3debe02906b614fc4f/coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ }
+ ],
+ "project_name": "coverage",
+ "requires_dists": [
+ "tomli; python_full_version <= \"3.11.0a6\" and extra == \"toml\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "7.4.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "dc7051a0db5f2bd48665a990d3ec1cc305a466a77358ca4492826f41f283601e",
+ "url": "https://files.pythonhosted.org/packages/6b/b0/e595ce2a2527e169c3bcd6c33d2473c1918e0b7f6826a043ca1245dd4e5b/crcmod-1.7.tar.gz"
+ }
+ ],
+ "project_name": "crcmod",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c",
+ "url": "https://files.pythonhosted.org/packages/6e/8d/6cce88bdeb26b4ec14b23ab9f0c2c7c0bf33ef4904bfa952c5db1749fd37/cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc",
+ "url": "https://files.pythonhosted.org/packages/0e/1d/62a2324882c0db89f64358dadfb95cae024ee3ba9fde3d5fd4d2f58af9f5/cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1",
+ "url": "https://files.pythonhosted.org/packages/13/9e/a55763a32d340d7b06d045753c186b690e7d88780cafce5f88cb931536be/cryptography-42.0.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da",
+ "url": "https://files.pythonhosted.org/packages/2c/9c/821ef6144daf80360cf6093520bf07eec7c793103ed4b1bf3fa17d2b55d8/cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a",
+ "url": "https://files.pythonhosted.org/packages/48/c8/c0962598c43d3cff2c9d6ac66d0c612bdfb1975be8d87b8889960cf8c81d/cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1",
+ "url": "https://files.pythonhosted.org/packages/50/26/248cd8b6809635ed412159791c0d3869d8ec9dfdc57d428d500a14d425b7/cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2",
+ "url": "https://files.pythonhosted.org/packages/59/48/519ecd6b65dc9ea7c8111dfde7c9ed61aeb90fe59c6b4454900bcd3e3286/cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1",
+ "url": "https://files.pythonhosted.org/packages/5b/3d/c3c21e3afaf43bacccc3ebf61d1a0d47cef6e2607dbba01662f6f9d8fc40/cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7",
+ "url": "https://files.pythonhosted.org/packages/64/f7/d3c83c79947cc6807e6acd3b2d9a1cbd312042777bc7eec50c869913df79/cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7",
+ "url": "https://files.pythonhosted.org/packages/69/f6/630eb71f246208103ffee754b8375b6b334eeedb28620b3ae57be815eeeb/cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8",
+ "url": "https://files.pythonhosted.org/packages/6d/4d/f7c14c7a49e35df829e04d451a57b843208be7442c8e087250c195775be1/cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922",
+ "url": "https://files.pythonhosted.org/packages/7d/bc/b6c691c960b5dcd54c5444e73af7f826e62af965ba59b6d7e9928b6489a2/cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278",
+ "url": "https://files.pythonhosted.org/packages/8c/50/9185cca136596448d9cc595ae22a9bd4412ad35d812550c37c1390d54673/cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8",
+ "url": "https://files.pythonhosted.org/packages/9f/c3/3d2d9bb2ff9e15b5ababc370ae85b377eacc8e3d54fcb03225471e41a1d8/cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc",
+ "url": "https://files.pythonhosted.org/packages/c2/40/c7cb9d6819b90640ffc3c4028b28f46edc525feaeaa0d98ea23e843d446d/cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30",
+ "url": "https://files.pythonhosted.org/packages/ca/2e/9f2c49bd6a18d46c05ec098b040e7d4599c61f50ced40a39adfae3f68306/cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16",
+ "url": "https://files.pythonhosted.org/packages/d1/f1/fd98e6e79242d9aeaf6a5d49639a7e85f05741575af14d3f4a1d477f572e/cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e",
+ "url": "https://files.pythonhosted.org/packages/d4/fa/057f9d7a5364c86ccb6a4bd4e5c58920dcb66532be0cc21da3f9c7617ec3/cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d",
+ "url": "https://files.pythonhosted.org/packages/d8/b1/127ecb373d02db85a7a7de5093d7ac7b7714b8907d631f0591e8f002998d/cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec",
+ "url": "https://files.pythonhosted.org/packages/d9/f9/27dda069a9f9bfda7c75305e222d904cc2445acf5eab5c696ade57d36f1b/cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb",
+ "url": "https://files.pythonhosted.org/packages/e2/59/61b2364f2a4d3668d933531bc30d012b9b2de1e534df4805678471287d57/cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee",
+ "url": "https://files.pythonhosted.org/packages/e5/61/67e090a41c70ee526bd5121b1ccabab85c727574332d03326baaedea962d/cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4",
+ "url": "https://files.pythonhosted.org/packages/fb/0b/14509319a1b49858425553d2fb3808579cfdfe98c1d71a3f046c1b4e0108/cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ }
+ ],
+ "project_name": "cryptography",
+ "requires_dists": [
+ "bcrypt>=3.1.5; extra == \"ssh\"",
+ "build; extra == \"sdist\"",
+ "certifi; extra == \"test\"",
+ "cffi>=1.12; platform_python_implementation != \"PyPy\"",
+ "check-sdist; extra == \"pep8test\"",
+ "click; extra == \"pep8test\"",
+ "mypy; extra == \"pep8test\"",
+ "nox; extra == \"nox\"",
+ "pretend; extra == \"test\"",
+ "pyenchant>=1.6.11; extra == \"docstest\"",
+ "pytest-benchmark; extra == \"test\"",
+ "pytest-cov; extra == \"test\"",
+ "pytest-randomly; extra == \"test-randomorder\"",
+ "pytest-xdist; extra == \"test\"",
+ "pytest>=6.2.0; extra == \"test\"",
+ "readme-renderer; extra == \"docstest\"",
+ "ruff; extra == \"pep8test\"",
+ "sphinx-rtd-theme>=1.1.1; extra == \"docs\"",
+ "sphinx>=5.3.0; extra == \"docs\"",
+ "sphinxcontrib-spelling>=4.0.1; extra == \"docstest\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "42.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49",
+ "url": "https://files.pythonhosted.org/packages/f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4",
+ "url": "https://files.pythonhosted.org/packages/c5/e6/d1f6c00b7221e2d7c4b470132c931325c8b22c51ca62417e300f5ce16009/docker-pycreds-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "docker-pycreds",
+ "requires_dists": [
+ "six>=1.4.0"
+ ],
+ "requires_python": null,
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6",
+ "url": "https://files.pythonhosted.org/packages/26/87/f238c0670b94533ac0353a4e2a1a771a0cc73277b88bff23d3ae35a256c1/docutils-0.20.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b",
+ "url": "https://files.pythonhosted.org/packages/1f/53/a5da4f2c5739cf66290fac1431ee52aff6851c7c8ffd8264f13affd7bcdd/docutils-0.20.1.tar.gz"
+ }
+ ],
+ "project_name": "docutils",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.20.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14",
+ "url": "https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68",
+ "url": "https://files.pythonhosted.org/packages/8e/1c/beef724eaf5b01bb44b6338c8c3494eff7cab376fab4904cfbbc3585dc79/exceptiongroup-1.2.0.tar.gz"
+ }
+ ],
+ "project_name": "exceptiongroup",
+ "requires_dists": [
+ "pytest>=6; extra == \"test\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41",
+ "url": "https://files.pythonhosted.org/packages/e8/9c/a079946da30fac4924d92dbc617e5367d454954494cf1e71567bcc4e00ee/execnet-2.0.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af",
+ "url": "https://files.pythonhosted.org/packages/e4/c8/d382dc7a1e68a165f4a4ab612a08b20d8534a7d20cc590630b734ca0c54b/execnet-2.0.2.tar.gz"
+ }
+ ],
+ "project_name": "execnet",
+ "requires_dists": [
+ "hatch; extra == \"testing\"",
+ "pre-commit; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"testing\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.0.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "14de931035a41961f7c056361dc7f980762a143d05791ef5794a751a2caf05ae",
+ "url": "https://files.pythonhosted.org/packages/05/2c/ffc08c54c05cdce6fbed2aeebc46348dbe180c6d2c541c7af7ba0aa5f5f8/Farama_Notifications-0.0.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "13fceff2d14314cf80703c8266462ebf3733c7d165336eee998fc58e545efd18",
+ "url": "https://files.pythonhosted.org/packages/2e/2c/8384832b7a6b1fd6ba95bbdcae26e7137bb3eedc955c42fd5cdcc086cfbf/Farama-Notifications-0.0.4.tar.gz"
+ }
+ ],
+ "project_name": "farama-notifications",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "0.0.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "758819cb5d94cdedf4e836988b74de396ceacb8e2794d21f82d131fd9ee77237",
+ "url": "https://files.pythonhosted.org/packages/61/bf/fd60001b3abc5222d8eaa4a204cd8c0ae78e75adc688f33ce4bf25b7fafa/fasteners-0.19-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b4f37c3ac52d8a445af3a66bce57b33b5e90b97c696b7b984f530cf8f0ded09c",
+ "url": "https://files.pythonhosted.org/packages/5f/d4/e834d929be54bfadb1f3e3b931c38e956aaa3b235a46a3c764c26c774902/fasteners-0.19.tar.gz"
+ }
+ ],
+ "project_name": "fasteners",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "0.19"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7",
+ "url": "https://files.pythonhosted.org/packages/83/10/466fe96dae1bff622021ee687f68e5524d6392b0a2f80d05001cd3a451ba/frozenlist-1.4.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c",
+ "url": "https://files.pythonhosted.org/packages/36/ce/dc6f29e0352fa34ebe45421960c8e7352ca63b31630a576e8ffb381e9c08/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe",
+ "url": "https://files.pythonhosted.org/packages/51/47/159ac53faf8a11ae5ee8bb9db10327575557504e549cfd76f447b969aa91/frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75",
+ "url": "https://files.pythonhosted.org/packages/53/82/274e19f122e124aee6d113188615f63b0736b4242a875f482a81f91e07e2/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950",
+ "url": "https://files.pythonhosted.org/packages/6e/4f/b8a5a2f10c4a58c52a52a40cf6cf1ffcdbf3a3b64f276f41dab989bf3ab5/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac",
+ "url": "https://files.pythonhosted.org/packages/7a/35/1328c7b0f780d34f8afc1d87ebdc2bb065a123b24766a0b475f0d67da637/frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98",
+ "url": "https://files.pythonhosted.org/packages/97/94/a1305fa4716726ae0abf3b1069c2d922fcfd442538cb850f1be543f58766/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776",
+ "url": "https://files.pythonhosted.org/packages/ae/83/bcdaa437a9bd693ba658a0310f8cdccff26bd78e45fccf8e49897904a5cd/frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc",
+ "url": "https://files.pythonhosted.org/packages/b0/2c/7be3bdc59dbae444864dbd9cde82790314390ec54636baf6b9ce212627ad/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5",
+ "url": "https://files.pythonhosted.org/packages/b8/28/899931015b8cffbe155392fe9ca663f981a17e1adc69589ee0e1e7cdc9a2/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b",
+ "url": "https://files.pythonhosted.org/packages/cf/3d/2102257e7acad73efc4a0c306ad3953f68c504c16982bbdfee3ad75d8085/frozenlist-1.4.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a",
+ "url": "https://files.pythonhosted.org/packages/d4/e9/759043ab7d169b74fe05ebfbfa9ee5c881c303ebc838e308346204309cd0/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a",
+ "url": "https://files.pythonhosted.org/packages/ec/25/0c87df2e53c0c5d90f7517ca0ff7aca78d050a8ec4d32c4278e8c0e52e51/frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868",
+ "url": "https://files.pythonhosted.org/packages/f4/d6/ca016b0adcf8327714ccef969740688808c86e0287bf3a639ff582f24e82/frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad",
+ "url": "https://files.pythonhosted.org/packages/f8/ce/b9de7dc61e753dc318cf0de862181b484178210c5361eae6eaf06792264d/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ }
+ ],
+ "project_name": "frozenlist",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f4120b08b7f8d32904674c98f07d4caf4083a58343c0c0fa0016e0f0254dfe31",
+ "url": "https://files.pythonhosted.org/packages/05/e5/3162be0abab32f152f331423426471935f286dd4ad70fa704f2a34ea3c1e/gcs-oauth2-boto-plugin-3.0.tar.gz"
+ }
+ ],
+ "project_name": "gcs-oauth2-boto-plugin",
+ "requires_dists": [
+ "boto>=2.29.1",
+ "freezegun; extra == \"dev\"",
+ "google-reauth>=0.1.0",
+ "httplib2>=0.18",
+ "mock; python_version < \"3.3\" and extra == \"dev\"",
+ "oauth2client>=2.2.0",
+ "pyOpenSSL>=0.13",
+ "retry-decorator>=1.0.0",
+ "rsa==4.7.2",
+ "six>=1.12.0"
+ ],
+ "requires_python": null,
+ "version": "3.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4",
+ "url": "https://files.pythonhosted.org/packages/fd/5b/8f0c4a5bb9fd491c277c21eff7ccae71b47d43c4446c9d0c6cff2fe8c2c4/gitdb-4.0.11-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b",
+ "url": "https://files.pythonhosted.org/packages/19/0d/bbb5b5ee188dec84647a4664f3e11b06ade2bde568dbd489d9d64adef8ed/gitdb-4.0.11.tar.gz"
+ }
+ ],
+ "project_name": "gitdb",
+ "requires_dists": [
+ "smmap<6,>=3.0.1"
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.11"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff",
+ "url": "https://files.pythonhosted.org/packages/e9/bd/cc3a402a6439c15c3d4294333e13042b915bbeab54edc457c723931fed3f/GitPython-3.1.43-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c",
+ "url": "https://files.pythonhosted.org/packages/b6/a1/106fd9fa2dd989b6fb36e5893961f82992cf676381707253e0bf93eb1662/GitPython-3.1.43.tar.gz"
+ }
+ ],
+ "project_name": "gitpython",
+ "requires_dists": [
+ "coverage[toml]; extra == \"test\"",
+ "ddt!=1.4.3,>=1.1.1; extra == \"test\"",
+ "gitdb<5,>=4.0.1",
+ "mock; python_version < \"3.8\" and extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pre-commit; extra == \"test\"",
+ "pytest-cov; extra == \"test\"",
+ "pytest-instafail; extra == \"test\"",
+ "pytest-mock; extra == \"test\"",
+ "pytest-sugar; extra == \"test\"",
+ "pytest>=7.3.1; extra == \"test\"",
+ "sphinx-autodoc-typehints; extra == \"doc\"",
+ "sphinx-rtd-theme; extra == \"doc\"",
+ "sphinx==4.3.2; extra == \"doc\"",
+ "sphinxcontrib-applehelp<=1.0.4,>=1.0.2; extra == \"doc\"",
+ "sphinxcontrib-devhelp==1.0.2; extra == \"doc\"",
+ "sphinxcontrib-htmlhelp<=2.0.1,>=2.0.0; extra == \"doc\"",
+ "sphinxcontrib-qthelp==1.0.3; extra == \"doc\"",
+ "sphinxcontrib-serializinghtml==1.1.5; extra == \"doc\"",
+ "typing-extensions; python_version < \"3.11\" and extra == \"test\"",
+ "typing-extensions>=3.7.4.3; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "3.1.43"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b78f74116558e0476e19501b5b4b2ac7c93261a69c5449c861ea95cbc853c688",
+ "url": "https://files.pythonhosted.org/packages/5e/cb/cb0311f2ec371c83d6510847476c665edc9cc97564a51923557bc8f0b680/google_apitools-0.5.32-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c3763e52289f61e21c41d5531e20fbda9cc8484a088b8686fd460770db8bad13",
+ "url": "https://files.pythonhosted.org/packages/dc/eb/c26c36463a769a3a9f08847b9bf218cb629ca91877a911bbd6dcf37d9e62/google-apitools-0.5.32.tar.gz"
+ }
+ ],
+ "project_name": "google-apitools",
+ "requires_dists": [
+ "fasteners>=0.14",
+ "httplib2>=0.8",
+ "mock>=1.0.1; extra == \"testing\"",
+ "oauth2client>=1.4.12",
+ "python-gflags>=3.0.6; extra == \"cli\"",
+ "six>=1.12.0"
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7",
+ "version": "0.5.32"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415",
+ "url": "https://files.pythonhosted.org/packages/9e/8d/ddbcf81ec751d8ee5fd18ac11ff38a0e110f39dfbf105e6d9db69d556dd0/google_auth-2.29.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360",
+ "url": "https://files.pythonhosted.org/packages/18/b2/f14129111cfd61793609643a07ecb03651a71dd65c6974f63b0310ff4b45/google-auth-2.29.0.tar.gz"
+ }
+ ],
+ "project_name": "google-auth",
+ "requires_dists": [
+ "aiohttp<4.0.0.dev0,>=3.6.2; extra == \"aiohttp\"",
+ "cachetools<6.0,>=2.0.0",
+ "cryptography==36.0.2; extra == \"enterprise-cert\"",
+ "cryptography>=38.0.3; extra == \"pyopenssl\"",
+ "pyasn1-modules>=0.2.1",
+ "pyopenssl==22.0.0; extra == \"enterprise-cert\"",
+ "pyopenssl>=20.0.0; extra == \"pyopenssl\"",
+ "pyu2f>=0.1.5; extra == \"reauth\"",
+ "requests<3.0.0.dev0,>=2.20.0; extra == \"aiohttp\"",
+ "requests<3.0.0.dev0,>=2.20.0; extra == \"requests\"",
+ "rsa<5,>=3.1.4"
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.29.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cb39074488d74c8853074dde47368bbf8f739d4a4338b89aab696c895b6d8368",
+ "url": "https://files.pythonhosted.org/packages/69/e1/67ffaa3a645b86318ce30717af7145070ebccec5eef5c623ae08b86129b8/google_reauth-0.1.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9f6852a55c2c5453d581cd01f3d1278e86147c03d008409800390a834235892",
+ "url": "https://files.pythonhosted.org/packages/7d/86/74242e08d24ec4c436b8325dabbd7c60422b4829dfb1ad6ec117bdebea76/google-reauth-0.1.1.tar.gz"
+ }
+ ],
+ "project_name": "google-reauth",
+ "requires_dists": [
+ "oauth2client>=2.0.0; extra == \"oauth2client\"",
+ "pyu2f"
+ ],
+ "requires_python": null,
+ "version": "0.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70",
+ "url": "https://files.pythonhosted.org/packages/f0/fa/c1a5aaa161aee2edce9491757fc394e29415c57b0a6be8e02e208fb8b7e2/grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3",
+ "url": "https://files.pythonhosted.org/packages/00/87/727d8f65646843623064f881ee4446276d049da8bd8da6ef45edc10e6e97/grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5",
+ "url": "https://files.pythonhosted.org/packages/02/71/2a68e19dfd1276524e618149c0e34e08ea39724de10690da23678096fd92/grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d",
+ "url": "https://files.pythonhosted.org/packages/68/19/2575ce3bb14736eb9ab4b2e5026886e119dfc521488d6a2c9ad2d8b1b6d2/grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947",
+ "url": "https://files.pythonhosted.org/packages/c7/bb/d01494037edee2d8e024cac8049b169b2723186b01cebb495ccf677bbba9/grpcio-1.62.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243",
+ "url": "https://files.pythonhosted.org/packages/c9/45/e9237e5fa69bdc2cf01e6ef2be3a421cb1c2c30dbb4e0859ad9ed3bcde0c/grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea",
+ "url": "https://files.pythonhosted.org/packages/cc/fb/09c2e42f37858f699b5f56e40f2c3a45fb24b1b7a9dbed3ae1ca7e5fbac9/grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e",
+ "url": "https://files.pythonhosted.org/packages/e1/5f/19a48b32dac6a5134afbcff4a5deca46b176c58f0b1c2663e11b18db2571/grpcio-1.62.1-cp310-cp310-linux_armv7l.whl"
+ }
+ ],
+ "project_name": "grpcio",
+ "requires_dists": [
+ "grpcio-tools>=1.62.1; extra == \"protobuf\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.62.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "681a2d844acdf05fac989da6dd406944ae11cb27a4cf3c9edef74d2585ab5f05",
+ "url": "https://files.pythonhosted.org/packages/00/ce/9c70a91e1a5fc709e6acf34682b8b2499179ddc27b18b0e3670ff9c257db/gsutil-5.27.tar.gz"
+ }
+ ],
+ "project_name": "gsutil",
+ "requires_dists": [
+ "argcomplete>=1.9.4",
+ "crcmod>=1.7",
+ "fasteners>=0.14.1",
+ "gcs-oauth2-boto-plugin>=3.0",
+ "google-apitools>=0.5.32",
+ "google-auth[aiohttp]>=2.5.0",
+ "google-reauth>=0.1.0",
+ "httplib2==0.20.4",
+ "mock<=3.0.5,>=2.0.0; python_version < \"3.3\"",
+ "monotonic>=1.4",
+ "pyOpenSSL>=0.13",
+ "retry-decorator>=1.0.0",
+ "six>=1.16.0"
+ ],
+ "requires_python": "!=2.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,<4",
+ "version": "5.27"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "61c3384b5575985bb7f85e43213bcb40f36fcdff388cae6bc229304c71f2843e",
+ "url": "https://files.pythonhosted.org/packages/a8/4d/3cbfd81ed84db450dbe73a89afcd8bc405273918415649ac6683356afe92/gymnasium-0.29.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1a532752efcb7590478b1cc7aa04f608eb7a2fdad5570cd217b66b6a35274bb1",
+ "url": "https://files.pythonhosted.org/packages/0d/f8/5699ddb3e1c4f6d97b8930e573074849b921da8374fccd141f0f3a9bd713/gymnasium-0.29.1.tar.gz"
+ }
+ ],
+ "project_name": "gymnasium",
+ "requires_dists": [
+ "autorom[accept-rom-license]~=0.4.2; extra == \"accept-rom-license\"",
+ "box2d-py==2.3.5; extra == \"all\"",
+ "box2d-py==2.3.5; extra == \"box2d\"",
+ "cloudpickle>=1.2.0",
+ "cython<3; extra == \"all\"",
+ "cython<3; extra == \"mujoco-py\"",
+ "cython<3; extra == \"mujoco-py\"",
+ "farama-notifications>=0.0.1",
+ "imageio>=2.14.1; extra == \"all\"",
+ "imageio>=2.14.1; extra == \"mujoco\"",
+ "importlib-metadata>=4.8.0; python_version < \"3.10\"",
+ "jax>=0.4.0; extra == \"all\"",
+ "jax>=0.4.0; extra == \"jax\"",
+ "jaxlib>=0.4.0; extra == \"all\"",
+ "jaxlib>=0.4.0; extra == \"jax\"",
+ "lz4>=3.1.0; extra == \"all\"",
+ "lz4>=3.1.0; extra == \"other\"",
+ "matplotlib>=3.0; extra == \"all\"",
+ "matplotlib>=3.0; extra == \"other\"",
+ "moviepy>=1.0.0; extra == \"all\"",
+ "moviepy>=1.0.0; extra == \"other\"",
+ "mujoco-py<2.2,>=2.1; extra == \"all\"",
+ "mujoco-py<2.2,>=2.1; extra == \"mujoco-py\"",
+ "mujoco-py<2.2,>=2.1; extra == \"mujoco-py\"",
+ "mujoco>=2.3.3; extra == \"all\"",
+ "mujoco>=2.3.3; extra == \"mujoco\"",
+ "numpy>=1.21.0",
+ "opencv-python>=3.0; extra == \"all\"",
+ "opencv-python>=3.0; extra == \"other\"",
+ "pygame>=2.1.3; extra == \"all\"",
+ "pygame>=2.1.3; extra == \"box2d\"",
+ "pygame>=2.1.3; extra == \"classic-control\"",
+ "pygame>=2.1.3; extra == \"classic-control\"",
+ "pygame>=2.1.3; extra == \"toy-text\"",
+ "pygame>=2.1.3; extra == \"toy-text\"",
+ "pytest==7.1.3; extra == \"testing\"",
+ "scipy>=1.7.3; extra == \"testing\"",
+ "shimmy[atari]<1.0,>=0.1.0; extra == \"all\"",
+ "shimmy[atari]<1.0,>=0.1.0; extra == \"atari\"",
+ "swig==4.*; extra == \"all\"",
+ "swig==4.*; extra == \"box2d\"",
+ "torch>=1.0.0; extra == \"all\"",
+ "torch>=1.0.0; extra == \"other\"",
+ "typing-extensions>=4.3.0"
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.29.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "8b6a905cb1c79eefd03f8669fd993c36dc341f7c558f056cb5a33b5c2f458543",
+ "url": "https://files.pythonhosted.org/packages/59/0f/29725a9caf4b2618f524e0f28e2bda91aca8f880123ec77426ede6ea1ea4/httplib2-0.20.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "58a98e45b4b1a48273073f905d2961666ecf0fbac4250ea5b47aef259eb5c585",
+ "url": "https://files.pythonhosted.org/packages/9c/65/57ad964eb8d45cc3d1316ce5ada2632f74e35863a0e57a52398416a182a1/httplib2-0.20.4.tar.gz"
+ }
+ ],
+ "project_name": "httplib2",
+ "requires_dists": [
+ "pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2; python_version > \"3.0\"",
+ "pyparsing<3,>=2.4.2; python_version < \"3.0\""
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "0.20.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f",
+ "url": "https://files.pythonhosted.org/packages/c2/e7/a82b05cf63a603df6e68d59ae6a68bf5064484a0718ea5033660af4b54a9/idna-3.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca",
+ "url": "https://files.pythonhosted.org/packages/bf/3f/ea4b9117521a1e9c50344b909be7886dd00a519552724809bb1f486986c2/idna-3.6.tar.gz"
+ }
+ ],
+ "project_name": "idna",
+ "requires_dists": [],
+ "requires_python": ">=3.5",
+ "version": "3.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b",
+ "url": "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a",
+ "url": "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz"
+ }
+ ],
+ "project_name": "imagesize",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374",
+ "url": "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
+ "url": "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz"
+ }
+ ],
+ "project_name": "iniconfig",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa",
+ "url": "https://files.pythonhosted.org/packages/30/6d/6de6be2d02603ab56e72997708809e8a5b0fbfee080735109b40a3564843/Jinja2-3.1.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90",
+ "url": "https://files.pythonhosted.org/packages/b2/5e/3a21abf3cd467d7876045335e681d276ac32492febe6d98ad89562d1a7e1/Jinja2-3.1.3.tar.gz"
+ }
+ ],
+ "project_name": "jinja2",
+ "requires_dists": [
+ "Babel>=2.7; extra == \"i18n\"",
+ "MarkupSafe>=2.0"
+ ],
+ "requires_python": ">=3.7",
+ "version": "3.1.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f",
+ "url": "https://files.pythonhosted.org/packages/fc/b3/0c0c994fe49cd661084f8d5dc06562af53818cc0abefaca35bdc894577c3/Markdown-3.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224",
+ "url": "https://files.pythonhosted.org/packages/22/02/4785861427848cc11e452cc62bb541006a1087cf04a1de83aedd5530b948/Markdown-3.6.tar.gz"
+ }
+ ],
+ "project_name": "markdown",
+ "requires_dists": [
+ "coverage; extra == \"testing\"",
+ "importlib-metadata>=4.4; python_version < \"3.10\"",
+ "mdx-gh-links>=0.2; extra == \"docs\"",
+ "mkdocs-gen-files; extra == \"docs\"",
+ "mkdocs-literate-nav; extra == \"docs\"",
+ "mkdocs-nature>=0.6; extra == \"docs\"",
+ "mkdocs-section-index; extra == \"docs\"",
+ "mkdocs>=1.5; extra == \"docs\"",
+ "mkdocstrings[python]; extra == \"docs\"",
+ "pyyaml; extra == \"testing\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
+ "url": "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb",
+ "url": "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "markdown-it-py",
+ "requires_dists": [
+ "commonmark~=0.9; extra == \"compare\"",
+ "coverage; extra == \"testing\"",
+ "gprof2dot; extra == \"profiling\"",
+ "jupyter_sphinx; extra == \"rtd\"",
+ "linkify-it-py<3,>=1; extra == \"linkify\"",
+ "markdown~=3.4; extra == \"compare\"",
+ "mdit-py-plugins; extra == \"plugins\"",
+ "mdit-py-plugins; extra == \"rtd\"",
+ "mdurl~=0.1",
+ "mistletoe~=1.0; extra == \"compare\"",
+ "mistune~=2.0; extra == \"compare\"",
+ "myst-parser; extra == \"rtd\"",
+ "panflute~=2.3; extra == \"compare\"",
+ "pre-commit~=3.0; extra == \"code-style\"",
+ "psutil; extra == \"benchmarking\"",
+ "pytest-benchmark; extra == \"benchmarking\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest; extra == \"benchmarking\"",
+ "pytest; extra == \"testing\"",
+ "pyyaml; extra == \"rtd\"",
+ "sphinx-copybutton; extra == \"rtd\"",
+ "sphinx-design; extra == \"rtd\"",
+ "sphinx; extra == \"rtd\"",
+ "sphinx_book_theme; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd",
+ "url": "https://files.pythonhosted.org/packages/30/39/8d845dd7d0b0613d86e0ef89549bfb5f61ed781f59af45fc96496e897f3a/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46",
+ "url": "https://files.pythonhosted.org/packages/0a/7b/85681ae3c33c385b10ac0f8dd025c30af83c78cec1c37a6aa3b55e67f5ec/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900",
+ "url": "https://files.pythonhosted.org/packages/29/fe/a36ba8c7ca55621620b2d7c585313efd10729e63ef81e4e61f52330da781/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff",
+ "url": "https://files.pythonhosted.org/packages/60/ae/9c60231cdfda003434e8bd27282b1f4e197ad5a710c14bee8bea8a9ca4f0/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad",
+ "url": "https://files.pythonhosted.org/packages/65/dc/1510be4d179869f5dafe071aecb3f1f41b45d37c02329dfba01ff59e5ac5/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5",
+ "url": "https://files.pythonhosted.org/packages/6a/4a/a4d49415e600bacae038c67f9fecc1d5433b9d3c71a4de6f33537b89654c/MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f",
+ "url": "https://files.pythonhosted.org/packages/7c/52/2b1b570f6b8b803cef5ac28fdf78c0da318916c7d2fe9402a84d591b394c/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b",
+ "url": "https://files.pythonhosted.org/packages/87/5b/aae44c6655f3801e81aa3eef09dbbf012431987ba564d7231722f68df02d/MarkupSafe-2.1.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc",
+ "url": "https://files.pythonhosted.org/packages/e4/54/ad5eb37bf9d51800010a74e4665425831a9db4e7c4e0fde4352e391e808e/MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl"
+ }
+ ],
+ "project_name": "markupsafe",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.1.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9",
+ "url": "https://files.pythonhosted.org/packages/e5/3c/fe85f19699a7b40c8f9ce8ecee7e269b9b3c94099306df6f9891bdefeedd/mdit_py_plugins-0.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b",
+ "url": "https://files.pythonhosted.org/packages/b4/db/61960d68d5c39ff0dd48cb799a39ae4e297f6e9b96bf2f8da29d897fba0c/mdit_py_plugins-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "mdit-py-plugins",
+ "requires_dists": [
+ "coverage; extra == \"testing\"",
+ "markdown-it-py<4.0.0,>=1.0.0",
+ "myst-parser; extra == \"rtd\"",
+ "pre-commit; extra == \"code-style\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "sphinx-book-theme; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8",
+ "url": "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba",
+ "url": "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz"
+ }
+ ],
+ "project_name": "mdurl",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.1.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c",
+ "url": "https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7",
+ "url": "https://files.pythonhosted.org/packages/ea/ca/8e91948b782ddfbd194f323e7e7d9ba12e5877addf04fb2bf8fca38e86ac/monotonic-1.6.tar.gz"
+ }
+ ],
+ "project_name": "monotonic",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7",
+ "url": "https://files.pythonhosted.org/packages/fa/a2/17e1e23c6be0a916219c5292f509360c345b5fa6beeb50d743203c27532c/multidict-6.0.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef",
+ "url": "https://files.pythonhosted.org/packages/11/b7/bef33e84e3722bc42531af020d7ae8c31235ce8846bacaa852b6484cf868/multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf",
+ "url": "https://files.pythonhosted.org/packages/12/4d/99dfc36872dcc53956879f5da80a6505bbd29214cce90ce792a86e15fddf/multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc",
+ "url": "https://files.pythonhosted.org/packages/26/ce/f745a2d6104e56f7fa0d7d0756bb9ed27b771dd7b8d9d7348cd7f0f7b9de/multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae",
+ "url": "https://files.pythonhosted.org/packages/33/62/2c9085e571318d51212a6914566fe41dd0e33d7f268f7e2f23dcd3f06c56/multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f",
+ "url": "https://files.pythonhosted.org/packages/36/6d/d2f982fb485175727a193b4900b5f929d461e7aa87d6fb5a91a377fcc9c0/multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5",
+ "url": "https://files.pythonhosted.org/packages/8d/ea/0230b6faa9a5bc10650fd50afcc4a86e6c37af2fe05bc679b74d79253732/multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600",
+ "url": "https://files.pythonhosted.org/packages/a4/eb/d8e7693c9064554a1585698d1902839440c6c695b0f53c9a8be5d9d4a3b8/multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9",
+ "url": "https://files.pythonhosted.org/packages/b7/36/48097b96135017ed1b806c5ea27b6cdc2ed3a6861c5372b793563206c586/multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a",
+ "url": "https://files.pythonhosted.org/packages/bc/84/9579004267e1cc5968ef2ef8718dab9d8950d99354d85b739dd67b09c273/multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442",
+ "url": "https://files.pythonhosted.org/packages/c2/5c/1e76b2c742cb9e0248d1e8c4ed420817879230c833fa27d890b5fd22290b/multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182",
+ "url": "https://files.pythonhosted.org/packages/ce/e2/88cdfeaf03eab3498f688a19b62ca704d371cd904cb74b682541ca7b20a7/multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604",
+ "url": "https://files.pythonhosted.org/packages/d9/48/037440edb5d4a1c65e002925b2f24071d6c27754e6f4734f63037e3169d6/multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c",
+ "url": "https://files.pythonhosted.org/packages/f3/7d/fe7648d4b2f200f8854066ce6e56bf51889abfaf859814c62160dd0e32a9/multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da",
+ "url": "https://files.pythonhosted.org/packages/f9/79/722ca999a3a09a63b35aac12ec27dfa8e5bb3a38b0f857f7a1a209a88836/multidict-6.0.5.tar.gz"
+ }
+ ],
+ "project_name": "multidict",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "6.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7c36344ae39c8e740dad7fdabf5aa6fc4897a813083c6cc9990044eb93656b14",
+ "url": "https://files.pythonhosted.org/packages/1d/f6/6d61a023d758f488e36638076e8a4ec4447a2cdf86938cf6c60cf1c860e6/myst_parser-2.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ea929a67a6a0b1683cdbe19b8d2e724cd7643f8aa3e7bb18dd65beac3483bead",
+ "url": "https://files.pythonhosted.org/packages/e8/c1/48ea47b78ade0bb0281f34c9e343e3ea0c681fbc81464dbfd134e983954f/myst_parser-2.0.0.tar.gz"
+ }
+ ],
+ "project_name": "myst-parser",
+ "requires_dists": [
+ "beautifulsoup4; extra == \"testing\"",
+ "coverage[toml]; extra == \"testing\"",
+ "docutils<0.21,>=0.16",
+ "ipython; extra == \"rtd\"",
+ "jinja2",
+ "linkify-it-py~=2.0; extra == \"linkify\"",
+ "markdown-it-py~=3.0",
+ "mdit-py-plugins~=0.4",
+ "pre-commit~=3.0; extra == \"code-style\"",
+ "pydata-sphinx-theme==v0.13.0rc4; extra == \"rtd\"",
+ "pygments; extra == \"testing-docutils\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-param-files~=0.3.4; extra == \"testing\"",
+ "pytest-param-files~=0.3.4; extra == \"testing-docutils\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest<8,>=7; extra == \"testing\"",
+ "pytest<8,>=7; extra == \"testing-docutils\"",
+ "pyyaml",
+ "sphinx-autodoc2~=0.4.2; extra == \"rtd\"",
+ "sphinx-book-theme==1.0.0rc2; extra == \"rtd\"",
+ "sphinx-copybutton; extra == \"rtd\"",
+ "sphinx-design2; extra == \"rtd\"",
+ "sphinx-pyscript; extra == \"rtd\"",
+ "sphinx-pytest; extra == \"testing\"",
+ "sphinx-tippy>=0.3.1; extra == \"rtd\"",
+ "sphinx-togglebutton; extra == \"rtd\"",
+ "sphinx<8,>=6",
+ "sphinxext-opengraph~=0.8.2; extra == \"rtd\"",
+ "sphinxext-rediraffe~=0.2.7; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "2.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1",
+ "url": "https://files.pythonhosted.org/packages/e4/f3/679b3a042a127de0d7c84874913c3e23bb84646eb3bc6ecab3f8c872edc9/numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63",
+ "url": "https://files.pythonhosted.org/packages/0f/ae/dad4b8e7c65494cbbd1c063de114efaf9acd0f5f6171f044f0d4b6299787/numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a",
+ "url": "https://files.pythonhosted.org/packages/42/38/775b43da55fa7473015eddc9a819571517d9a271a9f8134f68fb9be2f212/numpy-1.23.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d",
+ "url": "https://files.pythonhosted.org/packages/4d/39/d33202cc56c21123a50c6d5e160d00c18ff685ab864dbd4bf80dd40a7af9/numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43",
+ "url": "https://files.pythonhosted.org/packages/67/6b/d7c93d458d16464da9b3f560a20c363a19e242ebbb019bd1e1d797523851/numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "numpy",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "1.23.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac",
+ "url": "https://files.pythonhosted.org/packages/95/a9/4f25a14d23f0786b64875b91784607c2277eff25d48f915e39ff0cff505a/oauth2client-4.1.3-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6",
+ "url": "https://files.pythonhosted.org/packages/a6/7b/17244b1083e8e604bf154cf9b716aecd6388acd656dd01893d0d244c94d9/oauth2client-4.1.3.tar.gz"
+ }
+ ],
+ "project_name": "oauth2client",
+ "requires_dists": [
+ "httplib2>=0.9.1",
+ "pyasn1-modules>=0.0.5",
+ "pyasn1>=0.1.7",
+ "rsa>=3.1.4",
+ "six>=1.6.1"
+ ],
+ "requires_python": null,
+ "version": "4.1.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "298f28a2b5ac09145fa958513d3d1e6b349ccf86a877dbdcccad57713fe360b3",
+ "url": "https://files.pythonhosted.org/packages/49/5f/d8e1a24247f506a77cbe22341c72ca91bea3b468c5d6bca2047d885ea3c6/onnx-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "237c6987c6c59d9f44b6136f5819af79574f8d96a760a1fa843bede11f3822f7",
+ "url": "https://files.pythonhosted.org/packages/b3/fe/0978403c8d710ece2f34006367e78de80410743fe0e7680c8f33f2dab20d/onnx-1.16.0.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "034ae21a2aaa2e9c14119a840d2926d213c27aad29e5e3edaa30145a745048e1",
+ "url": "https://files.pythonhosted.org/packages/b8/1c/50310a559857951fc6e069cf5d89deebe34287997d1c5928bca435456f62/onnx-1.16.0-cp310-cp310-macosx_10_15_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9eadbdce25b19d6216f426d6d99b8bc877a65ed92cbef9707751c6669190ba4f",
+ "url": "https://files.pythonhosted.org/packages/c8/0b/f4705e4a3fa6fd0de971302fdae17ad176b024eca8c24360f0e37c00f9df/onnx-1.16.0-cp310-cp310-macosx_10_15_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ec22a43d74eb1f2303373e2fbe7fbcaa45fb225f4eb146edfed1356ada7a9aea",
+ "url": "https://files.pythonhosted.org/packages/ef/6e/96be6692ebcd8da568084d753f386ce08efa1f99b216f346ee281edd6cc3/onnx-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "onnx",
+ "requires_dists": [
+ "Pillow; extra == \"reference\"",
+ "google-re2; extra == \"reference\"",
+ "numpy>=1.20",
+ "protobuf>=3.20.2"
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e4088cab82b66a3b37ffc452976b14a3c599269c247895ae9ceb4066d8188a57",
+ "url": "https://files.pythonhosted.org/packages/d9/64/7fdfb9386511cd6805451e012c537073a79a958a58795c4e602e538c388c/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1a9f0e6267de3a1a1db0c54213d022c7c8b5b9ca4b580e80bdc58516c922c9e1",
+ "url": "https://files.pythonhosted.org/packages/25/72/da7c69a3542071bf1e8f65336721b8b2659194425438d988f79bc14ed9cc/opencv-python-4.9.0.80.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e5f7aa4486651a6ebfa8ed4b594b65bd2d2f41beeb4241a3e4b1b85acbbbadb",
+ "url": "https://files.pythonhosted.org/packages/35/69/b657974ddcbba54d59d7d62b01e60a8b815e35f415b996e4d355be0ac7b4/opencv_python-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7b34a52e9da36dda8c151c6394aed602e4b17fa041df0b9f5b93ae10b0fcca2a",
+ "url": "https://files.pythonhosted.org/packages/52/00/2adf376707c7965bb4569f28f73fafe303c404d01047b10e3b52761be086/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "71dfb9555ccccdd77305fc3dcca5897fbf0cf28b297c51ee55e079c065d812a3",
+ "url": "https://files.pythonhosted.org/packages/77/df/b56175c3fb5bc058774bdcf35f5a71cf9c3c5b909f98a1c688eb71cd3b1f/opencv_python-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl"
+ }
+ ],
+ "project_name": "opencv-python",
+ "requires_dists": [
+ "numpy>=1.13.3; python_version < \"3.7\"",
+ "numpy>=1.17.0; python_version >= \"3.7\"",
+ "numpy>=1.17.3; python_version >= \"3.8\"",
+ "numpy>=1.19.3; python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\"",
+ "numpy>=1.19.3; python_version >= \"3.9\"",
+ "numpy>=1.21.0; python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\"",
+ "numpy>=1.21.2; python_version >= \"3.10\"",
+ "numpy>=1.21.4; python_version >= \"3.10\" and platform_system == \"Darwin\"",
+ "numpy>=1.23.5; python_version >= \"3.11\"",
+ "numpy>=1.26.0; python_version >= \"3.12\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "4.9.0.80"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9",
+ "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz"
+ }
+ ],
+ "project_name": "packaging",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "24.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
+ "url": "https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be",
+ "url": "https://files.pythonhosted.org/packages/54/c6/43f9d44d92aed815e781ca25ba8c174257e27253a94630d21be8725a2b59/pluggy-1.4.0.tar.gz"
+ }
+ ],
+ "project_name": "pluggy",
+ "requires_dists": [
+ "pre-commit; extra == \"dev\"",
+ "pytest-benchmark; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"dev\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9",
+ "url": "https://files.pythonhosted.org/packages/f4/d5/db585a5e8d64af6b384c7b3a63da13df2ff86933e486ba78431736c67c25/protobuf-4.25.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d",
+ "url": "https://files.pythonhosted.org/packages/15/db/7f731524fe0e56c6b2eb57d05b55d3badd80ef7d1f1ed59db191b2fdd8ab/protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c",
+ "url": "https://files.pythonhosted.org/packages/5e/d8/65adb47d921ce828ba319d6587aa8758da022de509c3862a70177a958844/protobuf-4.25.3.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019",
+ "url": "https://files.pythonhosted.org/packages/d8/82/aefe901174b5a618daee511ddd00342193c1b545e3cd6a2cd6df9ba452b5/protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c",
+ "url": "https://files.pythonhosted.org/packages/f3/bf/26deba06a4c910a85f78245cac7698f67cedd7efe00d04f6b3e1b3506a59/protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl"
+ }
+ ],
+ "project_name": "protobuf",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "4.25.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8",
+ "url": "https://files.pythonhosted.org/packages/05/33/2d74d588408caedd065c2497bdb5ef83ce6082db01289a1e1147f6639802/psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c",
+ "url": "https://files.pythonhosted.org/packages/90/c7/6dc0a455d111f68ee43f27793971cf03fe29b6ef972042549db29eec39a2/psutil-5.9.8.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421",
+ "url": "https://files.pythonhosted.org/packages/b3/bd/28c5f553667116b2598b9cc55908ec435cb7f77a34f2bff3e3ca765b0f78/psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4",
+ "url": "https://files.pythonhosted.org/packages/c5/4f/0e22aaa246f96d6ac87fe5ebb9c5a693fbe8877f537a1022527c47ca43c5/psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81",
+ "url": "https://files.pythonhosted.org/packages/e7/e3/07ae864a636d70a8a6f58da27cb1179192f1140d5d1da10886ade9405797/psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl"
+ }
+ ],
+ "project_name": "psutil",
+ "requires_dists": [
+ "enum34; python_version <= \"3.4\" and extra == \"test\"",
+ "ipaddress; python_version < \"3.0\" and extra == \"test\"",
+ "mock; python_version < \"3.0\" and extra == \"test\"",
+ "pywin32; sys_platform == \"win32\" and extra == \"test\"",
+ "wmi; sys_platform == \"win32\" and extra == \"test\""
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7",
+ "version": "5.9.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378",
+ "url": "https://files.pythonhosted.org/packages/f6/f0/10642828a8dfb741e5f3fbaac830550a518a775c7fff6f04a007259b0548/py-1.11.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719",
+ "url": "https://files.pythonhosted.org/packages/98/ff/fec109ceb715d2a6b4c4a85a61af3b40c723a961e8828319fbcb15b868dc/py-1.11.0.tar.gz"
+ }
+ ],
+ "project_name": "py",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7",
+ "version": "1.11.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5",
+ "url": "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690",
+ "url": "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz"
+ }
+ ],
+ "project_name": "py-cpuinfo",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "9.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473",
+ "url": "https://files.pythonhosted.org/packages/23/7e/5f50d07d5e70a2addbccd90ac2950f81d1edd0783630651d9268d7f1db49/pyasn1-0.6.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c",
+ "url": "https://files.pythonhosted.org/packages/4a/a3/d2157f333900747f20984553aca98008b6dc843eb62f3a36030140ccec0d/pyasn1-0.6.0.tar.gz"
+ }
+ ],
+ "project_name": "pyasn1",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "0.6.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b",
+ "url": "https://files.pythonhosted.org/packages/13/68/8906226b15ef38e71dc926c321d2fe99de8048e9098b5dfd38343011c886/pyasn1_modules-0.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6",
+ "url": "https://files.pythonhosted.org/packages/f7/00/e7bd1dec10667e3f2be602686537969a7ac92b0a7c5165be2e5875dc3971/pyasn1_modules-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "pyasn1-modules",
+ "requires_dists": [
+ "pyasn1<0.7.0,>=0.4.6"
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc",
+ "url": "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6",
+ "url": "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz"
+ }
+ ],
+ "project_name": "pycparser",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "2.22"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "03879ec299c9f4ba23901b2649a96b2143f0a5d787f0b6c39469989e2320caf1",
+ "url": "https://files.pythonhosted.org/packages/c8/c7/0d77e0e327bf09c12f445f92f5bad0b447375d7b836c5bac5255ead8436f/pygame-2.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a0769eb628c818761755eb0a0ca8216b95270ea8cbcbc82227e39ac9644643da",
+ "url": "https://files.pythonhosted.org/packages/14/54/dc58f8b70e08b6706b158f0c70f86eb1594db6797cb89383f062ad6a304d/pygame-2.5.2-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f30d1618672a55e8c6669281ba264464b3ab563158e40d89e8c8b3faa0febebd",
+ "url": "https://files.pythonhosted.org/packages/5b/91/09f93d428b483c451eacee9ba1e04a1e9999751c80bf6236b2bdc8e19b1e/pygame-2.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ed9a3d98adafa0805ccbaaff5d2996a2b5795381285d8437a4a5d248dbd12b4a",
+ "url": "https://files.pythonhosted.org/packages/65/b6/67e33add85b0f7ac901c6fb89a57f97fdfd67c8834f425a97abaf4a60191/pygame-2.5.2-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c1b89eb5d539e7ac5cf75513125fb5f2f0a2d918b1fd6e981f23bf0ac1b1c24a",
+ "url": "https://files.pythonhosted.org/packages/c6/aa/2c0c867d6cff00966cfc2152b25f61599f87e88b239e4dcb8ad5357f0f69/pygame-2.5.2.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "39690e9be9baf58b7359d1f3b2336e1fd6f92fedbbce42987be5df27f8d30718",
+ "url": "https://files.pythonhosted.org/packages/e8/6e/31d7a068edbb029e5a35d8fe4572b67e00705cb8f6dad650397bc417b6b3/pygame-2.5.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl"
+ }
+ ],
+ "project_name": "pygame",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "2.5.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c",
+ "url": "https://files.pythonhosted.org/packages/97/9c/372fef8377a6e340b1704768d20daaded98bf13282b5327beb2e2fe2c7ef/pygments-2.17.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367",
+ "url": "https://files.pythonhosted.org/packages/55/59/8bccf4157baf25e4aa5a0bb7fa3ba8600907de105ebc22b0c78cfbf6f565/pygments-2.17.2.tar.gz"
+ }
+ ],
+ "project_name": "pygments",
+ "requires_dists": [
+ "colorama>=0.4.6; extra == \"windows-terminal\"",
+ "importlib-metadata; python_version < \"3.8\" and extra == \"plugins\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.17.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad",
+ "url": "https://files.pythonhosted.org/packages/54/a7/2104f674a5a6845b04c8ff01659becc6b8978ca410b82b94287e0b1e018b/pyOpenSSL-24.1.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f",
+ "url": "https://files.pythonhosted.org/packages/91/a8/cbeec652549e30103b9e6147ad433405fdd18807ac2d54e6dbb73184d8a1/pyOpenSSL-24.1.0.tar.gz"
+ }
+ ],
+ "project_name": "pyopenssl",
+ "requires_dists": [
+ "cryptography<43,>=41.0.5",
+ "pretend; extra == \"test\"",
+ "pytest-rerunfailures; extra == \"test\"",
+ "pytest>=3.0.1; extra == \"test\"",
+ "sphinx!=5.2.0,!=5.2.0.post0,!=7.2.5; extra == \"docs\"",
+ "sphinx-rtd-theme; extra == \"docs\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "24.1.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742",
+ "url": "https://files.pythonhosted.org/packages/9d/ea/6d76df31432a0e6fdf81681a895f009a4bb47b3c39036db3e1b528191d52/pyparsing-3.1.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
+ "url": "https://files.pythonhosted.org/packages/46/3a/31fd28064d016a2182584d579e033ec95b809d8e220e74c4af6f0f2e8842/pyparsing-3.1.2.tar.gz"
+ }
+ ],
+ "project_name": "pyparsing",
+ "requires_dists": [
+ "jinja2; extra == \"diagrams\"",
+ "railroad-diagrams; extra == \"diagrams\""
+ ],
+ "requires_python": ">=3.6.8",
+ "version": "3.1.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7",
+ "url": "https://files.pythonhosted.org/packages/4d/7e/c79cecfdb6aa85c6c2e3cf63afc56d0f165f24f5c66c03c695c4d9b84756/pytest-8.1.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044",
+ "url": "https://files.pythonhosted.org/packages/30/b7/7d44bbc04c531dcc753056920e0988032e5871ac674b5a84cb979de6e7af/pytest-8.1.1.tar.gz"
+ }
+ ],
+ "project_name": "pytest",
+ "requires_dists": [
+ "argcomplete; extra == \"testing\"",
+ "attrs>=19.2; extra == \"testing\"",
+ "colorama; sys_platform == \"win32\"",
+ "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"",
+ "hypothesis>=3.56; extra == \"testing\"",
+ "iniconfig",
+ "mock; extra == \"testing\"",
+ "packaging",
+ "pluggy<2.0,>=1.4",
+ "pygments>=2.7.2; extra == \"testing\"",
+ "requests; extra == \"testing\"",
+ "setuptools; extra == \"testing\"",
+ "tomli>=1; python_version < \"3.11\"",
+ "xmlschema; extra == \"testing\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "8.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6",
+ "url": "https://files.pythonhosted.org/packages/4d/a1/3b70862b5b3f830f0422844f25a823d0470739d994466be9dbbbb414d85a/pytest_benchmark-4.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1",
+ "url": "https://files.pythonhosted.org/packages/28/08/e6b0067efa9a1f2a1eb3043ecd8a0c48bfeb60d3255006dcc829d72d5da2/pytest-benchmark-4.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-benchmark",
+ "requires_dists": [
+ "aspectlib; extra == \"aspect\"",
+ "elasticsearch; extra == \"elasticsearch\"",
+ "pathlib2; python_version < \"3.4\"",
+ "py-cpuinfo",
+ "pygal; extra == \"histogram\"",
+ "pygaljs; extra == \"histogram\"",
+ "pytest>=3.8",
+ "statistics; python_version < \"3.4\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6",
+ "url": "https://files.pythonhosted.org/packages/20/49/b3e0edec68d81846f519c602ac38af9db86e1e71275528b3e814ae236063/pytest_cov-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470",
+ "url": "https://files.pythonhosted.org/packages/61/41/e046526849972555928a6d31c2068410e47a31fb5ab0a77f868596811329/pytest-cov-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-cov",
+ "requires_dists": [
+ "coverage[toml]>=5.2.1",
+ "fields; extra == \"testing\"",
+ "hunter; extra == \"testing\"",
+ "process-tests; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing\"",
+ "pytest>=4.6",
+ "six; extra == \"testing\"",
+ "virtualenv; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "810958f66a91afb1a1e2ae83089d8dc1cd2437ac96b12963042fbb9fb4d16af0",
+ "url": "https://files.pythonhosted.org/packages/f4/af/9c0bda43e486a3c9bf1e0f876d0f241bc3f229d7d65d09331a0868db9629/pytest_forked-1.6.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4dafd46a9a600f65d822b8f605133ecf5b3e1941ebb3588e943b4e3eb71a5a3f",
+ "url": "https://files.pythonhosted.org/packages/8c/c9/93ad2ba2413057ee694884b88cf7467a46c50c438977720aeac26e73fdb7/pytest-forked-1.6.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-forked",
+ "requires_dists": [
+ "py",
+ "pytest>=3.10"
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.6.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b05cb0bcd51a7cd0375bfbeeb3eaeb01fc85665e45b21fc9494a8a19137f4d32",
+ "url": "https://files.pythonhosted.org/packages/c5/d1/2ef73ee137add043df444fddf1c851b8ca70ab9c7b7f18e18c4c244fec6d/pytest_platform_markers-1.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "07ea92669114ba8083b6653995b5a9ab14d57ca16307fd2af22d6f7d295160e4",
+ "url": "https://files.pythonhosted.org/packages/b3/e7/174a22a8cb4cf4b64456cd799f472bb90206f1ce8d537edbc1d9659689a3/pytest-platform-markers-1.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-platform-markers",
+ "requires_dists": [
+ "pytest>=3.6.0"
+ ],
+ "requires_python": null,
+ "version": "1.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4197bdd2eaeffdbf50b5ea6e7236f47ff0e44d1def8dae08e409f536d84e7b32",
+ "url": "https://files.pythonhosted.org/packages/dc/e7/e75bd157331aecc190f5f8950d7ea3d2cf56c3c57fb44da70e60b221133f/pytest_rerunfailures-14.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4a400bcbcd3c7a4ad151ab8afac123d90eca3abe27f98725dc4d9702887d2e92",
+ "url": "https://files.pythonhosted.org/packages/cc/a4/6de45fe850759e94aa9a55cda807c76245af1941047294df26c851dfb4a9/pytest-rerunfailures-14.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-rerunfailures",
+ "requires_dists": [
+ "packaging>=17.1",
+ "pytest>=7.2"
+ ],
+ "requires_python": ">=3.8",
+ "version": "14.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65",
+ "url": "https://files.pythonhosted.org/packages/21/08/b1945d4b4986eb1aa10cf84efc5293bba39da80a2f95db3573dd90678408/pytest_xdist-2.5.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf",
+ "url": "https://files.pythonhosted.org/packages/5d/43/9dbc32d297d6eae85d6c05dc8e8d3371061bd6cbe56a2f645d9ea4b53d9b/pytest-xdist-2.5.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-xdist",
+ "requires_dists": [
+ "execnet>=1.1",
+ "filelock; extra == \"testing\"",
+ "psutil>=3.0; extra == \"psutil\"",
+ "pytest-forked",
+ "pytest>=6.2.0",
+ "setproctitle; extra == \"setproctitle\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "2.5.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a3caa3a11842fc7d5746376f37195e6af5f17c0a15737538bb1cebf656fb306b",
+ "url": "https://files.pythonhosted.org/packages/29/b5/c1209e6cb77647bc2c9a6a1a953355720f34f3b006b725e303c70f3c0786/pyu2f-0.1.5.tar.gz"
+ }
+ ],
+ "project_name": "pyu2f",
+ "requires_dists": [
+ "six"
+ ],
+ "requires_python": null,
+ "version": "0.1.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290",
+ "url": "https://files.pythonhosted.org/packages/07/91/45dfd0ef821a7f41d9d0136ea3608bb5b1653e42fd56a7970532cb5c003f/PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
+ "url": "https://files.pythonhosted.org/packages/29/61/bf33c6c85c55bc45a29eee3195848ff2d518d84735eb0e2d8cb42e0d285e/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f",
+ "url": "https://files.pythonhosted.org/packages/5b/07/10033a403b23405a8fc48975444463d3d10a5c2736b7eb2550b07b367429/PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
+ "url": "https://files.pythonhosted.org/packages/96/06/4beb652c0fe16834032e54f0956443d4cc797fe645527acee59e7deaa0a2/PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
+ "url": "https://files.pythonhosted.org/packages/ba/91/090818dfa62e85181f3ae23dd1e8b7ea7f09684864a900cab72d29c57346/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
+ "url": "https://files.pythonhosted.org/packages/cd/e5/af35f7ea75cf72f2cd079c95ee16797de7cd71f29ea7c68ae5ce7be1eda0/PyYAML-6.0.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
+ "url": "https://files.pythonhosted.org/packages/f1/26/55e4f21db1f72eaef092015d9017c11510e7e6301c62a6cfee91295d13c6/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "pyyaml",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "6.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "url": "https://files.pythonhosted.org/packages/70/8e/0e2d847013cb52cd35b38c009bb167a1a26b2ce6cd6965bf26b47bc0bf44/requests-2.31.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1",
+ "url": "https://files.pythonhosted.org/packages/9d/be/10918a2eac4ae9f02f6cfe6414b7a155ccd8f7f9d4380d62fd5b955065c3/requests-2.31.0.tar.gz"
+ }
+ ],
+ "project_name": "requests",
+ "requires_dists": [
+ "PySocks!=1.5.7,>=1.5.6; extra == \"socks\"",
+ "certifi>=2017.4.17",
+ "chardet<6,>=3.0.2; extra == \"use-chardet-on-py3\"",
+ "charset-normalizer<4,>=2",
+ "idna<4,>=2.5",
+ "urllib3<3,>=1.21.1"
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.31.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e1e8ad02e518fe11073f2ea7d80b6b8be19daa27a60a1838aff7c731ddcf2ebe",
+ "url": "https://files.pythonhosted.org/packages/6e/e6/bedc75b264cbcbf6e6d0e5071d96d739f540fc09be31744a7a8824c02a8e/retry_decorator-1.1.1.tar.gz"
+ }
+ ],
+ "project_name": "retry-decorator",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2",
+ "url": "https://files.pythonhosted.org/packages/e9/93/0c0f002031f18b53af7a6166103c02b9c0667be528944137cc954ec921b3/rsa-4.7.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9",
+ "url": "https://files.pythonhosted.org/packages/db/b5/475c45a58650b0580421746504b680cd2db4e81bc941e94ca53785250269/rsa-4.7.2.tar.gz"
+ }
+ ],
+ "project_name": "rsa",
+ "requires_dists": [
+ "pyasn1>=0.1.3"
+ ],
+ "requires_python": "<4,>=3.5",
+ "version": "4.7.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "5f75eb91d8ab6037c754a87b8501cc581b2827e923682f593bed3539ce5b3999",
+ "url": "https://files.pythonhosted.org/packages/b1/f8/2038661bc32579d0c11191fc1093e49db590bfb6e63d501d7995fb798d62/sentry_sdk-1.44.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "24e6a53eeabffd2f95d952aa35ca52f0f4201d17f820ac9d3ff7244c665aaf68",
+ "url": "https://files.pythonhosted.org/packages/fd/72/85a8bc961d9160ac8c9f0a6d39dbdad21795d55c7b02a433bd0ffb75c037/sentry-sdk-1.44.1.tar.gz"
+ }
+ ],
+ "project_name": "sentry-sdk",
+ "requires_dists": [
+ "aiohttp>=3.5; extra == \"aiohttp\"",
+ "apache-beam>=2.12; extra == \"beam\"",
+ "arq>=0.23; extra == \"arq\"",
+ "asttokens; extra == \"pure-eval\"",
+ "asyncpg>=0.23; extra == \"asyncpg\"",
+ "blinker>=1.1; extra == \"flask\"",
+ "blinker>=1.1; extra == \"quart\"",
+ "bottle>=0.12.13; extra == \"bottle\"",
+ "celery-redbeat>=2; extra == \"celery-redbeat\"",
+ "celery>=3; extra == \"celery\"",
+ "certifi",
+ "chalice>=1.16.0; extra == \"chalice\"",
+ "clickhouse-driver>=0.2.0; extra == \"clickhouse-driver\"",
+ "django>=1.8; extra == \"django\"",
+ "executing; extra == \"pure-eval\"",
+ "falcon>=1.4; extra == \"falcon\"",
+ "fastapi>=0.79.0; extra == \"fastapi\"",
+ "flask>=0.11; extra == \"flask\"",
+ "grpcio>=1.21.1; extra == \"grpcio\"",
+ "httpx>=0.16.0; extra == \"httpx\"",
+ "huey>=2; extra == \"huey\"",
+ "loguru>=0.5; extra == \"loguru\"",
+ "markupsafe; extra == \"flask\"",
+ "openai>=1.0.0; extra == \"openai\"",
+ "opentelemetry-distro>=0.35b0; extra == \"opentelemetry\"",
+ "opentelemetry-distro~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-aiohttp-client~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-django~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-fastapi~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-flask~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-requests~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-sqlite3~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-urllib~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "pure-eval; extra == \"pure-eval\"",
+ "pymongo>=3.1; extra == \"pymongo\"",
+ "pyspark>=2.4.4; extra == \"pyspark\"",
+ "quart>=0.16.1; extra == \"quart\"",
+ "rq>=0.6; extra == \"rq\"",
+ "sanic>=0.8; extra == \"sanic\"",
+ "sqlalchemy>=1.2; extra == \"sqlalchemy\"",
+ "starlette>=0.19.1; extra == \"starlette\"",
+ "starlite>=1.48; extra == \"starlite\"",
+ "tiktoken>=0.3.0; extra == \"openai\"",
+ "tornado>=5; extra == \"tornado\"",
+ "urllib3>=1.25.7; python_version <= \"3.4\"",
+ "urllib3>=1.26.11; python_version >= \"3.6\"",
+ "urllib3>=1.26.9; python_version == \"3.5\""
+ ],
+ "requires_python": null,
+ "version": "1.44.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5",
+ "url": "https://files.pythonhosted.org/packages/70/1d/3b2249c833c7d52b59ff0602d760df0543dc1e6c272f145b949750edeb01/setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0",
+ "url": "https://files.pythonhosted.org/packages/24/55/8b369b56007a5a2c7594cdb58cd4a09d7cca65b28483bb5582c6975663f1/setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9",
+ "url": "https://files.pythonhosted.org/packages/35/30/ac99ecae8458ba995f85aa3aa911004679b405922e1487b0fba6fe8f4d37/setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d",
+ "url": "https://files.pythonhosted.org/packages/3d/92/17168f4bb1a695094e93e73a1ef1f7b89953a6d91e8a7699a2c840ba712f/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754",
+ "url": "https://files.pythonhosted.org/packages/4f/cc/c51e6371f640a9adbe693ddb89d68596e5a8e4b5e05b4d3c65ec504e2f6d/setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85",
+ "url": "https://files.pythonhosted.org/packages/69/a7/2a77b68c11db87c22350381d6ce022011eb420076790e0e3697153e89458/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39",
+ "url": "https://files.pythonhosted.org/packages/79/e7/54b36be02aee8ad573be68f6f46fd62838735c2f007b22df50eb5e13a20d/setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f",
+ "url": "https://files.pythonhosted.org/packages/87/7b/69bdc791001250dff279a1a81904f3f563caece4fa1607a95b9fd5197d6e/setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5",
+ "url": "https://files.pythonhosted.org/packages/94/ad/4166381d79f6ae8138be9b49f05d193a8deb748debace9896dffad45a753/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3",
+ "url": "https://files.pythonhosted.org/packages/9c/56/6f4a4e80b2810eb7ea9ab355022c780ef80457de368ab5b6b21b795e4f05/setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0",
+ "url": "https://files.pythonhosted.org/packages/9d/09/bc108723bbfb7c50c22fdf22191f3e32abcb5d6f46610018030b25f601c5/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452",
+ "url": "https://files.pythonhosted.org/packages/c3/7d/d03f319e0f3b3a6e98731a56cd4d81478ed0c12531b822fd2c728b948edb/setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74",
+ "url": "https://files.pythonhosted.org/packages/d0/ae/010811bece9a59a8bba131d9e7acea9c2e3c3cbf544bf06d8b10b8c28ff5/setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae",
+ "url": "https://files.pythonhosted.org/packages/ff/e1/b16b16a1aa12174349d15b73fd4b87e641a8ae3fb1163e80938dbbf6ae98/setproctitle-1.3.3.tar.gz"
+ }
+ ],
+ "project_name": "setproctitle",
+ "requires_dists": [
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.3.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6d10741ff20b89cd8c6a536ee9dc90d3002dec0226c78fb98605bfb9ef8a7adf",
+ "url": "https://files.pythonhosted.org/packages/40/a9/7deac76c58fa47c95360116a06b53b9b62f6db11336fe61b6ab53784d98b/setuptools-59.5.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d144f85102f999444d06f9c0e8c737fd0194f10f2f7e5fdb77573f6e2fa4fad0",
+ "url": "https://files.pythonhosted.org/packages/e6/e2/f2bfdf364e016f7a464db709ea40d1101c4c5a463dd7019dae0a42dbd1c6/setuptools-59.5.0.tar.gz"
+ }
+ ],
+ "project_name": "setuptools",
+ "requires_dists": [
+ "flake8-2020; extra == \"testing\"",
+ "furo; extra == \"docs\"",
+ "jaraco.envs>=2.2; extra == \"testing\"",
+ "jaraco.packaging>=8.2; extra == \"docs\"",
+ "jaraco.path>=3.2.0; extra == \"testing\"",
+ "jaraco.tidelift>=1.4; extra == \"docs\"",
+ "mock; extra == \"testing\"",
+ "paver; extra == \"testing\"",
+ "pip>=19.1; extra == \"testing\"",
+ "pygments-github-lexers==0.0.5; extra == \"docs\"",
+ "pytest-black>=0.3.7; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-checkdocs>=2.4; extra == \"testing\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-enabler>=1.0.1; extra == \"testing\"",
+ "pytest-flake8; extra == \"testing\"",
+ "pytest-mypy; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-virtualenv>=1.2.7; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing\"",
+ "pytest>=6; extra == \"testing\"",
+ "rst.linker>=1.9; extra == \"docs\"",
+ "sphinx-inline-tabs; extra == \"docs\"",
+ "sphinx; extra == \"docs\"",
+ "sphinx; extra == \"testing\"",
+ "sphinxcontrib-towncrier; extra == \"docs\"",
+ "virtualenv>=13.0.0; extra == \"testing\"",
+ "wheel; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "59.5.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254",
+ "url": "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
+ "url": "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz"
+ }
+ ],
+ "project_name": "six",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da",
+ "url": "https://files.pythonhosted.org/packages/a7/a5/10f97f73544edcdef54409f1d839f6049a0d79df68adbc1ceb24d1aaca42/smmap-5.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62",
+ "url": "https://files.pythonhosted.org/packages/88/04/b5bf6d21dc4041000ccba7eb17dd3055feb237e7ffc2c20d3fae3af62baa/smmap-5.0.1.tar.gz"
+ }
+ ],
+ "project_name": "smmap",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "5.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a",
+ "url": "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1",
+ "url": "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz"
+ }
+ ],
+ "project_name": "snowballstemmer",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "1e09160a40b956dc623c910118fa636da93bd3ca0b9876a7b3df90f07d691560",
+ "url": "https://files.pythonhosted.org/packages/b2/b6/8ed35256aa530a9d3da15d20bdc0ba888d5364441bb50a5a83ee7827affe/sphinx-7.2.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9a5160e1ea90688d5963ba09a2dcd8bdd526620edbb65c328728f1b2228d5ab5",
+ "url": "https://files.pythonhosted.org/packages/73/8e/6e51da4b26665b4b92b1944ea18b2d9c825e753e19180cc5bdc818d0ed3b/sphinx-7.2.6.tar.gz"
+ }
+ ],
+ "project_name": "sphinx",
+ "requires_dists": [
+ "Jinja2>=3.0",
+ "Pygments>=2.14",
+ "alabaster<0.8,>=0.7",
+ "babel>=2.9",
+ "colorama>=0.4.5; sys_platform == \"win32\"",
+ "cython>=3.0; extra == \"test\"",
+ "docutils-stubs; extra == \"lint\"",
+ "docutils<0.21,>=0.18.1",
+ "filelock; extra == \"test\"",
+ "flake8-simplify; extra == \"lint\"",
+ "flake8>=3.5.0; extra == \"lint\"",
+ "html5lib; extra == \"test\"",
+ "imagesize>=1.3",
+ "importlib-metadata>=4.8; python_version < \"3.10\"",
+ "isort; extra == \"lint\"",
+ "mypy>=0.990; extra == \"lint\"",
+ "packaging>=21.0",
+ "pytest>=4.6; extra == \"test\"",
+ "requests>=2.25.0",
+ "ruff; extra == \"lint\"",
+ "setuptools>=67.0; extra == \"test\"",
+ "snowballstemmer>=2.0",
+ "sphinx-lint; extra == \"lint\"",
+ "sphinxcontrib-applehelp",
+ "sphinxcontrib-devhelp",
+ "sphinxcontrib-htmlhelp>=2.0.0",
+ "sphinxcontrib-jsmath",
+ "sphinxcontrib-qthelp",
+ "sphinxcontrib-serializinghtml>=1.1.9",
+ "sphinxcontrib-websupport; extra == \"docs\"",
+ "types-requests; extra == \"lint\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "7.2.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4",
+ "url": "https://files.pythonhosted.org/packages/56/89/fea3fbf6785b388e6cb8a1beaf62f96e80b37311bdeed6e133388a732426/sphinxcontrib_applehelp-1.0.8-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619",
+ "url": "https://files.pythonhosted.org/packages/26/6b/68f470fc337ed24043fec987b101f25b35010970bd958970c2ae5990859f/sphinxcontrib_applehelp-1.0.8.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-applehelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f",
+ "url": "https://files.pythonhosted.org/packages/a0/52/1049d918d1d1c72857d285c3f0c64c1cbe0be394ce1c93a3d2aa4f39fe3b/sphinxcontrib_devhelp-1.0.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3",
+ "url": "https://files.pythonhosted.org/packages/c7/a1/80b7e9f677abc673cb9320bf255ad4e08931ccbc2e66bde4b59bad3809ad/sphinxcontrib_devhelp-1.0.6.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-devhelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04",
+ "url": "https://files.pythonhosted.org/packages/c2/e9/74c4cda5b409af3222fda38f0774e616011bc935f639dbc0da5ca2d1be7d/sphinxcontrib_htmlhelp-2.0.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015",
+ "url": "https://files.pythonhosted.org/packages/8a/03/2f9d699fbfdf03ecb3b6d0e2a268a8998d009f2a9f699c2dcc936899257d/sphinxcontrib_htmlhelp-2.0.5.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-htmlhelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "html5lib; extra == \"test\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "2.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178",
+ "url": "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8",
+ "url": "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-jsmath",
+ "requires_dists": [
+ "flake8; extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.5",
+ "version": "1.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182",
+ "url": "https://files.pythonhosted.org/packages/80/b3/1beac14a88654d2e5120d0143b49be5ad450b86eb1963523d8dbdcc51eb2/sphinxcontrib_qthelp-1.0.7-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6",
+ "url": "https://files.pythonhosted.org/packages/ac/29/705cd4e93e98a8473d62b5c32288e6de3f0c9660d3c97d4e80d3dbbad82b/sphinxcontrib_qthelp-1.0.7.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-qthelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7",
+ "url": "https://files.pythonhosted.org/packages/38/24/228bb903ea87b9e08ab33470e6102402a644127108c7117ac9c00d849f82/sphinxcontrib_serializinghtml-1.1.10-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f",
+ "url": "https://files.pythonhosted.org/packages/54/13/8dd7a7ed9c58e16e20c7f4ce8e4cb6943eb580955236d0c0d00079a73c49/sphinxcontrib_serializinghtml-1.1.10.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-serializinghtml",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.1.10"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45",
+ "url": "https://files.pythonhosted.org/packages/3a/d0/b97889ffa769e2d1fdebb632084d5e8b53fc299d43a537acee7ec0c021a3/tensorboard-2.16.2-py3-none-any.whl"
+ }
+ ],
+ "project_name": "tensorboard",
+ "requires_dists": [
+ "absl-py>=0.4",
+ "grpcio>=1.48.2",
+ "markdown>=2.6.8",
+ "numpy>=1.12.0",
+ "protobuf!=4.24.0,>=3.19.6",
+ "setuptools>=41.0.0",
+ "six>1.9",
+ "tensorboard-data-server<0.8.0,>=0.7.0",
+ "werkzeug>=1.0.1"
+ ],
+ "requires_python": ">=3.9",
+ "version": "2.16.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530",
+ "url": "https://files.pythonhosted.org/packages/73/c6/825dab04195756cf8ff2e12698f22513b3db2f64925bdd41671bfb33aaa5/tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb",
+ "url": "https://files.pythonhosted.org/packages/7a/13/e503968fefabd4c6b2650af21e110aa8466fe21432cd7c43a84577a89438/tensorboard_data_server-0.7.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60",
+ "url": "https://files.pythonhosted.org/packages/b7/85/dabeaf902892922777492e1d253bb7e1264cadce3cea932f7ff599e53fea/tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl"
+ }
+ ],
+ "project_name": "tensorboard-data-server",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.7.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "url": "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f",
+ "url": "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz"
+ }
+ ],
+ "project_name": "tomli",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "37f14f640b91effe41db244b932c2dd697ca2b51ae241a534259b9d9f7f51f6f",
+ "url": "https://download.pytorch.org/whl/cpu/torch-1.12.0+cpu-cp310-cp310-linux_x86_64.whl"
+ }
+ ],
+ "project_name": "torch",
+ "requires_dists": [
+ "typing-extensions"
+ ],
+ "requires_python": ">=3.7.0",
+ "version": "1.12.0+cpu"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475",
+ "url": "https://files.pythonhosted.org/packages/f9/de/dc04a3ea60b22624b51c703a84bbe0184abcd1d0b9bc8074b5d6b7ab90bb/typing_extensions-4.10.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb",
+ "url": "https://files.pythonhosted.org/packages/16/3a/0d26ce356c7465a19c9ea8814b960f8a36c3b0d07c323176620b7b483e44/typing_extensions-4.10.0.tar.gz"
+ }
+ ],
+ "project_name": "typing-extensions",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "4.10.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "url": "https://files.pythonhosted.org/packages/a2/73/a68704750a7679d0b6d3ad7aa8d4da8e14e151ae82e6fee774e6e0d05ec8/urllib3-2.2.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19",
+ "url": "https://files.pythonhosted.org/packages/7a/50/7fd50a27caa0652cd4caf224aa87741ea41d3265ad13f010886167cfcc79/urllib3-2.2.1.tar.gz"
+ }
+ ],
+ "project_name": "urllib3",
+ "requires_dists": [
+ "brotli>=1.0.9; platform_python_implementation == \"CPython\" and extra == \"brotli\"",
+ "brotlicffi>=0.8.0; platform_python_implementation != \"CPython\" and extra == \"brotli\"",
+ "h2<5,>=4; extra == \"h2\"",
+ "pysocks!=1.5.7,<2.0,>=1.5.6; extra == \"socks\"",
+ "zstandard>=0.18.0; extra == \"zstd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "2.2.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "023b6c72a6ef13085c9a970f6714548eca64f56d3d8698e42372764950dfd004",
+ "url": "https://files.pythonhosted.org/packages/53/7c/f3656d1ce3b916ea35f454c6a32b56342168c08baf09a0683df240ca2dce/wandb-0.16.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c317d55af93a688f3eafcdfec897f7b72da1fe1525140e076ecdaab8b09aa46e",
+ "url": "https://files.pythonhosted.org/packages/e1/75/26d5e5923cb6a619215f6eeb6508b67651d0f4a3306169c4a1c5861a3b20/wandb-0.16.5.tar.gz"
+ }
+ ],
+ "project_name": "wandb",
+ "requires_dists": [
+ "Click!=8.0.0,>=7.1",
+ "GitPython!=3.1.29,>=1.0.0",
+ "PyYAML",
+ "PyYAML>=6.0.0; extra == \"launch\"",
+ "appdirs>=1.4.3",
+ "awscli; extra == \"launch\"",
+ "azure-containerregistry; extra == \"launch\"",
+ "azure-identity; extra == \"azure\"",
+ "azure-identity; extra == \"launch\"",
+ "azure-storage-blob; extra == \"azure\"",
+ "azure-storage-blob; extra == \"launch\"",
+ "bokeh; extra == \"media\"",
+ "boto3; extra == \"aws\"",
+ "boto3; extra == \"launch\"",
+ "botocore; extra == \"launch\"",
+ "chardet; extra == \"launch\"",
+ "cloudpickle; extra == \"models\"",
+ "docker-pycreds>=0.4.0",
+ "filelock; extra == \"importers\"",
+ "google-auth; extra == \"launch\"",
+ "google-cloud-aiplatform; extra == \"launch\"",
+ "google-cloud-artifact-registry; extra == \"launch\"",
+ "google-cloud-compute; extra == \"launch\"",
+ "google-cloud-storage; extra == \"gcp\"",
+ "google-cloud-storage; extra == \"kubeflow\"",
+ "google-cloud-storage; extra == \"launch\"",
+ "httpx>=0.23.0; extra == \"async\"",
+ "iso8601; extra == \"launch\"",
+ "kubernetes-asyncio; extra == \"launch\"",
+ "kubernetes; extra == \"kubeflow\"",
+ "kubernetes; extra == \"launch\"",
+ "minio; extra == \"kubeflow\"",
+ "mlflow; extra == \"importers\"",
+ "moviepy; extra == \"media\"",
+ "nbconvert; extra == \"launch\"",
+ "nbformat; extra == \"launch\"",
+ "numpy; extra == \"media\"",
+ "optuna; extra == \"launch\"",
+ "orjson; extra == \"perf\"",
+ "pillow; extra == \"media\"",
+ "plotly>=5.18.0; extra == \"media\"",
+ "polars; extra == \"importers\"",
+ "protobuf!=4.21.0,<5,>=3.12.0; python_version < \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.15.0; python_version == \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.19.0; python_version > \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.19.0; sys_platform != \"linux\"",
+ "psutil>=5.0.0",
+ "pydantic; extra == \"launch\"",
+ "pydantic>=2.0.0; extra == \"reports\"",
+ "rdkit-pypi; extra == \"media\"",
+ "requests<3,>=2.0.0",
+ "rich; extra == \"importers\"",
+ "sentry-sdk>=1.0.0",
+ "setproctitle",
+ "setuptools",
+ "sh; extra == \"kubeflow\"",
+ "soundfile; extra == \"media\"",
+ "sweeps>=0.2.0; extra == \"sweeps\"",
+ "tenacity; extra == \"importers\"",
+ "tomli; extra == \"launch\"",
+ "typing-extensions; extra == \"launch\"",
+ "typing-extensions; python_version < \"3.10\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "0.16.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3aac3f5da756f93030740bc235d3e09449efcf65f2f55e3602e1d851b8f48795",
+ "url": "https://files.pythonhosted.org/packages/e3/23/c9843d7550092ae7ad380611c238f44afef66f58f76c1dab7dcf313e4339/werkzeug-3.0.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e39b645a6ac92822588e7b39a692e7828724ceae0b0d702ef96701f90e70128d",
+ "url": "https://files.pythonhosted.org/packages/0f/84/00f7193d7bd88ced26cd5f868903e431054424610dc7c041bbe87d2a4d66/werkzeug-3.0.2.tar.gz"
+ }
+ ],
+ "project_name": "werkzeug",
+ "requires_dists": [
+ "MarkupSafe>=2.1.1",
+ "watchdog>=2.3; extra == \"watchdog\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.0.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad",
+ "url": "https://files.pythonhosted.org/packages/4d/05/4d79198ae568a92159de0f89e710a8d19e3fa267b719a236582eee921f4a/yarl-1.9.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551",
+ "url": "https://files.pythonhosted.org/packages/0b/58/dd3c69651381a57ac991dba54b20ae2da359eb4b03a661e71c451d6525c6/yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385",
+ "url": "https://files.pythonhosted.org/packages/0b/a3/7774786ec6e2dca0bb38b286f12a11af97957546e5fbcce71752a8d2cf07/yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234",
+ "url": "https://files.pythonhosted.org/packages/30/b5/215d586d5cb17ca9748d7a2d597c07147f210c0c0785257492094d083b65/yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b",
+ "url": "https://files.pythonhosted.org/packages/44/ae/fdbc9965ef69e650c3b5b04d60badef90ff0cde21a30770f0700e148b12f/yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e",
+ "url": "https://files.pythonhosted.org/packages/6c/27/cda5a927df3a894eddfee4efacdd230c2d8486e322fc672194fd651f82c5/yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c",
+ "url": "https://files.pythonhosted.org/packages/6d/a1/db0bdf8cc48515e9c02daf04ae2916fc27ce6498eca21432fc9ffa63f71b/yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863",
+ "url": "https://files.pythonhosted.org/packages/70/a9/ef6d69ce9a4e82080290bcb6db735bb8a6d6db92f2bbb92b6951bde97e7c/yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66",
+ "url": "https://files.pythonhosted.org/packages/81/c6/06938036ea48fa74521713499fba1459b0eb60af9b9afbe8e0e9e1a96c36/yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53",
+ "url": "https://files.pythonhosted.org/packages/b2/4f/796b0c73e9ff30a1047a7ee3390e157ab8424d4401b9f32a2624013a5b39/yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455",
+ "url": "https://files.pythonhosted.org/packages/c3/a0/0ade1409d184cbc9e85acd403a386a7c0563b92ff0f26d138ff9e86e48b4/yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541",
+ "url": "https://files.pythonhosted.org/packages/cc/2a/abbaf1460becba856e163f2a1274f5d34b1969d476da8e68a8fc2aeb5661/yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2",
+ "url": "https://files.pythonhosted.org/packages/d5/fc/40b85bea1f5686092ea37f472c94c023d6347266852ffd55baa01c40f596/yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392",
+ "url": "https://files.pythonhosted.org/packages/dd/90/2958ae9f2e12084d616eef95b6a48c8e6d96448add04367c20dc53a33ff2/yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf",
+ "url": "https://files.pythonhosted.org/packages/e0/ad/bedcdccbcbf91363fd425a948994f3340924145c2bc8ccb296f4a1e52c28/yarl-1.9.4.tar.gz"
+ }
+ ],
+ "project_name": "yarl",
+ "requires_dists": [
+ "idna>=2.0",
+ "multidict>=4.0",
+ "typing-extensions>=3.7.4; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.9.4"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "atomicwrites>=1.4.0",
+ "box2d-py>=2.3.5",
+ "cloudpickle~=3.0",
+ "gsutil>=4.66",
+ "gymnasium>=0.27.1",
+ "myst-parser~=2.0",
+ "numpy<1.24",
+ "onnx>=1.10",
+ "opencv-python>=3.0",
+ "protobuf>=4.0",
+ "psutil>=5.8.0",
+ "pygame>=2.1.0",
+ "pytest-benchmark==4.0.0",
+ "pytest-cov!=2.12.1,<3.1,>=2.12",
+ "pytest-platform-markers",
+ "pytest-rerunfailures",
+ "pytest-xdist<3,>=2.5",
+ "pytest~=8.0",
+ "setuptools==59.5",
+ "tensorboard>=2.8.0",
+ "torch!=1.12.0+cu116,==1.12.0+cpu",
+ "torch==1.12.0",
+ "wandb>=0.14.0"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/gpu.lock b/locks/gpu.lock
new file mode 100644
index 00000000..d33a2964
--- /dev/null
+++ b/locks/gpu.lock
@@ -0,0 +1,3248 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=gpu
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "atomicwrites>=1.4.0",
+// "box2d-py>=2.3.5",
+// "cloudpickle~=3.0",
+// "gsutil>=4.66",
+// "gymnasium>=0.27.1",
+// "myst-parser~=2.0",
+// "numpy<1.24",
+// "onnx>=1.10",
+// "opencv-python>=3.0",
+// "protobuf>=4.0",
+// "psutil>=5.8.0",
+// "pygame>=2.1.0",
+// "pytest-benchmark==4.0.0",
+// "pytest-cov!=2.12.1,<3.1,>=2.12",
+// "pytest-platform-markers",
+// "pytest-rerunfailures",
+// "pytest-xdist<3,>=2.5",
+// "pytest~=8.0",
+// "setuptools==59.5",
+// "tensorboard>=2.8.0",
+// "torch!=1.12.0+cpu,==1.12.0+cu116",
+// "torch==1.12.0",
+// "wandb>=0.14.0"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308",
+ "url": "https://files.pythonhosted.org/packages/a2/ad/e0d3c824784ff121c03cc031f944bc7e139a8f1870ffd2845cc2dd76f6c4/absl_py-2.1.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff",
+ "url": "https://files.pythonhosted.org/packages/7a/8f/fc001b92ecc467cc32ab38398bd0bfb45df46e7523bf33c2ad22a505f06e/absl-py-2.1.0.tar.gz"
+ }
+ ],
+ "project_name": "absl-py",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.1.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52",
+ "url": "https://files.pythonhosted.org/packages/1f/41/0852b954464d853cf315e60f096d3ff6a74aff75ad5f3388c06695d5d37f/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54",
+ "url": "https://files.pythonhosted.org/packages/0c/03/2cac72f64b2853397dd697aa4957755b85bfd3acc0ffe898571060f1db83/aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747",
+ "url": "https://files.pythonhosted.org/packages/18/02/4156ed2edca212041c7a5334b9520ff5a39e40648177e2f0ef13cac2b555/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7",
+ "url": "https://files.pythonhosted.org/packages/18/93/1f005bbe044471a0444a82cdd7356f5120b9cf94fe2c50c0cdbf28f1258b/aiohttp-3.9.3.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5",
+ "url": "https://files.pythonhosted.org/packages/43/68/86874ff80e74c2e8308af3d80345fd624b5b26197a914aa9a85cfaf5b025/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf",
+ "url": "https://files.pythonhosted.org/packages/4b/a0/8b50667a858f3e4f3fec2d471aa9e618783c0450b980e7a5bf617c1cb1f3/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c",
+ "url": "https://files.pythonhosted.org/packages/63/56/c1d39b27114595beaea776e164dbb793cf64c16331ba00cd0dc7cf0542f2/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768",
+ "url": "https://files.pythonhosted.org/packages/6d/8a/46ba295c98b24779370580b4450f80f35a1ae9e4bc9f9783ea1043d33395/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5",
+ "url": "https://files.pythonhosted.org/packages/7e/6e/6c0486fdd8918f9818e82b30898cb77ff0debccc4b09db5d9a939ed7a075/aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec",
+ "url": "https://files.pythonhosted.org/packages/86/74/b506f01485dba1c4298700156b915f3ba475be823a7b31056d40a9ac0daa/aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29",
+ "url": "https://files.pythonhosted.org/packages/93/40/d3decda219ebd5410eba627601d537ec3782efbcadba308e9ce381cc0b71/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc",
+ "url": "https://files.pythonhosted.org/packages/9a/41/d6ce776c9c22f402ad0b0cfbdc70a630512229854b0043bd0dbe6566d75d/aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b",
+ "url": "https://files.pythonhosted.org/packages/9d/79/b34562b6cce04322023112f1984380359d78bd043b8ef822c2f356b7a047/aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6",
+ "url": "https://files.pythonhosted.org/packages/f5/4e/41143834b3fd5b89b404c76b5a71496bca96fbd8587c1e42a8f2b2efb8b3/aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl"
+ }
+ ],
+ "project_name": "aiohttp",
+ "requires_dists": [
+ "Brotli; platform_python_implementation == \"CPython\" and extra == \"speedups\"",
+ "aiodns; (sys_platform == \"linux\" or sys_platform == \"darwin\") and extra == \"speedups\"",
+ "aiosignal>=1.1.2",
+ "async-timeout<5.0,>=4.0; python_version < \"3.11\"",
+ "attrs>=17.3.0",
+ "brotlicffi; platform_python_implementation != \"CPython\" and extra == \"speedups\"",
+ "frozenlist>=1.1.1",
+ "multidict<7.0,>=4.5",
+ "yarl<2.0,>=1.0"
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.9.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17",
+ "url": "https://files.pythonhosted.org/packages/76/ac/a7305707cb852b7e16ff80eaf5692309bde30e2b1100a1fcacdc8f731d97/aiosignal-1.3.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc",
+ "url": "https://files.pythonhosted.org/packages/ae/67/0952ed97a9793b4958e5736f6d2b346b414a2cd63e82d05940032f45b32f/aiosignal-1.3.1.tar.gz"
+ }
+ ],
+ "project_name": "aiosignal",
+ "requires_dists": [
+ "frozenlist>=1.1.0"
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.3.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92",
+ "url": "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65",
+ "url": "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz"
+ }
+ ],
+ "project_name": "alabaster",
+ "requires_dists": [],
+ "requires_python": ">=3.9",
+ "version": "0.7.16"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128",
+ "url": "https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41",
+ "url": "https://files.pythonhosted.org/packages/d7/d8/05696357e0311f5b5c316d7b95f46c669dd9c15aaeecbb48c7d0aeb88c40/appdirs-1.4.4.tar.gz"
+ }
+ ],
+ "project_name": "appdirs",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.4.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c",
+ "url": "https://files.pythonhosted.org/packages/88/8c/61021c45428ad2ef6131c6068d14f7f0968767e972e427cd87bd25c9ea7b/argcomplete-3.2.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23",
+ "url": "https://files.pythonhosted.org/packages/3c/c0/031c507227ce3b715274c1cd1f3f9baf7a0f7cec075e22c7c8b5d4e468a9/argcomplete-3.2.3.tar.gz"
+ }
+ ],
+ "project_name": "argcomplete",
+ "requires_dists": [
+ "coverage; extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pexpect; extra == \"test\"",
+ "ruff; extra == \"test\"",
+ "wheel; extra == \"test\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.2.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028",
+ "url": "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f",
+ "url": "https://files.pythonhosted.org/packages/87/d6/21b30a550dafea84b1b8eee21b5e23fa16d010ae006011221f33dcd8d7f8/async-timeout-4.0.3.tar.gz"
+ }
+ ],
+ "project_name": "async-timeout",
+ "requires_dists": [
+ "typing-extensions>=3.6.5; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11",
+ "url": "https://files.pythonhosted.org/packages/87/c6/53da25344e3e3a9c01095a89f16dbcda021c609ddb42dd6d7c0528236fb2/atomicwrites-1.4.1.tar.gz"
+ }
+ ],
+ "project_name": "atomicwrites",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1",
+ "url": "https://files.pythonhosted.org/packages/e0/44/827b2a91a5816512fcaf3cc4ebc465ccd5d598c45cefa6703fcf4a79018f/attrs-23.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "url": "https://files.pythonhosted.org/packages/e3/fc/f800d51204003fa8ae392c4e8278f256206e7a919b708eef054f5f4b650d/attrs-23.2.0.tar.gz"
+ }
+ ],
+ "project_name": "attrs",
+ "requires_dists": [
+ "attrs[tests-mypy]; extra == \"tests-no-zope\"",
+ "attrs[tests-no-zope]; extra == \"tests\"",
+ "attrs[tests]; extra == \"cov\"",
+ "attrs[tests]; extra == \"dev\"",
+ "cloudpickle; platform_python_implementation == \"CPython\" and extra == \"tests-no-zope\"",
+ "coverage[toml]>=5.3; extra == \"cov\"",
+ "furo; extra == \"docs\"",
+ "hypothesis; extra == \"tests-no-zope\"",
+ "importlib-metadata; python_version < \"3.8\"",
+ "mypy>=1.6; (platform_python_implementation == \"CPython\" and python_version >= \"3.8\") and extra == \"tests-mypy\"",
+ "myst-parser; extra == \"docs\"",
+ "pre-commit; extra == \"dev\"",
+ "pympler; extra == \"tests-no-zope\"",
+ "pytest-mypy-plugins; (platform_python_implementation == \"CPython\" and python_version >= \"3.8\") and extra == \"tests-mypy\"",
+ "pytest-xdist[psutil]; extra == \"tests-no-zope\"",
+ "pytest>=4.3.0; extra == \"tests-no-zope\"",
+ "sphinx-notfound-page; extra == \"docs\"",
+ "sphinx; extra == \"docs\"",
+ "sphinxcontrib-towncrier; extra == \"docs\"",
+ "towncrier; extra == \"docs\"",
+ "zope-interface; extra == \"docs\"",
+ "zope-interface; extra == \"tests\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "23.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287",
+ "url": "https://files.pythonhosted.org/packages/0d/35/4196b21041e29a42dc4f05866d0c94fa26c9da88ce12c38c2265e42c82fb/Babel-2.14.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363",
+ "url": "https://files.pythonhosted.org/packages/e2/80/cfbe44a9085d112e983282ee7ca4c00429bc4d1ce86ee5f4e60259ddff7f/Babel-2.14.0.tar.gz"
+ }
+ ],
+ "project_name": "babel",
+ "requires_dists": [
+ "freezegun~=1.0; extra == \"dev\"",
+ "pytest-cov; extra == \"dev\"",
+ "pytest>=6.0; extra == \"dev\"",
+ "pytz>=2015.7; python_version < \"3.9\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.14.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8",
+ "url": "https://files.pythonhosted.org/packages/23/10/c0b78c27298029e4454a472a1919bde20cb182dab1662cec7f2ca1dcc523/boto-2.49.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a",
+ "url": "https://files.pythonhosted.org/packages/c8/af/54a920ff4255664f5d238b5aebd8eedf7a07c7a5e71e27afcfe840b82f51/boto-2.49.0.tar.gz"
+ }
+ ],
+ "project_name": "boto",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.49.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "bdacfbbc56079bb317548efe49d3d5a86646885cc27f4a2ee97e4b2960921ab7",
+ "url": "https://files.pythonhosted.org/packages/98/c2/ab05b5329dc4416b5ee5530f0625a79c394a3e3c10abe0812b9345256451/box2d-py-2.3.8.tar.gz"
+ }
+ ],
+ "project_name": "box2d-py",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.3.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945",
+ "url": "https://files.pythonhosted.org/packages/fb/2b/a64c2d25a37aeb921fddb929111413049fc5f8b9a4c1aefaffaafe768d54/cachetools-5.3.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105",
+ "url": "https://files.pythonhosted.org/packages/b3/4d/27a3e6dd09011649ad5210bdf963765bc8fa81a0827a4fc01bafd2705c5b/cachetools-5.3.3.tar.gz"
+ }
+ ],
+ "project_name": "cachetools",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "5.3.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1",
+ "url": "https://files.pythonhosted.org/packages/ba/06/a07f096c664aeb9f01624f858c3add0a4e913d6c96257acb4fce61e7de14/certifi-2024.2.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
+ "url": "https://files.pythonhosted.org/packages/71/da/e94e26401b62acd6d91df2b52954aceb7f561743aa5ccc32152886c76c96/certifi-2024.2.2.tar.gz"
+ }
+ ],
+ "project_name": "certifi",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "2024.2.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d",
+ "url": "https://files.pythonhosted.org/packages/ee/68/74a2b9f9432b70d97d1184cdabf32d7803124c228adef9481d280864a4a7/cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684",
+ "url": "https://files.pythonhosted.org/packages/22/05/43cfda378da7bb0aa19b3cf34fe54f8867b0d581294216339d87deefd69c/cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7",
+ "url": "https://files.pythonhosted.org/packages/54/49/b8875986beef2e74fc668b95f2df010e354f78e009d33d95b375912810c3/cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673",
+ "url": "https://files.pythonhosted.org/packages/57/3a/c263cf4d5b02880274866968fa2bf196a02c4486248bc164732319b4a4c0/cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0",
+ "url": "https://files.pythonhosted.org/packages/68/ce/95b0bae7968c65473e1298efb042e10cafc7bafc14d9e4f154008241c91d/cffi-1.16.0.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088",
+ "url": "https://files.pythonhosted.org/packages/aa/aa/1c43e48a6f361d1529f9e4602d6992659a0107b5f21cae567e2eddcf8d66/cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9",
+ "url": "https://files.pythonhosted.org/packages/c4/01/f5116266fe80c04d4d1cc96c3d355606943f9fb604a810e0b02228a0ce19/cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614",
+ "url": "https://files.pythonhosted.org/packages/c9/7c/43d81bdd5a915923c3bad5bb4bff401ea00ccc8e28433fb6083d2e3bf58e/cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743",
+ "url": "https://files.pythonhosted.org/packages/eb/de/4f644fc78a1144a897e1f908abfb2058f7be05a8e8e4fe90b7f41e9de36b/cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896",
+ "url": "https://files.pythonhosted.org/packages/f0/31/a6503a5c4874fb4d4c2053f73f09a957cb427b6943fab5a43b8e156df397/cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "cffi",
+ "requires_dists": [
+ "pycparser"
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "url": "https://files.pythonhosted.org/packages/28/76/e6222113b83e3622caa4bb41032d0b1bf785250607392e1b778aca0b8a7d/charset_normalizer-3.3.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "url": "https://files.pythonhosted.org/packages/05/8c/eb854996d5fef5e4f33ad56927ad053d04dc820e4a3d39023f35cad72617/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "url": "https://files.pythonhosted.org/packages/2b/61/095a0aa1a84d1481998b534177c8566fdc50bb1233ea9a0478cd3cc075bd/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "url": "https://files.pythonhosted.org/packages/33/c3/3b96a435c5109dd5b6adc8a59ba1d678b302a97938f032e3770cc84cd354/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "url": "https://files.pythonhosted.org/packages/3f/ba/3f5e7be00b215fa10e13d64b1f6237eb6ebea66676a41b2bcdd09fe74323/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "url": "https://files.pythonhosted.org/packages/43/05/3bf613e719efe68fb3a77f9c536a389f35b95d75424b96b426a47a45ef1d/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "url": "https://files.pythonhosted.org/packages/46/6a/d5c26c41c49b546860cc1acabdddf48b0b3fb2685f4f5617ac59261b44ae/charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "url": "https://files.pythonhosted.org/packages/58/78/a0bc646900994df12e07b4ae5c713f2b3e5998f58b9d3720cce2aa45652f/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "url": "https://files.pythonhosted.org/packages/63/09/c1bc53dab74b1816a00d8d030de5bf98f724c52c1635e07681d312f20be8/charset-normalizer-3.3.2.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "url": "https://files.pythonhosted.org/packages/a8/31/47d018ef89f95b8aded95c589a77c072c55e94b50a41aa99c0a2008a45a4/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "url": "https://files.pythonhosted.org/packages/b8/60/e2f67915a51be59d4539ed189eb0a2b0d292bf79270410746becb32bc2c3/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "url": "https://files.pythonhosted.org/packages/cc/94/f7cf5e5134175de79ad2059edf2adce18e0685ebdb9227ff0139975d0e93/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "url": "https://files.pythonhosted.org/packages/da/f1/3702ba2a7470666a62fd81c58a4c40be00670e5006a67f4d626e57f013ae/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "url": "https://files.pythonhosted.org/packages/eb/5c/97d97248af4920bc68687d9c3b3c0f47c910e21a8ff80af4565a576bd2f0/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "url": "https://files.pythonhosted.org/packages/f6/93/bb6cbeec3bf9da9b2eba458c15966658d1daa8b982c642f81c93ad9b40e1/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ }
+ ],
+ "project_name": "charset-normalizer",
+ "requires_dists": [],
+ "requires_python": ">=3.7.0",
+ "version": "3.3.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "url": "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de",
+ "url": "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz"
+ }
+ ],
+ "project_name": "click",
+ "requires_dists": [
+ "colorama; platform_system == \"Windows\"",
+ "importlib-metadata; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "8.1.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7",
+ "url": "https://files.pythonhosted.org/packages/96/43/dae06432d0c4b1dc9e9149ad37b4ca8384cf6eb7700cd9215b177b914f0a/cloudpickle-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882",
+ "url": "https://files.pythonhosted.org/packages/c8/72/42a6570fc61b1f8913529728ad314c7cf5961540728dcad22c33fb2db6b6/cloudpickle-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "cloudpickle",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677",
+ "url": "https://files.pythonhosted.org/packages/99/15/dbcb5d0a22bf5357cf456dfd16f9ceb89c54544d6201d53bc77c75077a8e/coverage-7.4.4-pp38.pp39.pp310-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8",
+ "url": "https://files.pythonhosted.org/packages/07/58/0e076ea3a59dbfb3e981577c4e5572b432345cedd921e83006a0215b9afe/coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf",
+ "url": "https://files.pythonhosted.org/packages/10/1e/f676e1655d10bf59a6cb8de0601b7ea3c252c764782a3c2263f6d6bbcf28/coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2",
+ "url": "https://files.pythonhosted.org/packages/45/f4/10bf725621aeec5cc2fa1bc73021f5ba1ac01bcbf2c7278d8d34e1df6457/coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87",
+ "url": "https://files.pythonhosted.org/packages/50/32/829d0e709fa699dc4e498fa77a561d25fc57954ba32466279952b98f0836/coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c",
+ "url": "https://files.pythonhosted.org/packages/7e/60/62a8c190d20bf605c89a000fd6d41e3563b5792e7275b12eeefe6803b473/coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7",
+ "url": "https://files.pythonhosted.org/packages/91/4e/feff6d115dcc239e5850570ca2ea27a243c8a69596e7f1dabe54a6102d89/coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2",
+ "url": "https://files.pythonhosted.org/packages/93/41/e6e9dbb322f3c93aba7bc519b9c62846d923d7b57398bdd7eda3f0acdd11/coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49",
+ "url": "https://files.pythonhosted.org/packages/bf/d5/f809d8b630cf4c11fe490e20037a343d12a74ec2783c6cdb5aee725e7137/coverage-7.4.4.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562",
+ "url": "https://files.pythonhosted.org/packages/d3/6d/72b9f5035c50a14bc5c5fda0c28ac16c426e957a7a3debe02906b614fc4f/coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ }
+ ],
+ "project_name": "coverage",
+ "requires_dists": [
+ "tomli; python_full_version <= \"3.11.0a6\" and extra == \"toml\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "7.4.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "dc7051a0db5f2bd48665a990d3ec1cc305a466a77358ca4492826f41f283601e",
+ "url": "https://files.pythonhosted.org/packages/6b/b0/e595ce2a2527e169c3bcd6c33d2473c1918e0b7f6826a043ca1245dd4e5b/crcmod-1.7.tar.gz"
+ }
+ ],
+ "project_name": "crcmod",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c",
+ "url": "https://files.pythonhosted.org/packages/6e/8d/6cce88bdeb26b4ec14b23ab9f0c2c7c0bf33ef4904bfa952c5db1749fd37/cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc",
+ "url": "https://files.pythonhosted.org/packages/0e/1d/62a2324882c0db89f64358dadfb95cae024ee3ba9fde3d5fd4d2f58af9f5/cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1",
+ "url": "https://files.pythonhosted.org/packages/13/9e/a55763a32d340d7b06d045753c186b690e7d88780cafce5f88cb931536be/cryptography-42.0.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da",
+ "url": "https://files.pythonhosted.org/packages/2c/9c/821ef6144daf80360cf6093520bf07eec7c793103ed4b1bf3fa17d2b55d8/cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a",
+ "url": "https://files.pythonhosted.org/packages/48/c8/c0962598c43d3cff2c9d6ac66d0c612bdfb1975be8d87b8889960cf8c81d/cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1",
+ "url": "https://files.pythonhosted.org/packages/50/26/248cd8b6809635ed412159791c0d3869d8ec9dfdc57d428d500a14d425b7/cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2",
+ "url": "https://files.pythonhosted.org/packages/59/48/519ecd6b65dc9ea7c8111dfde7c9ed61aeb90fe59c6b4454900bcd3e3286/cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1",
+ "url": "https://files.pythonhosted.org/packages/5b/3d/c3c21e3afaf43bacccc3ebf61d1a0d47cef6e2607dbba01662f6f9d8fc40/cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7",
+ "url": "https://files.pythonhosted.org/packages/64/f7/d3c83c79947cc6807e6acd3b2d9a1cbd312042777bc7eec50c869913df79/cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7",
+ "url": "https://files.pythonhosted.org/packages/69/f6/630eb71f246208103ffee754b8375b6b334eeedb28620b3ae57be815eeeb/cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8",
+ "url": "https://files.pythonhosted.org/packages/6d/4d/f7c14c7a49e35df829e04d451a57b843208be7442c8e087250c195775be1/cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922",
+ "url": "https://files.pythonhosted.org/packages/7d/bc/b6c691c960b5dcd54c5444e73af7f826e62af965ba59b6d7e9928b6489a2/cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278",
+ "url": "https://files.pythonhosted.org/packages/8c/50/9185cca136596448d9cc595ae22a9bd4412ad35d812550c37c1390d54673/cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8",
+ "url": "https://files.pythonhosted.org/packages/9f/c3/3d2d9bb2ff9e15b5ababc370ae85b377eacc8e3d54fcb03225471e41a1d8/cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc",
+ "url": "https://files.pythonhosted.org/packages/c2/40/c7cb9d6819b90640ffc3c4028b28f46edc525feaeaa0d98ea23e843d446d/cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30",
+ "url": "https://files.pythonhosted.org/packages/ca/2e/9f2c49bd6a18d46c05ec098b040e7d4599c61f50ced40a39adfae3f68306/cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16",
+ "url": "https://files.pythonhosted.org/packages/d1/f1/fd98e6e79242d9aeaf6a5d49639a7e85f05741575af14d3f4a1d477f572e/cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e",
+ "url": "https://files.pythonhosted.org/packages/d4/fa/057f9d7a5364c86ccb6a4bd4e5c58920dcb66532be0cc21da3f9c7617ec3/cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d",
+ "url": "https://files.pythonhosted.org/packages/d8/b1/127ecb373d02db85a7a7de5093d7ac7b7714b8907d631f0591e8f002998d/cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec",
+ "url": "https://files.pythonhosted.org/packages/d9/f9/27dda069a9f9bfda7c75305e222d904cc2445acf5eab5c696ade57d36f1b/cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb",
+ "url": "https://files.pythonhosted.org/packages/e2/59/61b2364f2a4d3668d933531bc30d012b9b2de1e534df4805678471287d57/cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee",
+ "url": "https://files.pythonhosted.org/packages/e5/61/67e090a41c70ee526bd5121b1ccabab85c727574332d03326baaedea962d/cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4",
+ "url": "https://files.pythonhosted.org/packages/fb/0b/14509319a1b49858425553d2fb3808579cfdfe98c1d71a3f046c1b4e0108/cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ }
+ ],
+ "project_name": "cryptography",
+ "requires_dists": [
+ "bcrypt>=3.1.5; extra == \"ssh\"",
+ "build; extra == \"sdist\"",
+ "certifi; extra == \"test\"",
+ "cffi>=1.12; platform_python_implementation != \"PyPy\"",
+ "check-sdist; extra == \"pep8test\"",
+ "click; extra == \"pep8test\"",
+ "mypy; extra == \"pep8test\"",
+ "nox; extra == \"nox\"",
+ "pretend; extra == \"test\"",
+ "pyenchant>=1.6.11; extra == \"docstest\"",
+ "pytest-benchmark; extra == \"test\"",
+ "pytest-cov; extra == \"test\"",
+ "pytest-randomly; extra == \"test-randomorder\"",
+ "pytest-xdist; extra == \"test\"",
+ "pytest>=6.2.0; extra == \"test\"",
+ "readme-renderer; extra == \"docstest\"",
+ "ruff; extra == \"pep8test\"",
+ "sphinx-rtd-theme>=1.1.1; extra == \"docs\"",
+ "sphinx>=5.3.0; extra == \"docs\"",
+ "sphinxcontrib-spelling>=4.0.1; extra == \"docstest\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "42.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49",
+ "url": "https://files.pythonhosted.org/packages/f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4",
+ "url": "https://files.pythonhosted.org/packages/c5/e6/d1f6c00b7221e2d7c4b470132c931325c8b22c51ca62417e300f5ce16009/docker-pycreds-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "docker-pycreds",
+ "requires_dists": [
+ "six>=1.4.0"
+ ],
+ "requires_python": null,
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6",
+ "url": "https://files.pythonhosted.org/packages/26/87/f238c0670b94533ac0353a4e2a1a771a0cc73277b88bff23d3ae35a256c1/docutils-0.20.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b",
+ "url": "https://files.pythonhosted.org/packages/1f/53/a5da4f2c5739cf66290fac1431ee52aff6851c7c8ffd8264f13affd7bcdd/docutils-0.20.1.tar.gz"
+ }
+ ],
+ "project_name": "docutils",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.20.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14",
+ "url": "https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68",
+ "url": "https://files.pythonhosted.org/packages/8e/1c/beef724eaf5b01bb44b6338c8c3494eff7cab376fab4904cfbbc3585dc79/exceptiongroup-1.2.0.tar.gz"
+ }
+ ],
+ "project_name": "exceptiongroup",
+ "requires_dists": [
+ "pytest>=6; extra == \"test\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41",
+ "url": "https://files.pythonhosted.org/packages/e8/9c/a079946da30fac4924d92dbc617e5367d454954494cf1e71567bcc4e00ee/execnet-2.0.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af",
+ "url": "https://files.pythonhosted.org/packages/e4/c8/d382dc7a1e68a165f4a4ab612a08b20d8534a7d20cc590630b734ca0c54b/execnet-2.0.2.tar.gz"
+ }
+ ],
+ "project_name": "execnet",
+ "requires_dists": [
+ "hatch; extra == \"testing\"",
+ "pre-commit; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"testing\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.0.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "14de931035a41961f7c056361dc7f980762a143d05791ef5794a751a2caf05ae",
+ "url": "https://files.pythonhosted.org/packages/05/2c/ffc08c54c05cdce6fbed2aeebc46348dbe180c6d2c541c7af7ba0aa5f5f8/Farama_Notifications-0.0.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "13fceff2d14314cf80703c8266462ebf3733c7d165336eee998fc58e545efd18",
+ "url": "https://files.pythonhosted.org/packages/2e/2c/8384832b7a6b1fd6ba95bbdcae26e7137bb3eedc955c42fd5cdcc086cfbf/Farama-Notifications-0.0.4.tar.gz"
+ }
+ ],
+ "project_name": "farama-notifications",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "0.0.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "758819cb5d94cdedf4e836988b74de396ceacb8e2794d21f82d131fd9ee77237",
+ "url": "https://files.pythonhosted.org/packages/61/bf/fd60001b3abc5222d8eaa4a204cd8c0ae78e75adc688f33ce4bf25b7fafa/fasteners-0.19-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b4f37c3ac52d8a445af3a66bce57b33b5e90b97c696b7b984f530cf8f0ded09c",
+ "url": "https://files.pythonhosted.org/packages/5f/d4/e834d929be54bfadb1f3e3b931c38e956aaa3b235a46a3c764c26c774902/fasteners-0.19.tar.gz"
+ }
+ ],
+ "project_name": "fasteners",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "0.19"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7",
+ "url": "https://files.pythonhosted.org/packages/83/10/466fe96dae1bff622021ee687f68e5524d6392b0a2f80d05001cd3a451ba/frozenlist-1.4.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c",
+ "url": "https://files.pythonhosted.org/packages/36/ce/dc6f29e0352fa34ebe45421960c8e7352ca63b31630a576e8ffb381e9c08/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe",
+ "url": "https://files.pythonhosted.org/packages/51/47/159ac53faf8a11ae5ee8bb9db10327575557504e549cfd76f447b969aa91/frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75",
+ "url": "https://files.pythonhosted.org/packages/53/82/274e19f122e124aee6d113188615f63b0736b4242a875f482a81f91e07e2/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950",
+ "url": "https://files.pythonhosted.org/packages/6e/4f/b8a5a2f10c4a58c52a52a40cf6cf1ffcdbf3a3b64f276f41dab989bf3ab5/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac",
+ "url": "https://files.pythonhosted.org/packages/7a/35/1328c7b0f780d34f8afc1d87ebdc2bb065a123b24766a0b475f0d67da637/frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98",
+ "url": "https://files.pythonhosted.org/packages/97/94/a1305fa4716726ae0abf3b1069c2d922fcfd442538cb850f1be543f58766/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776",
+ "url": "https://files.pythonhosted.org/packages/ae/83/bcdaa437a9bd693ba658a0310f8cdccff26bd78e45fccf8e49897904a5cd/frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc",
+ "url": "https://files.pythonhosted.org/packages/b0/2c/7be3bdc59dbae444864dbd9cde82790314390ec54636baf6b9ce212627ad/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5",
+ "url": "https://files.pythonhosted.org/packages/b8/28/899931015b8cffbe155392fe9ca663f981a17e1adc69589ee0e1e7cdc9a2/frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b",
+ "url": "https://files.pythonhosted.org/packages/cf/3d/2102257e7acad73efc4a0c306ad3953f68c504c16982bbdfee3ad75d8085/frozenlist-1.4.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a",
+ "url": "https://files.pythonhosted.org/packages/d4/e9/759043ab7d169b74fe05ebfbfa9ee5c881c303ebc838e308346204309cd0/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a",
+ "url": "https://files.pythonhosted.org/packages/ec/25/0c87df2e53c0c5d90f7517ca0ff7aca78d050a8ec4d32c4278e8c0e52e51/frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868",
+ "url": "https://files.pythonhosted.org/packages/f4/d6/ca016b0adcf8327714ccef969740688808c86e0287bf3a639ff582f24e82/frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad",
+ "url": "https://files.pythonhosted.org/packages/f8/ce/b9de7dc61e753dc318cf0de862181b484178210c5361eae6eaf06792264d/frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ }
+ ],
+ "project_name": "frozenlist",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f4120b08b7f8d32904674c98f07d4caf4083a58343c0c0fa0016e0f0254dfe31",
+ "url": "https://files.pythonhosted.org/packages/05/e5/3162be0abab32f152f331423426471935f286dd4ad70fa704f2a34ea3c1e/gcs-oauth2-boto-plugin-3.0.tar.gz"
+ }
+ ],
+ "project_name": "gcs-oauth2-boto-plugin",
+ "requires_dists": [
+ "boto>=2.29.1",
+ "freezegun; extra == \"dev\"",
+ "google-reauth>=0.1.0",
+ "httplib2>=0.18",
+ "mock; python_version < \"3.3\" and extra == \"dev\"",
+ "oauth2client>=2.2.0",
+ "pyOpenSSL>=0.13",
+ "retry-decorator>=1.0.0",
+ "rsa==4.7.2",
+ "six>=1.12.0"
+ ],
+ "requires_python": null,
+ "version": "3.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4",
+ "url": "https://files.pythonhosted.org/packages/fd/5b/8f0c4a5bb9fd491c277c21eff7ccae71b47d43c4446c9d0c6cff2fe8c2c4/gitdb-4.0.11-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b",
+ "url": "https://files.pythonhosted.org/packages/19/0d/bbb5b5ee188dec84647a4664f3e11b06ade2bde568dbd489d9d64adef8ed/gitdb-4.0.11.tar.gz"
+ }
+ ],
+ "project_name": "gitdb",
+ "requires_dists": [
+ "smmap<6,>=3.0.1"
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.11"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff",
+ "url": "https://files.pythonhosted.org/packages/e9/bd/cc3a402a6439c15c3d4294333e13042b915bbeab54edc457c723931fed3f/GitPython-3.1.43-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c",
+ "url": "https://files.pythonhosted.org/packages/b6/a1/106fd9fa2dd989b6fb36e5893961f82992cf676381707253e0bf93eb1662/GitPython-3.1.43.tar.gz"
+ }
+ ],
+ "project_name": "gitpython",
+ "requires_dists": [
+ "coverage[toml]; extra == \"test\"",
+ "ddt!=1.4.3,>=1.1.1; extra == \"test\"",
+ "gitdb<5,>=4.0.1",
+ "mock; python_version < \"3.8\" and extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pre-commit; extra == \"test\"",
+ "pytest-cov; extra == \"test\"",
+ "pytest-instafail; extra == \"test\"",
+ "pytest-mock; extra == \"test\"",
+ "pytest-sugar; extra == \"test\"",
+ "pytest>=7.3.1; extra == \"test\"",
+ "sphinx-autodoc-typehints; extra == \"doc\"",
+ "sphinx-rtd-theme; extra == \"doc\"",
+ "sphinx==4.3.2; extra == \"doc\"",
+ "sphinxcontrib-applehelp<=1.0.4,>=1.0.2; extra == \"doc\"",
+ "sphinxcontrib-devhelp==1.0.2; extra == \"doc\"",
+ "sphinxcontrib-htmlhelp<=2.0.1,>=2.0.0; extra == \"doc\"",
+ "sphinxcontrib-qthelp==1.0.3; extra == \"doc\"",
+ "sphinxcontrib-serializinghtml==1.1.5; extra == \"doc\"",
+ "typing-extensions; python_version < \"3.11\" and extra == \"test\"",
+ "typing-extensions>=3.7.4.3; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "3.1.43"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b78f74116558e0476e19501b5b4b2ac7c93261a69c5449c861ea95cbc853c688",
+ "url": "https://files.pythonhosted.org/packages/5e/cb/cb0311f2ec371c83d6510847476c665edc9cc97564a51923557bc8f0b680/google_apitools-0.5.32-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c3763e52289f61e21c41d5531e20fbda9cc8484a088b8686fd460770db8bad13",
+ "url": "https://files.pythonhosted.org/packages/dc/eb/c26c36463a769a3a9f08847b9bf218cb629ca91877a911bbd6dcf37d9e62/google-apitools-0.5.32.tar.gz"
+ }
+ ],
+ "project_name": "google-apitools",
+ "requires_dists": [
+ "fasteners>=0.14",
+ "httplib2>=0.8",
+ "mock>=1.0.1; extra == \"testing\"",
+ "oauth2client>=1.4.12",
+ "python-gflags>=3.0.6; extra == \"cli\"",
+ "six>=1.12.0"
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7",
+ "version": "0.5.32"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415",
+ "url": "https://files.pythonhosted.org/packages/9e/8d/ddbcf81ec751d8ee5fd18ac11ff38a0e110f39dfbf105e6d9db69d556dd0/google_auth-2.29.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360",
+ "url": "https://files.pythonhosted.org/packages/18/b2/f14129111cfd61793609643a07ecb03651a71dd65c6974f63b0310ff4b45/google-auth-2.29.0.tar.gz"
+ }
+ ],
+ "project_name": "google-auth",
+ "requires_dists": [
+ "aiohttp<4.0.0.dev0,>=3.6.2; extra == \"aiohttp\"",
+ "cachetools<6.0,>=2.0.0",
+ "cryptography==36.0.2; extra == \"enterprise-cert\"",
+ "cryptography>=38.0.3; extra == \"pyopenssl\"",
+ "pyasn1-modules>=0.2.1",
+ "pyopenssl==22.0.0; extra == \"enterprise-cert\"",
+ "pyopenssl>=20.0.0; extra == \"pyopenssl\"",
+ "pyu2f>=0.1.5; extra == \"reauth\"",
+ "requests<3.0.0.dev0,>=2.20.0; extra == \"aiohttp\"",
+ "requests<3.0.0.dev0,>=2.20.0; extra == \"requests\"",
+ "rsa<5,>=3.1.4"
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.29.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cb39074488d74c8853074dde47368bbf8f739d4a4338b89aab696c895b6d8368",
+ "url": "https://files.pythonhosted.org/packages/69/e1/67ffaa3a645b86318ce30717af7145070ebccec5eef5c623ae08b86129b8/google_reauth-0.1.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f9f6852a55c2c5453d581cd01f3d1278e86147c03d008409800390a834235892",
+ "url": "https://files.pythonhosted.org/packages/7d/86/74242e08d24ec4c436b8325dabbd7c60422b4829dfb1ad6ec117bdebea76/google-reauth-0.1.1.tar.gz"
+ }
+ ],
+ "project_name": "google-reauth",
+ "requires_dists": [
+ "oauth2client>=2.0.0; extra == \"oauth2client\"",
+ "pyu2f"
+ ],
+ "requires_python": null,
+ "version": "0.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70",
+ "url": "https://files.pythonhosted.org/packages/f0/fa/c1a5aaa161aee2edce9491757fc394e29415c57b0a6be8e02e208fb8b7e2/grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3",
+ "url": "https://files.pythonhosted.org/packages/00/87/727d8f65646843623064f881ee4446276d049da8bd8da6ef45edc10e6e97/grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5",
+ "url": "https://files.pythonhosted.org/packages/02/71/2a68e19dfd1276524e618149c0e34e08ea39724de10690da23678096fd92/grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d",
+ "url": "https://files.pythonhosted.org/packages/68/19/2575ce3bb14736eb9ab4b2e5026886e119dfc521488d6a2c9ad2d8b1b6d2/grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947",
+ "url": "https://files.pythonhosted.org/packages/c7/bb/d01494037edee2d8e024cac8049b169b2723186b01cebb495ccf677bbba9/grpcio-1.62.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243",
+ "url": "https://files.pythonhosted.org/packages/c9/45/e9237e5fa69bdc2cf01e6ef2be3a421cb1c2c30dbb4e0859ad9ed3bcde0c/grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea",
+ "url": "https://files.pythonhosted.org/packages/cc/fb/09c2e42f37858f699b5f56e40f2c3a45fb24b1b7a9dbed3ae1ca7e5fbac9/grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e",
+ "url": "https://files.pythonhosted.org/packages/e1/5f/19a48b32dac6a5134afbcff4a5deca46b176c58f0b1c2663e11b18db2571/grpcio-1.62.1-cp310-cp310-linux_armv7l.whl"
+ }
+ ],
+ "project_name": "grpcio",
+ "requires_dists": [
+ "grpcio-tools>=1.62.1; extra == \"protobuf\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.62.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "681a2d844acdf05fac989da6dd406944ae11cb27a4cf3c9edef74d2585ab5f05",
+ "url": "https://files.pythonhosted.org/packages/00/ce/9c70a91e1a5fc709e6acf34682b8b2499179ddc27b18b0e3670ff9c257db/gsutil-5.27.tar.gz"
+ }
+ ],
+ "project_name": "gsutil",
+ "requires_dists": [
+ "argcomplete>=1.9.4",
+ "crcmod>=1.7",
+ "fasteners>=0.14.1",
+ "gcs-oauth2-boto-plugin>=3.0",
+ "google-apitools>=0.5.32",
+ "google-auth[aiohttp]>=2.5.0",
+ "google-reauth>=0.1.0",
+ "httplib2==0.20.4",
+ "mock<=3.0.5,>=2.0.0; python_version < \"3.3\"",
+ "monotonic>=1.4",
+ "pyOpenSSL>=0.13",
+ "retry-decorator>=1.0.0",
+ "six>=1.16.0"
+ ],
+ "requires_python": "!=2.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,<4",
+ "version": "5.27"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "61c3384b5575985bb7f85e43213bcb40f36fcdff388cae6bc229304c71f2843e",
+ "url": "https://files.pythonhosted.org/packages/a8/4d/3cbfd81ed84db450dbe73a89afcd8bc405273918415649ac6683356afe92/gymnasium-0.29.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1a532752efcb7590478b1cc7aa04f608eb7a2fdad5570cd217b66b6a35274bb1",
+ "url": "https://files.pythonhosted.org/packages/0d/f8/5699ddb3e1c4f6d97b8930e573074849b921da8374fccd141f0f3a9bd713/gymnasium-0.29.1.tar.gz"
+ }
+ ],
+ "project_name": "gymnasium",
+ "requires_dists": [
+ "autorom[accept-rom-license]~=0.4.2; extra == \"accept-rom-license\"",
+ "box2d-py==2.3.5; extra == \"all\"",
+ "box2d-py==2.3.5; extra == \"box2d\"",
+ "cloudpickle>=1.2.0",
+ "cython<3; extra == \"all\"",
+ "cython<3; extra == \"mujoco-py\"",
+ "cython<3; extra == \"mujoco-py\"",
+ "farama-notifications>=0.0.1",
+ "imageio>=2.14.1; extra == \"all\"",
+ "imageio>=2.14.1; extra == \"mujoco\"",
+ "importlib-metadata>=4.8.0; python_version < \"3.10\"",
+ "jax>=0.4.0; extra == \"all\"",
+ "jax>=0.4.0; extra == \"jax\"",
+ "jaxlib>=0.4.0; extra == \"all\"",
+ "jaxlib>=0.4.0; extra == \"jax\"",
+ "lz4>=3.1.0; extra == \"all\"",
+ "lz4>=3.1.0; extra == \"other\"",
+ "matplotlib>=3.0; extra == \"all\"",
+ "matplotlib>=3.0; extra == \"other\"",
+ "moviepy>=1.0.0; extra == \"all\"",
+ "moviepy>=1.0.0; extra == \"other\"",
+ "mujoco-py<2.2,>=2.1; extra == \"all\"",
+ "mujoco-py<2.2,>=2.1; extra == \"mujoco-py\"",
+ "mujoco-py<2.2,>=2.1; extra == \"mujoco-py\"",
+ "mujoco>=2.3.3; extra == \"all\"",
+ "mujoco>=2.3.3; extra == \"mujoco\"",
+ "numpy>=1.21.0",
+ "opencv-python>=3.0; extra == \"all\"",
+ "opencv-python>=3.0; extra == \"other\"",
+ "pygame>=2.1.3; extra == \"all\"",
+ "pygame>=2.1.3; extra == \"box2d\"",
+ "pygame>=2.1.3; extra == \"classic-control\"",
+ "pygame>=2.1.3; extra == \"classic-control\"",
+ "pygame>=2.1.3; extra == \"toy-text\"",
+ "pygame>=2.1.3; extra == \"toy-text\"",
+ "pytest==7.1.3; extra == \"testing\"",
+ "scipy>=1.7.3; extra == \"testing\"",
+ "shimmy[atari]<1.0,>=0.1.0; extra == \"all\"",
+ "shimmy[atari]<1.0,>=0.1.0; extra == \"atari\"",
+ "swig==4.*; extra == \"all\"",
+ "swig==4.*; extra == \"box2d\"",
+ "torch>=1.0.0; extra == \"all\"",
+ "torch>=1.0.0; extra == \"other\"",
+ "typing-extensions>=4.3.0"
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.29.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "8b6a905cb1c79eefd03f8669fd993c36dc341f7c558f056cb5a33b5c2f458543",
+ "url": "https://files.pythonhosted.org/packages/59/0f/29725a9caf4b2618f524e0f28e2bda91aca8f880123ec77426ede6ea1ea4/httplib2-0.20.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "58a98e45b4b1a48273073f905d2961666ecf0fbac4250ea5b47aef259eb5c585",
+ "url": "https://files.pythonhosted.org/packages/9c/65/57ad964eb8d45cc3d1316ce5ada2632f74e35863a0e57a52398416a182a1/httplib2-0.20.4.tar.gz"
+ }
+ ],
+ "project_name": "httplib2",
+ "requires_dists": [
+ "pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2; python_version > \"3.0\"",
+ "pyparsing<3,>=2.4.2; python_version < \"3.0\""
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "0.20.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f",
+ "url": "https://files.pythonhosted.org/packages/c2/e7/a82b05cf63a603df6e68d59ae6a68bf5064484a0718ea5033660af4b54a9/idna-3.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca",
+ "url": "https://files.pythonhosted.org/packages/bf/3f/ea4b9117521a1e9c50344b909be7886dd00a519552724809bb1f486986c2/idna-3.6.tar.gz"
+ }
+ ],
+ "project_name": "idna",
+ "requires_dists": [],
+ "requires_python": ">=3.5",
+ "version": "3.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b",
+ "url": "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a",
+ "url": "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz"
+ }
+ ],
+ "project_name": "imagesize",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7",
+ "version": "1.4.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374",
+ "url": "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
+ "url": "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz"
+ }
+ ],
+ "project_name": "iniconfig",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa",
+ "url": "https://files.pythonhosted.org/packages/30/6d/6de6be2d02603ab56e72997708809e8a5b0fbfee080735109b40a3564843/Jinja2-3.1.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90",
+ "url": "https://files.pythonhosted.org/packages/b2/5e/3a21abf3cd467d7876045335e681d276ac32492febe6d98ad89562d1a7e1/Jinja2-3.1.3.tar.gz"
+ }
+ ],
+ "project_name": "jinja2",
+ "requires_dists": [
+ "Babel>=2.7; extra == \"i18n\"",
+ "MarkupSafe>=2.0"
+ ],
+ "requires_python": ">=3.7",
+ "version": "3.1.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f",
+ "url": "https://files.pythonhosted.org/packages/fc/b3/0c0c994fe49cd661084f8d5dc06562af53818cc0abefaca35bdc894577c3/Markdown-3.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224",
+ "url": "https://files.pythonhosted.org/packages/22/02/4785861427848cc11e452cc62bb541006a1087cf04a1de83aedd5530b948/Markdown-3.6.tar.gz"
+ }
+ ],
+ "project_name": "markdown",
+ "requires_dists": [
+ "coverage; extra == \"testing\"",
+ "importlib-metadata>=4.4; python_version < \"3.10\"",
+ "mdx-gh-links>=0.2; extra == \"docs\"",
+ "mkdocs-gen-files; extra == \"docs\"",
+ "mkdocs-literate-nav; extra == \"docs\"",
+ "mkdocs-nature>=0.6; extra == \"docs\"",
+ "mkdocs-section-index; extra == \"docs\"",
+ "mkdocs>=1.5; extra == \"docs\"",
+ "mkdocstrings[python]; extra == \"docs\"",
+ "pyyaml; extra == \"testing\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
+ "url": "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb",
+ "url": "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "markdown-it-py",
+ "requires_dists": [
+ "commonmark~=0.9; extra == \"compare\"",
+ "coverage; extra == \"testing\"",
+ "gprof2dot; extra == \"profiling\"",
+ "jupyter_sphinx; extra == \"rtd\"",
+ "linkify-it-py<3,>=1; extra == \"linkify\"",
+ "markdown~=3.4; extra == \"compare\"",
+ "mdit-py-plugins; extra == \"plugins\"",
+ "mdit-py-plugins; extra == \"rtd\"",
+ "mdurl~=0.1",
+ "mistletoe~=1.0; extra == \"compare\"",
+ "mistune~=2.0; extra == \"compare\"",
+ "myst-parser; extra == \"rtd\"",
+ "panflute~=2.3; extra == \"compare\"",
+ "pre-commit~=3.0; extra == \"code-style\"",
+ "psutil; extra == \"benchmarking\"",
+ "pytest-benchmark; extra == \"benchmarking\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest; extra == \"benchmarking\"",
+ "pytest; extra == \"testing\"",
+ "pyyaml; extra == \"rtd\"",
+ "sphinx-copybutton; extra == \"rtd\"",
+ "sphinx-design; extra == \"rtd\"",
+ "sphinx; extra == \"rtd\"",
+ "sphinx_book_theme; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd",
+ "url": "https://files.pythonhosted.org/packages/30/39/8d845dd7d0b0613d86e0ef89549bfb5f61ed781f59af45fc96496e897f3a/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46",
+ "url": "https://files.pythonhosted.org/packages/0a/7b/85681ae3c33c385b10ac0f8dd025c30af83c78cec1c37a6aa3b55e67f5ec/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900",
+ "url": "https://files.pythonhosted.org/packages/29/fe/a36ba8c7ca55621620b2d7c585313efd10729e63ef81e4e61f52330da781/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff",
+ "url": "https://files.pythonhosted.org/packages/60/ae/9c60231cdfda003434e8bd27282b1f4e197ad5a710c14bee8bea8a9ca4f0/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad",
+ "url": "https://files.pythonhosted.org/packages/65/dc/1510be4d179869f5dafe071aecb3f1f41b45d37c02329dfba01ff59e5ac5/MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5",
+ "url": "https://files.pythonhosted.org/packages/6a/4a/a4d49415e600bacae038c67f9fecc1d5433b9d3c71a4de6f33537b89654c/MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f",
+ "url": "https://files.pythonhosted.org/packages/7c/52/2b1b570f6b8b803cef5ac28fdf78c0da318916c7d2fe9402a84d591b394c/MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b",
+ "url": "https://files.pythonhosted.org/packages/87/5b/aae44c6655f3801e81aa3eef09dbbf012431987ba564d7231722f68df02d/MarkupSafe-2.1.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc",
+ "url": "https://files.pythonhosted.org/packages/e4/54/ad5eb37bf9d51800010a74e4665425831a9db4e7c4e0fde4352e391e808e/MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl"
+ }
+ ],
+ "project_name": "markupsafe",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.1.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9",
+ "url": "https://files.pythonhosted.org/packages/e5/3c/fe85f19699a7b40c8f9ce8ecee7e269b9b3c94099306df6f9891bdefeedd/mdit_py_plugins-0.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b",
+ "url": "https://files.pythonhosted.org/packages/b4/db/61960d68d5c39ff0dd48cb799a39ae4e297f6e9b96bf2f8da29d897fba0c/mdit_py_plugins-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "mdit-py-plugins",
+ "requires_dists": [
+ "coverage; extra == \"testing\"",
+ "markdown-it-py<4.0.0,>=1.0.0",
+ "myst-parser; extra == \"rtd\"",
+ "pre-commit; extra == \"code-style\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "sphinx-book-theme; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8",
+ "url": "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba",
+ "url": "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz"
+ }
+ ],
+ "project_name": "mdurl",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.1.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c",
+ "url": "https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7",
+ "url": "https://files.pythonhosted.org/packages/ea/ca/8e91948b782ddfbd194f323e7e7d9ba12e5877addf04fb2bf8fca38e86ac/monotonic-1.6.tar.gz"
+ }
+ ],
+ "project_name": "monotonic",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7",
+ "url": "https://files.pythonhosted.org/packages/fa/a2/17e1e23c6be0a916219c5292f509360c345b5fa6beeb50d743203c27532c/multidict-6.0.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef",
+ "url": "https://files.pythonhosted.org/packages/11/b7/bef33e84e3722bc42531af020d7ae8c31235ce8846bacaa852b6484cf868/multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf",
+ "url": "https://files.pythonhosted.org/packages/12/4d/99dfc36872dcc53956879f5da80a6505bbd29214cce90ce792a86e15fddf/multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc",
+ "url": "https://files.pythonhosted.org/packages/26/ce/f745a2d6104e56f7fa0d7d0756bb9ed27b771dd7b8d9d7348cd7f0f7b9de/multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae",
+ "url": "https://files.pythonhosted.org/packages/33/62/2c9085e571318d51212a6914566fe41dd0e33d7f268f7e2f23dcd3f06c56/multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f",
+ "url": "https://files.pythonhosted.org/packages/36/6d/d2f982fb485175727a193b4900b5f929d461e7aa87d6fb5a91a377fcc9c0/multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5",
+ "url": "https://files.pythonhosted.org/packages/8d/ea/0230b6faa9a5bc10650fd50afcc4a86e6c37af2fe05bc679b74d79253732/multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600",
+ "url": "https://files.pythonhosted.org/packages/a4/eb/d8e7693c9064554a1585698d1902839440c6c695b0f53c9a8be5d9d4a3b8/multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9",
+ "url": "https://files.pythonhosted.org/packages/b7/36/48097b96135017ed1b806c5ea27b6cdc2ed3a6861c5372b793563206c586/multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a",
+ "url": "https://files.pythonhosted.org/packages/bc/84/9579004267e1cc5968ef2ef8718dab9d8950d99354d85b739dd67b09c273/multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442",
+ "url": "https://files.pythonhosted.org/packages/c2/5c/1e76b2c742cb9e0248d1e8c4ed420817879230c833fa27d890b5fd22290b/multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182",
+ "url": "https://files.pythonhosted.org/packages/ce/e2/88cdfeaf03eab3498f688a19b62ca704d371cd904cb74b682541ca7b20a7/multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604",
+ "url": "https://files.pythonhosted.org/packages/d9/48/037440edb5d4a1c65e002925b2f24071d6c27754e6f4734f63037e3169d6/multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c",
+ "url": "https://files.pythonhosted.org/packages/f3/7d/fe7648d4b2f200f8854066ce6e56bf51889abfaf859814c62160dd0e32a9/multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da",
+ "url": "https://files.pythonhosted.org/packages/f9/79/722ca999a3a09a63b35aac12ec27dfa8e5bb3a38b0f857f7a1a209a88836/multidict-6.0.5.tar.gz"
+ }
+ ],
+ "project_name": "multidict",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "6.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7c36344ae39c8e740dad7fdabf5aa6fc4897a813083c6cc9990044eb93656b14",
+ "url": "https://files.pythonhosted.org/packages/1d/f6/6d61a023d758f488e36638076e8a4ec4447a2cdf86938cf6c60cf1c860e6/myst_parser-2.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ea929a67a6a0b1683cdbe19b8d2e724cd7643f8aa3e7bb18dd65beac3483bead",
+ "url": "https://files.pythonhosted.org/packages/e8/c1/48ea47b78ade0bb0281f34c9e343e3ea0c681fbc81464dbfd134e983954f/myst_parser-2.0.0.tar.gz"
+ }
+ ],
+ "project_name": "myst-parser",
+ "requires_dists": [
+ "beautifulsoup4; extra == \"testing\"",
+ "coverage[toml]; extra == \"testing\"",
+ "docutils<0.21,>=0.16",
+ "ipython; extra == \"rtd\"",
+ "jinja2",
+ "linkify-it-py~=2.0; extra == \"linkify\"",
+ "markdown-it-py~=3.0",
+ "mdit-py-plugins~=0.4",
+ "pre-commit~=3.0; extra == \"code-style\"",
+ "pydata-sphinx-theme==v0.13.0rc4; extra == \"rtd\"",
+ "pygments; extra == \"testing-docutils\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-param-files~=0.3.4; extra == \"testing\"",
+ "pytest-param-files~=0.3.4; extra == \"testing-docutils\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest<8,>=7; extra == \"testing\"",
+ "pytest<8,>=7; extra == \"testing-docutils\"",
+ "pyyaml",
+ "sphinx-autodoc2~=0.4.2; extra == \"rtd\"",
+ "sphinx-book-theme==1.0.0rc2; extra == \"rtd\"",
+ "sphinx-copybutton; extra == \"rtd\"",
+ "sphinx-design2; extra == \"rtd\"",
+ "sphinx-pyscript; extra == \"rtd\"",
+ "sphinx-pytest; extra == \"testing\"",
+ "sphinx-tippy>=0.3.1; extra == \"rtd\"",
+ "sphinx-togglebutton; extra == \"rtd\"",
+ "sphinx<8,>=6",
+ "sphinxext-opengraph~=0.8.2; extra == \"rtd\"",
+ "sphinxext-rediraffe~=0.2.7; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "2.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1",
+ "url": "https://files.pythonhosted.org/packages/e4/f3/679b3a042a127de0d7c84874913c3e23bb84646eb3bc6ecab3f8c872edc9/numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63",
+ "url": "https://files.pythonhosted.org/packages/0f/ae/dad4b8e7c65494cbbd1c063de114efaf9acd0f5f6171f044f0d4b6299787/numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a",
+ "url": "https://files.pythonhosted.org/packages/42/38/775b43da55fa7473015eddc9a819571517d9a271a9f8134f68fb9be2f212/numpy-1.23.5.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d",
+ "url": "https://files.pythonhosted.org/packages/4d/39/d33202cc56c21123a50c6d5e160d00c18ff685ab864dbd4bf80dd40a7af9/numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43",
+ "url": "https://files.pythonhosted.org/packages/67/6b/d7c93d458d16464da9b3f560a20c363a19e242ebbb019bd1e1d797523851/numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "numpy",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "1.23.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac",
+ "url": "https://files.pythonhosted.org/packages/95/a9/4f25a14d23f0786b64875b91784607c2277eff25d48f915e39ff0cff505a/oauth2client-4.1.3-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6",
+ "url": "https://files.pythonhosted.org/packages/a6/7b/17244b1083e8e604bf154cf9b716aecd6388acd656dd01893d0d244c94d9/oauth2client-4.1.3.tar.gz"
+ }
+ ],
+ "project_name": "oauth2client",
+ "requires_dists": [
+ "httplib2>=0.9.1",
+ "pyasn1-modules>=0.0.5",
+ "pyasn1>=0.1.7",
+ "rsa>=3.1.4",
+ "six>=1.6.1"
+ ],
+ "requires_python": null,
+ "version": "4.1.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "298f28a2b5ac09145fa958513d3d1e6b349ccf86a877dbdcccad57713fe360b3",
+ "url": "https://files.pythonhosted.org/packages/49/5f/d8e1a24247f506a77cbe22341c72ca91bea3b468c5d6bca2047d885ea3c6/onnx-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "237c6987c6c59d9f44b6136f5819af79574f8d96a760a1fa843bede11f3822f7",
+ "url": "https://files.pythonhosted.org/packages/b3/fe/0978403c8d710ece2f34006367e78de80410743fe0e7680c8f33f2dab20d/onnx-1.16.0.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "034ae21a2aaa2e9c14119a840d2926d213c27aad29e5e3edaa30145a745048e1",
+ "url": "https://files.pythonhosted.org/packages/b8/1c/50310a559857951fc6e069cf5d89deebe34287997d1c5928bca435456f62/onnx-1.16.0-cp310-cp310-macosx_10_15_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9eadbdce25b19d6216f426d6d99b8bc877a65ed92cbef9707751c6669190ba4f",
+ "url": "https://files.pythonhosted.org/packages/c8/0b/f4705e4a3fa6fd0de971302fdae17ad176b024eca8c24360f0e37c00f9df/onnx-1.16.0-cp310-cp310-macosx_10_15_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ec22a43d74eb1f2303373e2fbe7fbcaa45fb225f4eb146edfed1356ada7a9aea",
+ "url": "https://files.pythonhosted.org/packages/ef/6e/96be6692ebcd8da568084d753f386ce08efa1f99b216f346ee281edd6cc3/onnx-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "onnx",
+ "requires_dists": [
+ "Pillow; extra == \"reference\"",
+ "google-re2; extra == \"reference\"",
+ "numpy>=1.20",
+ "protobuf>=3.20.2"
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e4088cab82b66a3b37ffc452976b14a3c599269c247895ae9ceb4066d8188a57",
+ "url": "https://files.pythonhosted.org/packages/d9/64/7fdfb9386511cd6805451e012c537073a79a958a58795c4e602e538c388c/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1a9f0e6267de3a1a1db0c54213d022c7c8b5b9ca4b580e80bdc58516c922c9e1",
+ "url": "https://files.pythonhosted.org/packages/25/72/da7c69a3542071bf1e8f65336721b8b2659194425438d988f79bc14ed9cc/opencv-python-4.9.0.80.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e5f7aa4486651a6ebfa8ed4b594b65bd2d2f41beeb4241a3e4b1b85acbbbadb",
+ "url": "https://files.pythonhosted.org/packages/35/69/b657974ddcbba54d59d7d62b01e60a8b815e35f415b996e4d355be0ac7b4/opencv_python-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7b34a52e9da36dda8c151c6394aed602e4b17fa041df0b9f5b93ae10b0fcca2a",
+ "url": "https://files.pythonhosted.org/packages/52/00/2adf376707c7965bb4569f28f73fafe303c404d01047b10e3b52761be086/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "71dfb9555ccccdd77305fc3dcca5897fbf0cf28b297c51ee55e079c065d812a3",
+ "url": "https://files.pythonhosted.org/packages/77/df/b56175c3fb5bc058774bdcf35f5a71cf9c3c5b909f98a1c688eb71cd3b1f/opencv_python-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl"
+ }
+ ],
+ "project_name": "opencv-python",
+ "requires_dists": [
+ "numpy>=1.13.3; python_version < \"3.7\"",
+ "numpy>=1.17.0; python_version >= \"3.7\"",
+ "numpy>=1.17.3; python_version >= \"3.8\"",
+ "numpy>=1.19.3; python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\"",
+ "numpy>=1.19.3; python_version >= \"3.9\"",
+ "numpy>=1.21.0; python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\"",
+ "numpy>=1.21.2; python_version >= \"3.10\"",
+ "numpy>=1.21.4; python_version >= \"3.10\" and platform_system == \"Darwin\"",
+ "numpy>=1.23.5; python_version >= \"3.11\"",
+ "numpy>=1.26.0; python_version >= \"3.12\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "4.9.0.80"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9",
+ "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz"
+ }
+ ],
+ "project_name": "packaging",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "24.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
+ "url": "https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be",
+ "url": "https://files.pythonhosted.org/packages/54/c6/43f9d44d92aed815e781ca25ba8c174257e27253a94630d21be8725a2b59/pluggy-1.4.0.tar.gz"
+ }
+ ],
+ "project_name": "pluggy",
+ "requires_dists": [
+ "pre-commit; extra == \"dev\"",
+ "pytest-benchmark; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"dev\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9",
+ "url": "https://files.pythonhosted.org/packages/f4/d5/db585a5e8d64af6b384c7b3a63da13df2ff86933e486ba78431736c67c25/protobuf-4.25.3-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d",
+ "url": "https://files.pythonhosted.org/packages/15/db/7f731524fe0e56c6b2eb57d05b55d3badd80ef7d1f1ed59db191b2fdd8ab/protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c",
+ "url": "https://files.pythonhosted.org/packages/5e/d8/65adb47d921ce828ba319d6587aa8758da022de509c3862a70177a958844/protobuf-4.25.3.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019",
+ "url": "https://files.pythonhosted.org/packages/d8/82/aefe901174b5a618daee511ddd00342193c1b545e3cd6a2cd6df9ba452b5/protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c",
+ "url": "https://files.pythonhosted.org/packages/f3/bf/26deba06a4c910a85f78245cac7698f67cedd7efe00d04f6b3e1b3506a59/protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl"
+ }
+ ],
+ "project_name": "protobuf",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "4.25.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8",
+ "url": "https://files.pythonhosted.org/packages/05/33/2d74d588408caedd065c2497bdb5ef83ce6082db01289a1e1147f6639802/psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c",
+ "url": "https://files.pythonhosted.org/packages/90/c7/6dc0a455d111f68ee43f27793971cf03fe29b6ef972042549db29eec39a2/psutil-5.9.8.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421",
+ "url": "https://files.pythonhosted.org/packages/b3/bd/28c5f553667116b2598b9cc55908ec435cb7f77a34f2bff3e3ca765b0f78/psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4",
+ "url": "https://files.pythonhosted.org/packages/c5/4f/0e22aaa246f96d6ac87fe5ebb9c5a693fbe8877f537a1022527c47ca43c5/psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81",
+ "url": "https://files.pythonhosted.org/packages/e7/e3/07ae864a636d70a8a6f58da27cb1179192f1140d5d1da10886ade9405797/psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl"
+ }
+ ],
+ "project_name": "psutil",
+ "requires_dists": [
+ "enum34; python_version <= \"3.4\" and extra == \"test\"",
+ "ipaddress; python_version < \"3.0\" and extra == \"test\"",
+ "mock; python_version < \"3.0\" and extra == \"test\"",
+ "pywin32; sys_platform == \"win32\" and extra == \"test\"",
+ "wmi; sys_platform == \"win32\" and extra == \"test\""
+ ],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7",
+ "version": "5.9.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378",
+ "url": "https://files.pythonhosted.org/packages/f6/f0/10642828a8dfb741e5f3fbaac830550a518a775c7fff6f04a007259b0548/py-1.11.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719",
+ "url": "https://files.pythonhosted.org/packages/98/ff/fec109ceb715d2a6b4c4a85a61af3b40c723a961e8828319fbcb15b868dc/py-1.11.0.tar.gz"
+ }
+ ],
+ "project_name": "py",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7",
+ "version": "1.11.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5",
+ "url": "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690",
+ "url": "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz"
+ }
+ ],
+ "project_name": "py-cpuinfo",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "9.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473",
+ "url": "https://files.pythonhosted.org/packages/23/7e/5f50d07d5e70a2addbccd90ac2950f81d1edd0783630651d9268d7f1db49/pyasn1-0.6.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c",
+ "url": "https://files.pythonhosted.org/packages/4a/a3/d2157f333900747f20984553aca98008b6dc843eb62f3a36030140ccec0d/pyasn1-0.6.0.tar.gz"
+ }
+ ],
+ "project_name": "pyasn1",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "0.6.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b",
+ "url": "https://files.pythonhosted.org/packages/13/68/8906226b15ef38e71dc926c321d2fe99de8048e9098b5dfd38343011c886/pyasn1_modules-0.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6",
+ "url": "https://files.pythonhosted.org/packages/f7/00/e7bd1dec10667e3f2be602686537969a7ac92b0a7c5165be2e5875dc3971/pyasn1_modules-0.4.0.tar.gz"
+ }
+ ],
+ "project_name": "pyasn1-modules",
+ "requires_dists": [
+ "pyasn1<0.7.0,>=0.4.6"
+ ],
+ "requires_python": ">=3.8",
+ "version": "0.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc",
+ "url": "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6",
+ "url": "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz"
+ }
+ ],
+ "project_name": "pycparser",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "2.22"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "03879ec299c9f4ba23901b2649a96b2143f0a5d787f0b6c39469989e2320caf1",
+ "url": "https://files.pythonhosted.org/packages/c8/c7/0d77e0e327bf09c12f445f92f5bad0b447375d7b836c5bac5255ead8436f/pygame-2.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a0769eb628c818761755eb0a0ca8216b95270ea8cbcbc82227e39ac9644643da",
+ "url": "https://files.pythonhosted.org/packages/14/54/dc58f8b70e08b6706b158f0c70f86eb1594db6797cb89383f062ad6a304d/pygame-2.5.2-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f30d1618672a55e8c6669281ba264464b3ab563158e40d89e8c8b3faa0febebd",
+ "url": "https://files.pythonhosted.org/packages/5b/91/09f93d428b483c451eacee9ba1e04a1e9999751c80bf6236b2bdc8e19b1e/pygame-2.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ed9a3d98adafa0805ccbaaff5d2996a2b5795381285d8437a4a5d248dbd12b4a",
+ "url": "https://files.pythonhosted.org/packages/65/b6/67e33add85b0f7ac901c6fb89a57f97fdfd67c8834f425a97abaf4a60191/pygame-2.5.2-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c1b89eb5d539e7ac5cf75513125fb5f2f0a2d918b1fd6e981f23bf0ac1b1c24a",
+ "url": "https://files.pythonhosted.org/packages/c6/aa/2c0c867d6cff00966cfc2152b25f61599f87e88b239e4dcb8ad5357f0f69/pygame-2.5.2.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "39690e9be9baf58b7359d1f3b2336e1fd6f92fedbbce42987be5df27f8d30718",
+ "url": "https://files.pythonhosted.org/packages/e8/6e/31d7a068edbb029e5a35d8fe4572b67e00705cb8f6dad650397bc417b6b3/pygame-2.5.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl"
+ }
+ ],
+ "project_name": "pygame",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "2.5.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c",
+ "url": "https://files.pythonhosted.org/packages/97/9c/372fef8377a6e340b1704768d20daaded98bf13282b5327beb2e2fe2c7ef/pygments-2.17.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367",
+ "url": "https://files.pythonhosted.org/packages/55/59/8bccf4157baf25e4aa5a0bb7fa3ba8600907de105ebc22b0c78cfbf6f565/pygments-2.17.2.tar.gz"
+ }
+ ],
+ "project_name": "pygments",
+ "requires_dists": [
+ "colorama>=0.4.6; extra == \"windows-terminal\"",
+ "importlib-metadata; python_version < \"3.8\" and extra == \"plugins\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.17.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "17ed5be5936449c5418d1cd269a1a9e9081bc54c17aed272b45856a3d3dc86ad",
+ "url": "https://files.pythonhosted.org/packages/54/a7/2104f674a5a6845b04c8ff01659becc6b8978ca410b82b94287e0b1e018b/pyOpenSSL-24.1.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cabed4bfaa5df9f1a16c0ef64a0cb65318b5cd077a7eda7d6970131ca2f41a6f",
+ "url": "https://files.pythonhosted.org/packages/91/a8/cbeec652549e30103b9e6147ad433405fdd18807ac2d54e6dbb73184d8a1/pyOpenSSL-24.1.0.tar.gz"
+ }
+ ],
+ "project_name": "pyopenssl",
+ "requires_dists": [
+ "cryptography<43,>=41.0.5",
+ "pretend; extra == \"test\"",
+ "pytest-rerunfailures; extra == \"test\"",
+ "pytest>=3.0.1; extra == \"test\"",
+ "sphinx!=5.2.0,!=5.2.0.post0,!=7.2.5; extra == \"docs\"",
+ "sphinx-rtd-theme; extra == \"docs\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "24.1.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742",
+ "url": "https://files.pythonhosted.org/packages/9d/ea/6d76df31432a0e6fdf81681a895f009a4bb47b3c39036db3e1b528191d52/pyparsing-3.1.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
+ "url": "https://files.pythonhosted.org/packages/46/3a/31fd28064d016a2182584d579e033ec95b809d8e220e74c4af6f0f2e8842/pyparsing-3.1.2.tar.gz"
+ }
+ ],
+ "project_name": "pyparsing",
+ "requires_dists": [
+ "jinja2; extra == \"diagrams\"",
+ "railroad-diagrams; extra == \"diagrams\""
+ ],
+ "requires_python": ">=3.6.8",
+ "version": "3.1.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7",
+ "url": "https://files.pythonhosted.org/packages/4d/7e/c79cecfdb6aa85c6c2e3cf63afc56d0f165f24f5c66c03c695c4d9b84756/pytest-8.1.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044",
+ "url": "https://files.pythonhosted.org/packages/30/b7/7d44bbc04c531dcc753056920e0988032e5871ac674b5a84cb979de6e7af/pytest-8.1.1.tar.gz"
+ }
+ ],
+ "project_name": "pytest",
+ "requires_dists": [
+ "argcomplete; extra == \"testing\"",
+ "attrs>=19.2; extra == \"testing\"",
+ "colorama; sys_platform == \"win32\"",
+ "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"",
+ "hypothesis>=3.56; extra == \"testing\"",
+ "iniconfig",
+ "mock; extra == \"testing\"",
+ "packaging",
+ "pluggy<2.0,>=1.4",
+ "pygments>=2.7.2; extra == \"testing\"",
+ "requests; extra == \"testing\"",
+ "setuptools; extra == \"testing\"",
+ "tomli>=1; python_version < \"3.11\"",
+ "xmlschema; extra == \"testing\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "8.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6",
+ "url": "https://files.pythonhosted.org/packages/4d/a1/3b70862b5b3f830f0422844f25a823d0470739d994466be9dbbbb414d85a/pytest_benchmark-4.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1",
+ "url": "https://files.pythonhosted.org/packages/28/08/e6b0067efa9a1f2a1eb3043ecd8a0c48bfeb60d3255006dcc829d72d5da2/pytest-benchmark-4.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-benchmark",
+ "requires_dists": [
+ "aspectlib; extra == \"aspect\"",
+ "elasticsearch; extra == \"elasticsearch\"",
+ "pathlib2; python_version < \"3.4\"",
+ "py-cpuinfo",
+ "pygal; extra == \"histogram\"",
+ "pygaljs; extra == \"histogram\"",
+ "pytest>=3.8",
+ "statistics; python_version < \"3.4\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6",
+ "url": "https://files.pythonhosted.org/packages/20/49/b3e0edec68d81846f519c602ac38af9db86e1e71275528b3e814ae236063/pytest_cov-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470",
+ "url": "https://files.pythonhosted.org/packages/61/41/e046526849972555928a6d31c2068410e47a31fb5ab0a77f868596811329/pytest-cov-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-cov",
+ "requires_dists": [
+ "coverage[toml]>=5.2.1",
+ "fields; extra == \"testing\"",
+ "hunter; extra == \"testing\"",
+ "process-tests; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing\"",
+ "pytest>=4.6",
+ "six; extra == \"testing\"",
+ "virtualenv; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "810958f66a91afb1a1e2ae83089d8dc1cd2437ac96b12963042fbb9fb4d16af0",
+ "url": "https://files.pythonhosted.org/packages/f4/af/9c0bda43e486a3c9bf1e0f876d0f241bc3f229d7d65d09331a0868db9629/pytest_forked-1.6.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4dafd46a9a600f65d822b8f605133ecf5b3e1941ebb3588e943b4e3eb71a5a3f",
+ "url": "https://files.pythonhosted.org/packages/8c/c9/93ad2ba2413057ee694884b88cf7467a46c50c438977720aeac26e73fdb7/pytest-forked-1.6.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-forked",
+ "requires_dists": [
+ "py",
+ "pytest>=3.10"
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.6.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b05cb0bcd51a7cd0375bfbeeb3eaeb01fc85665e45b21fc9494a8a19137f4d32",
+ "url": "https://files.pythonhosted.org/packages/c5/d1/2ef73ee137add043df444fddf1c851b8ca70ab9c7b7f18e18c4c244fec6d/pytest_platform_markers-1.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "07ea92669114ba8083b6653995b5a9ab14d57ca16307fd2af22d6f7d295160e4",
+ "url": "https://files.pythonhosted.org/packages/b3/e7/174a22a8cb4cf4b64456cd799f472bb90206f1ce8d537edbc1d9659689a3/pytest-platform-markers-1.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-platform-markers",
+ "requires_dists": [
+ "pytest>=3.6.0"
+ ],
+ "requires_python": null,
+ "version": "1.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4197bdd2eaeffdbf50b5ea6e7236f47ff0e44d1def8dae08e409f536d84e7b32",
+ "url": "https://files.pythonhosted.org/packages/dc/e7/e75bd157331aecc190f5f8950d7ea3d2cf56c3c57fb44da70e60b221133f/pytest_rerunfailures-14.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4a400bcbcd3c7a4ad151ab8afac123d90eca3abe27f98725dc4d9702887d2e92",
+ "url": "https://files.pythonhosted.org/packages/cc/a4/6de45fe850759e94aa9a55cda807c76245af1941047294df26c851dfb4a9/pytest-rerunfailures-14.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-rerunfailures",
+ "requires_dists": [
+ "packaging>=17.1",
+ "pytest>=7.2"
+ ],
+ "requires_python": ">=3.8",
+ "version": "14.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65",
+ "url": "https://files.pythonhosted.org/packages/21/08/b1945d4b4986eb1aa10cf84efc5293bba39da80a2f95db3573dd90678408/pytest_xdist-2.5.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf",
+ "url": "https://files.pythonhosted.org/packages/5d/43/9dbc32d297d6eae85d6c05dc8e8d3371061bd6cbe56a2f645d9ea4b53d9b/pytest-xdist-2.5.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-xdist",
+ "requires_dists": [
+ "execnet>=1.1",
+ "filelock; extra == \"testing\"",
+ "psutil>=3.0; extra == \"psutil\"",
+ "pytest-forked",
+ "pytest>=6.2.0",
+ "setproctitle; extra == \"setproctitle\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "2.5.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a3caa3a11842fc7d5746376f37195e6af5f17c0a15737538bb1cebf656fb306b",
+ "url": "https://files.pythonhosted.org/packages/29/b5/c1209e6cb77647bc2c9a6a1a953355720f34f3b006b725e303c70f3c0786/pyu2f-0.1.5.tar.gz"
+ }
+ ],
+ "project_name": "pyu2f",
+ "requires_dists": [
+ "six"
+ ],
+ "requires_python": null,
+ "version": "0.1.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290",
+ "url": "https://files.pythonhosted.org/packages/07/91/45dfd0ef821a7f41d9d0136ea3608bb5b1653e42fd56a7970532cb5c003f/PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
+ "url": "https://files.pythonhosted.org/packages/29/61/bf33c6c85c55bc45a29eee3195848ff2d518d84735eb0e2d8cb42e0d285e/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f",
+ "url": "https://files.pythonhosted.org/packages/5b/07/10033a403b23405a8fc48975444463d3d10a5c2736b7eb2550b07b367429/PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
+ "url": "https://files.pythonhosted.org/packages/96/06/4beb652c0fe16834032e54f0956443d4cc797fe645527acee59e7deaa0a2/PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
+ "url": "https://files.pythonhosted.org/packages/ba/91/090818dfa62e85181f3ae23dd1e8b7ea7f09684864a900cab72d29c57346/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
+ "url": "https://files.pythonhosted.org/packages/cd/e5/af35f7ea75cf72f2cd079c95ee16797de7cd71f29ea7c68ae5ce7be1eda0/PyYAML-6.0.1.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
+ "url": "https://files.pythonhosted.org/packages/f1/26/55e4f21db1f72eaef092015d9017c11510e7e6301c62a6cfee91295d13c6/PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ }
+ ],
+ "project_name": "pyyaml",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "6.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "url": "https://files.pythonhosted.org/packages/70/8e/0e2d847013cb52cd35b38c009bb167a1a26b2ce6cd6965bf26b47bc0bf44/requests-2.31.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1",
+ "url": "https://files.pythonhosted.org/packages/9d/be/10918a2eac4ae9f02f6cfe6414b7a155ccd8f7f9d4380d62fd5b955065c3/requests-2.31.0.tar.gz"
+ }
+ ],
+ "project_name": "requests",
+ "requires_dists": [
+ "PySocks!=1.5.7,>=1.5.6; extra == \"socks\"",
+ "certifi>=2017.4.17",
+ "chardet<6,>=3.0.2; extra == \"use-chardet-on-py3\"",
+ "charset-normalizer<4,>=2",
+ "idna<4,>=2.5",
+ "urllib3<3,>=1.21.1"
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.31.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e1e8ad02e518fe11073f2ea7d80b6b8be19daa27a60a1838aff7c731ddcf2ebe",
+ "url": "https://files.pythonhosted.org/packages/6e/e6/bedc75b264cbcbf6e6d0e5071d96d739f540fc09be31744a7a8824c02a8e/retry_decorator-1.1.1.tar.gz"
+ }
+ ],
+ "project_name": "retry-decorator",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "1.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2",
+ "url": "https://files.pythonhosted.org/packages/e9/93/0c0f002031f18b53af7a6166103c02b9c0667be528944137cc954ec921b3/rsa-4.7.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9",
+ "url": "https://files.pythonhosted.org/packages/db/b5/475c45a58650b0580421746504b680cd2db4e81bc941e94ca53785250269/rsa-4.7.2.tar.gz"
+ }
+ ],
+ "project_name": "rsa",
+ "requires_dists": [
+ "pyasn1>=0.1.3"
+ ],
+ "requires_python": "<4,>=3.5",
+ "version": "4.7.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "5f75eb91d8ab6037c754a87b8501cc581b2827e923682f593bed3539ce5b3999",
+ "url": "https://files.pythonhosted.org/packages/b1/f8/2038661bc32579d0c11191fc1093e49db590bfb6e63d501d7995fb798d62/sentry_sdk-1.44.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "24e6a53eeabffd2f95d952aa35ca52f0f4201d17f820ac9d3ff7244c665aaf68",
+ "url": "https://files.pythonhosted.org/packages/fd/72/85a8bc961d9160ac8c9f0a6d39dbdad21795d55c7b02a433bd0ffb75c037/sentry-sdk-1.44.1.tar.gz"
+ }
+ ],
+ "project_name": "sentry-sdk",
+ "requires_dists": [
+ "aiohttp>=3.5; extra == \"aiohttp\"",
+ "apache-beam>=2.12; extra == \"beam\"",
+ "arq>=0.23; extra == \"arq\"",
+ "asttokens; extra == \"pure-eval\"",
+ "asyncpg>=0.23; extra == \"asyncpg\"",
+ "blinker>=1.1; extra == \"flask\"",
+ "blinker>=1.1; extra == \"quart\"",
+ "bottle>=0.12.13; extra == \"bottle\"",
+ "celery-redbeat>=2; extra == \"celery-redbeat\"",
+ "celery>=3; extra == \"celery\"",
+ "certifi",
+ "chalice>=1.16.0; extra == \"chalice\"",
+ "clickhouse-driver>=0.2.0; extra == \"clickhouse-driver\"",
+ "django>=1.8; extra == \"django\"",
+ "executing; extra == \"pure-eval\"",
+ "falcon>=1.4; extra == \"falcon\"",
+ "fastapi>=0.79.0; extra == \"fastapi\"",
+ "flask>=0.11; extra == \"flask\"",
+ "grpcio>=1.21.1; extra == \"grpcio\"",
+ "httpx>=0.16.0; extra == \"httpx\"",
+ "huey>=2; extra == \"huey\"",
+ "loguru>=0.5; extra == \"loguru\"",
+ "markupsafe; extra == \"flask\"",
+ "openai>=1.0.0; extra == \"openai\"",
+ "opentelemetry-distro>=0.35b0; extra == \"opentelemetry\"",
+ "opentelemetry-distro~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-aiohttp-client~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-django~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-fastapi~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-flask~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-requests~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-sqlite3~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "opentelemetry-instrumentation-urllib~=0.40b0; extra == \"opentelemetry-experimental\"",
+ "pure-eval; extra == \"pure-eval\"",
+ "pymongo>=3.1; extra == \"pymongo\"",
+ "pyspark>=2.4.4; extra == \"pyspark\"",
+ "quart>=0.16.1; extra == \"quart\"",
+ "rq>=0.6; extra == \"rq\"",
+ "sanic>=0.8; extra == \"sanic\"",
+ "sqlalchemy>=1.2; extra == \"sqlalchemy\"",
+ "starlette>=0.19.1; extra == \"starlette\"",
+ "starlite>=1.48; extra == \"starlite\"",
+ "tiktoken>=0.3.0; extra == \"openai\"",
+ "tornado>=5; extra == \"tornado\"",
+ "urllib3>=1.25.7; python_version <= \"3.4\"",
+ "urllib3>=1.26.11; python_version >= \"3.6\"",
+ "urllib3>=1.26.9; python_version == \"3.5\""
+ ],
+ "requires_python": null,
+ "version": "1.44.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5",
+ "url": "https://files.pythonhosted.org/packages/70/1d/3b2249c833c7d52b59ff0602d760df0543dc1e6c272f145b949750edeb01/setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0",
+ "url": "https://files.pythonhosted.org/packages/24/55/8b369b56007a5a2c7594cdb58cd4a09d7cca65b28483bb5582c6975663f1/setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9",
+ "url": "https://files.pythonhosted.org/packages/35/30/ac99ecae8458ba995f85aa3aa911004679b405922e1487b0fba6fe8f4d37/setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d",
+ "url": "https://files.pythonhosted.org/packages/3d/92/17168f4bb1a695094e93e73a1ef1f7b89953a6d91e8a7699a2c840ba712f/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754",
+ "url": "https://files.pythonhosted.org/packages/4f/cc/c51e6371f640a9adbe693ddb89d68596e5a8e4b5e05b4d3c65ec504e2f6d/setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85",
+ "url": "https://files.pythonhosted.org/packages/69/a7/2a77b68c11db87c22350381d6ce022011eb420076790e0e3697153e89458/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39",
+ "url": "https://files.pythonhosted.org/packages/79/e7/54b36be02aee8ad573be68f6f46fd62838735c2f007b22df50eb5e13a20d/setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f",
+ "url": "https://files.pythonhosted.org/packages/87/7b/69bdc791001250dff279a1a81904f3f563caece4fa1607a95b9fd5197d6e/setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5",
+ "url": "https://files.pythonhosted.org/packages/94/ad/4166381d79f6ae8138be9b49f05d193a8deb748debace9896dffad45a753/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3",
+ "url": "https://files.pythonhosted.org/packages/9c/56/6f4a4e80b2810eb7ea9ab355022c780ef80457de368ab5b6b21b795e4f05/setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0",
+ "url": "https://files.pythonhosted.org/packages/9d/09/bc108723bbfb7c50c22fdf22191f3e32abcb5d6f46610018030b25f601c5/setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452",
+ "url": "https://files.pythonhosted.org/packages/c3/7d/d03f319e0f3b3a6e98731a56cd4d81478ed0c12531b822fd2c728b948edb/setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74",
+ "url": "https://files.pythonhosted.org/packages/d0/ae/010811bece9a59a8bba131d9e7acea9c2e3c3cbf544bf06d8b10b8c28ff5/setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae",
+ "url": "https://files.pythonhosted.org/packages/ff/e1/b16b16a1aa12174349d15b73fd4b87e641a8ae3fb1163e80938dbbf6ae98/setproctitle-1.3.3.tar.gz"
+ }
+ ],
+ "project_name": "setproctitle",
+ "requires_dists": [
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.3.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6d10741ff20b89cd8c6a536ee9dc90d3002dec0226c78fb98605bfb9ef8a7adf",
+ "url": "https://files.pythonhosted.org/packages/40/a9/7deac76c58fa47c95360116a06b53b9b62f6db11336fe61b6ab53784d98b/setuptools-59.5.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d144f85102f999444d06f9c0e8c737fd0194f10f2f7e5fdb77573f6e2fa4fad0",
+ "url": "https://files.pythonhosted.org/packages/e6/e2/f2bfdf364e016f7a464db709ea40d1101c4c5a463dd7019dae0a42dbd1c6/setuptools-59.5.0.tar.gz"
+ }
+ ],
+ "project_name": "setuptools",
+ "requires_dists": [
+ "flake8-2020; extra == \"testing\"",
+ "furo; extra == \"docs\"",
+ "jaraco.envs>=2.2; extra == \"testing\"",
+ "jaraco.packaging>=8.2; extra == \"docs\"",
+ "jaraco.path>=3.2.0; extra == \"testing\"",
+ "jaraco.tidelift>=1.4; extra == \"docs\"",
+ "mock; extra == \"testing\"",
+ "paver; extra == \"testing\"",
+ "pip>=19.1; extra == \"testing\"",
+ "pygments-github-lexers==0.0.5; extra == \"docs\"",
+ "pytest-black>=0.3.7; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-checkdocs>=2.4; extra == \"testing\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-enabler>=1.0.1; extra == \"testing\"",
+ "pytest-flake8; extra == \"testing\"",
+ "pytest-mypy; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-virtualenv>=1.2.7; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing\"",
+ "pytest>=6; extra == \"testing\"",
+ "rst.linker>=1.9; extra == \"docs\"",
+ "sphinx-inline-tabs; extra == \"docs\"",
+ "sphinx; extra == \"docs\"",
+ "sphinx; extra == \"testing\"",
+ "sphinxcontrib-towncrier; extra == \"docs\"",
+ "virtualenv>=13.0.0; extra == \"testing\"",
+ "wheel; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "59.5.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254",
+ "url": "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
+ "url": "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz"
+ }
+ ],
+ "project_name": "six",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7",
+ "version": "1.16.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da",
+ "url": "https://files.pythonhosted.org/packages/a7/a5/10f97f73544edcdef54409f1d839f6049a0d79df68adbc1ceb24d1aaca42/smmap-5.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62",
+ "url": "https://files.pythonhosted.org/packages/88/04/b5bf6d21dc4041000ccba7eb17dd3055feb237e7ffc2c20d3fae3af62baa/smmap-5.0.1.tar.gz"
+ }
+ ],
+ "project_name": "smmap",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "5.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a",
+ "url": "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1",
+ "url": "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz"
+ }
+ ],
+ "project_name": "snowballstemmer",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "2.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "1e09160a40b956dc623c910118fa636da93bd3ca0b9876a7b3df90f07d691560",
+ "url": "https://files.pythonhosted.org/packages/b2/b6/8ed35256aa530a9d3da15d20bdc0ba888d5364441bb50a5a83ee7827affe/sphinx-7.2.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9a5160e1ea90688d5963ba09a2dcd8bdd526620edbb65c328728f1b2228d5ab5",
+ "url": "https://files.pythonhosted.org/packages/73/8e/6e51da4b26665b4b92b1944ea18b2d9c825e753e19180cc5bdc818d0ed3b/sphinx-7.2.6.tar.gz"
+ }
+ ],
+ "project_name": "sphinx",
+ "requires_dists": [
+ "Jinja2>=3.0",
+ "Pygments>=2.14",
+ "alabaster<0.8,>=0.7",
+ "babel>=2.9",
+ "colorama>=0.4.5; sys_platform == \"win32\"",
+ "cython>=3.0; extra == \"test\"",
+ "docutils-stubs; extra == \"lint\"",
+ "docutils<0.21,>=0.18.1",
+ "filelock; extra == \"test\"",
+ "flake8-simplify; extra == \"lint\"",
+ "flake8>=3.5.0; extra == \"lint\"",
+ "html5lib; extra == \"test\"",
+ "imagesize>=1.3",
+ "importlib-metadata>=4.8; python_version < \"3.10\"",
+ "isort; extra == \"lint\"",
+ "mypy>=0.990; extra == \"lint\"",
+ "packaging>=21.0",
+ "pytest>=4.6; extra == \"test\"",
+ "requests>=2.25.0",
+ "ruff; extra == \"lint\"",
+ "setuptools>=67.0; extra == \"test\"",
+ "snowballstemmer>=2.0",
+ "sphinx-lint; extra == \"lint\"",
+ "sphinxcontrib-applehelp",
+ "sphinxcontrib-devhelp",
+ "sphinxcontrib-htmlhelp>=2.0.0",
+ "sphinxcontrib-jsmath",
+ "sphinxcontrib-qthelp",
+ "sphinxcontrib-serializinghtml>=1.1.9",
+ "sphinxcontrib-websupport; extra == \"docs\"",
+ "types-requests; extra == \"lint\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "7.2.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4",
+ "url": "https://files.pythonhosted.org/packages/56/89/fea3fbf6785b388e6cb8a1beaf62f96e80b37311bdeed6e133388a732426/sphinxcontrib_applehelp-1.0.8-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619",
+ "url": "https://files.pythonhosted.org/packages/26/6b/68f470fc337ed24043fec987b101f25b35010970bd958970c2ae5990859f/sphinxcontrib_applehelp-1.0.8.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-applehelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.8"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f",
+ "url": "https://files.pythonhosted.org/packages/a0/52/1049d918d1d1c72857d285c3f0c64c1cbe0be394ce1c93a3d2aa4f39fe3b/sphinxcontrib_devhelp-1.0.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3",
+ "url": "https://files.pythonhosted.org/packages/c7/a1/80b7e9f677abc673cb9320bf255ad4e08931ccbc2e66bde4b59bad3809ad/sphinxcontrib_devhelp-1.0.6.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-devhelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04",
+ "url": "https://files.pythonhosted.org/packages/c2/e9/74c4cda5b409af3222fda38f0774e616011bc935f639dbc0da5ca2d1be7d/sphinxcontrib_htmlhelp-2.0.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015",
+ "url": "https://files.pythonhosted.org/packages/8a/03/2f9d699fbfdf03ecb3b6d0e2a268a8998d009f2a9f699c2dcc936899257d/sphinxcontrib_htmlhelp-2.0.5.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-htmlhelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "html5lib; extra == \"test\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "2.0.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178",
+ "url": "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8",
+ "url": "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-jsmath",
+ "requires_dists": [
+ "flake8; extra == \"test\"",
+ "mypy; extra == \"test\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.5",
+ "version": "1.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182",
+ "url": "https://files.pythonhosted.org/packages/80/b3/1beac14a88654d2e5120d0143b49be5ad450b86eb1963523d8dbdcc51eb2/sphinxcontrib_qthelp-1.0.7-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6",
+ "url": "https://files.pythonhosted.org/packages/ac/29/705cd4e93e98a8473d62b5c32288e6de3f0c9660d3c97d4e80d3dbbad82b/sphinxcontrib_qthelp-1.0.7.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-qthelp",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.0.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7",
+ "url": "https://files.pythonhosted.org/packages/38/24/228bb903ea87b9e08ab33470e6102402a644127108c7117ac9c00d849f82/sphinxcontrib_serializinghtml-1.1.10-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f",
+ "url": "https://files.pythonhosted.org/packages/54/13/8dd7a7ed9c58e16e20c7f4ce8e4cb6943eb580955236d0c0d00079a73c49/sphinxcontrib_serializinghtml-1.1.10.tar.gz"
+ }
+ ],
+ "project_name": "sphinxcontrib-serializinghtml",
+ "requires_dists": [
+ "Sphinx>=5; extra == \"standalone\"",
+ "docutils-stubs; extra == \"lint\"",
+ "flake8; extra == \"lint\"",
+ "mypy; extra == \"lint\"",
+ "pytest; extra == \"test\""
+ ],
+ "requires_python": ">=3.9",
+ "version": "1.1.10"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45",
+ "url": "https://files.pythonhosted.org/packages/3a/d0/b97889ffa769e2d1fdebb632084d5e8b53fc299d43a537acee7ec0c021a3/tensorboard-2.16.2-py3-none-any.whl"
+ }
+ ],
+ "project_name": "tensorboard",
+ "requires_dists": [
+ "absl-py>=0.4",
+ "grpcio>=1.48.2",
+ "markdown>=2.6.8",
+ "numpy>=1.12.0",
+ "protobuf!=4.24.0,>=3.19.6",
+ "setuptools>=41.0.0",
+ "six>1.9",
+ "tensorboard-data-server<0.8.0,>=0.7.0",
+ "werkzeug>=1.0.1"
+ ],
+ "requires_python": ">=3.9",
+ "version": "2.16.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530",
+ "url": "https://files.pythonhosted.org/packages/73/c6/825dab04195756cf8ff2e12698f22513b3db2f64925bdd41671bfb33aaa5/tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb",
+ "url": "https://files.pythonhosted.org/packages/7a/13/e503968fefabd4c6b2650af21e110aa8466fe21432cd7c43a84577a89438/tensorboard_data_server-0.7.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60",
+ "url": "https://files.pythonhosted.org/packages/b7/85/dabeaf902892922777492e1d253bb7e1264cadce3cea932f7ff599e53fea/tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl"
+ }
+ ],
+ "project_name": "tensorboard-data-server",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.7.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "url": "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f",
+ "url": "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz"
+ }
+ ],
+ "project_name": "tomli",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "74f5b137190a6face6859d630f129289e7fae6a4d9a747430b3b5d5c6297a3ae",
+ "url": "https://download.pytorch.org/whl/cu116/torch-1.12.0+cu116-cp310-cp310-linux_x86_64.whl"
+ }
+ ],
+ "project_name": "torch",
+ "requires_dists": [
+ "typing-extensions"
+ ],
+ "requires_python": ">=3.7.0",
+ "version": "1.12.0+cu116"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475",
+ "url": "https://files.pythonhosted.org/packages/f9/de/dc04a3ea60b22624b51c703a84bbe0184abcd1d0b9bc8074b5d6b7ab90bb/typing_extensions-4.10.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb",
+ "url": "https://files.pythonhosted.org/packages/16/3a/0d26ce356c7465a19c9ea8814b960f8a36c3b0d07c323176620b7b483e44/typing_extensions-4.10.0.tar.gz"
+ }
+ ],
+ "project_name": "typing-extensions",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "4.10.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "url": "https://files.pythonhosted.org/packages/a2/73/a68704750a7679d0b6d3ad7aa8d4da8e14e151ae82e6fee774e6e0d05ec8/urllib3-2.2.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19",
+ "url": "https://files.pythonhosted.org/packages/7a/50/7fd50a27caa0652cd4caf224aa87741ea41d3265ad13f010886167cfcc79/urllib3-2.2.1.tar.gz"
+ }
+ ],
+ "project_name": "urllib3",
+ "requires_dists": [
+ "brotli>=1.0.9; platform_python_implementation == \"CPython\" and extra == \"brotli\"",
+ "brotlicffi>=0.8.0; platform_python_implementation != \"CPython\" and extra == \"brotli\"",
+ "h2<5,>=4; extra == \"h2\"",
+ "pysocks!=1.5.7,<2.0,>=1.5.6; extra == \"socks\"",
+ "zstandard>=0.18.0; extra == \"zstd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "2.2.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "023b6c72a6ef13085c9a970f6714548eca64f56d3d8698e42372764950dfd004",
+ "url": "https://files.pythonhosted.org/packages/53/7c/f3656d1ce3b916ea35f454c6a32b56342168c08baf09a0683df240ca2dce/wandb-0.16.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c317d55af93a688f3eafcdfec897f7b72da1fe1525140e076ecdaab8b09aa46e",
+ "url": "https://files.pythonhosted.org/packages/e1/75/26d5e5923cb6a619215f6eeb6508b67651d0f4a3306169c4a1c5861a3b20/wandb-0.16.5.tar.gz"
+ }
+ ],
+ "project_name": "wandb",
+ "requires_dists": [
+ "Click!=8.0.0,>=7.1",
+ "GitPython!=3.1.29,>=1.0.0",
+ "PyYAML",
+ "PyYAML>=6.0.0; extra == \"launch\"",
+ "appdirs>=1.4.3",
+ "awscli; extra == \"launch\"",
+ "azure-containerregistry; extra == \"launch\"",
+ "azure-identity; extra == \"azure\"",
+ "azure-identity; extra == \"launch\"",
+ "azure-storage-blob; extra == \"azure\"",
+ "azure-storage-blob; extra == \"launch\"",
+ "bokeh; extra == \"media\"",
+ "boto3; extra == \"aws\"",
+ "boto3; extra == \"launch\"",
+ "botocore; extra == \"launch\"",
+ "chardet; extra == \"launch\"",
+ "cloudpickle; extra == \"models\"",
+ "docker-pycreds>=0.4.0",
+ "filelock; extra == \"importers\"",
+ "google-auth; extra == \"launch\"",
+ "google-cloud-aiplatform; extra == \"launch\"",
+ "google-cloud-artifact-registry; extra == \"launch\"",
+ "google-cloud-compute; extra == \"launch\"",
+ "google-cloud-storage; extra == \"gcp\"",
+ "google-cloud-storage; extra == \"kubeflow\"",
+ "google-cloud-storage; extra == \"launch\"",
+ "httpx>=0.23.0; extra == \"async\"",
+ "iso8601; extra == \"launch\"",
+ "kubernetes-asyncio; extra == \"launch\"",
+ "kubernetes; extra == \"kubeflow\"",
+ "kubernetes; extra == \"launch\"",
+ "minio; extra == \"kubeflow\"",
+ "mlflow; extra == \"importers\"",
+ "moviepy; extra == \"media\"",
+ "nbconvert; extra == \"launch\"",
+ "nbformat; extra == \"launch\"",
+ "numpy; extra == \"media\"",
+ "optuna; extra == \"launch\"",
+ "orjson; extra == \"perf\"",
+ "pillow; extra == \"media\"",
+ "plotly>=5.18.0; extra == \"media\"",
+ "polars; extra == \"importers\"",
+ "protobuf!=4.21.0,<5,>=3.12.0; python_version < \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.15.0; python_version == \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.19.0; python_version > \"3.9\" and sys_platform == \"linux\"",
+ "protobuf!=4.21.0,<5,>=3.19.0; sys_platform != \"linux\"",
+ "psutil>=5.0.0",
+ "pydantic; extra == \"launch\"",
+ "pydantic>=2.0.0; extra == \"reports\"",
+ "rdkit-pypi; extra == \"media\"",
+ "requests<3,>=2.0.0",
+ "rich; extra == \"importers\"",
+ "sentry-sdk>=1.0.0",
+ "setproctitle",
+ "setuptools",
+ "sh; extra == \"kubeflow\"",
+ "soundfile; extra == \"media\"",
+ "sweeps>=0.2.0; extra == \"sweeps\"",
+ "tenacity; extra == \"importers\"",
+ "tomli; extra == \"launch\"",
+ "typing-extensions; extra == \"launch\"",
+ "typing-extensions; python_version < \"3.10\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "0.16.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3aac3f5da756f93030740bc235d3e09449efcf65f2f55e3602e1d851b8f48795",
+ "url": "https://files.pythonhosted.org/packages/e3/23/c9843d7550092ae7ad380611c238f44afef66f58f76c1dab7dcf313e4339/werkzeug-3.0.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e39b645a6ac92822588e7b39a692e7828724ceae0b0d702ef96701f90e70128d",
+ "url": "https://files.pythonhosted.org/packages/0f/84/00f7193d7bd88ced26cd5f868903e431054424610dc7c041bbe87d2a4d66/werkzeug-3.0.2.tar.gz"
+ }
+ ],
+ "project_name": "werkzeug",
+ "requires_dists": [
+ "MarkupSafe>=2.1.1",
+ "watchdog>=2.3; extra == \"watchdog\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.0.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad",
+ "url": "https://files.pythonhosted.org/packages/4d/05/4d79198ae568a92159de0f89e710a8d19e3fa267b719a236582eee921f4a/yarl-1.9.4-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551",
+ "url": "https://files.pythonhosted.org/packages/0b/58/dd3c69651381a57ac991dba54b20ae2da359eb4b03a661e71c451d6525c6/yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385",
+ "url": "https://files.pythonhosted.org/packages/0b/a3/7774786ec6e2dca0bb38b286f12a11af97957546e5fbcce71752a8d2cf07/yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234",
+ "url": "https://files.pythonhosted.org/packages/30/b5/215d586d5cb17ca9748d7a2d597c07147f210c0c0785257492094d083b65/yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b",
+ "url": "https://files.pythonhosted.org/packages/44/ae/fdbc9965ef69e650c3b5b04d60badef90ff0cde21a30770f0700e148b12f/yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e",
+ "url": "https://files.pythonhosted.org/packages/6c/27/cda5a927df3a894eddfee4efacdd230c2d8486e322fc672194fd651f82c5/yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c",
+ "url": "https://files.pythonhosted.org/packages/6d/a1/db0bdf8cc48515e9c02daf04ae2916fc27ce6498eca21432fc9ffa63f71b/yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863",
+ "url": "https://files.pythonhosted.org/packages/70/a9/ef6d69ce9a4e82080290bcb6db735bb8a6d6db92f2bbb92b6951bde97e7c/yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66",
+ "url": "https://files.pythonhosted.org/packages/81/c6/06938036ea48fa74521713499fba1459b0eb60af9b9afbe8e0e9e1a96c36/yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53",
+ "url": "https://files.pythonhosted.org/packages/b2/4f/796b0c73e9ff30a1047a7ee3390e157ab8424d4401b9f32a2624013a5b39/yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455",
+ "url": "https://files.pythonhosted.org/packages/c3/a0/0ade1409d184cbc9e85acd403a386a7c0563b92ff0f26d138ff9e86e48b4/yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541",
+ "url": "https://files.pythonhosted.org/packages/cc/2a/abbaf1460becba856e163f2a1274f5d34b1969d476da8e68a8fc2aeb5661/yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2",
+ "url": "https://files.pythonhosted.org/packages/d5/fc/40b85bea1f5686092ea37f472c94c023d6347266852ffd55baa01c40f596/yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392",
+ "url": "https://files.pythonhosted.org/packages/dd/90/2958ae9f2e12084d616eef95b6a48c8e6d96448add04367c20dc53a33ff2/yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf",
+ "url": "https://files.pythonhosted.org/packages/e0/ad/bedcdccbcbf91363fd425a948994f3340924145c2bc8ccb296f4a1e52c28/yarl-1.9.4.tar.gz"
+ }
+ ],
+ "project_name": "yarl",
+ "requires_dists": [
+ "idna>=2.0",
+ "multidict>=4.0",
+ "typing-extensions>=3.7.4; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.9.4"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "atomicwrites>=1.4.0",
+ "box2d-py>=2.3.5",
+ "cloudpickle~=3.0",
+ "gsutil>=4.66",
+ "gymnasium>=0.27.1",
+ "myst-parser~=2.0",
+ "numpy<1.24",
+ "onnx>=1.10",
+ "opencv-python>=3.0",
+ "protobuf>=4.0",
+ "psutil>=5.8.0",
+ "pygame>=2.1.0",
+ "pytest-benchmark==4.0.0",
+ "pytest-cov!=2.12.1,<3.1,>=2.12",
+ "pytest-platform-markers",
+ "pytest-rerunfailures",
+ "pytest-xdist<3,>=2.5",
+ "pytest~=8.0",
+ "setuptools==59.5",
+ "tensorboard>=2.8.0",
+ "torch!=1.12.0+cpu,==1.12.0+cu116",
+ "torch==1.12.0",
+ "wandb>=0.14.0"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/tools/apibook.lock b/locks/tools/apibook.lock
new file mode 100644
index 00000000..a728a158
--- /dev/null
+++ b/locks/tools/apibook.lock
@@ -0,0 +1,192 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=apibook
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "python_apibook@ git+https://github.com/tgolsson/python-apibook.git"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637",
+ "url": "https://files.pythonhosted.org/packages/d5/7c/e9fcff7623954d86bdc17782036cbf715ecab1bec4847c008557affe1ca8/docstring_parser-0.16-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e",
+ "url": "https://files.pythonhosted.org/packages/08/12/9c22a58c0b1e29271051222d8906257616da84135af9ed167c9e28f85cb3/docstring_parser-0.16.tar.gz"
+ }
+ ],
+ "project_name": "docstring-parser",
+ "requires_dists": [],
+ "requires_python": "<4.0,>=3.6",
+ "version": "0.16"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
+ "url": "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb",
+ "url": "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "markdown-it-py",
+ "requires_dists": [
+ "commonmark~=0.9; extra == \"compare\"",
+ "coverage; extra == \"testing\"",
+ "gprof2dot; extra == \"profiling\"",
+ "jupyter_sphinx; extra == \"rtd\"",
+ "linkify-it-py<3,>=1; extra == \"linkify\"",
+ "markdown~=3.4; extra == \"compare\"",
+ "mdit-py-plugins; extra == \"plugins\"",
+ "mdit-py-plugins; extra == \"rtd\"",
+ "mdurl~=0.1",
+ "mistletoe~=1.0; extra == \"compare\"",
+ "mistune~=2.0; extra == \"compare\"",
+ "myst-parser; extra == \"rtd\"",
+ "panflute~=2.3; extra == \"compare\"",
+ "pre-commit~=3.0; extra == \"code-style\"",
+ "psutil; extra == \"benchmarking\"",
+ "pytest-benchmark; extra == \"benchmarking\"",
+ "pytest-cov; extra == \"testing\"",
+ "pytest-regressions; extra == \"testing\"",
+ "pytest; extra == \"benchmarking\"",
+ "pytest; extra == \"testing\"",
+ "pyyaml; extra == \"rtd\"",
+ "sphinx-copybutton; extra == \"rtd\"",
+ "sphinx-design; extra == \"rtd\"",
+ "sphinx; extra == \"rtd\"",
+ "sphinx_book_theme; extra == \"rtd\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8",
+ "url": "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba",
+ "url": "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz"
+ }
+ ],
+ "project_name": "mdurl",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "0.1.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c",
+ "url": "https://files.pythonhosted.org/packages/97/9c/372fef8377a6e340b1704768d20daaded98bf13282b5327beb2e2fe2c7ef/pygments-2.17.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367",
+ "url": "https://files.pythonhosted.org/packages/55/59/8bccf4157baf25e4aa5a0bb7fa3ba8600907de105ebc22b0c78cfbf6f565/pygments-2.17.2.tar.gz"
+ }
+ ],
+ "project_name": "pygments",
+ "requires_dists": [
+ "colorama>=0.4.6; extra == \"windows-terminal\"",
+ "importlib-metadata; python_version < \"3.8\" and extra == \"plugins\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.17.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4c48ba866bda602efbea398d1e279b1eaf8df243946af6fe77b6e818cdef891a",
+ "url": "git+https://github.com/tgolsson/python-apibook.git"
+ }
+ ],
+ "project_name": "python-apibook",
+ "requires_dists": [
+ "docstring_parser",
+ "rich"
+ ],
+ "requires_python": null,
+ "version": "0.1.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222",
+ "url": "https://files.pythonhosted.org/packages/87/67/a37f6214d0e9fe57f6ae54b2956d550ca8365857f42a1ce0392bb21d9410/rich-13.7.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432",
+ "url": "https://files.pythonhosted.org/packages/b3/01/c954e134dc440ab5f96952fe52b4fdc64225530320a910473c1fe270d9aa/rich-13.7.1.tar.gz"
+ }
+ ],
+ "project_name": "rich",
+ "requires_dists": [
+ "ipywidgets<9,>=7.5.1; extra == \"jupyter\"",
+ "markdown-it-py>=2.2.0",
+ "pygments<3.0.0,>=2.13.0",
+ "typing-extensions<5.0,>=4.0.0; python_version < \"3.9\""
+ ],
+ "requires_python": ">=3.7.0",
+ "version": "13.7.1"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "python_apibook"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/tools/bandit.lock b/locks/tools/bandit.lock
new file mode 100644
index 00000000..d4a1a531
--- /dev/null
+++ b/locks/tools/bandit.lock
@@ -0,0 +1,42 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=bandit
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/tools/black.lock b/locks/tools/black.lock
new file mode 100644
index 00000000..675409ea
--- /dev/null
+++ b/locks/tools/black.lock
@@ -0,0 +1,238 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=black
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "black<24,>=22.6.0"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "78baad24af0f033958cad29731e27363183e140962595def56423e626f4bee3e",
+ "url": "https://files.pythonhosted.org/packages/7b/14/4da7b12a9abc43a601c215cb5a3d176734578da109f0dbf0a832ed78be09/black-23.12.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba",
+ "url": "https://files.pythonhosted.org/packages/11/92/522a4f1e4b2b8da62e4ec0cb8acf2d257e6d39b31f4214f0fd94d2eeb5bd/black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a920b569dc6b3472513ba6ddea21f440d4b4c699494d2e972a1753cdc25df7b0",
+ "url": "https://files.pythonhosted.org/packages/a4/dc/af67d8281e9a24f73d24b060f3f03f6d9ad6be259b3c6acef2845e17d09c/black-23.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2",
+ "url": "https://files.pythonhosted.org/packages/fb/58/677da52d845b59505a8a787ff22eff9cfd9046b5789aa2bd387b236db5c5/black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4ce3ef14ebe8d9509188014d96af1c456a910d5b5cbf434a09fef7e024b3d0d5",
+ "url": "https://files.pythonhosted.org/packages/fd/f4/a57cde4b60da0e249073009f4a9087e9e0a955deae78d3c2a493208d0c5c/black-23.12.1.tar.gz"
+ }
+ ],
+ "project_name": "black",
+ "requires_dists": [
+ "aiohttp!=3.9.0,>=3.7.4; (sys_platform == \"win32\" and implementation_name == \"pypy\") and extra == \"d\"",
+ "aiohttp>=3.7.4; (sys_platform != \"win32\" or implementation_name != \"pypy\") and extra == \"d\"",
+ "click>=8.0.0",
+ "colorama>=0.4.3; extra == \"colorama\"",
+ "ipython>=7.8.0; extra == \"jupyter\"",
+ "mypy-extensions>=0.4.3",
+ "packaging>=22.0",
+ "pathspec>=0.9.0",
+ "platformdirs>=2",
+ "tokenize-rt>=3.2.0; extra == \"jupyter\"",
+ "tomli>=1.1.0; python_version < \"3.11\"",
+ "typing-extensions>=4.0.1; python_version < \"3.11\"",
+ "uvloop>=0.15.2; extra == \"uvloop\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "23.12.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "url": "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de",
+ "url": "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz"
+ }
+ ],
+ "project_name": "click",
+ "requires_dists": [
+ "colorama; platform_system == \"Windows\"",
+ "importlib-metadata; python_version < \"3.8\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "8.1.7"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
+ "url": "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782",
+ "url": "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz"
+ }
+ ],
+ "project_name": "mypy-extensions",
+ "requires_dists": [],
+ "requires_python": ">=3.5",
+ "version": "1.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9",
+ "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz"
+ }
+ ],
+ "project_name": "packaging",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "24.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
+ "url": "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712",
+ "url": "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz"
+ }
+ ],
+ "project_name": "pathspec",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "0.12.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068",
+ "url": "https://files.pythonhosted.org/packages/55/72/4898c44ee9ea6f43396fbc23d9bfaf3d06e01b83698bdf2e4c919deceb7c/platformdirs-4.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768",
+ "url": "https://files.pythonhosted.org/packages/96/dc/c1d911bf5bb0fdc58cc05010e9f3efe3b67970cef779ba7fbc3183b987a8/platformdirs-4.2.0.tar.gz"
+ }
+ ],
+ "project_name": "platformdirs",
+ "requires_dists": [
+ "appdirs==1.4.4; extra == \"test\"",
+ "covdefaults>=2.3; extra == \"test\"",
+ "furo>=2023.9.10; extra == \"docs\"",
+ "proselint>=0.13; extra == \"docs\"",
+ "pytest-cov>=4.1; extra == \"test\"",
+ "pytest-mock>=3.12; extra == \"test\"",
+ "pytest>=7.4.3; extra == \"test\"",
+ "sphinx-autodoc-typehints>=1.25.2; extra == \"docs\"",
+ "sphinx>=7.2.6; extra == \"docs\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "4.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "url": "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f",
+ "url": "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz"
+ }
+ ],
+ "project_name": "tomli",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475",
+ "url": "https://files.pythonhosted.org/packages/f9/de/dc04a3ea60b22624b51c703a84bbe0184abcd1d0b9bc8074b5d6b7ab90bb/typing_extensions-4.10.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb",
+ "url": "https://files.pythonhosted.org/packages/16/3a/0d26ce356c7465a19c9ea8814b960f8a36c3b0d07c323176620b7b483e44/typing_extensions-4.10.0.tar.gz"
+ }
+ ],
+ "project_name": "typing-extensions",
+ "requires_dists": [],
+ "requires_python": ">=3.8",
+ "version": "4.10.0"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "black<24,>=22.6.0"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/tools/docformatter.lock b/locks/tools/docformatter.lock
new file mode 100644
index 00000000..1bcb73cf
--- /dev/null
+++ b/locks/tools/docformatter.lock
@@ -0,0 +1,188 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=docformatter
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "docformatter[tomli]"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "url": "https://files.pythonhosted.org/packages/28/76/e6222113b83e3622caa4bb41032d0b1bf785250607392e1b778aca0b8a7d/charset_normalizer-3.3.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "url": "https://files.pythonhosted.org/packages/05/8c/eb854996d5fef5e4f33ad56927ad053d04dc820e4a3d39023f35cad72617/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "url": "https://files.pythonhosted.org/packages/2b/61/095a0aa1a84d1481998b534177c8566fdc50bb1233ea9a0478cd3cc075bd/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "url": "https://files.pythonhosted.org/packages/33/c3/3b96a435c5109dd5b6adc8a59ba1d678b302a97938f032e3770cc84cd354/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "url": "https://files.pythonhosted.org/packages/3f/ba/3f5e7be00b215fa10e13d64b1f6237eb6ebea66676a41b2bcdd09fe74323/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "url": "https://files.pythonhosted.org/packages/43/05/3bf613e719efe68fb3a77f9c536a389f35b95d75424b96b426a47a45ef1d/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "url": "https://files.pythonhosted.org/packages/46/6a/d5c26c41c49b546860cc1acabdddf48b0b3fb2685f4f5617ac59261b44ae/charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "url": "https://files.pythonhosted.org/packages/58/78/a0bc646900994df12e07b4ae5c713f2b3e5998f58b9d3720cce2aa45652f/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "url": "https://files.pythonhosted.org/packages/63/09/c1bc53dab74b1816a00d8d030de5bf98f724c52c1635e07681d312f20be8/charset-normalizer-3.3.2.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "url": "https://files.pythonhosted.org/packages/a8/31/47d018ef89f95b8aded95c589a77c072c55e94b50a41aa99c0a2008a45a4/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "url": "https://files.pythonhosted.org/packages/b8/60/e2f67915a51be59d4539ed189eb0a2b0d292bf79270410746becb32bc2c3/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "url": "https://files.pythonhosted.org/packages/cc/94/f7cf5e5134175de79ad2059edf2adce18e0685ebdb9227ff0139975d0e93/charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "url": "https://files.pythonhosted.org/packages/da/f1/3702ba2a7470666a62fd81c58a4c40be00670e5006a67f4d626e57f013ae/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "url": "https://files.pythonhosted.org/packages/eb/5c/97d97248af4920bc68687d9c3b3c0f47c910e21a8ff80af4565a576bd2f0/charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "url": "https://files.pythonhosted.org/packages/f6/93/bb6cbeec3bf9da9b2eba458c15966658d1daa8b982c642f81c93ad9b40e1/charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl"
+ }
+ ],
+ "project_name": "charset-normalizer",
+ "requires_dists": [],
+ "requires_python": ">=3.7.0",
+ "version": "3.3.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a24f5545ed1f30af00d106f5d85dc2fce4959295687c24c8f39f5263afaf9186",
+ "url": "https://files.pythonhosted.org/packages/8b/95/568a2fca29df365b82012b09b64964a05f4f20ac83c2137b262f3fa3188f/docformatter-1.7.5-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ffed3da0daffa2e77f80ccba4f0e50bfa2755e1c10e130102571c890a61b246e",
+ "url": "https://files.pythonhosted.org/packages/f4/44/aba2c40cf796121b35835ea8c00bc5d93f2f70730eca53b36b8bbbfaefe1/docformatter-1.7.5.tar.gz"
+ }
+ ],
+ "project_name": "docformatter",
+ "requires_dists": [
+ "charset_normalizer<4.0.0,>=3.0.0",
+ "tomli<3.0.0,>=2.0.0; python_version < \"3.11\" and extra == \"tomli\"",
+ "untokenize<0.2.0,>=0.1.1"
+ ],
+ "requires_python": "<4.0,>=3.7",
+ "version": "1.7.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "url": "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f",
+ "url": "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz"
+ }
+ ],
+ "project_name": "tomli",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3865dbbbb8efb4bb5eaa72f1be7f3e0be00ea8b7f125c69cbd1f5fda926f37a2",
+ "url": "https://files.pythonhosted.org/packages/f7/46/e7cea8159199096e1df52da20a57a6665da80c37fb8aeb848a3e47442c32/untokenize-0.1.1.tar.gz"
+ }
+ ],
+ "project_name": "untokenize",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "0.1.1"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "docformatter[tomli]"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/tools/ipython.lock b/locks/tools/ipython.lock
new file mode 100644
index 00000000..a8ca2a8a
--- /dev/null
+++ b/locks/tools/ipython.lock
@@ -0,0 +1,475 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=ipython
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "ipython<8,>=7.27.0"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c",
+ "url": "https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee",
+ "url": "https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz"
+ }
+ ],
+ "project_name": "appnope",
+ "requires_dists": [],
+ "requires_python": ">=3.6",
+ "version": "0.1.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255",
+ "url": "https://files.pythonhosted.org/packages/4c/1c/ff6546b6c12603d8dd1070aa3c3d273ad4c07f5771689a7b69a550e8c951/backcall-0.2.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e",
+ "url": "https://files.pythonhosted.org/packages/a2/40/764a663805d84deee23043e1426a9175567db89c8b3287b5c2ad9f71aa93/backcall-0.2.0.tar.gz"
+ }
+ ],
+ "project_name": "backcall",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "0.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186",
+ "url": "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330",
+ "url": "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz"
+ }
+ ],
+ "project_name": "decorator",
+ "requires_dists": [],
+ "requires_python": ">=3.5",
+ "version": "5.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c175d2440a1caff76116eb719d40538fbb316e214eda85c5515c303aacbfb23e",
+ "url": "https://files.pythonhosted.org/packages/7c/6a/1f1365f4bf9fcb349fcaa5b61edfcefa721aa13ff37c5631296b12fab8e5/ipython-7.34.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "af3bdb46aa292bce5615b1b2ebc76c2080c5f77f54bda2ec72461317273e7cd6",
+ "url": "https://files.pythonhosted.org/packages/db/6c/3fcf0b8ee46656796099ac4b7b72497af5f090da3e43fd305f2a24c73915/ipython-7.34.0.tar.gz"
+ }
+ ],
+ "project_name": "ipython",
+ "requires_dists": [
+ "Sphinx>=1.3; extra == \"all\"",
+ "Sphinx>=1.3; extra == \"doc\"",
+ "appnope; sys_platform == \"darwin\"",
+ "backcall",
+ "colorama; sys_platform == \"win32\"",
+ "decorator",
+ "ipykernel; extra == \"all\"",
+ "ipykernel; extra == \"kernel\"",
+ "ipykernel; extra == \"test\"",
+ "ipyparallel; extra == \"all\"",
+ "ipyparallel; extra == \"parallel\"",
+ "ipywidgets; extra == \"all\"",
+ "ipywidgets; extra == \"notebook\"",
+ "jedi>=0.16",
+ "matplotlib-inline",
+ "nbconvert; extra == \"all\"",
+ "nbconvert; extra == \"nbconvert\"",
+ "nbformat; extra == \"all\"",
+ "nbformat; extra == \"nbformat\"",
+ "nbformat; extra == \"test\"",
+ "nose>=0.10.1; extra == \"all\"",
+ "nose>=0.10.1; extra == \"test\"",
+ "notebook; extra == \"all\"",
+ "notebook; extra == \"notebook\"",
+ "numpy>=1.17; extra == \"all\"",
+ "numpy>=1.17; extra == \"test\"",
+ "pexpect>4.3; sys_platform != \"win32\"",
+ "pickleshare",
+ "prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0",
+ "pygments",
+ "pygments; extra == \"all\"",
+ "pygments; extra == \"test\"",
+ "qtconsole; extra == \"all\"",
+ "qtconsole; extra == \"qtconsole\"",
+ "requests; extra == \"all\"",
+ "requests; extra == \"test\"",
+ "setuptools>=18.5",
+ "testpath; extra == \"all\"",
+ "testpath; extra == \"test\"",
+ "traitlets>=4.2"
+ ],
+ "requires_python": ">=3.7",
+ "version": "7.34.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0",
+ "url": "https://files.pythonhosted.org/packages/20/9f/bc63f0f0737ad7a60800bfd472a4836661adae21f9c2535f3957b1e54ceb/jedi-0.19.1-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd",
+ "url": "https://files.pythonhosted.org/packages/d6/99/99b493cec4bf43176b678de30f81ed003fd6a647a301b9c927280c600f0a/jedi-0.19.1.tar.gz"
+ }
+ ],
+ "project_name": "jedi",
+ "requires_dists": [
+ "Django; extra == \"testing\"",
+ "Jinja2==2.11.3; extra == \"docs\"",
+ "MarkupSafe==1.1.1; extra == \"docs\"",
+ "Pygments==2.8.1; extra == \"docs\"",
+ "alabaster==0.7.12; extra == \"docs\"",
+ "attrs; extra == \"testing\"",
+ "babel==2.9.1; extra == \"docs\"",
+ "chardet==4.0.0; extra == \"docs\"",
+ "colorama; extra == \"testing\"",
+ "commonmark==0.8.1; extra == \"docs\"",
+ "docopt; extra == \"testing\"",
+ "docutils==0.17.1; extra == \"docs\"",
+ "flake8==5.0.4; extra == \"qa\"",
+ "future==0.18.2; extra == \"docs\"",
+ "idna==2.10; extra == \"docs\"",
+ "imagesize==1.2.0; extra == \"docs\"",
+ "mock==1.0.1; extra == \"docs\"",
+ "mypy==0.971; extra == \"qa\"",
+ "packaging==20.9; extra == \"docs\"",
+ "parso<0.9.0,>=0.8.3",
+ "pyparsing==2.4.7; extra == \"docs\"",
+ "pytest<7.0.0; extra == \"testing\"",
+ "pytz==2021.1; extra == \"docs\"",
+ "readthedocs-sphinx-ext==2.1.4; extra == \"docs\"",
+ "recommonmark==0.5.0; extra == \"docs\"",
+ "requests==2.25.1; extra == \"docs\"",
+ "six==1.15.0; extra == \"docs\"",
+ "snowballstemmer==2.1.0; extra == \"docs\"",
+ "sphinx-rtd-theme==0.4.3; extra == \"docs\"",
+ "sphinx==1.8.5; extra == \"docs\"",
+ "sphinxcontrib-serializinghtml==1.1.4; extra == \"docs\"",
+ "sphinxcontrib-websupport==1.2.4; extra == \"docs\"",
+ "types-setuptools==67.2.0.1; extra == \"qa\"",
+ "urllib3==1.26.4; extra == \"docs\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "0.19.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311",
+ "url": "https://files.pythonhosted.org/packages/f2/51/c34d7a1d528efaae3d8ddb18ef45a41f284eacf9e514523b191b7d0872cc/matplotlib_inline-0.1.6-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304",
+ "url": "https://files.pythonhosted.org/packages/d9/50/3af8c0362f26108e54d58c7f38784a3bdae6b9a450bab48ee8482d737f44/matplotlib-inline-0.1.6.tar.gz"
+ }
+ ],
+ "project_name": "matplotlib-inline",
+ "requires_dists": [
+ "traitlets"
+ ],
+ "requires_python": ">=3.5",
+ "version": "0.1.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75",
+ "url": "https://files.pythonhosted.org/packages/05/63/8011bd08a4111858f79d2b09aad86638490d62fbf881c44e434a6dfca87b/parso-0.8.3-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0",
+ "url": "https://files.pythonhosted.org/packages/a2/0e/41f0cca4b85a6ea74d66d2226a7cda8e41206a624f5b330b958ef48e2e52/parso-0.8.3.tar.gz"
+ }
+ ],
+ "project_name": "parso",
+ "requires_dists": [
+ "docopt; extra == \"testing\"",
+ "flake8==3.8.3; extra == \"qa\"",
+ "mypy==0.782; extra == \"qa\"",
+ "pytest<6.0.0; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "0.8.3"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523",
+ "url": "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f",
+ "url": "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz"
+ }
+ ],
+ "project_name": "pexpect",
+ "requires_dists": [
+ "ptyprocess>=0.5"
+ ],
+ "requires_python": null,
+ "version": "4.9.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56",
+ "url": "https://files.pythonhosted.org/packages/9a/41/220f49aaea88bc6fa6cba8d05ecf24676326156c23b991e80b3f2fc24c77/pickleshare-0.7.5-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca",
+ "url": "https://files.pythonhosted.org/packages/d8/b6/df3c1c9b616e9c0edbc4fbab6ddd09df9535849c64ba51fcb6531c32d4d8/pickleshare-0.7.5.tar.gz"
+ }
+ ],
+ "project_name": "pickleshare",
+ "requires_dists": [
+ "pathlib2; python_version in \"2.6 2.7 3.2 3.3\""
+ ],
+ "requires_python": null,
+ "version": "0.7.5"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6",
+ "url": "https://files.pythonhosted.org/packages/ee/fd/ca7bf3869e7caa7a037e23078539467b433a4e01eebd93f77180ab927766/prompt_toolkit-3.0.43-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d",
+ "url": "https://files.pythonhosted.org/packages/cc/c6/25b6a3d5cd295304de1e32c9edbcf319a52e965b339629d37d42bb7126ca/prompt_toolkit-3.0.43.tar.gz"
+ }
+ ],
+ "project_name": "prompt-toolkit",
+ "requires_dists": [
+ "wcwidth"
+ ],
+ "requires_python": ">=3.7.0",
+ "version": "3.0.43"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35",
+ "url": "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220",
+ "url": "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz"
+ }
+ ],
+ "project_name": "ptyprocess",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "0.7.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c",
+ "url": "https://files.pythonhosted.org/packages/97/9c/372fef8377a6e340b1704768d20daaded98bf13282b5327beb2e2fe2c7ef/pygments-2.17.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367",
+ "url": "https://files.pythonhosted.org/packages/55/59/8bccf4157baf25e4aa5a0bb7fa3ba8600907de105ebc22b0c78cfbf6f565/pygments-2.17.2.tar.gz"
+ }
+ ],
+ "project_name": "pygments",
+ "requires_dists": [
+ "colorama>=0.4.6; extra == \"windows-terminal\"",
+ "importlib-metadata; python_version < \"3.8\" and extra == \"plugins\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.17.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c",
+ "url": "https://files.pythonhosted.org/packages/92/e1/1c8bb3420105e70bdf357d57dd5567202b4ef8d27f810e98bb962d950834/setuptools-69.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e",
+ "url": "https://files.pythonhosted.org/packages/4d/5b/dc575711b6b8f2f866131a40d053e30e962e633b332acf7cd2c24843d83d/setuptools-69.2.0.tar.gz"
+ }
+ ],
+ "project_name": "setuptools",
+ "requires_dists": [
+ "build[virtualenv]; extra == \"testing\"",
+ "build[virtualenv]>=1.0.3; extra == \"testing-integration\"",
+ "filelock>=3.4.0; extra == \"testing\"",
+ "filelock>=3.4.0; extra == \"testing-integration\"",
+ "furo; extra == \"docs\"",
+ "importlib-metadata; extra == \"testing\"",
+ "ini2toml[lite]>=0.9; extra == \"testing\"",
+ "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"testing\"",
+ "jaraco.envs>=2.2; extra == \"testing\"",
+ "jaraco.envs>=2.2; extra == \"testing-integration\"",
+ "jaraco.packaging>=9.3; extra == \"docs\"",
+ "jaraco.path>=3.2.0; extra == \"testing\"",
+ "jaraco.path>=3.2.0; extra == \"testing-integration\"",
+ "jaraco.tidelift>=1.4; extra == \"docs\"",
+ "mypy==1.9; extra == \"testing\"",
+ "packaging>=23.2; extra == \"testing\"",
+ "packaging>=23.2; extra == \"testing-integration\"",
+ "pip>=19.1; extra == \"testing\"",
+ "pygments-github-lexers==0.0.5; extra == \"docs\"",
+ "pytest-checkdocs>=2.4; extra == \"testing\"",
+ "pytest-cov; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-enabler; extra == \"testing-integration\"",
+ "pytest-enabler>=2.2; extra == \"testing\"",
+ "pytest-home>=0.5; extra == \"testing\"",
+ "pytest-mypy>=0.9.1; platform_python_implementation != \"PyPy\" and extra == \"testing\"",
+ "pytest-perf; sys_platform != \"cygwin\" and extra == \"testing\"",
+ "pytest-ruff>=0.2.1; sys_platform != \"cygwin\" and extra == \"testing\"",
+ "pytest-timeout; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing-integration\"",
+ "pytest-xdist>=3; extra == \"testing\"",
+ "pytest; extra == \"testing-integration\"",
+ "pytest>=6; extra == \"testing\"",
+ "rst.linker>=1.9; extra == \"docs\"",
+ "sphinx-favicon; extra == \"docs\"",
+ "sphinx-inline-tabs; extra == \"docs\"",
+ "sphinx-lint; extra == \"docs\"",
+ "sphinx-notfound-page<2,>=1; extra == \"docs\"",
+ "sphinx-reredirects; extra == \"docs\"",
+ "sphinx<7.2.5; extra == \"docs\"",
+ "sphinx>=3.5; extra == \"docs\"",
+ "sphinxcontrib-towncrier; extra == \"docs\"",
+ "tomli-w>=1.0.0; extra == \"testing\"",
+ "tomli; extra == \"testing\"",
+ "tomli; extra == \"testing-integration\"",
+ "virtualenv>=13.0.0; extra == \"testing\"",
+ "virtualenv>=13.0.0; extra == \"testing-integration\"",
+ "wheel; extra == \"testing\"",
+ "wheel; extra == \"testing-integration\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "69.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fcdf85684a772ddeba87db2f398ce00b40ff550d1528c03c14dbf6a02003cd80",
+ "url": "https://files.pythonhosted.org/packages/7c/c4/366a09036c07f46eb8c9b2af39c97f502ef24f11f2a6e4d763655d9f2708/traitlets-5.14.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8cdd83c040dab7d1dee822678e5f5d100b514f7b72b01615b26fc5718916fdf9",
+ "url": "https://files.pythonhosted.org/packages/4f/97/d957b3a5f6da825cbbb6a02e584bcab769ea2c2a9ad67a9cc25b4bbafb30/traitlets-5.14.2.tar.gz"
+ }
+ ],
+ "project_name": "traitlets",
+ "requires_dists": [
+ "argcomplete>=3.0.3; extra == \"test\"",
+ "mypy>=1.7.0; extra == \"test\"",
+ "myst-parser; extra == \"docs\"",
+ "pre-commit; extra == \"test\"",
+ "pydata-sphinx-theme; extra == \"docs\"",
+ "pytest-mock; extra == \"test\"",
+ "pytest-mypy-testing; extra == \"test\"",
+ "pytest<8.1,>=7.0; extra == \"test\"",
+ "sphinx; extra == \"docs\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "5.14.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859",
+ "url": "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5",
+ "url": "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz"
+ }
+ ],
+ "project_name": "wcwidth",
+ "requires_dists": [
+ "backports.functools-lru-cache>=1.2.1; python_version < \"3.2\""
+ ],
+ "requires_python": null,
+ "version": "0.2.13"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "ipython<8,>=7.27.0"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/tools/isort.lock b/locks/tools/isort.lock
new file mode 100644
index 00000000..9a840de2
--- /dev/null
+++ b/locks/tools/isort.lock
@@ -0,0 +1,95 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=isort
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "isort[colors,pyproject]<6.0,>=5.9.3"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6",
+ "url": "https://download.pytorch.org/whl/colorama-0.4.6-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6",
+ "url": "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44",
+ "url": "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz"
+ }
+ ],
+ "project_name": "colorama",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7",
+ "version": "0.4.6"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6",
+ "url": "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109",
+ "url": "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz"
+ }
+ ],
+ "project_name": "isort",
+ "requires_dists": [
+ "colorama>=0.4.6; extra == \"colors\""
+ ],
+ "requires_python": ">=3.8.0",
+ "version": "5.13.2"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "isort[colors,pyproject]<6.0,>=5.9.3"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/locks/tools/pytest.lock b/locks/tools/pytest.lock
new file mode 100644
index 00000000..bfc8b483
--- /dev/null
+++ b/locks/tools/pytest.lock
@@ -0,0 +1,452 @@
+// This lockfile was autogenerated by Pants. To regenerate, run:
+//
+// pants generate-lockfiles --resolve=pytest
+//
+// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
+// {
+// "version": 3,
+// "valid_for_interpreter_constraints": [
+// "CPython<3.11,>=3.10"
+// ],
+// "generated_with_requirements": [
+// "pytest-benchmark==4.0.0",
+// "pytest-cov!=2.12.1,<3.1,>=2.12",
+// "pytest-platform-markers",
+// "pytest-rerunfailures",
+// "pytest-xdist<3,>=2.5",
+// "pytest~=8.0"
+// ],
+// "manylinux": "manylinux2014",
+// "requirement_constraints": [],
+// "only_binary": [],
+// "no_binary": []
+// }
+// --- END PANTS LOCKFILE METADATA ---
+
+{
+ "allow_builds": true,
+ "allow_prereleases": false,
+ "allow_wheels": true,
+ "build_isolation": true,
+ "constraints": [],
+ "locked_resolves": [
+ {
+ "locked_requirements": [
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677",
+ "url": "https://files.pythonhosted.org/packages/99/15/dbcb5d0a22bf5357cf456dfd16f9ceb89c54544d6201d53bc77c75077a8e/coverage-7.4.4-pp38.pp39.pp310-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8",
+ "url": "https://files.pythonhosted.org/packages/07/58/0e076ea3a59dbfb3e981577c4e5572b432345cedd921e83006a0215b9afe/coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf",
+ "url": "https://files.pythonhosted.org/packages/10/1e/f676e1655d10bf59a6cb8de0601b7ea3c252c764782a3c2263f6d6bbcf28/coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2",
+ "url": "https://files.pythonhosted.org/packages/45/f4/10bf725621aeec5cc2fa1bc73021f5ba1ac01bcbf2c7278d8d34e1df6457/coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87",
+ "url": "https://files.pythonhosted.org/packages/50/32/829d0e709fa699dc4e498fa77a561d25fc57954ba32466279952b98f0836/coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c",
+ "url": "https://files.pythonhosted.org/packages/7e/60/62a8c190d20bf605c89a000fd6d41e3563b5792e7275b12eeefe6803b473/coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7",
+ "url": "https://files.pythonhosted.org/packages/91/4e/feff6d115dcc239e5850570ca2ea27a243c8a69596e7f1dabe54a6102d89/coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2",
+ "url": "https://files.pythonhosted.org/packages/93/41/e6e9dbb322f3c93aba7bc519b9c62846d923d7b57398bdd7eda3f0acdd11/coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49",
+ "url": "https://files.pythonhosted.org/packages/bf/d5/f809d8b630cf4c11fe490e20037a343d12a74ec2783c6cdb5aee725e7137/coverage-7.4.4.tar.gz"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562",
+ "url": "https://files.pythonhosted.org/packages/d3/6d/72b9f5035c50a14bc5c5fda0c28ac16c426e957a7a3debe02906b614fc4f/coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl"
+ }
+ ],
+ "project_name": "coverage",
+ "requires_dists": [
+ "tomli; python_full_version <= \"3.11.0a6\" and extra == \"toml\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "7.4.4"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14",
+ "url": "https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68",
+ "url": "https://files.pythonhosted.org/packages/8e/1c/beef724eaf5b01bb44b6338c8c3494eff7cab376fab4904cfbbc3585dc79/exceptiongroup-1.2.0.tar.gz"
+ }
+ ],
+ "project_name": "exceptiongroup",
+ "requires_dists": [
+ "pytest>=6; extra == \"test\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.2.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41",
+ "url": "https://files.pythonhosted.org/packages/e8/9c/a079946da30fac4924d92dbc617e5367d454954494cf1e71567bcc4e00ee/execnet-2.0.2-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af",
+ "url": "https://files.pythonhosted.org/packages/e4/c8/d382dc7a1e68a165f4a4ab612a08b20d8534a7d20cc590630b734ca0c54b/execnet-2.0.2.tar.gz"
+ }
+ ],
+ "project_name": "execnet",
+ "requires_dists": [
+ "hatch; extra == \"testing\"",
+ "pre-commit; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"testing\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "2.0.2"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374",
+ "url": "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
+ "url": "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz"
+ }
+ ],
+ "project_name": "iniconfig",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9",
+ "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz"
+ }
+ ],
+ "project_name": "packaging",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "24.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
+ "url": "https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be",
+ "url": "https://files.pythonhosted.org/packages/54/c6/43f9d44d92aed815e781ca25ba8c174257e27253a94630d21be8725a2b59/pluggy-1.4.0.tar.gz"
+ }
+ ],
+ "project_name": "pluggy",
+ "requires_dists": [
+ "pre-commit; extra == \"dev\"",
+ "pytest-benchmark; extra == \"testing\"",
+ "pytest; extra == \"testing\"",
+ "tox; extra == \"dev\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "1.4.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378",
+ "url": "https://files.pythonhosted.org/packages/f6/f0/10642828a8dfb741e5f3fbaac830550a518a775c7fff6f04a007259b0548/py-1.11.0-py2.py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719",
+ "url": "https://files.pythonhosted.org/packages/98/ff/fec109ceb715d2a6b4c4a85a61af3b40c723a961e8828319fbcb15b868dc/py-1.11.0.tar.gz"
+ }
+ ],
+ "project_name": "py",
+ "requires_dists": [],
+ "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7",
+ "version": "1.11.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5",
+ "url": "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690",
+ "url": "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz"
+ }
+ ],
+ "project_name": "py-cpuinfo",
+ "requires_dists": [],
+ "requires_python": null,
+ "version": "9.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7",
+ "url": "https://files.pythonhosted.org/packages/4d/7e/c79cecfdb6aa85c6c2e3cf63afc56d0f165f24f5c66c03c695c4d9b84756/pytest-8.1.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044",
+ "url": "https://files.pythonhosted.org/packages/30/b7/7d44bbc04c531dcc753056920e0988032e5871ac674b5a84cb979de6e7af/pytest-8.1.1.tar.gz"
+ }
+ ],
+ "project_name": "pytest",
+ "requires_dists": [
+ "argcomplete; extra == \"testing\"",
+ "attrs>=19.2; extra == \"testing\"",
+ "colorama; sys_platform == \"win32\"",
+ "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"",
+ "hypothesis>=3.56; extra == \"testing\"",
+ "iniconfig",
+ "mock; extra == \"testing\"",
+ "packaging",
+ "pluggy<2.0,>=1.4",
+ "pygments>=2.7.2; extra == \"testing\"",
+ "requests; extra == \"testing\"",
+ "setuptools; extra == \"testing\"",
+ "tomli>=1; python_version < \"3.11\"",
+ "xmlschema; extra == \"testing\""
+ ],
+ "requires_python": ">=3.8",
+ "version": "8.1.1"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6",
+ "url": "https://files.pythonhosted.org/packages/4d/a1/3b70862b5b3f830f0422844f25a823d0470739d994466be9dbbbb414d85a/pytest_benchmark-4.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1",
+ "url": "https://files.pythonhosted.org/packages/28/08/e6b0067efa9a1f2a1eb3043ecd8a0c48bfeb60d3255006dcc829d72d5da2/pytest-benchmark-4.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-benchmark",
+ "requires_dists": [
+ "aspectlib; extra == \"aspect\"",
+ "elasticsearch; extra == \"elasticsearch\"",
+ "pathlib2; python_version < \"3.4\"",
+ "py-cpuinfo",
+ "pygal; extra == \"histogram\"",
+ "pygaljs; extra == \"histogram\"",
+ "pytest>=3.8",
+ "statistics; python_version < \"3.4\""
+ ],
+ "requires_python": ">=3.7",
+ "version": "4.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6",
+ "url": "https://files.pythonhosted.org/packages/20/49/b3e0edec68d81846f519c602ac38af9db86e1e71275528b3e814ae236063/pytest_cov-3.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470",
+ "url": "https://files.pythonhosted.org/packages/61/41/e046526849972555928a6d31c2068410e47a31fb5ab0a77f868596811329/pytest-cov-3.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-cov",
+ "requires_dists": [
+ "coverage[toml]>=5.2.1",
+ "fields; extra == \"testing\"",
+ "hunter; extra == \"testing\"",
+ "process-tests; extra == \"testing\"",
+ "pytest-xdist; extra == \"testing\"",
+ "pytest>=4.6",
+ "six; extra == \"testing\"",
+ "virtualenv; extra == \"testing\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "3.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "810958f66a91afb1a1e2ae83089d8dc1cd2437ac96b12963042fbb9fb4d16af0",
+ "url": "https://files.pythonhosted.org/packages/f4/af/9c0bda43e486a3c9bf1e0f876d0f241bc3f229d7d65d09331a0868db9629/pytest_forked-1.6.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4dafd46a9a600f65d822b8f605133ecf5b3e1941ebb3588e943b4e3eb71a5a3f",
+ "url": "https://files.pythonhosted.org/packages/8c/c9/93ad2ba2413057ee694884b88cf7467a46c50c438977720aeac26e73fdb7/pytest-forked-1.6.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-forked",
+ "requires_dists": [
+ "py",
+ "pytest>=3.10"
+ ],
+ "requires_python": ">=3.7",
+ "version": "1.6.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "b05cb0bcd51a7cd0375bfbeeb3eaeb01fc85665e45b21fc9494a8a19137f4d32",
+ "url": "https://files.pythonhosted.org/packages/c5/d1/2ef73ee137add043df444fddf1c851b8ca70ab9c7b7f18e18c4c244fec6d/pytest_platform_markers-1.0.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "07ea92669114ba8083b6653995b5a9ab14d57ca16307fd2af22d6f7d295160e4",
+ "url": "https://files.pythonhosted.org/packages/b3/e7/174a22a8cb4cf4b64456cd799f472bb90206f1ce8d537edbc1d9659689a3/pytest-platform-markers-1.0.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-platform-markers",
+ "requires_dists": [
+ "pytest>=3.6.0"
+ ],
+ "requires_python": null,
+ "version": "1.0.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "4197bdd2eaeffdbf50b5ea6e7236f47ff0e44d1def8dae08e409f536d84e7b32",
+ "url": "https://files.pythonhosted.org/packages/dc/e7/e75bd157331aecc190f5f8950d7ea3d2cf56c3c57fb44da70e60b221133f/pytest_rerunfailures-14.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4a400bcbcd3c7a4ad151ab8afac123d90eca3abe27f98725dc4d9702887d2e92",
+ "url": "https://files.pythonhosted.org/packages/cc/a4/6de45fe850759e94aa9a55cda807c76245af1941047294df26c851dfb4a9/pytest-rerunfailures-14.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-rerunfailures",
+ "requires_dists": [
+ "packaging>=17.1",
+ "pytest>=7.2"
+ ],
+ "requires_python": ">=3.8",
+ "version": "14.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65",
+ "url": "https://files.pythonhosted.org/packages/21/08/b1945d4b4986eb1aa10cf84efc5293bba39da80a2f95db3573dd90678408/pytest_xdist-2.5.0-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf",
+ "url": "https://files.pythonhosted.org/packages/5d/43/9dbc32d297d6eae85d6c05dc8e8d3371061bd6cbe56a2f645d9ea4b53d9b/pytest-xdist-2.5.0.tar.gz"
+ }
+ ],
+ "project_name": "pytest-xdist",
+ "requires_dists": [
+ "execnet>=1.1",
+ "filelock; extra == \"testing\"",
+ "psutil>=3.0; extra == \"psutil\"",
+ "pytest-forked",
+ "pytest>=6.2.0",
+ "setproctitle; extra == \"setproctitle\""
+ ],
+ "requires_python": ">=3.6",
+ "version": "2.5.0"
+ },
+ {
+ "artifacts": [
+ {
+ "algorithm": "sha256",
+ "hash": "939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "url": "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl"
+ },
+ {
+ "algorithm": "sha256",
+ "hash": "de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f",
+ "url": "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz"
+ }
+ ],
+ "project_name": "tomli",
+ "requires_dists": [],
+ "requires_python": ">=3.7",
+ "version": "2.0.1"
+ }
+ ],
+ "platform_tag": null
+ }
+ ],
+ "path_mappings": {},
+ "pex_version": "2.1.148",
+ "pip_version": "23.0.1",
+ "prefer_older_binary": false,
+ "requirements": [
+ "pytest-benchmark==4.0.0",
+ "pytest-cov!=2.12.1,<3.1,>=2.12",
+ "pytest-platform-markers",
+ "pytest-rerunfailures",
+ "pytest-xdist<3,>=2.5",
+ "pytest~=8.0"
+ ],
+ "requires_python": [
+ "<3.11,>=3.10"
+ ],
+ "resolver_version": "pip-2020-resolver",
+ "style": "universal",
+ "target_systems": [
+ "linux",
+ "mac"
+ ],
+ "transitive": true,
+ "use_pep517": null
+}
diff --git a/pants-plugins/BUILD b/pants-plugins/BUILD
new file mode 100644
index 00000000..db46e8d6
--- /dev/null
+++ b/pants-plugins/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/pants-plugins/macros.py b/pants-plugins/macros.py
new file mode 100644
index 00000000..9bf8e02a
--- /dev/null
+++ b/pants-plugins/macros.py
@@ -0,0 +1,12 @@
+def is_standalone():
+    # BUILD-file macro: flags that this repository builds standalone.
+    # NOTE(review): presumably consumed by BUILD files to toggle targets when
+    # emote is vendored into a larger monorepo -- confirm against usages.
+    return True
+
+
+def emote_dependency_path(suffix: str) -> str:
+    """Normalize a dependency path into a repo-root-anchored Pants address.
+
+    A suffix with a leading "/" gains one more "/" and a bare suffix gains
+    "//", so both input forms yield a root-anchored "//..." address.
+    """
+    if suffix.startswith("/"):
+        return f"/{suffix}"
+    return f"//{suffix}"
+
+
+def emote_root_dir() -> str:
+    # Root of the emote project relative to the build root ("." standalone).
+    return "."
diff --git a/pants.toml b/pants.toml
new file mode 100644
index 00000000..0453e830
--- /dev/null
+++ b/pants.toml
@@ -0,0 +1,89 @@
+[GLOBAL]
+pants_version = "2.19.1"
+
+plugins = [
+ "pants_backend_mdbook==0.4.1",
+]
+backend_packages = [
+ "pants.backend.python",
+
+ "pants.backend.python.lint.black",
+ "pants.backend.python.lint.isort",
+ "pants.backend.python.lint.flake8",
+ "pants.backend.python.lint.docformatter",
+ "pants.backend.python.lint.bandit",
+
+ "pants.backend.shell",
+ "pants.backend.shell.lint.shellcheck",
+ "pants.backend.shell.lint.shfmt",
+
+ "pants.backend.tools.taplo",
+ "pants.backend.experimental.adhoc",
+
+ "pants_backend_mdbook",
+]
+
+build_file_prelude_globs = ["pants-plugins/macros.py"]
+
+[cli.alias]
+precommit = "fmt lint tailor test"
+--all-changed = "--changed-since=origin/main --changed-dependees=transitive"
+
+[source]
+root_patterns = ["/", "/docs/"]
+
+[python-bootstrap]
+search_path.add = [""]
+
+[python]
+interpreter_constraints = [">=3.10,<3.11"]
+pip_version = "23.0.1"
+enable_resolves = true
+default_resolve = "base"
+
+[python.resolves]
+base = "locks/base.lock"
+cpu = "locks/cpu.lock"
+gpu = "locks/gpu.lock"
+
+apibook = "locks/tools/apibook.lock"
+pytest = "locks/tools/pytest.lock"
+isort = "locks/tools/isort.lock"
+ipython = "locks/tools/ipython.lock"
+black = "locks/tools/black.lock"
+docformatter = "locks/tools/docformatter.lock"
+bandit = "locks/tools/bandit.lock"
+
+[python-repos]
+indexes = [
+ "https://pypi.org/simple/",
+ "https://tgolsson.github.io/torch-index/cpu/",
+ "https://tgolsson.github.io/torch-index/cu116/",
+]
+
+[repl]
+shell = "ipython"
+
+[black]
+install_from_resolve = "black"
+interpreter_constraints = ["==3.10.*"]
+
+[ipython]
+install_from_resolve = "ipython"
+
+[isort]
+install_from_resolve = "isort"
+args = ['--resolve-all-configs']
+interpreter_constraints = ["==3.10.*"]
+
+[pytest]
+args = ["--no-header"]
+execution_slot_var = "TEST_EXECUTION_SLOT"
+install_from_resolve = "pytest"
+
+[docformatter]
+install_from_resolve = "docformatter"
+interpreter_constraints = ["==3.10.*"]
+
+[bandit]
+args = ["-r", "-ll", "--quiet"]
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..31569911
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,65 @@
+[project]
+name = "emote-rl"
+version = "23.0.0"
+description = "A modular reinforcement learning library"
+authors = [{ name = "Embark Studios", email = "python@embark-studios.com" }]
+requires-python = ">=3.10"
+readme = "README.md"
+license = { text = "MIT" }
+
+dependencies = [
+ "tensorboard>=2.8.0",
+ "setuptools==59.5",
+ "psutil>=5.8.0",
+ "onnx>=1.10",
+ "atomicwrites>=1.4.0",
+ "numpy<1.24",
+ "cloudpickle~=3.0",
+ "myst-parser~=2.0",
+]
+
+
+[project.optional-dependencies]
+torch = ["torch==1.12.0"]
+atari = [
+ "gymnasium>=0.27.1",
+ "box2d-py>=2.3.5",
+ "pygame>=2.1.0",
+ "opencv-python>=3.0",
+]
+wandb = ["wandb>=0.14.0"]
+protobuf = ["protobuf>=4.0"]
+ci = ["gsutil>=4.66"]
+
+[project.urls]
+repository = "https://github.com/EmbarkStudios/emote"
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+log_cli = true
+#log_cli_level = "INFO" # Useful when debugging locally
+log_format = "%(asctime)s:\t%(message)s"
+log_date_format = "%H:%M:%S"
+
+[tool.isort]
+py_version = 310
+profile = "black"
+combine_as_imports = true
+lines_between_types = 1
+lines_after_imports = 2
+src_paths = ["emote"]
+known_first_party = ["emote"]
+line_length = 100
+
+[tool.black]
+target-version = ['py310']
+line-length = 100
+
+[tool.mypy]
+check_untyped_defs = true
+ignore_missing_imports = true
+show_error_codes = true
+warn_redundant_casts = true
+warn_unused_configs = true
+warn_unused_ignores = true
+files = "src"
diff --git a/taplo.toml b/taplo.toml
new file mode 100644
index 00000000..8f8054d3
--- /dev/null
+++ b/taplo.toml
@@ -0,0 +1,3 @@
+[formatting]
+array_auto_collapse = false
+indent_string = " "
diff --git a/tests/BUILD b/tests/BUILD
new file mode 100644
index 00000000..d4a3d9b3
--- /dev/null
+++ b/tests/BUILD
@@ -0,0 +1,19 @@
+python_test_utils(
+ name="test_utils",
+)
+
+python_tests(
+ overrides={
+ "test_memory_exports.py": {
+ "dependencies": [
+ ":memory-artifact",
+ ]
+ },
+ },
+)
+
+
+resource(
+ name="memory-artifact",
+ source="data/export-v1.zip",
+)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/__init__.pyc b/tests/__init__.pyc
new file mode 100644
index 00000000..08ad529b
Binary files /dev/null and b/tests/__init__.pyc differ
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000..2748c7a8
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,20 @@
+"""Shared pytest fixtures for the emote test suite."""
+
+import builtins
+
+import pytest
+
+
+@pytest.fixture
+def hide_pkg(monkeypatch):
+    """Fixture returning an installer that makes a package appear uninstalled.
+
+    The returned callable patches ``builtins.__import__`` so that importing
+    the named module raises ``ImportError``; ``monkeypatch`` undoes the patch
+    after the test.
+    """
+    # Keep a reference to the real import so everything else still resolves.
+    import_orig = builtins.__import__
+
+    def _install(hide):
+        # Hide exactly the module named ``hide``.
+        # NOTE(review): only the exact top-level name is blocked; submodule
+        # imports like "hide.sub" still succeed -- confirm that is intended.
+        def mocked_import(name, *args, **kwargs):
+            if name == hide:
+                raise ImportError(f"No module named '{name}'")
+            return import_orig(name, *args, **kwargs)
+
+        monkeypatch.setattr(builtins, "__import__", mocked_import)
+
+    return _install
diff --git a/tests/data/export-v1.zip b/tests/data/export-v1.zip
new file mode 100644
index 00000000..00ee023a
Binary files /dev/null and b/tests/data/export-v1.zip differ
diff --git a/tests/gym/BUILD b/tests/gym/BUILD
new file mode 100644
index 00000000..e7e0ca9c
--- /dev/null
+++ b/tests/gym/BUILD
@@ -0,0 +1,9 @@
+python_sources()
+
+python_requirement(
+ name="atari",
+ dependencies=[
+ emote_dependency_path("/pyproject.toml:package_data#atari"),
+ ],
+ requirements=[],
+)
diff --git a/tests/gym/__init__.py b/tests/gym/__init__.py
new file mode 100644
index 00000000..21ee0d39
--- /dev/null
+++ b/tests/gym/__init__.py
@@ -0,0 +1,11 @@
+from .collector import SimpleGymCollector, ThreadedGymCollector
+from .dict_gym_wrapper import DictGymWrapper
+from .hit_the_middle import HitTheMiddle
+
+
+__all__ = [
+ "HitTheMiddle",
+ "SimpleGymCollector",
+ "DictGymWrapper",
+ "ThreadedGymCollector",
+]
diff --git a/tests/gym/collector.py b/tests/gym/collector.py
new file mode 100644
index 00000000..58e95e40
--- /dev/null
+++ b/tests/gym/collector.py
@@ -0,0 +1,135 @@
+"""Collectors for running OpenAI gym environments."""
+
+import threading
+
+from collections import deque
+
+from tests.gym.dict_gym_wrapper import DictGymWrapper
+
+from emote.callback import Callback
+from emote.mixins.logging import LoggingMixin
+from emote.proxies import AgentProxy, MemoryProxy
+
+
+class GymCollector(LoggingMixin, Callback):
+    """Base trainer callback that steps gym environments and fills a memory.
+
+    Pairs an ``AgentProxy`` (action selection) with a ``MemoryProxy``
+    (experience storage) around a ``DictGymWrapper``-wrapped vector env.
+    """
+
+    # Cap for reward bookkeeping; mirrors the deque maxlen below.
+    MAX_NUMBER_REWARDS = 1000
+
+    def __init__(
+        self,
+        env: DictGymWrapper,
+        agent: AgentProxy,
+        memory: MemoryProxy,
+        render: bool = True,
+        warmup_steps: int = 0,
+    ):
+        super().__init__()
+        self._agent = agent
+        self._memory = memory
+        self._env = env
+        self._render = render
+        # NOTE(review): written nowhere else in this class -- appears unused.
+        self._last_environment_rewards = deque(maxlen=1000)
+        self.num_envs = env.num_envs
+        self._warmup_steps = warmup_steps
+        # ``self._obs`` is created in begin_training(); collect_data() must
+        # not be called before the trainer has invoked begin_training().
+
+    def collect_data(self):
+        """Collect a single rollout."""
+        if self._render:
+            self._env.render()
+        actions = self._agent(self._obs)
+        next_obs, ep_info = self._env.dict_step(actions)
+
+        self._memory.add(self._obs, actions)
+        self._obs = next_obs
+
+        # dict_step only reports "reward" when at least one episode completed.
+        if "reward" in ep_info:
+            self.log_scalar("episode/reward", ep_info["reward"])
+
+    def collect_multiple(self, count: int):
+        """Collect multiple rollouts.
+
+        :param count: Number of rollouts to collect
+        """
+        for _ in range(count):
+            self.collect_data()
+
+    def begin_training(self):
+        "Make sure all envs work and collect warmup steps."
+        # Runs through the init, step cycle once on main thread to make sure all envs work.
+        self._obs = self._env.dict_reset()
+        actions = self._agent(self._obs)
+        _ = self._env.dict_step(actions)
+        self._obs = self._env.dict_reset()
+
+        # Collect trajectories for warmup steps before starting training
+        iterations_required = self._warmup_steps
+        self.collect_multiple(iterations_required)
+
+
+class ThreadedGymCollector(GymCollector):
+    """GymCollector variant that collects on a background thread.
+
+    Data collection runs concurrently with training from ``begin_training``
+    until ``end_training`` sets the stop flag and joins the thread.
+    """
+
+    def __init__(
+        self,
+        env: DictGymWrapper,
+        agent: AgentProxy,
+        memory: MemoryProxy,
+        render: bool = True,
+        warmup_steps: int = 0,
+    ):
+        super().__init__(env, agent, memory, render, warmup_steps)
+        self._warmup_steps = warmup_steps
+        self._stop = False
+        self._thread = None
+
+    def collect_forever(self):
+        """Collect rollouts forever.
+
+        .. warning::
+
+            This function means forever when it says forever. There is no
+            signal, internal or external, that'll cause this loop to end. You
+            probably want to implement a loop that calls `collect_data` or
+            `collect_multiple` while checking exit conditions.
+        """
+        # FIXME[tsolberg]: Works OK when subprocs are not involved, might want
+        # to signal this (somehow). Responsibility of parent to wrap somehow?
+
+        while not self._stop:
+            self.collect_data()
+
+    def begin_training(self):
+        # Warmup happens synchronously in the parent, then collection moves
+        # to the background thread.
+        super().begin_training()
+        self._thread = threading.Thread(target=self.collect_forever)
+        self._thread.start()
+
+    def end_training(self):
+        """Stop the collection loop, join the thread, and close the env."""
+        self._stop = True
+
+        if self._thread is not None:
+            self._thread.join()
+            self._thread = None
+
+        # For subprocvecenv et al that need to close connections to not crash
+        if hasattr(self._env, "close"):
+            self._env.close()
+
+
+class SimpleGymCollector(GymCollector):
+    """GymCollector that interleaves collection with backprop steps.
+
+    One environment step is taken every ``bp_steps_per_inf`` backprop steps,
+    on the training thread itself (no background thread).
+    """
+
+    def __init__(
+        self,
+        env: DictGymWrapper,
+        agent: AgentProxy,
+        memory: MemoryProxy,
+        render: bool = True,
+        warmup_steps: int = 0,
+        bp_steps_per_inf: int = 10,
+    ):
+        super().__init__(env, agent, memory, render, warmup_steps)
+        self._bp_steps_per_inf = bp_steps_per_inf
+
+    def begin_training(self):
+        super().begin_training()
+        # Seed the trainer state with the inference steps spent on warmup.
+        return {"inf_step": self._warmup_steps}
+
+    def begin_batch(self, inf_step, bp_step):
+        # Collect once every ``bp_steps_per_inf`` backprop steps, and report
+        # the inference-step counter advanced by one step per parallel env.
+        if bp_step % self._bp_steps_per_inf == 0:
+            self.collect_data()
+        return {"inf_step": inf_step + self.num_envs}
diff --git a/tests/gym/dict_gym_wrapper.py b/tests/gym/dict_gym_wrapper.py
new file mode 100644
index 00000000..2e55336b
--- /dev/null
+++ b/tests/gym/dict_gym_wrapper.py
@@ -0,0 +1,97 @@
+from itertools import count
+from typing import Dict, List
+
+import gymnasium.spaces
+import numpy as np
+
+from gymnasium.vector import VectorEnv, VectorEnvWrapper
+
+from emote.typing import AgentId, DictObservation, DictResponse, EpisodeState
+from emote.utils.spaces import BoxSpace, DictSpace, MDPSpace
+
+
+class DictGymWrapper(VectorEnvWrapper):
+    """Adapts a gymnasium ``VectorEnv`` to emote's per-agent dict protocol.
+
+    Each parallel env slot is assigned a monotonically increasing ``AgentId``;
+    when an episode ends, the slot gets a fresh id so trajectories never mix.
+    """
+
+    def __init__(self, env: VectorEnv):
+        super().__init__(env)
+        self._next_agent = count()
+        self._agent_ids: List[AgentId] = [next(self._next_agent) for i in range(self.num_envs)]
+        self._episode_rewards: List[float] = [0.0 for i in range(self.num_envs)]
+        # Only flat Box observation spaces are supported; they are exposed
+        # under the single "obs" key of the MDPSpace below.
+        assert isinstance(env.single_observation_space, gymnasium.spaces.Box)
+        os: gymnasium.spaces.Box = env.single_observation_space
+
+        # Scalar action spaces are lifted to shape (1,).
+        if len(env.single_action_space.shape) > 0:
+            action_space_shape = env.single_action_space.shape
+        else:
+            action_space_shape = (1,)
+
+        self.dict_space = MDPSpace(
+            BoxSpace(np.float32, (1,)),
+            BoxSpace(env.single_action_space.dtype, action_space_shape),
+            DictSpace({"obs": BoxSpace(os.dtype, os.shape)}),
+        )
+
+    def render(self):
+        # Render only the first sub-environment.
+        self.env.envs[0].render()
+
+    def dict_step(
+        self, actions: Dict[AgentId, DictResponse]
+    ) -> tuple[Dict[AgentId, DictObservation], Dict[str, float]]:
+        """Step all envs with per-agent actions.
+
+        Returns ``(results, ep_info)`` where ``results`` maps agent ids to
+        observations (TERMINAL for finished episodes, INITIAL for their
+        replacements, RUNNING otherwise) and ``ep_info`` carries the mean
+        reward of episodes completed this step under the "reward" key.
+        """
+        batched_actions = np.stack(
+            [actions[agent].list_data["actions"] for agent in self._agent_ids]
+        )
+        self.step_async(batched_actions)
+        next_obs, rewards, terminated, truncated, info = super().step_wait()
+        dones = np.logical_or(terminated, truncated)
+        new_agents = []
+        results = {}
+        completed_episode_rewards = []
+
+        for env_id, reward in enumerate(rewards):
+            self._episode_rewards[env_id] += reward
+
+        for env_id, done in enumerate(dones):
+            if done:
+                # Vector envs auto-reset: the true last observation lives in
+                # info["final_observation"], while next_obs is the new
+                # episode's first observation.
+                results[self._agent_ids[env_id]] = DictObservation(
+                    episode_state=EpisodeState.TERMINAL,
+                    array_data={"obs": info["final_observation"][env_id]},
+                    rewards={"reward": rewards[env_id]},
+                )
+                new_agent = next(self._next_agent)
+                results[new_agent] = DictObservation(
+                    episode_state=EpisodeState.INITIAL,
+                    array_data={"obs": next_obs[env_id]},
+                    rewards={"reward": None},
+                )
+                new_agents.append(new_agent)
+                completed_episode_rewards.append(self._episode_rewards[env_id])
+                self._agent_ids[env_id] = new_agent
+                self._episode_rewards[env_id] = 0.0
+
+        results.update(
+            {
+                agent_id: DictObservation(
+                    episode_state=EpisodeState.RUNNING,
+                    array_data={"obs": next_obs[env_id]},
+                    rewards={"reward": rewards[env_id]},
+                )
+                for env_id, agent_id in enumerate(self._agent_ids)
+                if agent_id not in new_agents
+            }
+        )
+
+        ep_info = {}
+        if len(completed_episode_rewards) > 0:
+            ep_info["reward"] = sum(completed_episode_rewards) / len(completed_episode_rewards)
+
+        return results, ep_info
+
+    def dict_reset(self) -> Dict[AgentId, DictObservation]:
+        """Reset all envs, assign fresh agent ids, and return INITIAL observations."""
+        self._agent_ids = [next(self._next_agent) for i in range(self.num_envs)]
+        self.reset_async()
+        obs, info = self.reset_wait()
+        return {
+            agent_id: DictObservation(
+                episode_state=EpisodeState.INITIAL,
+                array_data={"obs": obs[i]},
+                rewards={"reward": None},
+            )
+            for i, agent_id in enumerate(self._agent_ids)
+        }
diff --git a/tests/gym/hit_the_middle.py b/tests/gym/hit_the_middle.py
new file mode 100644
index 00000000..14f0b35d
--- /dev/null
+++ b/tests/gym/hit_the_middle.py
@@ -0,0 +1,83 @@
+import random
+
+import numpy as np
+
+from gymnasium import Env, spaces
+from gymnasium.utils import seeding
+
+
+class HitTheMiddle(Env):
+ def __init__(self):
+ high = np.array([10, np.finfo(np.float32).max], dtype=np.float32)
+ self.observation_space = spaces.Box(-high, high, dtype=np.float32)
+ ones = np.ones(1, dtype=np.float32)
+ self.action_space = spaces.Box(-ones, ones)
+ self._state = None
+ self._step = None
+ self.viewer = None
+
+ def step(self, action):
+ self._state[1] += action
+ self._state[0] += self._state[1]
+ self._step += 1
+ if self._state[0] > 10.0:
+ self._state[0] = 10.0
+ self._state[1] *= -1
+ elif self._state[0] < -10.0:
+ self._state[0] = -10.0
+ self._state[1] *= -1
+ done = False
+ if self._step > 30:
+ self._step = 0
+ done = True
+
+ truncated = False
+ return (
+ self._state,
+ float(-self._state[0] ** 2),
+ done,
+ truncated,
+ {},
+ )
+
+ def seed(self, seed=None):
+ self.np_random, seed = seeding.np_random(seed)
+ return [seed]
+
+ def reset(self, seed=None):
+ pos = random.random() * 20 - 10
+ vel = random.random() * 0.5 - 0.25
+ self._state = np.array([pos, vel])
+ self._step = 0
+ return self._state, {"info": []}
+
+ def render(self, mode="human"):
+ screen_width = 600
+ screen_height = 400
+
+ world_width = 20
+ scale = screen_width / world_width
+ bally = screen_height / 2
+ ballwidth = 30.0
+
+ if self.viewer is None:
+ from gymnasium.envs.classic_control import rendering
+
+ self.viewer = rendering.Viewer(screen_width, screen_height)
+ ball = rendering.make_circle(ballwidth / 2)
+ self.balltrans = rendering.Transform()
+ ball.add_attr(self.balltrans)
+ ball.set_color(0.8, 0.1, 0.6)
+ self.viewer.add_geom(ball)
+ self.track = rendering.Line((0, bally), (screen_width, bally))
+ self.track.set_color(0, 0, 0)
+ self.viewer.add_geom(self.track)
+
+ if self._state is None:
+ return None
+
+ x = self._state[0]
+ ballx = x * scale + screen_width / 2.0 # MIDDLE OF BALL
+ self.balltrans.set_translation(ballx, bally)
+
+ return self.viewer.render(return_rgb_array=(mode == "rgb_array"))
diff --git a/tests/test_amp.py b/tests/test_amp.py
new file mode 100644
index 00000000..7e7411b4
--- /dev/null
+++ b/tests/test_amp.py
@@ -0,0 +1,79 @@
+import torch
+
+from torch import Tensor, nn
+
+from emote.algorithms.amp import AMPReward, DiscriminatorLoss, gradient_loss_function
+
+
+class Discriminator(nn.Module):
+    """Minimal MLP discriminator used by the AMP tests.
+
+    ``hidden_dims`` defines a ReLU stack; a final linear layer maps to a
+    single score.
+    """
+
+    def __init__(self, input_size: int, hidden_dims: list[int]):
+        super().__init__()
+        # zip([input_size] + hidden_dims, hidden_dims) yields consecutive
+        # (in, out) layer widths starting from the input size.
+        self.encoder = nn.Sequential(
+            *[
+                nn.Sequential(nn.Linear(n_in, n_out), nn.ReLU())
+                for n_in, n_out in zip([input_size] + hidden_dims, hidden_dims)
+            ],
+        )
+        final_layers: list[nn.Module] = [nn.Linear(hidden_dims[-1], 1)]
+        self.final_layer = nn.Sequential(*final_layers)
+
+    def forward(self, x: Tensor):
+        # Returns an unnormalized scalar score per row of ``x``.
+        return self.final_layer(self.encoder(x))
+
+
+def state_map_fn(obs: Tensor):
+    """Identity state-mapping used by the AMP components under test."""
+    return obs
+
+
+def test_gradient_loss():
+    """gradient_loss_function matches the analytic mean squared gradient.
+
+    For y = sum(4x^2 + sin x, dim=1), dy/dx = 8x + cos x elementwise.
+    """
+    x = torch.ones(10, 3, requires_grad=True)
+    x = x * torch.rand(10, 3)
+    y = torch.sum(4 * x * x + torch.sin(x), dim=1)
+
+    grad1 = gradient_loss_function(y, x)
+    y_dot = 8 * x + torch.cos(x)
+    grad2 = torch.mean(torch.sum(y_dot * y_dot, dim=1))
+
+    assert abs(grad1.item() - grad2.item()) < 0.001
+
+
+def test_discriminator_loss():
+    """Smoke-test DiscriminatorLoss: loss on random batches is non-negative."""
+    # NOTE(review): features are 10-dim while input_size is 20 -- presumably
+    # the loss concatenates observation and next_observation; confirm in
+    # emote.algorithms.amp.
+    discriminator = Discriminator(input_size=20, hidden_dims=[128, 128])
+    discriminator_opt = torch.optim.Adam(discriminator.parameters(), lr=0.001)
+    loss = DiscriminatorLoss(
+        discriminator=discriminator,
+        imitation_state_map_fn=state_map_fn,
+        policy_state_map_fn=state_map_fn,
+        grad_loss_weight=1,
+        optimizer=discriminator_opt,
+        lr_schedule=None,
+        input_key="features",
+        max_grad_norm=10.0,
+    )
+    batch1 = {
+        "batch_size": 30,
+        "observation": {"features": torch.rand(30, 10)},
+        "next_observation": {"features": torch.rand(30, 10)},
+    }
+    batch2 = {
+        "batch_size": 30,
+        "observation": {"features": torch.rand(30, 10)},
+        "next_observation": {"features": torch.rand(30, 10)},
+    }
+    assert loss.loss(batch1, batch2) >= 0
+
+
+def test_amp_reward():
+    """Smoke-test AMPReward.begin_batch on random data (asserts nothing)."""
+    discriminator = Discriminator(input_size=20, hidden_dims=[128, 128])
+    amp_reward = AMPReward(
+        discriminator=discriminator,
+        state_map_fn=state_map_fn,
+        style_reward_weight=1.0,
+        rollout_length=1,
+        observation_key="features",
+        data_group=None,
+    )
+    observation = {"features": torch.rand(30, 10)}
+    next_observation = {"features": torch.rand(30, 10)}
+    reward = torch.rand(30, 1)
+    amp_reward.begin_batch(observation, next_observation, reward)
diff --git a/tests/test_checkpoints.py b/tests/test_checkpoints.py
new file mode 100644
index 00000000..72dcd40f
--- /dev/null
+++ b/tests/test_checkpoints.py
@@ -0,0 +1,130 @@
+from os.path import join
+from tempfile import mkdtemp
+from typing import Generator
+
+import torch
+
+from torch import nn
+from torch.optim import Adam
+
+from emote import Trainer
+from emote.algorithms.sac import QLoss
+from emote.callbacks.checkpointing import Checkpointer, CheckpointLoader
+from emote.callbacks.generic import BackPropStepsTerminator
+from emote.callbacks.loss import LossCallback
+from emote.trainer import TrainingShutdownException
+
+
+N_HIDDEN = 10
+
+
+class QNet(nn.Module):
+    """Small MLP Q-network: Q(obs, action) -> scalar value."""
+
+    def __init__(self, obs, act):
+        super().__init__()
+        self.q = nn.Sequential(
+            nn.Linear(obs + act, N_HIDDEN),
+            nn.ReLU(),
+            nn.Linear(N_HIDDEN, N_HIDDEN),
+            nn.ReLU(),
+            nn.Linear(N_HIDDEN, 1),
+        )
+
+    def forward(self, action, obs):
+        # Concatenate as [obs, action] along the feature dimension.
+        x = torch.cat([obs, action], dim=1)
+        return self.q(x)
+
+
+def nostep_dataloader() -> Generator:
+    """Dataloader that shuts training down before yielding any batch."""
+    raise TrainingShutdownException()
+    yield {}  # Needed to make this a generator.
+
+
+def onestep_dataloader() -> Generator:
+    """Dataloader that yields exactly one empty batch, then shuts down."""
+    yield {}
+    raise TrainingShutdownException()
+
+
+def test_networks_checkpoint():
+ chkpt_dir = mkdtemp()
+ run_root = join(chkpt_dir, "chkpt")
+ n1 = nn.Linear(1, 1)
+ loss_cb = LossCallback(
+ name="linear",
+ optimizer=Adam(n1.parameters(), lr=0.01),
+ network=n1,
+ max_grad_norm=0,
+ data_group="",
+ )
+ c1 = [
+ Checkpointer(
+ callbacks=[loss_cb],
+ run_root=run_root,
+ checkpoint_interval=1,
+ )
+ ]
+
+ t1 = Trainer(c1, onestep_dataloader())
+ t1.state["inf_step"] = 0
+ t1.state["bp_step"] = 0
+ t1.state["batch_size"] = 0
+ t1.train()
+ n2 = nn.Linear(1, 1)
+ test_data = torch.rand(5, 1)
+
+ assert "latest_checkpoint" in t1.state
+ assert not torch.allclose(n1(test_data), n2(test_data))
+
+ c2 = [
+ CheckpointLoader(
+ callbacks=[loss_cb],
+ run_root=run_root,
+ checkpoint_index=0,
+ ),
+ BackPropStepsTerminator(1),
+ ]
+ n2 = loss_cb.network
+ t2 = Trainer(c2, nostep_dataloader())
+ t2.train()
+
+ assert torch.allclose(n1(test_data), n2(test_data))
+
+
+def random_onestep_dataloader() -> Generator:
+    """Yield one random batch shaped for QLoss ("default" group), then shut down."""
+    yield {
+        "default": {
+            "observation": {"obs": torch.rand(3, 2)},
+            "actions": torch.rand(3, 1),
+            "q_target": torch.ones(3, 1),
+        },
+    }
+    raise TrainingShutdownException()
+
+
+def test_qloss_checkpoints():
+ chkpt_dir = mkdtemp()
+ run_root = join(chkpt_dir, "chkpt")
+ q1 = QNet(2, 1)
+ ql1 = QLoss(name="q", q=q1, opt=Adam(q1.parameters()))
+ c1 = [
+ ql1,
+ Checkpointer(callbacks=[ql1], run_root=run_root, checkpoint_interval=1),
+ ]
+
+ t1 = Trainer(c1, random_onestep_dataloader())
+ t1.state["inf_step"] = 0
+ t1.state["bp_step"] = 0
+ t1.state["batch_size"] = 0
+ t1.train()
+ q2 = QNet(2, 1)
+ test_obs = torch.rand(5, 2)
+ test_act = torch.rand(5, 1)
+ assert not torch.allclose(q1(test_act, test_obs), q2(test_act, test_obs))
+
+ ql2 = QLoss(name="q", q=q2, opt=Adam(q1.parameters()))
+ c2 = [
+ ql2,
+ CheckpointLoader(callbacks=[ql2], run_root=run_root, checkpoint_index=0),
+ ]
+ t2 = Trainer(c2, nostep_dataloader())
+ t2.train()
+ assert torch.allclose(q1(test_act, test_obs), q2(test_act, test_obs))
diff --git a/tests/test_conv1denc.py b/tests/test_conv1denc.py
new file mode 100644
index 00000000..32be0a00
--- /dev/null
+++ b/tests/test_conv1denc.py
@@ -0,0 +1,46 @@
+import pytest
+import torch
+
+from emote.nn.layers import Conv1dEncoder
+
+
+def test_conv1denc():
+    """Conv1dEncoder: output shape, channel validation, and channels_last permute."""
+    bsz = 2
+    channels = 2
+    length = 5
+
+    enc = Conv1dEncoder(
+        input_shape=(channels, length),
+        channels=[channels],
+        kernels=[1],
+        strides=[1],
+        padding=[0],
+        channels_last=False,
+    )
+
+    inp = torch.rand((bsz, channels, length))
+    out = enc(inp)
+
+    # test that the shape of the output matches the calculated one
+    output_size = enc.get_encoder_output_size()  # length of flattened output
+    output_shape = (bsz, output_size)
+    assert out.shape == output_shape
+
+    # test that fails with wrong dimensions
+    inp_wrong_dim = torch.rand((bsz, length + 2, channels))
+    with pytest.raises(
+        RuntimeError,
+        match=".*to have [0-9]+ channels, but got [0-9]+ channels instead$",
+    ):
+        _ = enc(inp_wrong_dim)
+
+    # test that input gets permuted when channels_last = True
+    enc = Conv1dEncoder(
+        input_shape=(length, channels),
+        channels=[channels],
+        kernels=[1],
+        strides=[1],
+        padding=[0],
+        channels_last=True,
+    )
+    assert tuple(enc._input_shape) == (channels, length)
diff --git a/tests/test_emote.py b/tests/test_emote.py
new file mode 100644
index 00000000..36167ea5
--- /dev/null
+++ b/tests/test_emote.py
@@ -0,0 +1,5 @@
+from emote import __version__
+
+
+def test_version():
+    # NOTE(review): pyproject.toml declares version 23.0.0 while this asserts
+    # 0.1.0 -- one of the two looks stale; confirm which is authoritative.
+    assert __version__ == "0.1.0"
diff --git a/tests/test_genrl.py b/tests/test_genrl.py
new file mode 100644
index 00000000..5cd56bce
--- /dev/null
+++ b/tests/test_genrl.py
@@ -0,0 +1,191 @@
+import numpy as np
+import torch
+import torch.types
+
+from torch import nn
+
+from emote.algorithms.genrl.proxies import MemoryProxyWithEncoder
+from emote.algorithms.genrl.wrappers import DecoderWrapper, EncoderWrapper, PolicyWrapper
+from emote.memory.builder import DictObsTable
+from emote.nn.action_value_mlp import ActionValueMlp
+from emote.nn.gaussian_policy import GaussianMlpPolicy
+from emote.typing import DictObservation, DictResponse, EpisodeState
+from emote.utils.spaces import BoxSpace, DictSpace, MDPSpace
+
+
+def get_conditioning_fn(len_cond: int = 0):
+    """Return a function slicing the first ``len_cond`` columns as the condition.
+
+    With ``len_cond == 0`` the returned function yields an empty slice.
+    """
+    assert len_cond >= 0
+
+    def conditioning_fn(a):
+        return a[:, :len_cond]
+
+    return conditioning_fn
+
+
+class FullyConnectedEncoder(nn.Module):
+ def __init__(
+ self,
+ input_size: int,
+ output_size: int,
+ device: torch.device,
+ condition_size: int = 0,
+ hidden_sizes: list[int] = None,
+ ):
+ super().__init__()
+ self.device = device
+
+ self.input_size = input_size
+ self.output_size = output_size
+ self.condition_size = condition_size
+
+ num_layers = len(hidden_sizes)
+ layers = [
+ nn.Sequential(
+ nn.Linear(input_size + condition_size, hidden_sizes[0]),
+ nn.ReLU(),
+ )
+ ]
+ for i in range(num_layers - 1):
+ layers.append(
+ nn.Sequential(
+ nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]),
+ nn.ReLU(),
+ )
+ )
+ self.hidden_layers = nn.Sequential(*layers).to(self.device)
+ self.output_mean = nn.Linear(hidden_sizes[num_layers - 1], output_size).to(self.device)
+ self.output_log_std = nn.Linear(hidden_sizes[num_layers - 1], output_size).to(self.device)
+
+ def forward(
+ self, data: torch.Tensor, condition: torch.Tensor = None
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ if condition is not None:
+ data = torch.cat((data, condition), dim=len(data.shape) - 1)
+
+ x = self.hidden_layers(data)
+
+ mean = self.output_mean(x)
+ log_std = self.output_log_std(x)
+ return mean, log_std
+
+
+class FullyConnectedDecoder(nn.Module):
+    """MLP decoder mapping a (optionally conditioned) latent back to actions.
+
+    The condition vector is concatenated onto the latent before the stack.
+    """
+
+    def __init__(
+        self,
+        latent_size: int,
+        output_size: int,
+        device: torch.device,
+        condition_size: int = 0,
+        hidden_sizes: list[int] = None,
+        freeze_bn: bool = True,
+    ):
+        super().__init__()
+        if hidden_sizes is None:
+            hidden_sizes = [128, 256, 512]
+
+        self.device = device
+
+        self.input_size = latent_size
+        self.output_size = output_size
+        self.condition_size = condition_size
+        # NOTE(review): stored but never read in this class -- presumably
+        # consumed elsewhere; confirm before removing.
+        self._freeze_bn = freeze_bn
+
+        num_layers = len(hidden_sizes)
+        layers = [
+            nn.Sequential(
+                nn.Linear(latent_size + condition_size, hidden_sizes[0]),
+                nn.ReLU(),
+            )
+        ]
+        for i in range(num_layers - 1):
+            layers.append(
+                nn.Sequential(
+                    nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]),
+                    nn.ReLU(),
+                )
+            )
+        # Final projection has no activation.
+        layers.append(
+            nn.Sequential(
+                nn.Linear(hidden_sizes[num_layers - 1], output_size),
+            )
+        )
+        self.layers = nn.Sequential(*layers).to(self.device)
+
+    def forward(self, latent: torch.Tensor, condition: torch.Tensor = None) -> torch.Tensor:
+        """Decode ``latent`` (optionally concatenated with ``condition``)."""
+        if condition is not None:
+            latent = torch.cat((latent, condition), dim=len(latent.shape) - 1)
+        x = self.layers(latent)
+        return x
+
+
+LATENT_SIZE = 3
+ACTION_SIZE = 2
+OBSERVATION_SIZE = 24
+CONDITION_SIZE = 24
+BATCH_SIZE = 50
+
+HIDDEN_SIZES = [256] * 2
+
+
+def test_genrl():
+    """Wire decoder/encoder wrappers with policy and Q-net; check output shapes."""
+    cfn = get_conditioning_fn(CONDITION_SIZE)
+    device = torch.device("cpu")
+
+    decoder = FullyConnectedDecoder(LATENT_SIZE, ACTION_SIZE, device, CONDITION_SIZE, HIDDEN_SIZES)
+    decoder_wrapper = DecoderWrapper(decoder, cfn)
+    encoder = FullyConnectedEncoder(ACTION_SIZE, LATENT_SIZE, device, CONDITION_SIZE, HIDDEN_SIZES)
+    encoder_wrapper = EncoderWrapper(encoder, cfn)
+
+    q = ActionValueMlp(OBSERVATION_SIZE, LATENT_SIZE, HIDDEN_SIZES).to(device)
+    policy = GaussianMlpPolicy(OBSERVATION_SIZE, LATENT_SIZE, HIDDEN_SIZES).to(device)
+    policy_wrapper = PolicyWrapper(decoder_wrapper, policy)
+
+    obs = torch.rand(BATCH_SIZE, OBSERVATION_SIZE)
+
+    # Policy emits decoded actions; encoder maps them back to the latent
+    # space on which the Q-network operates.
+    action, log_prob = policy_wrapper.forward(obs)
+    latent = encoder_wrapper.forward(action, obs)
+    q_vals = q.forward(latent, obs)
+
+    assert action.shape == (BATCH_SIZE, ACTION_SIZE)
+    assert q_vals.shape == (BATCH_SIZE, 1)
+    assert log_prob.shape == (BATCH_SIZE, 1)
+
+
+def test_memory_proxy():
+    """MemoryProxyWithEncoder stores the raw observation for an added agent."""
+    cfn = get_conditioning_fn(CONDITION_SIZE)
+    device = torch.device("cpu")
+
+    encoder = FullyConnectedEncoder(ACTION_SIZE, LATENT_SIZE, device, CONDITION_SIZE, HIDDEN_SIZES)
+    encoder_wrapper = EncoderWrapper(encoder, cfn)
+
+    space = MDPSpace(
+        rewards=None,
+        actions=BoxSpace(dtype=np.float32, shape=(ACTION_SIZE,)),
+        state=DictSpace(spaces={"obs": BoxSpace(dtype=np.float32, shape=(OBSERVATION_SIZE,))}),
+    )
+
+    table = DictObsTable(spaces=space, maxlen=1000, device=device)
+
+    proxy = MemoryProxyWithEncoder(
+        table=table,
+        encoder=encoder_wrapper,
+        minimum_length_threshold=1,
+        use_terminal=True,
+    )
+
+    agent_id = 0
+    obs = np.random.rand(1, OBSERVATION_SIZE)
+    action = np.random.rand(1, ACTION_SIZE)
+
+    # The response dict key (0) must match ``agent_id`` above.
+    proxy.add(
+        {
+            agent_id: DictObservation(
+                episode_state=EpisodeState.INITIAL,
+                array_data={"obs": obs},
+                rewards={"reward": None},
+            )
+        },
+        {0: DictResponse({"actions": action}, {})},
+    )
+
+    assert (obs == proxy._store[agent_id].data["obs"][0]).all()
diff --git a/tests/test_gym_collector.py b/tests/test_gym_collector.py
new file mode 100644
index 00000000..f23efe89
--- /dev/null
+++ b/tests/test_gym_collector.py
@@ -0,0 +1,109 @@
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper, HitTheMiddle, SimpleGymCollector
+from torch import nn
+
+from emote import Trainer
+from emote.algorithms.sac import FeatureAgentProxy
+from emote.callback import BatchCallback
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsTable
+from emote.trainer import TrainingShutdownException
+
+
+class RandomPolicy(nn.Module):
+    """Policy that ignores observations and samples uniform actions in [-1, 1]."""
+
+    def __init__(self, action_dim: int):
+        super().__init__()
+        self.action_dim = action_dim
+
+    def forward(self, obs: torch.Tensor):
+        batch_size = obs.shape[0]
+        rand_actions = 2 * (torch.rand(batch_size, self.action_dim) - 0.5)
+        # NOTE(review): the trailing 0 presumably stands in for a log-prob
+        # expected by the agent proxy -- confirm against FeatureAgentProxy.
+        return rand_actions, 0
+
+
+class HitTheMiddleDataInspector(BatchCallback):
+    """Callback validating replayed batches against the HitTheMiddle dynamics.
+
+    Re-simulates each transition from (obs, action) and raises if the stored
+    next_obs/reward disagree with the analytic result.  Only valid for
+    rollout_length == 1 batches.
+    """
+
+    def __init__(
+        self,
+        num_bp: int,
+        device: torch.device,
+        data_group: str = "default",
+    ):
+        super().__init__()
+        self.data_group = data_group
+        # NOTE(review): ``cycle`` presumably schedules end_cycle after
+        # ``num_bp`` backprop steps -- confirm against BatchCallback.
+        self.cycle = num_bp
+        self.device = device
+
+    def begin_batch(self, *args, **kwargs):
+        """Compare every transition in the batch against the simulated one."""
+        obs, next_obs, action, reward = self.get_batch(*args, **kwargs)
+        batch_size = obs.shape[0]
+        sim_next_obs, sim_reward = self.simulate_hit_the_middle(action, obs)
+        for i in range(batch_size):
+            obs_err = torch.mean(torch.abs(sim_next_obs[i] - next_obs[i])).detach()
+            reward_err = torch.mean(torch.abs(sim_reward[i] - reward[i])).detach()
+            if obs_err > 0.001 or reward_err > 0.001:
+                message = f"""
+                    obs_err: {obs_err}, reward_err: {reward_err}
+                    obs: {obs[i]}, action: {action[i]}
+                    reward: {reward[i]}, sim_reward: {sim_reward[i]}
+                    next_obs: {next_obs[i]}, sim_next_obs: {sim_next_obs[i]}
+                """
+                raise ValueError(
+                    f"Loaded values for obs/reward does not match the calculated ones {message}"
+                )
+
+    def simulate_hit_the_middle(self, action, obs):
+        """Replicate HitTheMiddle.step: integrate velocity, bounce at +/-10."""
+        batch_size = action.shape[0]
+        next_reward = torch.zeros(batch_size, 1)
+        next_obs = torch.zeros(batch_size, 2)
+        for i in range(batch_size):
+            pos, vel = obs[i, 0].clone(), obs[i, 1].clone()
+            vel += action[i, 0]
+            pos += vel
+
+            if pos > 10.0:
+                pos = 10.0
+                vel *= -1.0
+            elif pos < -10.0:
+                pos = -10.0
+                vel *= -1.0
+            next_reward[i] = -(pos**2)
+            next_obs[i, :] = torch.Tensor([pos, vel])
+
+        return next_obs.to(self.device), next_reward.to(self.device)
+
+    def end_cycle(self):
+        # Terminate training once a full inspection cycle has passed.
+        raise TrainingShutdownException()
+
+    def get_batch(self, observation, next_observation, actions, rewards):
+        # Unpack the dict-valued observations down to the flat "obs" tensors.
+        return observation["obs"], next_observation["obs"], actions, rewards
+
+
+def test_gym_collector():
+    """End-to-end: random policy fills memory; inspector validates replayed data."""
+    device = torch.device("cpu")
+    batch_size = 5
+    rollout_length = 1  # The test only works for rollout_length = 1
+    env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+    table = DictObsTable(
+        spaces=env.dict_space,
+        use_terminal_column=False,
+        maxlen=1000000,
+        device=device,
+    )
+    memory_proxy = TableMemoryProxy(table)
+    dataloader = MemoryLoader(
+        table=table,
+        rollout_count=batch_size // rollout_length,
+        rollout_length=rollout_length,
+        size_key="batch_size",
+    )
+
+    policy = RandomPolicy(action_dim=1)
+    agent_proxy = FeatureAgentProxy(policy, device)
+    # The inspector raises TrainingShutdownException to end the run, and
+    # ValueError if any replayed transition disagrees with the simulation.
+    callbacks = [
+        HitTheMiddleDataInspector(500, device),
+        SimpleGymCollector(env, agent_proxy, memory_proxy, warmup_steps=500, render=False),
+    ]
+    trainer = Trainer(callbacks, dataloader)
+    trainer.train()
diff --git a/tests/test_htm.py b/tests/test_htm.py
new file mode 100644
index 00000000..6a425b82
--- /dev/null
+++ b/tests/test_htm.py
@@ -0,0 +1,110 @@
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from torch import nn
+from torch.optim import Adam
+
+from emote import Trainer
+from emote.algorithms.sac import AlphaLoss, FeatureAgentProxy, PolicyLoss, QLoss, QTarget
+from emote.callbacks.logging import TerminalLogger
+from emote.callbacks.testing import FinalRewardTestCheck
+from emote.extra.onnx_exporter import OnnxExporter
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsTable
+from emote.nn.gaussian_policy import GaussianMlpPolicy as Policy
+
+from .gym import DictGymWrapper, HitTheMiddle, SimpleGymCollector
+
+
+N_HIDDEN = 10
+
+
+class QNet(nn.Module):
+ """Small MLP state-action value network: Q(obs, action) -> scalar."""
+
+ def __init__(self, obs, act):
+ super().__init__()
+ # Two hidden layers of N_HIDDEN units, scalar Q-value output.
+ self.q = nn.Sequential(
+ nn.Linear(obs + act, N_HIDDEN),
+ nn.ReLU(),
+ nn.Linear(N_HIDDEN, N_HIDDEN),
+ nn.ReLU(),
+ nn.Linear(N_HIDDEN, 1),
+ )
+
+ def forward(self, action, obs):
+ # Note the (action, obs) argument order; the inputs are concatenated
+ # feature-wise before the MLP.
+ x = torch.cat([obs, action], dim=1)
+ return self.q(x)
+
+
+def test_htm():
+ """Train SAC on HitTheMiddle; FinalRewardTestCheck on the QTarget callback
+ (logged_cbs[4]) requires the reward to reach -5.0 within 2000 steps."""
+ device = torch.device("cpu")
+ env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+ table = DictObsTable(spaces=env.dict_space, maxlen=1000, device=device)
+ memory_proxy = TableMemoryProxy(table)
+ dataloader = MemoryLoader(table, 100, 2, "batch_size")
+
+ q1 = QNet(2, 1)
+ q2 = QNet(2, 1)
+ policy = Policy(2, 1, [N_HIDDEN, N_HIDDEN])
+ ln_alpha = torch.tensor(1.0, requires_grad=True)
+ agent_proxy = FeatureAgentProxy(policy, device)
+
+ logged_cbs = [
+ QLoss(name="q1", q=q1, opt=Adam(q1.parameters(), lr=8e-3)),
+ QLoss(name="q2", q=q2, opt=Adam(q2.parameters(), lr=8e-3)),
+ PolicyLoss(pi=policy, ln_alpha=ln_alpha, q=q1, opt=Adam(policy.parameters())),
+ AlphaLoss(pi=policy, ln_alpha=ln_alpha, opt=Adam([ln_alpha]), n_actions=1),
+ QTarget(pi=policy, ln_alpha=ln_alpha, q1=q1, q2=q2),
+ ]
+
+ callbacks = logged_cbs + [
+ SimpleGymCollector(env, agent_proxy, memory_proxy, warmup_steps=500, render=False),
+ TerminalLogger(logged_cbs, 400),
+ # logged_cbs[4] is the QTarget callback created above.
+ FinalRewardTestCheck(logged_cbs[4], -5.0, 2000),
+ ]
+
+ trainer = Trainer(callbacks, dataloader)
+ trainer.train()
+
+ env.close()
+
+
+def test_htm_onnx(tmpdir):
+ """Same SAC-on-HitTheMiddle training as test_htm, but with an OnnxExporter
+ callback writing inference models into tmpdir every 400 steps."""
+ device = torch.device("cpu")
+ env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+ table = DictObsTable(spaces=env.dict_space, maxlen=1000, device=device)
+ memory_proxy = TableMemoryProxy(table)
+ dataloader = MemoryLoader(table, 100, 2, "batch_size")
+
+ q1 = QNet(2, 1)
+ q2 = QNet(2, 1)
+ policy = Policy(2, 1, [N_HIDDEN, N_HIDDEN])
+ ln_alpha = torch.tensor(1.0, requires_grad=True)
+ agent_proxy = FeatureAgentProxy(policy, device)
+
+ exporter = OnnxExporter(
+ agent_proxy,
+ env.dict_space,
+ True,
+ tmpdir / "inference",
+ 400,
+ )
+
+ logged_cbs = [
+ QLoss(name="q1", q=q1, opt=Adam(q1.parameters(), lr=8e-3)),
+ QLoss(name="q2", q=q2, opt=Adam(q2.parameters(), lr=8e-3)),
+ PolicyLoss(pi=policy, ln_alpha=ln_alpha, q=q1, opt=Adam(policy.parameters())),
+ AlphaLoss(pi=policy, ln_alpha=ln_alpha, opt=Adam([ln_alpha]), n_actions=1),
+ QTarget(pi=policy, ln_alpha=ln_alpha, q1=q1, q2=q2),
+ ]
+
+ callbacks = logged_cbs + [
+ exporter,
+ SimpleGymCollector(env, agent_proxy, memory_proxy, warmup_steps=500, render=False),
+ TerminalLogger(logged_cbs, 400),
+ # logged_cbs[4] is the QTarget callback created above.
+ FinalRewardTestCheck(logged_cbs[4], -5.0, 2000),
+ ]
+
+ trainer = Trainer(callbacks, dataloader)
+ trainer.train()
+
+ env.close()
diff --git a/tests/test_lunar_lander.py b/tests/test_lunar_lander.py
new file mode 100644
index 00000000..e7c25c13
--- /dev/null
+++ b/tests/test_lunar_lander.py
@@ -0,0 +1,161 @@
+import time
+
+import gymnasium as gym
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper
+from tests.gym.collector import ThreadedGymCollector
+from torch import nn
+from torch.optim import Adam
+from torch.utils.tensorboard import SummaryWriter
+
+from emote import Trainer
+from emote.algorithms.sac import AlphaLoss, FeatureAgentProxy, PolicyLoss, QLoss, QTarget
+from emote.callbacks.logging import TensorboardLogger
+from emote.callbacks.testing import FinalLossTestCheck
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsNStepTable
+from emote.nn.gaussian_policy import GaussianPolicyHead
+from emote.nn.initialization import ortho_init_
+
+
+def _make_env():
+ """Return a thunk that builds a continuous LunarLander-v2 env with a
+ 3-frame stack flattened into a single observation vector (thunk form is
+ what AsyncVectorEnv expects)."""
+ def _thunk():
+ env = gym.make("LunarLander-v2", continuous=True)
+ env = gym.wrappers.FrameStack(env, 3)
+ env = gym.wrappers.FlattenObservation(env)
+ return env
+
+ return _thunk
+
+
+class QNet(nn.Module):
+ """MLP state-action value network with orthogonal weight init."""
+
+ def __init__(self, num_obs, num_actions, num_hidden):
+ super().__init__()
+ self.q = nn.Sequential(
+ nn.Linear(num_obs + num_actions, num_hidden),
+ nn.ReLU(),
+ nn.Linear(num_hidden, num_hidden),
+ nn.ReLU(),
+ nn.Linear(num_hidden, 1),
+ )
+ # Orthogonal initialization applied to every submodule.
+ self.q.apply(ortho_init_)
+
+ def forward(self, action, obs):
+ # (action, obs) argument order; inputs concatenated feature-wise.
+ x = torch.cat([obs, action], dim=1)
+ return self.q(x)
+
+
+class Policy(nn.Module):
+ """Gaussian MLP policy; forward returns (sampled action, clamped log-prob)."""
+
+ def __init__(self, num_obs, num_actions, num_hidden):
+ super().__init__()
+ self.pi = nn.Sequential(
+ nn.Linear(num_obs, num_hidden),
+ nn.ReLU(),
+ nn.Linear(num_hidden, num_hidden),
+ nn.ReLU(),
+ GaussianPolicyHead(num_hidden, num_actions),
+ )
+ self.pi.apply(ortho_init_)
+
+ def forward(self, obs):
+ sample, log_prob = self.pi(obs)
+ # TODO: Investigate the log_prob() logic of the pytorch distribution code.
+ # The change below shouldn't be needed but significantly improves training
+ # stability when training lunar lander.
+ log_prob = log_prob.clamp(min=-2)
+ return sample, log_prob
+
+
+def setup_lunar_lander():
+ """Build the full SAC training setup for LunarLander.
+
+ Returns:
+ (tuple): (logged_cbs, dataloader) — the loss/target/collector callbacks
+ to train with, and the memory loader feeding them.
+ """
+ device = torch.device("cpu")
+
+ hidden_layer = 256
+ batch_size = 2000
+ rollout_len = 20
+ n_env = 10
+ learning_rate = 5e-3
+ max_grad_norm = 1
+
+ env = DictGymWrapper(AsyncVectorEnv([_make_env() for _ in range(n_env)]))
+ table = DictObsNStepTable(
+ spaces=env.dict_space,
+ use_terminal_column=True,
+ maxlen=4_000_000,
+ device=device,
+ )
+ memory_proxy = TableMemoryProxy(table, use_terminal=True)
+ dataloader = MemoryLoader(table, batch_size // rollout_len, rollout_len, "batch_size")
+
+ num_actions = env.dict_space.actions.shape[0]
+ # Observation size is read from the first (and, presumably, only) entry of
+ # the state space dict — verify if more state keys are ever added.
+ num_obs = list(env.dict_space.state.spaces.values())[0].shape[0]
+
+ q1 = QNet(num_obs, num_actions, hidden_layer)
+ q2 = QNet(num_obs, num_actions, hidden_layer)
+ policy = Policy(num_obs, num_actions, hidden_layer)
+
+ ln_alpha = torch.tensor(1.0, requires_grad=True, device=device)
+ agent_proxy = FeatureAgentProxy(policy, device=device)
+
+ q1 = q1.to(device)
+ q2 = q2.to(device)
+ policy = policy.to(device)
+
+ logged_cbs = [
+ QLoss(
+ name="q1",
+ q=q1,
+ opt=Adam(q1.parameters(), lr=learning_rate),
+ max_grad_norm=max_grad_norm,
+ ),
+ QLoss(
+ name="q2",
+ q=q2,
+ opt=Adam(q2.parameters(), lr=learning_rate),
+ max_grad_norm=max_grad_norm,
+ ),
+ PolicyLoss(
+ pi=policy,
+ ln_alpha=ln_alpha,
+ q=q1,
+ opt=Adam(policy.parameters(), lr=learning_rate),
+ max_grad_norm=max_grad_norm,
+ ),
+ AlphaLoss(
+ pi=policy,
+ ln_alpha=ln_alpha,
+ opt=Adam([ln_alpha]),
+ n_actions=num_actions,
+ max_grad_norm=max_grad_norm,
+ ),
+ QTarget(
+ pi=policy,
+ ln_alpha=ln_alpha,
+ q1=q1,
+ q2=q2,
+ roll_length=rollout_len,
+ ),
+ ThreadedGymCollector(
+ env,
+ agent_proxy,
+ memory_proxy,
+ warmup_steps=batch_size,
+ render=False,
+ ),
+ ]
+ return logged_cbs, dataloader
+
+
+def test_lunar_lander_quick():
+ """Quick test that the code runs."""
+
+ experiment_name = "lunar_lander_test_" + str(time.time())
+ logged_cbs, dataloader = setup_lunar_lander()
+ callbacks = logged_cbs + [
+ TensorboardLogger(logged_cbs, SummaryWriter("runs/" + experiment_name), 100),
+ # Checks the policy loss (logged_cbs[2]) against 1000.0 after 1000
+ # steps — a smoke-test threshold, not a convergence criterion.
+ FinalLossTestCheck([logged_cbs[2]], [1000.0], 1000),
+ ]
+
+ trainer = Trainer(callbacks, dataloader)
+ trainer.train()
diff --git a/tests/test_mbrl.py b/tests/test_mbrl.py
new file mode 100644
index 00000000..b8ed8b96
--- /dev/null
+++ b/tests/test_mbrl.py
@@ -0,0 +1,203 @@
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper, HitTheMiddle, SimpleGymCollector
+from torch import nn
+from torch.optim import Adam
+
+from emote import Trainer
+from emote.algorithms.sac import FeatureAgentProxy
+from emote.callbacks import BackPropStepsTerminator
+from emote.extra.schedules import BPStepScheduler
+from emote.memory import MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsTable
+from emote.models.callbacks import LossProgressCheck, ModelBasedCollector, ModelLoss
+from emote.models.ensemble import EnsembleOfGaussian
+from emote.models.model import DynamicModel
+from emote.models.model_env import ModelEnv
+from emote.utils.spaces import MDPSpace
+
+
+class FakeDataloader:
+ """Infinite iterator yielding batches of random observations under the
+ given data group, mimicking the MemoryLoader output shape."""
+
+ def __init__(self, data_group: str, num_obs: int, batch_size: int):
+ self.data_group = data_group
+ self.num_obs = num_obs
+ self.batch_size = batch_size
+
+ def __iter__(self):
+ # Never terminates; the Trainer is expected to stop via a callback.
+ while True:
+ batch = {
+ "observation": {"obs": torch.rand(self.batch_size, self.num_obs)},
+ }
+ yield {self.data_group: batch, "batch_size": self.batch_size}
+
+
+class MultiplierNN(nn.Module):
+ """Fake dynamic-model network that just scales its input by a constant."""
+
+ def __init__(self, value: float, device: torch.device):
+ super().__init__()
+ self.value = value
+ # NOTE(review): device is stored but never used in this class — kept
+ # presumably for interface parity with real models; confirm.
+ self.device = device
+
+ def forward(self, x: torch.Tensor):
+ return self.value * x
+
+ def sample(self, x, rng):
+ # Deterministic: the rng argument is intentionally ignored.
+ return self.forward(x)
+
+
+class RandomPolicy(nn.Module):
+ """Policy emitting uniform random actions in [-1, 1)."""
+
+ def __init__(self, action_dim: int):
+ super().__init__()
+ self.action_dim = action_dim
+
+ def forward(self, obs: torch.Tensor):
+ batch_size = obs.shape[0]
+ # torch.rand is in [0, 1); shift/scale to [-1, 1).
+ rand_actions = 2 * (torch.rand(batch_size, self.action_dim) - 0.5)
+ # The constant 0 fills the log-prob slot of the (action, log_prob)
+ # policy interface.
+ return rand_actions, 0
+
+
+def create_memory(
+ space: MDPSpace,
+ memory_size: int,
+ len_rollout: int,
+ batch_size: int,
+ data_group: str,
+ device: torch.device,
+):
+ """Creates memory and data_loader for the RL training.
+
+ Arguments:
+ space (MDPSpace): the MDP space
+ memory_size (int): the maximum length of memory
+ len_rollout (int): the rollout size for the NStepTable
+ batch_size (int): batch size
+ data_group (str): the data group for uploading the data
+ device (torch.device): the device to upload the data
+ Returns:
+ (tuple[TableMemoryProxy, MemoryLoader]): A proxy for the memory and a dataloader
+ """
+ table = DictObsTable(
+ spaces=space,
+ use_terminal_column=False,
+ maxlen=memory_size,
+ device=device,
+ )
+ memory_proxy = TableMemoryProxy(table=table, use_terminal=False)
+ data_loader = MemoryLoader(
+ table=table,
+ # rollout_count * rollout_length == batch_size samples per batch.
+ rollout_count=batch_size // len_rollout,
+ rollout_length=len_rollout,
+ size_key="batch_size",
+ data_group=data_group,
+ )
+ return memory_proxy, data_loader
+
+
+# HitTheMiddle dimensions and the data group name used for model rollouts.
+NUM_OBS = 2
+NUM_ACTIONS = 1
+RL_DATA_GROUP = "rl_group"
+
+
+def test_model_collector():
+ """The function tests unrolling a dynamic model and storing the rollouts in
+ a replay buffer. The fake dynamic model simply multiplies the inputs by a
+ fixed (rand) number, i.e., next_obs = obs x rand_number, rewards = actions
+ x rand_number.
+
+ The test checks the following:
+ * the replay buffer contains a correct number of samples,
+ * stored samples are the ones generated by the fake model.
+ """
+ batch_size = 10
+ rollout_size = 5
+ rand_multiplier = torch.rand(1)[0] * 10
+ env = DictGymWrapper(AsyncVectorEnv(2 * [HitTheMiddle])) # dummy envs
+ device = torch.device("cpu")
+ model = MultiplierNN(value=rand_multiplier, device=device)
+ dynamic_model = DynamicModel(model=model, no_delta_list=[0, 1])
+ model_env = ModelEnv(
+ num_envs=batch_size,
+ model=dynamic_model,
+ # Never terminates: returns zeros for every batch element.
+ termination_fn=lambda a: torch.zeros(a.shape[0]),
+ )
+ policy = RandomPolicy(action_dim=NUM_ACTIONS)
+ agent = FeatureAgentProxy(policy, device)
+ memory, dataloader = create_memory(
+ env.dict_space,
+ memory_size=100,
+ len_rollout=1,
+ batch_size=batch_size,
+ data_group=RL_DATA_GROUP,
+ device=device,
+ )
+ callbacks = [
+ ModelBasedCollector(
+ model_env=model_env,
+ agent=agent,
+ memory=memory,
+ # Constant schedule: rollout_size from step 0 through step 10.
+ rollout_scheduler=BPStepScheduler(*[0, 10, rollout_size, rollout_size]),
+ ),
+ # One backprop step is enough: the collector runs during it.
+ BackPropStepsTerminator(bp_steps=1),
+ ]
+ fake_dataset = FakeDataloader(num_obs=NUM_OBS, data_group="default", batch_size=batch_size)
+ trainer = Trainer(callbacks, fake_dataset)
+ trainer.train()
+
+ if memory.size() != (rollout_size * batch_size):
+ raise Exception(
+ f"The RL replay buffer must contain rollout_size x batch_size "
+ f"= {rollout_size * batch_size} but it contains {memory.size()}"
+ )
+
+ data_iter = iter(dataloader)
+ batch = next(data_iter)
+
+ if RL_DATA_GROUP not in batch.keys():
+ raise Exception("The RL data group does not exist in the keys\n")
+ batch = batch[RL_DATA_GROUP]
+
+ # The fake model maps [obs, action] -> rand_multiplier * [obs, action],
+ # which must equal the stored [next_obs, reward].
+ model_in = torch.cat((batch["observation"]["obs"], batch["actions"]), dim=1)
+ model_out = torch.cat((batch["next_observation"]["obs"], batch["rewards"]), dim=1)
+
+ if torch.mean(torch.abs(rand_multiplier * model_in - model_out)) > 0.001:
+ raise Exception("The loaded samples do not look correct.")
+
+
+def test_ensemble_training():
+ """The function tests ensemble training.
+
+ The test will pass if the loss goes down according to the given
+ criterion.
+ """
+ device = torch.device("cpu")
+ batch_size = 200
+ rollout_length = 1
+ env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+ table = DictObsTable(spaces=env.dict_space, maxlen=10000, device=device)
+ memory_proxy = TableMemoryProxy(table)
+ dataloader = MemoryLoader(
+ table=table,
+ rollout_count=batch_size // rollout_length,
+ rollout_length=rollout_length,
+ size_key="batch_size",
+ )
+
+ # Model maps [obs, action] -> [next_obs, reward], hence the +1 output.
+ model = EnsembleOfGaussian(
+ in_size=NUM_OBS + NUM_ACTIONS,
+ out_size=NUM_OBS + 1,
+ device=device,
+ ensemble_size=5,
+ )
+ dynamic_model = DynamicModel(model=model)
+ policy = RandomPolicy(action_dim=1)
+ agent_proxy = FeatureAgentProxy(policy, device)
+
+ callbacks = [
+ ModelLoss(model=dynamic_model, opt=Adam(dynamic_model.model.parameters())),
+ # Fails the run if the loss has not decreased within 500 bp steps.
+ LossProgressCheck(model=dynamic_model, num_bp=500),
+ SimpleGymCollector(env, agent_proxy, memory_proxy, warmup_steps=500, render=False),
+ ]
+ trainer = Trainer(callbacks, dataloader)
+ trainer.train()
+
+ env.close()
diff --git a/tests/test_memory_exporter.py b/tests/test_memory_exporter.py
new file mode 100644
index 00000000..48e526b9
--- /dev/null
+++ b/tests/test_memory_exporter.py
@@ -0,0 +1,96 @@
+import os
+import stat
+
+import pytest
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+from tests.gym import DictGymWrapper, HitTheMiddle, SimpleGymCollector
+
+from emote import Trainer
+from emote.algorithms.sac import FeatureAgentProxy
+from emote.callbacks import BackPropStepsTerminator
+from emote.memory import MemoryExporterProxyWrapper, MemoryLoader, TableMemoryProxy
+from emote.memory.builder import DictObsTable
+from emote.memory.callbacks import MemoryImporterCallback
+from emote.memory.storage import BaseStorage
+from emote.nn.gaussian_policy import GaussianMlpPolicy as Policy
+
+
+N_HIDDEN = 10
+
+
+@pytest.mark.filterwarnings("ignore:Exporting a memory")
+def test_memory_export(tmpdir):
+ """Collect data while the exporter wrapper periodically writes the memory
+ to disk, then re-import it and compare every stored column."""
+ device = torch.device("cpu")
+ env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+ table = DictObsTable(spaces=env.dict_space, maxlen=10000, device=device)
+ memory_proxy = TableMemoryProxy(table)
+ memory_proxy = MemoryExporterProxyWrapper(
+ memory=memory_proxy,
+ target_memory_name="memory",
+ inf_steps_per_memory_export=10,
+ experiment_root_path=tmpdir,
+ min_time_per_export=1,
+ )
+ dataloader = MemoryLoader(table, 100, 2, "batch_size")
+ policy = Policy(2, 1, [N_HIDDEN, N_HIDDEN])
+ agent_proxy = FeatureAgentProxy(policy, device)
+
+ callbacks = [
+ SimpleGymCollector(env, agent_proxy, memory_proxy, warmup_steps=500, render=False),
+ BackPropStepsTerminator(2500),
+ ]
+
+ trainer = Trainer(callbacks, dataloader)
+ trainer.train()
+
+ importer = MemoryImporterCallback(
+ memory=DictObsTable(spaces=env.dict_space, maxlen=10000, device=device),
+ target_memory_name="memory",
+ experiment_load_dir=tmpdir,
+ )
+
+ importer.memory.restore(os.path.join(tmpdir, "memory_export"))
+
+ # Compare every concrete (non-virtual) storage column of the re-imported
+ # memory against the exporter's inner table.
+ for column in importer.memory._columns.values():
+ if isinstance(importer.memory._data[column.name], BaseStorage):
+ for key in importer.memory._data[column.name]:
+ assert (
+ importer.memory._data[column.name][key].all()
+ == memory_proxy._inner._table._data[column.name][key].all()
+ )
+
+ env.close()
+
+
+@pytest.mark.filterwarnings("ignore:Exporting a memory")
+def test_memory_export_permissions(tmpdir):
+ """Verify the exported memory archive exists and is world-readable."""
+ device = torch.device("cpu")
+ env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+ table = DictObsTable(spaces=env.dict_space, maxlen=10000, device=device)
+ memory_proxy = TableMemoryProxy(table)
+ memory_proxy = MemoryExporterProxyWrapper(
+ memory=memory_proxy,
+ target_memory_name="memory",
+ inf_steps_per_memory_export=10,
+ experiment_root_path=tmpdir,
+ min_time_per_export=1,
+ )
+ dataloader = MemoryLoader(table, 100, 2, "batch_size")
+ policy = Policy(2, 1, [N_HIDDEN, N_HIDDEN])
+ agent_proxy = FeatureAgentProxy(policy, device)
+
+ callbacks = [
+ SimpleGymCollector(env, agent_proxy, memory_proxy, warmup_steps=500, render=False),
+ BackPropStepsTerminator(2500),
+ ]
+
+ trainer = Trainer(callbacks, dataloader)
+ trainer.train()
+
+ assert os.path.exists(os.path.join(tmpdir, "memory_export.zip"))
+ st = os.stat(os.path.join(tmpdir, "memory_export.zip"))
+ # has to be readable by the whole world
+ required_perms = stat.S_IRUSR | stat.S_IROTH | stat.S_IRGRP
+ assert (st.st_mode & required_perms) == required_perms, "file should be readable by everyone"
diff --git a/tests/test_memory_exports.py b/tests/test_memory_exports.py
new file mode 100644
index 00000000..e1f6a2d6
--- /dev/null
+++ b/tests/test_memory_exports.py
@@ -0,0 +1,139 @@
+import os
+import stat
+
+import numpy as np
+import pytest
+
+from emote.memory.column import Column, VirtualColumn
+from emote.memory.fifo_strategy import FifoEjectionStrategy
+from emote.memory.storage import SyntheticDones
+from emote.memory.table import ArrayTable, TableSerializationVersion
+from emote.memory.uniform_strategy import UniformSampleStrategy
+
+
+@pytest.fixture
+def memory():
+ """ArrayTable with two real columns (observation, reward) plus two virtual
+ columns (dones, masks) derived from reward via SyntheticDones."""
+ spec = [
+ Column(name="observation", dtype=np.dtype("float32"), shape=tuple()),
+ Column(name="reward", dtype=np.float32, shape=tuple()),
+ VirtualColumn(
+ name="dones",
+ dtype=bool,
+ shape=(1,),
+ target_name="reward",
+ mapper=SyntheticDones,
+ ),
+ VirtualColumn(
+ name="masks",
+ dtype=np.float32,
+ shape=(1,),
+ target_name="reward",
+ mapper=SyntheticDones.as_mask,
+ ),
+ ]
+
+ memory = ArrayTable(
+ columns=spec,
+ maxlen=10_000,
+ sampler=UniformSampleStrategy(),
+ ejector=FifoEjectionStrategy(),
+ # Sequence lengths are counted on the reward column.
+ length_key="reward",
+ device="cpu",
+ )
+
+ return memory
+
+
+def test_export_base(memory, tmpdir):
+ for ii in range(0, 1000):
+ memory.add_sequence(ii, dict(observation=[1, 2, 3, 4, 5], reward=[1, 2, 3, 4]))
+
+ original_observation_data = memory._data["observation"]
+ original_reward_data = memory._data["reward"]
+
+ export_file = os.path.join(tmpdir, "export")
+ res_file = os.path.join(tmpdir, "export.zip")
+
+ memory.store(export_file)
+
+ assert os.path.exists(res_file), "written file must be exist"
+ assert os.stat(res_file).st_size > 10_000, "should contain at least 10 000 bytes"
+
+ st = os.stat(res_file)
+ # has to be readable by the whole world
+ required_perms = stat.S_IRUSR | stat.S_IROTH | stat.S_IRGRP
+ assert (st.st_mode & required_perms) == required_perms, "file should be readable by everyone"
+
+ memory.restore(export_file)
+
+ loaded_observation_data = memory._data["observation"]
+ loaded_reward_data = memory._data["reward"]
+
+ for identity in range(0, 1000):
+ assert np.all(
+ original_observation_data[identity] == loaded_observation_data[identity]
+ ), "observation data should be the same"
+ assert np.all(
+ original_reward_data[identity] == loaded_reward_data[identity]
+ ), "reward data should be the same"
+
+ for ii in range(2000, 5000):
+ memory.add_sequence(ii, dict(observation=[1, 2, 3, 4, 5], reward=[1, 2, 3, 4]))
+
+
+@pytest.mark.filterwarnings("ignore:.*Legacy memory export")
+def test_export_legacy(memory, tmpdir):
+ for ii in range(0, 1000):
+ memory.add_sequence(ii, dict(observation=[1, 2, 3, 4, 5], reward=[1, 2, 3, 4]))
+
+ export_file = os.path.join(tmpdir, "export")
+ res_file = os.path.join(tmpdir, "export.zip")
+
+ memory.store(export_file, version=TableSerializationVersion.Legacy)
+
+ assert os.path.exists(res_file), "written file must be exist"
+ assert os.stat(res_file).st_size > 10_000, "should contain at least 10 000 bytes"
+
+ st = os.stat(res_file)
+ # has to be readable by the whole world
+ required_perms = stat.S_IRUSR | stat.S_IROTH | stat.S_IRGRP
+ assert (st.st_mode & required_perms) == required_perms, "file should be readable by everyone"
+
+ memory.restore(export_file) # remove .zip
+
+
+def test_import_v1(memory):
+ """Restore a checked-in v1 export (tests/data/export-v1) and compare the
+ first 100 identities against freshly-added equivalent sequences."""
+ for ii in range(0, 100):
+ memory.add_sequence(ii, dict(observation=[1, 2, 3, 4, 5], reward=[1, 2, 3, 4]))
+
+ data_dir = os.path.join(os.path.dirname(__file__), "data")
+ export_file = os.path.join(data_dir, "export-v1")
+
+ # NOTE(review): references taken before restore(); the comparison below
+ # assumes restore() replaces the storage objects rather than mutating
+ # them in place — confirm.
+ original_observation_data = memory._data["observation"]
+ original_reward_data = memory._data["reward"]
+
+ memory.restore(export_file)
+
+ loaded_observation_data = memory._data["observation"]
+ loaded_reward_data = memory._data["reward"]
+
+ for identity in range(0, 100):
+ assert np.all(
+ original_observation_data[identity] == loaded_observation_data[identity]
+ ), "observation data should be the same"
+ assert np.all(
+ original_reward_data[identity] == loaded_reward_data[identity]
+ ), "reward data should be the same"
+
+
+def test_legacy_warnings(tmpdir, memory):
+ """Both storing with the Legacy version and restoring such an export must
+ raise a DeprecationWarning."""
+ for ii in range(0, 100):
+ memory.add_sequence(ii, dict(observation=[1, 2, 3, 4, 5], reward=[1, 2, 3, 4]))
+
+ export_file = os.path.join(tmpdir, "export")
+
+ with pytest.deprecated_call():
+ memory.store(export_file, version=TableSerializationVersion.Legacy)
+
+ with pytest.deprecated_call():
+ memory.restore(export_file)
diff --git a/tests/test_memory_loading.py b/tests/test_memory_loading.py
new file mode 100644
index 00000000..6598eacc
--- /dev/null
+++ b/tests/test_memory_loading.py
@@ -0,0 +1,123 @@
+import numpy as np
+import pytest
+
+from emote.memory.column import Column
+from emote.memory.fifo_strategy import FifoEjectionStrategy
+from emote.memory.memory import JointMemoryLoader, JointMemoryLoaderWithDataGroup, MemoryLoader
+from emote.memory.table import ArrayTable
+from emote.memory.uniform_strategy import UniformSampleStrategy
+
+
+@pytest.fixture
+def a_dummy_table():
+ """Minimal ArrayTable holding a single two-state, one-action sequence.
+
+ Kept as a separate (but identical) fixture from another_dummy_table so
+ tests can hold two distinct table instances.
+ """
+ tab = ArrayTable(
+ columns=[Column("state", (), np.float32), Column("action", (), np.float32)],
+ maxlen=1_000,
+ sampler=UniformSampleStrategy(),
+ ejector=FifoEjectionStrategy(),
+ length_key="action",
+ device="cpu",
+ )
+ tab.add_sequence(
+ 0,
+ {
+ "state": [5.0, 6.0],
+ "action": [1.0],
+ },
+ )
+
+ return tab
+
+
+@pytest.fixture
+def another_dummy_table():
+ """Second table instance identical to a_dummy_table; a distinct instance
+ is required where loaders must wrap different tables."""
+ tab = ArrayTable(
+ columns=[Column("state", (), np.float32), Column("action", (), np.float32)],
+ maxlen=1_000,
+ sampler=UniformSampleStrategy(),
+ ejector=FifoEjectionStrategy(),
+ length_key="action",
+ device="cpu",
+ )
+ tab.add_sequence(
+ 0,
+ {
+ "state": [5.0, 6.0],
+ "action": [1.0],
+ },
+ )
+
+ return tab
+
+
+def test_joint_memory_loader(a_dummy_table: ArrayTable, another_dummy_table: ArrayTable):
+ """A JointMemoryLoader must surface each wrapped loader's data group as a
+ top-level key of the yielded batch."""
+ a_loader = MemoryLoader(
+ table=a_dummy_table,
+ rollout_count=1,
+ rollout_length=1,
+ size_key="batch_size",
+ data_group="a",
+ )
+ another_loader = MemoryLoader(
+ table=another_dummy_table,
+ rollout_count=1,
+ rollout_length=1,
+ size_key="batch_size",
+ data_group="another",
+ )
+
+ joint_loader = JointMemoryLoader(loaders=[a_loader, another_loader])
+
+ data = next(iter(joint_loader))
+ assert "a" in data and "another" in data, "JointMemoryLoader did not yield expected memory data"
+
+
+def test_joint_memory_loader_datagroup(a_dummy_table: ArrayTable, another_dummy_table: ArrayTable):
+ """JointMemoryLoaderWithDataGroup must nest the per-loader groups inside
+ its own top-level data group key."""
+ a_loader = MemoryLoader(
+ table=a_dummy_table,
+ rollout_count=1,
+ rollout_length=1,
+ size_key="batch_size",
+ data_group="a",
+ )
+ another_loader = MemoryLoader(
+ table=another_dummy_table,
+ rollout_count=1,
+ rollout_length=1,
+ size_key="batch_size",
+ data_group="another",
+ )
+
+ joint_loader = JointMemoryLoaderWithDataGroup(
+ loaders=[a_loader, another_loader], data_group="joint_datagroup"
+ )
+
+ encapsulated_data = next(iter(joint_loader))
+ data = encapsulated_data["joint_datagroup"]
+
+ assert (
+ "joint_datagroup" in encapsulated_data
+ ), "Expected joint dataloader to place data in its own datagroup, but it does not exist."
+ assert (
+ "a" in data and "another" in data
+ ), "Expected joint dataloader to actually place data in its datagroup, but it is empty."
+
+
+def test_joint_memory_loader_nonunique_loaders_trigger_exception(a_dummy_table: ArrayTable):
+ """Two loaders with the same data group ("a") must be rejected at
+ JointMemoryLoader construction time."""
+ loader1 = MemoryLoader(
+ table=a_dummy_table,
+ rollout_count=1,
+ rollout_length=1,
+ size_key="batch_size",
+ data_group="a",
+ )
+ loader2 = MemoryLoader(
+ table=a_dummy_table,
+ rollout_count=1,
+ rollout_length=1,
+ size_key="batch_size",
+ data_group="a",
+ )
+
+ with pytest.raises(Exception, match="JointMemoryLoader"):
+ joint_loader = JointMemoryLoader([loader1, loader2]) # noqa
diff --git a/tests/test_memory_logger.py b/tests/test_memory_logger.py
new file mode 100644
index 00000000..e1a888e3
--- /dev/null
+++ b/tests/test_memory_logger.py
@@ -0,0 +1,193 @@
+from __future__ import annotations
+
+import pytest
+
+from gymnasium.vector import AsyncVectorEnv
+from torch.utils.tensorboard import SummaryWriter
+
+from emote.memory import LoggingProxyWrapper, TableMemoryProxy
+from emote.memory.builder import DictObsTable
+from emote.typing import DictObservation, DictResponse, EpisodeState, MetaData
+
+from .gym import DictGymWrapper, HitTheMiddle
+
+
+@pytest.fixture
+def table_proxy():
+ """TableMemoryProxy over a small HitTheMiddle-shaped DictObsTable."""
+ env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))
+ table = DictObsTable(spaces=env.dict_space, maxlen=1000, device="cpu")
+ # NOTE(review): positional args (0, False) — presumably minimum length
+ # threshold and use_terminal; confirm against TableMemoryProxy.__init__.
+ return TableMemoryProxy(table, 0, False)
+
+
+def test_construct(table_proxy, tmpdir):
+ """LoggingProxyWrapper can be constructed around a table proxy."""
+ _ = LoggingProxyWrapper(
+ table_proxy,
+ SummaryWriter(
+ log_dir=tmpdir,
+ ),
+ 2,
+ )
+
+
+def test_add_once(table_proxy, tmpdir):
+ """A single add() with metadata info must register the "episode/reward"
+ key in the proxy's windowed scalars."""
+ proxy = LoggingProxyWrapper(
+ table_proxy,
+ SummaryWriter(
+ log_dir=tmpdir,
+ ),
+ 2,
+ )
+
+ proxy.add(
+ {
+ 0: DictObservation(
+ episode_state=EpisodeState.INITIAL,
+ array_data={"obs": [1.0]},
+ rewards={"reward": None},
+ metadata=MetaData(info={"episode/reward": 10.0}, info_lists={}),
+ )
+ },
+ {0: DictResponse({"actions": [0.0]}, {})},
+ )
+
+ assert "episode/reward" in proxy.windowed_scalar
+
+
+def test_add_multiple(table_proxy, tmpdir):
+ """Repeated add() calls must produce the throughput metric
+ "training/inf_per_sec" in the proxy's scalar logs."""
+ proxy = LoggingProxyWrapper(
+ table_proxy,
+ SummaryWriter(
+ log_dir=tmpdir,
+ ),
+ 2,
+ )
+
+ for idx in range(10):
+ proxy.add(
+ {
+ 0: DictObservation(
+ episode_state=EpisodeState.INITIAL if idx == 0 else EpisodeState.RUNNING,
+ array_data={"obs": [1.0]},
+ rewards={"reward": None},
+ metadata=MetaData(info={"episode/reward": 10.0}, info_lists={}),
+ )
+ },
+ {0: DictResponse({"actions": [0.0]}, {})},
+ )
+
+ assert "training/inf_per_sec" in proxy.scalar_logs
+
+
+def test_completed(table_proxy, tmpdir):
+ """Feed one full INITIAL -> RUNNING -> TERMINAL episode and check the
+ proxy counts exactly one completed episode."""
+ proxy = LoggingProxyWrapper(
+ table_proxy,
+ SummaryWriter(
+ log_dir=tmpdir,
+ ),
+ 2,
+ )
+
+ # s=0 adds INITIAL, s=1..8 add RUNNING, s=9 adds TERMINAL (the state is
+ # updated after each add). The final step also sends no response.
+ state = EpisodeState.INITIAL
+ for s in range(10):
+ proxy.add(
+ {
+ 0: DictObservation(
+ episode_state=state,
+ array_data={"obs": [1.0]},
+ rewards={"reward": None},
+ metadata=MetaData(info={"episode/reward": 10.0}, info_lists={}),
+ )
+ },
+ {0: DictResponse({"actions": [0.0]}, {})} if s < 9 else {},
+ )
+
+ state = EpisodeState.RUNNING if s < 8 else EpisodeState.TERMINAL
+
+ assert proxy.completed_episodes == 1
+
+
+def test_report(table_proxy, tmpdir):
+ """report() must route plain metrics into windowed scalars (with running
+ cumulative sums) and "histogram:"-prefixed metrics into hist_logs."""
+ proxy = LoggingProxyWrapper(
+ table_proxy,
+ SummaryWriter(
+ log_dir=tmpdir,
+ ),
+ 2,
+ )
+
+ metrics = {"one": 1}
+ metrics_lists = {"three": [3, 3], "histogram:ones": [1, 1, 1, 1, 1]}
+
+ proxy.report(metrics, metrics_lists)
+
+ for _ in range(2):
+ proxy.report({"histogram:twos": 2}, {})
+
+ for _ in range(2):
+ proxy.report({"one": 1}, {})
+
+ # Histogram keys are stored stripped of the "histogram:" prefix.
+ assert "ones" in proxy.hist_logs
+ # "one" was reported 3 times with value 1 -> cumulative 3.
+ assert "one" in proxy.windowed_scalar and "one" in proxy.windowed_scalar_cumulative
+ assert proxy.windowed_scalar_cumulative["one"] == 3
+ # "three" was reported once as [3, 3] -> cumulative 6.
+ assert "three" in proxy.windowed_scalar and "three" in proxy.windowed_scalar_cumulative
+ assert proxy.windowed_scalar_cumulative["three"] == 6
+ assert "twos" in proxy.hist_logs and len(proxy.hist_logs["twos"]) == 2
+
+
+def test_get_report(table_proxy, tmpdir):
+ """get_report() must return reported keys (histogram keys only in the
+ scalar dict, unknown keys in neither) plus "/cumulative" aggregates."""
+ proxy = LoggingProxyWrapper(
+ table_proxy,
+ SummaryWriter(
+ log_dir=tmpdir,
+ ),
+ 2,
+ )
+
+ metrics = {"one": 1}
+ metrics_lists = {"three": [3, 3], "histogram:ones": [1, 1, 1, 1, 1]}
+
+ proxy.report(metrics, metrics_lists)
+
+ for _ in range(2):
+ proxy.report({"one": 1}, {})
+
+ # "random" was never reported and must not appear in the output.
+ keys = ["histogram:ones", "one", "three", "random"]
+ out, out_lists = proxy.get_report(keys)
+ for key in keys[:-1]:
+ if "histogram" in key:
+ assert key in out and key not in out_lists
+ else:
+ assert key in out and key in out_lists
+
+ assert "random" not in out and "random" not in out_lists
+ assert out["histogram:ones"] == 1
+ assert out["one"] == 1 and out["one/cumulative"] == 3
+ assert out_lists["three"] == [3, 3]
+
+
+def test_end_cycle(table_proxy, tmpdir):
+ """_end_cycle() must run without error after a full episode was added."""
+ proxy = LoggingProxyWrapper(
+ table_proxy,
+ SummaryWriter(
+ log_dir=tmpdir,
+ ),
+ 2,
+ )
+
+ # Same INITIAL -> RUNNING -> TERMINAL schedule as test_completed.
+ state = EpisodeState.INITIAL
+ for s in range(10):
+ proxy.add(
+ {
+ 0: DictObservation(
+ episode_state=state,
+ array_data={"obs": [1.0]},
+ rewards={"reward": None},
+ metadata=MetaData(info={"episode/reward": 10.0}, info_lists={}),
+ )
+ },
+ {0: DictResponse({"actions": [0.0]}, {})} if s < 9 else {},
+ )
+
+ state = EpisodeState.RUNNING if s < 8 else EpisodeState.TERMINAL
+
+ proxy._end_cycle()
diff --git a/tests/test_memory_proxy_wrapper.py b/tests/test_memory_proxy_wrapper.py
new file mode 100644
index 00000000..1c65a334
--- /dev/null
+++ b/tests/test_memory_proxy_wrapper.py
@@ -0,0 +1,98 @@
+import pytest
+
+from emote.memory.memory import MemoryProxyWrapper
+
+
+class DummyMemoryProxy:
+ """Stand-in inner proxy exposing a plain attribute, a method, and a
+ property, to exercise MemoryProxyWrapper's forwarding rules."""
+
+ def __init__(self):
+ # Plain data attribute: wrappers must refuse to forward this.
+ self.my_attribute = "hello!"
+
+ def say_hello(self):
+ return "hello world"
+
+ @property
+ def a_property(self):
+ return "a property"
+
+
+class EmptyMemoryProxyWrapper(MemoryProxyWrapper):
+ # Adds nothing; every lookup should forward to the wrapped inner proxy.
+ pass
+
+
+class SayHelloMemoryProxyWrapper(MemoryProxyWrapper):
+ """Wrapper that overrides say_hello; the override must shadow inner's."""
+
+ def __init__(self, inner):
+ super().__init__(inner)
+
+ def say_hello(self):
+ return "not hello world"
+
+
+class SayGoodbyeProxyWrapper(MemoryProxyWrapper):
+ """Wrapper adding a new method while forwarding everything else."""
+
+ def __init__(self, inner):
+ super().__init__(inner)
+
+ def say_goodbye(self):
+ return "goodbye"
+
+
+def test_call_nonexisting_method_on_wrapper_calls_inner():
+ """A method missing on the wrapper must resolve to the inner's bound
+ method (compared by equality of the bound-method objects)."""
+ dummy = DummyMemoryProxy()
+ wrapper = EmptyMemoryProxyWrapper(dummy)
+
+ assert (
+ wrapper.say_hello == dummy.say_hello
+ ), "Expected wrapper to forward non-existing method to inner."
+
+
+def test_call_existing_method_on_wrapper_calls_existing():
+ dummy = DummyMemoryProxy()
+ wrapper = SayHelloMemoryProxyWrapper(dummy)
+
+ assert (
+ wrapper.say_hello == wrapper.say_hello
+ ), "Expected wrapper to always use existing method if it exist."
+
+
+def test_chained_wrappers():
+ """Lookups must forward through a chain of wrappers to the first wrapper
+ that defines the method."""
+ dummy = DummyMemoryProxy()
+ wrapper1 = SayHelloMemoryProxyWrapper(dummy)
+ wrapper2 = SayGoodbyeProxyWrapper(wrapper1)
+ wrapper3 = EmptyMemoryProxyWrapper(wrapper2)
+
+ assert (
+ wrapper3.say_hello == wrapper1.say_hello
+ ), "Expected wrapper to be able to chain inner forwards."
+ assert (
+ wrapper3.say_goodbye == wrapper2.say_goodbye
+ ), "Expected wrapper to be able to chain inner forwards."
+
+
+def test_wrapper_disallows_accessing_non_method():
+ """Plain data attributes of the inner proxy must not be forwarded."""
+ dummy = DummyMemoryProxy()
+ wrapper = EmptyMemoryProxyWrapper(dummy)
+
+ with pytest.raises(AttributeError):
+ wrapper.my_attribute
+
+
+def test_wrapper_disallows_accessing_non_existing_attribute():
+ """Names absent from both wrapper and inner must raise AttributeError."""
+ dummy = DummyMemoryProxy()
+ wrapper = EmptyMemoryProxyWrapper(dummy)
+
+ with pytest.raises(AttributeError):
+ wrapper.i_do_not_exist
+
+
+def test_wrapper_allows_accessing_property():
+ """Properties of the inner proxy must be forwarded (unlike plain
+ attributes)."""
+ dummy = DummyMemoryProxy()
+ wrapper = EmptyMemoryProxyWrapper(dummy)
+
+ wrapper.a_property
+
+
+def test_wrapper_allows_accessing_property_nested():
+ """Property forwarding must also work through two wrapper layers."""
+ dummy = DummyMemoryProxy()
+ wrapper = EmptyMemoryProxyWrapper(dummy)
+ wrapper = SayGoodbyeProxyWrapper(wrapper)
+
+ wrapper.a_property
diff --git a/tests/test_memory_sampling.py b/tests/test_memory_sampling.py
new file mode 100644
index 00000000..915323fd
--- /dev/null
+++ b/tests/test_memory_sampling.py
@@ -0,0 +1,98 @@
+"""Test to validate the behavior of `CoverageBasedSampleStrategy`. Tests how
+the `alpha` parameter influences the sampling distribution between two waves of
+data.
+
+Wave 1 and Wave 2: Two separate sets of data points added to the memory. After each wave, a series of samples are drawn from the memory.
+
+Alpha modulates how much the sampling prioritizes less-visited states. A higher alpha results in a stronger bias towards less-visited states.
+
+Intended Behavior:
+ - alpha=0.0: Sampling should be approximately uniform, with no strong bias towards either wave.
+ - alpha=1.0: Sampling should strongly prioritize the less-visited states (i.e., states from Wave 2 after it is added).
+ - Intermediate alpha values (e.g., alpha=0.5) should result in intermediate behaviors.
+"""
+
+import numpy as np
+import torch
+
+from emote.memory.builder import DictObsNStepTable
+from emote.memory.coverage_based_strategy import CoverageBasedSampleStrategy
+from emote.utils.spaces import BoxSpace, DictSpace, MDPSpace
+
+
TABLE_MAX_LEN = 4096  # table capacity; two waves of sequences fill it exactly
SAMPLE_AMOUNT = 1024  # number of table.sample() calls issued per wave
ALPHAS = [0.0, 0.5, 1.0]  # coverage-bias strengths under test (0 = uniform)
SEQUENCE_LEN = 10  # transitions per added sequence
+
+
def create_sample_space() -> MDPSpace:
    """Build a minimal MDP space: 1-d float32 reward, 1-d int32 action, and a
    single 2-d float32 observation keyed "obs"."""
    return MDPSpace(
        rewards=BoxSpace(dtype=np.float32, shape=(1,)),
        actions=BoxSpace(dtype=np.int32, shape=(1,)),
        state=DictSpace(spaces={"obs": BoxSpace(dtype=np.float32, shape=(2,))}),
    )
+
+
def populate_table(table: DictObsNStepTable, sequence_len: int, start: int, end: int):
    """Add random sequences with identities ``start`` .. ``end - 1`` to the
    table.

    Each sequence holds ``sequence_len`` transitions; observations carry one
    extra entry (the observation after the final action).
    """
    for i in range(start, end):
        sequence = {
            # one more obs than actions/rewards: the trailing next-state
            "obs": [np.random.rand(2) for _ in range(sequence_len + 1)],
            "actions": [np.random.rand(1) for _ in range(sequence_len)],
            "rewards": [np.random.rand(1) for _ in range(sequence_len)],
        }

        table.add_sequence(
            identity=i,
            sequence=sequence,
        )
+
+
def sample_table(table: DictObsNStepTable, sample_amount: int, count: int, sequence_length: int):
    """Draw ``sample_amount`` batches of ``count`` sequences of length
    ``sequence_length``, discarding the results (only the sampler's internal
    visit bookkeeping matters to the caller)."""
    remaining = sample_amount
    while remaining > 0:
        table.sample(count, sequence_length)
        remaining -= 1
+
+
def test_memory_export():
    # NOTE(review): despite the name this tests CoverageBasedSampleStrategy's
    # sampling bias, not memory export — consider renaming.
    """For each alpha, fill the table in two waves and measure what fraction
    of post-wave-2 samples hit wave-2 states; a higher alpha must bias the
    sampler more strongly towards the newer (less-visited) states."""
    device = torch.device("cpu")
    space = create_sample_space()
    for alpha in ALPHAS:
        table = DictObsNStepTable(
            spaces=space,
            use_terminal_column=False,
            maxlen=TABLE_MAX_LEN,
            sampler=CoverageBasedSampleStrategy(alpha=alpha),
            device=device,
        )

        # Each wave contributes half the table's capacity.
        wave_length = int(TABLE_MAX_LEN / (2 * SEQUENCE_LEN))

        # Wave 1
        populate_table(table=table, sequence_len=SEQUENCE_LEN, start=0, end=wave_length)
        sample_table(table=table, sample_amount=SAMPLE_AMOUNT, count=5, sequence_length=8)
        # Snapshot per-identity sample counts (reaches into sampler internals).
        pre_second_wave_sample_counts = table._sampler._sample_count.copy()

        # Wave 2
        populate_table(
            table=table, sequence_len=SEQUENCE_LEN, start=wave_length, end=wave_length * 2
        )
        sample_table(table=table, sample_amount=SAMPLE_AMOUNT, count=5, sequence_length=8)

        # Samples drawn from wave-2 identities since the snapshot...
        second_wave_samples = sum(
            table._sampler._sample_count[id] - pre_second_wave_sample_counts.get(id, 0)
            for id in range(wave_length, wave_length * 2)
        )
        # ...versus all samples drawn since the snapshot.
        total_new_samples = sum(
            table._sampler._sample_count[id] - pre_second_wave_sample_counts.get(id, 0)
            for id in table._sampler._sample_count.keys()
        )

        proportion_second_wave = second_wave_samples / total_new_samples

        # Loose lower bounds: the sampler is stochastic, so exact proportions
        # cannot be asserted.
        if alpha == 0.0:
            assert proportion_second_wave > 0.4
        elif alpha == 0.5:
            assert proportion_second_wave > 0.6
        elif alpha == 1.0:
            assert proportion_second_wave > 0.8
diff --git a/tests/test_modified_adamw.py b/tests/test_modified_adamw.py
new file mode 100644
index 00000000..9af3b1eb
--- /dev/null
+++ b/tests/test_modified_adamw.py
@@ -0,0 +1,127 @@
+from functools import partial
+
+import pytest
+import torch
+
+from torch import nn
+
+from emote.nn.initialization import ortho_init_
+from emote.optimizers import ModifiedAdamW, separate_modules_for_weight_decay
+
+
class QNet(nn.Module):
    """Small MLP Q-network used as a fixture for weight-decay separation.

    Encoder: [Linear -> LayerNorm -> ReLU] blocks (orthogonally initialised),
    followed by a scalar-output final layer. Mixing Linear (whitelisted for
    weight decay) with LayerNorm (blacklisted) is deliberate for the tests.
    """

    def __init__(self, num_obs, num_actions, hidden_dims):
        super().__init__()

        # Block i maps all_dims[i] -> hidden_dims[i].
        all_dims = [num_obs + num_actions] + hidden_dims

        self.encoder = nn.Sequential(
            *[
                nn.Sequential(nn.Linear(n_in, n_out), nn.LayerNorm(n_out), nn.ReLU())
                for n_in, n_out in zip(all_dims, hidden_dims)
            ],
        )
        self.encoder.apply(ortho_init_)

        # Scalar Q-value head, initialised with unit gain.
        self.final_layer = nn.Linear(hidden_dims[-1], 1)
        self.final_layer.apply(partial(ortho_init_, gain=1))

    def forward(self, action, obs):
        # Q(s, a): concatenate observation and action along the feature axis.
        x = torch.cat([obs, action], dim=1)
        return self.final_layer(self.encoder(x))
+
+
def module_separation(param_dict, decay, no_decay):
    """Assert that ``decay`` and ``no_decay`` are disjoint and together cover
    every parameter name in ``param_dict``."""
    overlap = decay & no_decay
    covered = decay | no_decay

    assert not overlap, f"Parameters {str(overlap)} made it into both decay/no_decay sets!"

    missing = param_dict.keys() - covered
    assert (
        not missing
    ), f"Parameters {str(missing)} were not separated into either decay/no_decay set!"
+
+
def num_groups(param_groups):
    """Assert that exactly one optimizer param group applies weight decay and
    exactly one does not."""
    with_wd = sum(1 for group in param_groups if group["weight_decay"] != 0)
    without_wd = len(param_groups) - with_wd

    assert with_wd == 1, f"There should be one group that has weight decay, but there are {with_wd}"

    assert (
        without_wd == 1
    ), f"There should be one group that has no weight decay, but there are {without_wd}"
+
+
def test_module_separation():
    """Validate that all parameters are added to either "decay" or
    "no_decay"."""
    q = QNet(2, 2, [2, 2])

    decay, no_decay = separate_modules_for_weight_decay(
        q,
        whitelist_weight_modules=(torch.nn.Linear,),
        blacklist_weight_modules=(torch.nn.LayerNorm,),
        layers_to_exclude={"final_layer"},
    )

    param_dict = dict(q.named_parameters())

    # Happy path: clean separation should not raise.
    module_separation(param_dict, decay, no_decay)

    # Make sure the test fails when a module is not added to either of the sets
    with pytest.raises(
        AssertionError,
        match="Parameters {'test'} were not separated into either decay/no_decay set!",
    ):
        param_dict["test"] = None
        module_separation(param_dict, decay, no_decay)

    # Make sure the test fails when a module is added to both of the sets.
    # Note: "test" is still in param_dict from the block above, so the
    # intersection check trips first here.
    with pytest.raises(
        AssertionError,
        match="Parameters {'test'} made it into both decay/no_decay sets!",
    ):
        decay.add("test")
        no_decay.add("test")

        module_separation(param_dict, decay, no_decay)
+
+
def test_num_groups():
    """Validate that only two groups of parameters exist: one that gets weight decay and one that doesn't."""
    q = QNet(2, 2, [2, 2])

    q_optim = ModifiedAdamW(
        network=q,
        lr=0.001,
        weight_decay=0.01,
        layers_to_exclude=["final_layer"],
    )

    # Happy path: the optimizer should produce exactly one decayed and one
    # non-decayed group.
    num_groups(q_optim.param_groups)

    # Make sure the test fails when a group with weight decay doesn't exist
    with pytest.raises(
        AssertionError,
        match="There should be one group that has weight decay, but there are 0",
    ):
        # Drop the (single) decayed group before re-checking.
        for i in range(len(q_optim.param_groups)):
            if q_optim.param_groups[i]["weight_decay"] > 0:
                del q_optim.param_groups[i]
                break

        num_groups(q_optim.param_groups)

    # Make sure the test fails when multiple groups with weight decay exist.
    # The decayed group was removed above, so these two are the only ones left.
    with pytest.raises(
        AssertionError,
        match="There should be one group that has weight decay, but there are 2",
    ):
        q_optim.param_groups += [{"weight_decay": 0.1}, {"weight_decay": 0.2}]

        num_groups(q_optim.param_groups)
diff --git a/tests/test_nstep.py b/tests/test_nstep.py
new file mode 100644
index 00000000..436ccda8
--- /dev/null
+++ b/tests/test_nstep.py
@@ -0,0 +1,102 @@
+# This file has been taken from https://github.com/jackharmer/agency (MIT License)
+
+import torch
+
+from pytest import approx
+
+from emote.utils.gamma_matrix import discount, make_gamma_matrix
+
+
def simple_discount(rewards, gamma, value, masks):
    """Scalar reference n-step return: walk the rewards backwards from the
    bootstrap ``value``, zeroing the carried value wherever ``masks`` is 0."""
    returns = []
    running = value
    for idx in range(len(rewards) - 1, -1, -1):
        running = rewards[idx] + gamma * masks[idx] * running
        returns.insert(0, running)
    return returns
+
+
def test_simple_discount_works():
    """Sanity-check the reference implementation against a hand-unrolled
    4-step discounted return, with and without a terminal final step."""
    rewards = [0.1, 0.2, 0.3, 0.4]
    masks = [1, 1, 1, 1]
    gamma = 0.9
    value = 10

    # Create the true values
    v3 = rewards[3] + gamma * masks[3] * value
    v2 = rewards[2] + gamma * masks[2] * v3
    v1 = rewards[1] + gamma * masks[1] * v2
    v0 = rewards[0] + gamma * masks[0] * v1

    d_true = [v0, v1, v2, v3]

    d = simple_discount(rewards, gamma, value, masks)

    assert d_true == approx(d, 1e-5)

    # Same rollout, but the final step is terminal: the bootstrap value must
    # be masked out of the return.
    masks = [1, 1, 1, 0]

    v3 = rewards[3] + gamma * masks[3] * value
    v2 = rewards[2] + gamma * masks[2] * v3
    v1 = rewards[1] + gamma * masks[1] * v2
    v0 = rewards[0] + gamma * masks[0] * v1

    d_true = [v0, v1, v2, v3]

    d = simple_discount(rewards, gamma, value, masks)

    assert d_true == approx(d, 1e-5)
+
+
def test_gamma_matrix():
    """The vectorised gamma-matrix discount must match the scalar reference
    for a 4-step rollout, both non-terminal and terminal."""
    rewards = [0.1, 0.2, 0.3, 0.4]
    masks = [1, 1, 1, 1]
    gamma = 0.9
    value = 10
    gamma_matrix = make_gamma_matrix(gamma, len(rewards))

    d_simple = simple_discount(rewards, gamma, value, masks)
    # discount() receives the bootstrap value pre-masked by the final mask.
    d_gamma = discount(
        torch.tensor(rewards).unsqueeze(0),
        torch.tensor([value * masks[-1]]).unsqueeze(0),
        gamma_matrix,
    )

    assert d_gamma.cpu().numpy() == approx(d_simple, 1e-5)

    # Terminal final step: only the last mask differs, so masking the
    # bootstrap value keeps the two implementations comparable.
    masks = [1, 1, 1, 0]
    d_simple = simple_discount(rewards, gamma, value, masks)
    d_gamma = discount(
        torch.tensor(rewards).unsqueeze(0),
        torch.tensor([value * masks[-1]]).unsqueeze(0),
        gamma_matrix,
    )

    assert d_gamma.cpu().numpy() == approx(d_simple, 1e-5)
+
+
def test_gamma_matrix_roll1():
    """Single-step edge case: the gamma-matrix discount must still agree with
    the scalar reference, masked and unmasked."""
    rewards = [0.1]
    masks = [1]
    gamma = 0.9
    value = 10
    gamma_matrix = make_gamma_matrix(gamma, len(rewards))

    d_simple = simple_discount(rewards, gamma, value, masks)
    d_gamma = discount(
        torch.tensor(rewards).unsqueeze(0),
        torch.tensor([value * masks[-1]]).unsqueeze(0),
        gamma_matrix,
    )

    assert d_gamma.cpu().numpy() == approx(d_simple, 1e-5)

    # Terminal step: bootstrap value is masked to zero.
    masks = [0]
    d_simple = simple_discount(rewards, gamma, value, masks)
    d_gamma = discount(
        torch.tensor(rewards).unsqueeze(0),
        torch.tensor([value * masks[-1]]).unsqueeze(0),
        gamma_matrix,
    )

    assert d_gamma.cpu().numpy() == approx(d_simple, 1e-5)
diff --git a/tests/test_onnx.py b/tests/test_onnx.py
new file mode 100644
index 00000000..bf477a2b
--- /dev/null
+++ b/tests/test_onnx.py
@@ -0,0 +1,93 @@
+import onnx
+import pytest
+import torch
+
+from gymnasium.vector import AsyncVectorEnv
+
+from emote.extra.onnx_exporter import OnnxExporter
+from emote.nn.gaussian_policy import GaussianMlpPolicy as Policy
+from emote.proxies import GenericAgentProxy
+
+from .gym import DictGymWrapper, HitTheMiddle
+
+
N_HIDDEN = 10  # width of each of the policy's two hidden layers
+
+
@pytest.fixture
def exporter(tmpdir):
    """OnnxExporter wired to a Gaussian MLP policy over 10 parallel
    HitTheMiddle envs, writing models under a per-test temp directory."""
    device = torch.device("cpu")
    env = DictGymWrapper(AsyncVectorEnv(10 * [HitTheMiddle]))

    policy = Policy(2, 1, [N_HIDDEN, N_HIDDEN])

    # Proxy input names are taken from the env's observation space keys.
    input_keys = list(env.dict_space.state.spaces.keys())
    agent_proxy = GenericAgentProxy(policy, device, input_keys, True, ["actions"])

    # NOTE(review): the positional args `True` and `50` are opaque here —
    # confirm their meaning against the OnnxExporter signature.
    exporter = OnnxExporter(
        agent_proxy,
        env.dict_space,
        True,
        tmpdir / "inference",
        50,
    )

    return exporter
+
+
def test_onnx_metadata_set(exporter):
    """Exporter-level metadata persists into the exported model, per-export
    metadata is appended after it, and later values override earlier keys of
    the same name.

    (Removed a leftover debug ``print`` of ``model.metadata_props``.)
    """
    exporter.add_metadata("this is a key", "this is a value")
    exporter.add_metadata("this will be overridden", "oh no!")

    handle = exporter.export(
        {
            "this is another key": "this is another value",
            "this will be overridden": "oh yes!",
        }
    )

    with open(handle.filepath, "rb") as f:
        model = onnx.load_model(f, "protobuf")

    # Two exporter-level keys plus one export-call key, deduplicated by name.
    assert len(model.metadata_props) == 3
    assert model.metadata_props[0].key == "this is a key"
    assert model.metadata_props[0].value == "this is a value"

    assert model.metadata_props[1].key == "this will be overridden"
    assert model.metadata_props[1].value == "oh yes!"

    assert model.metadata_props[2].key == "this is another key"
    assert model.metadata_props[2].value == "this is another value"
+
+
def test_onnx_requires_str_key(exporter):
    """Metadata keys must be strings: non-str keys raise TypeError both via
    add_metadata and via export-time metadata."""
    bad_key = 1

    with pytest.raises(TypeError):
        exporter.add_metadata(bad_key, "this is a value")

    with pytest.raises(TypeError):
        exporter.export({bad_key: "this is another value"})
+
+
def test_onnx_converts_value_to_str(exporter):
    """Non-string metadata values must be stringified in the exported model."""
    exporter.add_metadata("this is a key", 1)

    handle = exporter.export(
        {
            "this is another key": 2,
        }
    )

    with open(handle.filepath, "rb") as f:
        model = onnx.load_model(f, "protobuf")

    # Both int values appear as their str() forms.
    assert model.metadata_props[0].key == "this is a key"
    assert model.metadata_props[0].value == "1"

    assert model.metadata_props[1].key == "this is another key"
    assert model.metadata_props[1].value == "2"
diff --git a/tests/test_sac.py b/tests/test_sac.py
new file mode 100644
index 00000000..40314c6c
--- /dev/null
+++ b/tests/test_sac.py
@@ -0,0 +1,116 @@
+import random
+import string
+
+import numpy as np
+import pytest
+import torch
+
+from emote.algorithms.sac import AlphaLoss, FeatureAgentProxy
+from emote.extra.schedules import ConstantSchedule, CyclicSchedule
+from emote.nn.gaussian_policy import GaussianMlpPolicy
+from emote.typing import DictObservation, EpisodeState
+
+
IN_DIM: int = 3  # observation feature size fed to the policy
OUT_DIM: int = 2  # number of action dimensions the policy outputs
+
+
@pytest.fixture
def random_key():
    """Random 10-character upper-case alphanumeric string used as the proxy's
    observation key, so each test run exercises a distinct key."""
    return "".join(random.choices(string.ascii_uppercase + string.digits, k=10))
+
+
@pytest.fixture
def feature_proxy(random_key):
    """(FeatureAgentProxy, input_key) pair around a fresh Gaussian MLP policy
    on CPU; the key is randomised per test."""
    policy = GaussianMlpPolicy(IN_DIM, OUT_DIM, [16, 16])
    return (
        FeatureAgentProxy(policy, torch.device("cpu"), input_key=random_key),
        random_key,
    )
+
+
def test_input_key(feature_proxy):
    """The proxy must expose exactly the input key it was constructed with."""
    agent_proxy, key = feature_proxy
    expected_names = (key,)
    assert agent_proxy.input_names == expected_names, "wrong input key"
+
+
def test_input_call(feature_proxy):
    """Calling the proxy with a batch of observations returns one result per
    agent."""
    agent_proxy, key = feature_proxy
    result = agent_proxy(
        {
            # three agents (ids 0..2), each with a zero observation vector
            i: DictObservation(
                rewards={},
                episode_state=EpisodeState.RUNNING,
                array_data={key: np.array([0.0] * IN_DIM, dtype=np.float32)},
            )
            for i in range(3)
        }
    )
    assert len(result) == 3
+
+
def test_alpha_value_ref_valid_after_load():
    """Loading a checkpoint must update ln(alpha) in place: other losses
    (e.g. PolicyLoss) hold a reference to the same tensor object."""
    policy = GaussianMlpPolicy(IN_DIM, OUT_DIM, [16, 16])
    init_ln_alpha = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)
    optim = torch.optim.Adam([init_ln_alpha])
    loss = AlphaLoss(pi=policy, ln_alpha=init_ln_alpha, opt=optim, n_actions=OUT_DIM)

    # Fake checkpoint carrying an easily recognisable ln(alpha) value.
    dummy_load_ln_alpha = torch.tensor(1337.0, dtype=torch.float32, requires_grad=True)
    state_dict = {"network_state_dict": dummy_load_ln_alpha}

    ln_alpha_before_load = loss.ln_alpha
    loss.load_state_dict(state_dict, load_weights=True, load_optimizer=False, load_hparams=False)
    ln_alpha_after_load = loss.ln_alpha

    # Value must change...
    assert torch.equal(
        ln_alpha_after_load, dummy_load_ln_alpha
    ), "expected to actually load a alpha value."
    # ...but the tensor object identity must be preserved.
    assert (
        ln_alpha_before_load is ln_alpha_after_load
    ), "expected ln(alpha) to be the same python object after loading. The reference is used by other loss functions such as PolicyLoss!"
+
+
def test_target_entropy_schedules():
    """AlphaLoss target-entropy handling: defaults to a constant -n_actions,
    follows a provided Schedule on end_batch(), and rejects non-Schedule
    objects.

    (Removed a leftover debug ``print`` of the initial entropy.)
    """
    policy = GaussianMlpPolicy(IN_DIM, OUT_DIM, [16, 16])
    init_ln_alpha = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)
    optim = torch.optim.Adam([init_ln_alpha])
    loss = AlphaLoss(pi=policy, ln_alpha=init_ln_alpha, opt=optim, n_actions=OUT_DIM)

    # Check if default is set correctly when no t_entropy is passed
    init_entropy = loss.t_entropy.value
    assert init_entropy == -OUT_DIM

    # Check that default schedule is constant and doesn't update the value
    assert isinstance(loss.t_entropy, ConstantSchedule)
    for _ in range(5):
        loss.end_batch()
    assert init_entropy == loss.t_entropy.value

    # Check that value is updated when using a schedule
    start = 5
    end = 0
    steps = 5
    schedule = CyclicSchedule(start, end, steps, mode="triangular")
    loss = AlphaLoss(
        pi=policy, ln_alpha=init_ln_alpha, opt=optim, n_actions=OUT_DIM, t_entropy=schedule
    )

    # Triangular cycle: ramps start -> end over `steps` batches...
    for _ in range(steps + 1):
        loss.end_batch()
    assert loss.t_entropy.value == end

    # ...then back up to start.
    for _ in range(steps):
        loss.end_batch()
    assert loss.t_entropy.value == start

    # Check that invalid types are not accepted
    invalid_t_entropy = torch.optim.lr_scheduler.LinearLR(optim, 1, end / start, steps)
    with pytest.raises(TypeError):
        AlphaLoss(
            pi=policy,
            ln_alpha=init_ln_alpha,
            opt=optim,
            n_actions=OUT_DIM,
            t_entropy=invalid_t_entropy,
        )
diff --git a/tests/test_system_logger.py b/tests/test_system_logger.py
new file mode 100644
index 00000000..b2eec67f
--- /dev/null
+++ b/tests/test_system_logger.py
@@ -0,0 +1,22 @@
+import time
+
+from emote.extra.system_logger import SystemLogger
+
+
def test_records_all_metrics():
    """SystemLogger should report RAM growth and CPU load between cycles."""
    logger = SystemLogger()
    # First cycle establishes the baseline measurements.
    logger.end_cycle(bp_step=0, bp_samples=0)

    # Allocate ~2M distinct int objects, then busy-loop on them for roughly
    # one second to generate measurable CPU load.
    data = list(range(2 * 1024 * 1024))
    start = time.perf_counter()
    i = 0
    while (time.perf_counter() - start) < 1:
        data[i % (2 * 1024 * 1024)] += 1
        i += 1

    logger.end_cycle(bp_step=1, bp_samples=1000)

    # we allocated 2 M ints, each 28 bytes large.
    assert logger.scalar_logs["system/ram_usage_growth_mb_step"] > (2 * 28)
    # We pinned one core for ~1 s, so the load should be near 100; the assert
    # uses a loose lower bound to stay robust on busy CI machines.
    assert logger.scalar_logs["system/cpu_load"] > 10.0
diff --git a/tests/test_table.py b/tests/test_table.py
new file mode 100644
index 00000000..1a41cd73
--- /dev/null
+++ b/tests/test_table.py
@@ -0,0 +1,66 @@
+import numpy as np
+import pytest
+import torch
+
+from emote.memory.adaptors import TerminalAdaptor
+from emote.memory.fifo_strategy import FifoEjectionStrategy
+from emote.memory.storage import SyntheticDones
+from emote.memory.table import ArrayTable, Column, TagColumn, VirtualColumn
+from emote.memory.uniform_strategy import UniformSampleStrategy
+
+
@pytest.fixture
def table():
    """ArrayTable with real and virtual columns: synthetic dones/masks are
    derived from "reward", and a "terminal" tag column is consumed by
    TerminalAdaptor."""
    spec = [
        Column(name="obs", dtype=np.float32, shape=(3,)),
        Column(name="reward", dtype=np.float32, shape=()),
        # "dones"/"masks" are derived views over "reward", not stored data.
        VirtualColumn("dones", dtype=bool, shape=(1,), target_name="reward", mapper=SyntheticDones),
        VirtualColumn(
            "masks",
            dtype=np.float32,
            shape=(1,),
            target_name="reward",
            mapper=SyntheticDones.as_mask,
        ),
        TagColumn(name="terminal", shape=(), dtype=np.float32),
    ]

    table = ArrayTable(
        columns=spec,
        maxlen=10_000,
        sampler=UniformSampleStrategy(),
        ejector=FifoEjectionStrategy(),
        length_key="reward",  # sequence length is measured by the reward column
        adaptors=[TerminalAdaptor("terminal", "masks")],
        device="cpu",
    )

    return table
+
+
def test_sampled_data_is_always_copied(table: ArrayTable):
    """Two independent samples must never alias the same tensor storage."""
    # Fill the table with 600 identical sequences (5 obs / 4 rewards each —
    # one extra obs for the trailing next-state).
    for ii in range(0, 600):
        table.add_sequence(
            ii,
            dict(
                obs=[[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]],
                reward=[1, 2, 3, 4],
                terminal=[0, 0, 0, 0, 0],
            ),
        )

    sample_count = 100
    counts = [256, 512]
    seq_len = 3
    for _ in range(sample_count):
        for count in counts:
            sample1 = table.sample(count, seq_len)
            sample2 = table.sample(count, seq_len)

            # data_ptr() identifies the underlying storage; equal pointers
            # would mean the two samples share memory.
            for key in sample1.keys():
                col_samp_1: torch.Tensor = sample1[key]
                col_samp_2: torch.Tensor = sample2[key]

                assert (
                    col_samp_1.data_ptr() != col_samp_2.data_ptr()
                ), "2 table samples share memory! This is not allowed! Samples must always copy their data."
diff --git a/tests/test_trainer.py b/tests/test_trainer.py
new file mode 100644
index 00000000..50b68fe9
--- /dev/null
+++ b/tests/test_trainer.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+import pytest
+
+from emote.callback import Callback
+from emote.trainer import Trainer, TrainingShutdownException
+
+
class DummyCallback(Callback):
    """Callback that merely counts end_cycle() invocations."""

    def __init__(self, cycle: int | None = None):
        # `cycle` is the invocation interval handled by the Callback base.
        super().__init__(cycle)
        self.end_cycle_called = 0  # number of times end_cycle() has fired

    def end_cycle(self):
        self.end_cycle_called += 1
+
+
class DummyLoader:
    """Iterable that yields three empty batches, then signals shutdown."""

    def __iter__(self):
        # Three zero-size batches...
        for _ in range(3):
            yield {"batch_size": 0}

        # ...then stop the trainer cleanly.
        raise TrainingShutdownException("end of data")
+
+
@pytest.mark.parametrize("interval,expected", ((None, 0), (0, 0), (1, 3), (2, 1)))
def test_callback_cycle_called_count(interval, expected):
    """end_cycle() fires once per `interval` batches: the loader yields three
    batches, so interval 1 -> 3 calls, 2 -> 1 call, and None/0 disable it."""
    callback = DummyCallback(interval)

    Trainer([callback], DummyLoader()).train()

    assert callback.end_cycle_called == expected
diff --git a/tests/test_vae.py b/tests/test_vae.py
new file mode 100644
index 00000000..e826251f
--- /dev/null
+++ b/tests/test_vae.py
@@ -0,0 +1,62 @@
+import torch
+
+from tests.test_genrl import FullyConnectedDecoder, FullyConnectedEncoder, get_conditioning_fn
+from torch.optim import Adam
+
+from emote.algorithms.genrl.vae import VAELoss, VariationalAutoencoder
+
+
def test_vae_training():
    """A single optimisation step of VAELoss on a fixed batch should lower
    the loss when re-evaluated on that same batch."""
    action_dim = 10
    obs_dim = 20
    latent_dim = 3
    batch_size = 100
    device = torch.device("cpu")
    hidden_layer_dim = [256] * 3

    # Fixed random batch, reused for the before/after loss comparison.
    actions = torch.rand(batch_size, action_dim)
    obs = torch.rand(batch_size, obs_dim)
    beta = 0.001  # weight of the KL term

    # Encoder maps (action, obs-condition) -> latent.
    encoder = FullyConnectedEncoder(
        input_size=action_dim,
        output_size=latent_dim,
        condition_size=obs_dim,
        device=device,
        hidden_sizes=hidden_layer_dim,
    )

    # Decoder maps (latent, obs-condition) -> reconstructed action.
    decoder = FullyConnectedDecoder(
        latent_size=latent_dim,
        output_size=action_dim,
        condition_size=obs_dim,
        device=device,
        hidden_sizes=hidden_layer_dim,
    )

    vae = VariationalAutoencoder(
        encoder=encoder,
        decoder=decoder,
        device=device,
        beta=beta,
    )

    cfn = get_conditioning_fn(obs_dim)

    vae_loss = VAELoss(
        vae=vae,
        opt=Adam(vae.parameters(), lr=0.001),
        conditioning_func=cfn,
    )

    vae_loss.optimizer.zero_grad()

    loss = vae_loss.loss(default={"actions": actions, "observation": {"obs": obs}})
    loss_v1 = loss.item()

    loss.backward()
    vae_loss.optimizer.step()

    # Same batch after one update: the loss should have decreased.
    loss = vae_loss.loss(default={"actions": actions, "observation": {"obs": obs}})
    loss_v2 = loss.item()
    assert loss_v1 > loss_v2
diff --git a/tests/test_wandb.py b/tests/test_wandb.py
new file mode 100644
index 00000000..aa15600d
--- /dev/null
+++ b/tests/test_wandb.py
@@ -0,0 +1,67 @@
+import os
+
+import pytest
+import wandb
+
+from emote.callback import Callback
+from emote.mixins.logging import LoggingMixin
+from emote.trainer import Trainer
+
+
# Force wandb into offline mode so tests never talk to the network.
os.environ["WANDB_MODE"] = "offline"
N = 10000  # batches yielded by DummyLoader; also used as the log interval
+
+
class DummyCallback(LoggingMixin, Callback):
    """Callback that logs a monotonically increasing "dummy" scalar each batch."""

    def __init__(self):
        super().__init__()
        self.end_batch_called = 0  # number of times end_batch() has fired

    def end_batch(self):
        self.end_batch_called += 1
        # The scalar value equals the number of batches seen so far.
        self.log_scalar("dummy", self.end_batch_called)
+
+
class DummyLoader:
    """Iterable yielding N empty batches.

    NOTE(review): unlike the loader in test_trainer.py this never raises
    TrainingShutdownException — presumably the Trainer handles plain
    iterator exhaustion; confirm.
    """

    def __iter__(self):
        for _ in range(N):
            yield {"batch_size": 0}
+
+
def test_raises_help_if_wandb_not_installed(hide_pkg):
    """WBLogger must fail with a helpful ImportError (chaining the original
    one) when wandb is unavailable; `hide_pkg` masks the installed package."""
    hide_pkg("wandb")

    with pytest.raises(ImportError) as ex:
        from emote.callbacks.wb_logger import WBLogger

        WBLogger([], {}, log_interval=1)

    # The user-facing message points at the optional feature...
    assert ex.value.msg == "enable the optional `wandb` feature to use the WBLogger"
    # ...while the original import failure is preserved as the cause.
    assert isinstance(ex.value.__cause__, ImportError)
    assert ex.value.__cause__.msg == "No module named 'wandb'"
+
+
def test_logging():
    """WBLogger should start a wandb run, record extra config entries, and
    push the last logged scalars into the run summary."""
    from emote.callbacks.wb_logger import WBLogger

    dummy_cb = DummyCallback()
    logger = WBLogger(
        callbacks=[dummy_cb],
        config={
            "wandb_project": "test_project",
            "wandb_run": "test_run",
            "metadata": "test",
        },
        log_interval=N,
    )

    # check if a run is initialized
    assert wandb.run is not None

    # check if the additional info is logged in the config
    assert "metadata" in wandb.config.keys()

    Trainer([dummy_cb, logger], DummyLoader()).train()

    # wandb.summary() is a dict that should contain the last logged values of the run by default
    assert wandb.summary["dummy_bp_step"] == N