diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 4bf8207a..ae1f9608 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -22,7 +22,7 @@ jobs: - name: Install Dependencies run: | python -m pip install -U pip poetry - python -m poetry install --no-cache -E docs + python -m poetry install --no-cache --with docs - name: Determine Version id: determine_version diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6e21994c..cd2cb306 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -37,7 +37,7 @@ jobs: - name: Install Package run: | python -m pip install -U pip poetry - python -m poetry install --no-cache -E test -E onnx + python -m poetry install --no-cache --with test -E onnx - name: Run Tests run: | diff --git a/BREAKING_CHANGES.md b/BREAKING_CHANGES.md index 2a3e7ff0..c31cbd8e 100644 --- a/BREAKING_CHANGES.md +++ b/BREAKING_CHANGES.md @@ -1,6 +1,13 @@ # Breaking Changes All the breaking changes will be documented in this file. +### [2.0.0] + +#### Changed + +- Quadra 2.0.0 works with torch 2 and pytorch lightning 2; lightning trainer configurations must be aligned following the [migration guide](https://lightning.ai/docs/pytorch/LTS/upgrade/migration_guide.html). +- Quadra now relies on CUDA 12 instead of the old CUDA 11.6 + ### [1.5.0] #### Changed diff --git a/CHANGELOG.md b/CHANGELOG.md index 102b3cce..02bde517 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ # Changelog All notable changes to this project will be documented in this file. +### [2.0.0] + +#### Updated + +- Update torch to 2.1.2 with CUDA 12 support +- Update pytorch lightning to 2.1.* + +#### Changed + +- Refactor hydra plugins to use optional dev groups instead of extras to avoid dragging local packages around in external installations +- Refactor extra dev dependencies to use poetry groups instead of extras +- Improve trainer configs to avoid wrong overrides when combining different trainer configurations ### [1.5.8] #### Fix diff --git a/README.md b/README.md index 98704f34..5dcab234 100644 --- a/README.md +++ b/README.md @@ -92,16 +92,18 @@ source myenv/bin/activate pip install --upgrade pip ``` -3a. **Install the `quadra` package** with pip: - ```shell - pip install git+https://github.com/orobix/quadra.git - ``` +3. Install the package -3b. **Install the `quadra` package** with poetry: - ```shell - pip install poetry - poetry add git+https://github.com/orobix/quadra.git - ``` + * **Install the `quadra` package** with pip: + ```shell + pip install git+https://github.com/orobix/quadra.git + ``` + + * **Install the `quadra` package** with poetry: + ```shell + pip install poetry + poetry add git+https://github.com/orobix/quadra.git + ``` 4. **Run from CLI**: Run the following command to check if the installation was successful: @@ -243,7 +245,7 @@ First clone the repository from Github First clone the repository from `GitHub`, then install the package with optional dependencies (generally in editable mode) and enable the pre-commit hooks. 1. `git clone https://github.com/orobix/quadra.git && cd quadra` -1. Install the `quadra` package in editable mode: `poetry install --with test,dev,docs --all-extras` 2. Install pre-commit hooks `pre-commit install` 3. (Optional) Build the documentation by running the required commands (see below).
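The [2.0.0] breaking-changes entry above points at the Lightning 2.x migration guide; the most visible effect in this diff is the removal of `resume_from_checkpoint` from the trainer config in `docs/tutorials/devices_setup.md` below. As a rough illustration of what that alignment looks like in plain PyTorch Lightning (a minimal sketch, not Quadra code: `ToyModel`, the random tensors, and the `last.ckpt` path are made up for the example):

```python
# Minimal sketch of the Lightning 1.x -> 2.x alignment referenced in the
# breaking-changes entry. ToyModel, the random data, and the checkpoint path
# are placeholders for this example only.
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl


class ToyModel(pl.LightningModule):
    """Tiny LightningModule so the example runs end to end."""

    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(8, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return nn.functional.mse_loss(self.layer(x), y)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=1e-2)


train_loader = DataLoader(
    TensorDataset(torch.randn(64, 8), torch.randn(64, 1)), batch_size=16
)

# Lightning 1.x style, no longer accepted by the 2.x Trainer:
#   trainer = pl.Trainer(gpus=[0], resume_from_checkpoint="last.ckpt", max_epochs=10)

# Lightning 2.x style: device selection goes through accelerator/devices
# (matching the `accelerator: gpu` / `devices: [0]` keys kept in
# docs/tutorials/devices_setup.md), and resuming moves to fit(ckpt_path=...).
trainer = pl.Trainer(
    accelerator="gpu",  # switch to "cpu" on machines without a GPU
    devices=[0],
    min_epochs=1,
    max_epochs=10,
    log_every_n_steps=10,
)
trainer.fit(ToyModel(), train_dataloaders=train_loader, ckpt_path=None)  # set ckpt_path="last.ckpt" to resume
```

The two relevant 2.x changes here are that `resume_from_checkpoint` is no longer a `Trainer` argument (resuming is requested per `fit()` call via `ckpt_path`) and that device selection goes through the `accelerator`/`devices` pair, which is why the updated trainer YAML keeps those keys and simply drops `resume_from_checkpoint`.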
diff --git a/docs/tutorials/contribution.md b/docs/tutorials/contribution.md index 5b8c503d..ffa18b37 100644 --- a/docs/tutorials/contribution.md +++ b/docs/tutorials/contribution.md @@ -15,9 +15,9 @@ After setting up your environment you can install `Quadra` Library in different !!!info - - `poetry install -E dev` (for development) - - `poetry install -E docs` (for documentation) - - `poetry install -E test` (for testing) + - `poetry install --with dev` (for development) + - `poetry install --with docs` (for documentation) + - `poetry install --with test` (for testing) ## Pre-commit Hooks diff --git a/docs/tutorials/devices_setup.md b/docs/tutorials/devices_setup.md index 3a8eea5e..05786932 100644 --- a/docs/tutorials/devices_setup.md +++ b/docs/tutorials/devices_setup.md @@ -12,7 +12,6 @@ devices: [0] accelerator: gpu min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 ``` diff --git a/poetry.lock b/poetry.lock index 91b4b2ff..602d4ac1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -187,7 +187,7 @@ test = ["flake8 (==3.7.9)", "mock (==2.0.0)", "pylint (==1.9.3)"] [[package]] name = "anomalib" -version = "0.7.0+obx.1.2.10" +version = "0.7.0+obx.1.3.0" description = "anomalib - Anomaly Detection Library" optional = false python-versions = ">=3.7" @@ -214,8 +214,8 @@ openvino = ["defusedxml (==0.7.1)", "networkx (>=2.5,<3.0)", "nncf (>=2.1.0)", " [package.source] type = "git" url = "https://github.com/orobix/anomalib.git" -reference = "v0.7.0+obx.1.2.10" -resolved_reference = "5a4d1243f0ddc3df6f86da6c7765b82fc3317136" +reference = "v0.7.0+obx.1.3.0" +resolved_reference = "04e2db7795c26a3d6cc1baf797134a895d1ad87a" [[package]] name = "antlr4-python3-runtime" @@ -264,7 +264,7 @@ files = [ name = "astroid" version = "2.15.8" description = "An abstract syntax tree for Python with inference support." -optional = true +optional = false python-versions = ">=3.7.2" files = [ {file = "astroid-2.15.8-py3-none-any.whl", hash = "sha256:1aa149fc5c6589e3d0ece885b4491acd80af4f087baafa3fb5203b113e68cd3c"}, @@ -321,7 +321,7 @@ files = [ name = "babel" version = "2.14.0" description = "Internationalization utilities" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"}, @@ -386,7 +386,7 @@ typecheck = ["mypy"] name = "black" version = "22.12.0" description = "The uncompromising code formatter." -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, @@ -526,7 +526,7 @@ files = [ name = "cairocffi" version = "1.6.1" description = "cffi-based cairo bindings for Python" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "cairocffi-1.6.1-py3-none-any.whl", hash = "sha256:aa78ee52b9069d7475eeac457389b6275aa92111895d78fbaa2202a52dac112e"}, @@ -545,7 +545,7 @@ xcb = ["xcffib (>=1.4.0)"] name = "cairosvg" version = "2.7.0" description = "A Simple SVG Converter based on Cairo" -optional = true +optional = false python-versions = ">=3.5" files = [ {file = "CairoSVG-2.7.0-py3-none-any.whl", hash = "sha256:17cb96423a896258848322a95c80160e714a58f1af3dd73b8e1750994519b9f9"}, @@ -642,7 +642,7 @@ pycparser = "*" name = "cfgv" version = "3.4.0" description = "Validate configuration and produce human readable error messages." 
-optional = true +optional = false python-versions = ">=3.8" files = [ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, @@ -958,7 +958,7 @@ test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] name = "coverage" version = "7.4.0" description = "Code coverage measurement for Python" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, @@ -1090,7 +1090,7 @@ test-randomorder = ["pytest-randomly"] name = "cssselect2" version = "0.7.0" description = "CSS selectors for Python ElementTree" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "cssselect2-0.7.0-py3-none-any.whl", hash = "sha256:fd23a65bfd444595913f02fc71f6b286c29261e354c41d722ca7a261a49b5969"}, @@ -1144,7 +1144,7 @@ urllib3 = ">=1.26.7,<3" name = "defusedxml" version = "0.7.1" description = "XML bomb protection for Python stdlib modules" -optional = true +optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, @@ -1155,7 +1155,7 @@ files = [ name = "dill" version = "0.3.7" description = "serialize all of Python" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, @@ -1226,7 +1226,7 @@ files = [ name = "docutils" version = "0.20.1" description = "Docutils -- Python Documentation Utilities" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, @@ -1671,7 +1671,7 @@ tqdm = ["tqdm"] name = "ghp-import" version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." -optional = true +optional = false python-versions = "*" files = [ {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, @@ -1882,7 +1882,7 @@ test = ["objgraph", "psutil"] name = "griffe" version = "0.39.1" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
-optional = true +optional = false python-versions = ">=3.8" files = [ {file = "griffe-0.39.1-py3-none-any.whl", hash = "sha256:6ce4ecffcf0d2f96362c5974b3f7df812da8f8d4cfcc5ebc8202ef72656fc087"}, @@ -2122,7 +2122,7 @@ optuna = ">=2.10.0,<3.0.0" name = "hydra-plugins" version = "1.0.0" description = "Hydra plugin allowing the discovery of external configurations" -optional = true +optional = false python-versions = "*" files = [] develop = false @@ -2135,7 +2135,7 @@ url = "quadra_hydra_plugin" name = "identify" version = "2.5.33" description = "File identification library for Python" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "identify-2.5.33-py2.py3-none-any.whl", hash = "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34"}, @@ -2331,7 +2331,7 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, @@ -2353,7 +2353,7 @@ files = [ name = "interrogate" version = "1.5.0" description = "Interrogate a codebase for docstring coverage." -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "interrogate-1.5.0-py3-none-any.whl", hash = "sha256:a4ccc5cbd727c74acc98dee6f5e79ef264c0bcfa66b68d4e123069b2af89091a"}, @@ -2378,7 +2378,7 @@ tests = ["pytest", "pytest-cov", "pytest-mock"] name = "isort" version = "5.11.5" description = "A Python utility / library to sort Python imports." -optional = true +optional = false python-versions = ">=3.7.0" files = [ {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, @@ -2717,7 +2717,7 @@ test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] name = "lazy-object-proxy" version = "1.10.0" description = "A fast and thorough lazy object proxy." -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "lazy-object-proxy-1.10.0.tar.gz", hash = "sha256:78247b6d45f43a52ef35c25b5581459e85117225408a4128a3daf8bf9648ac69"}, @@ -3151,7 +3151,7 @@ python-dateutil = ">=2.7" name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, @@ -3192,7 +3192,7 @@ files = [ name = "mergedeep" version = "1.3.4" description = "A deep merge function for 🐍." -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, @@ -3203,7 +3203,7 @@ files = [ name = "mike" version = "1.1.2" description = "Manage multiple versions of your MkDocs-powered documentation" -optional = true +optional = false python-versions = "*" files = [ {file = "mike-1.1.2-py3-none-any.whl", hash = "sha256:4c307c28769834d78df10f834f57f810f04ca27d248f80a75f49c6fa2d1527ca"}, @@ -3239,7 +3239,7 @@ urllib3 = "*" name = "mkdocs" version = "1.5.2" description = "Project documentation with Markdown." 
-optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mkdocs-1.5.2-py3-none-any.whl", hash = "sha256:60a62538519c2e96fe8426654a67ee177350451616118a41596ae7c876bb7eac"}, @@ -3270,7 +3270,7 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp name = "mkdocs-autorefs" version = "0.5.0" description = "Automatically link across pages in MkDocs." -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "mkdocs_autorefs-0.5.0-py3-none-any.whl", hash = "sha256:7930fcb8ac1249f10e683967aeaddc0af49d90702af111a5e390e8b20b3d97ff"}, @@ -3285,7 +3285,7 @@ mkdocs = ">=1.1" name = "mkdocs-gen-files" version = "0.5.0" description = "MkDocs plugin to programmatically generate documentation pages during the build" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_gen_files-0.5.0-py3-none-any.whl", hash = "sha256:7ac060096f3f40bd19039e7277dd3050be9a453c8ac578645844d4d91d7978ea"}, @@ -3299,7 +3299,7 @@ mkdocs = ">=1.0.3" name = "mkdocs-literate-nav" version = "0.6.0" description = "MkDocs plugin to specify the navigation in Markdown instead of YAML" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_literate_nav-0.6.0-py3-none-any.whl", hash = "sha256:8c1b84714e5974da5e44e011ec0069275ae7647270c13a679662cf6ffce675a4"}, @@ -3313,7 +3313,7 @@ mkdocs = ">=1.0.3" name = "mkdocs-material" version = "9.2.8" description = "Documentation that simply works" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_material-9.2.8-py3-none-any.whl", hash = "sha256:6bc8524f8047a4f060d6ab0925b9d7cb61b3b5e6d5ca8a8e8085f8bfdeca1b71"}, @@ -3337,7 +3337,7 @@ requests = ">=2.31,<3.0" name = "mkdocs-material-extensions" version = "1.1.1" description = "Extension pack for Python Markdown and MkDocs Material." -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_material_extensions-1.1.1-py3-none-any.whl", hash = "sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"}, @@ -3348,7 +3348,7 @@ files = [ name = "mkdocs-section-index" version = "0.3.6" description = "MkDocs plugin to allow clickable sections that lead to an index page" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mkdocs_section_index-0.3.6-py3-none-any.whl", hash = "sha256:4172deff4890868d7d88ef41e895546ee07714cca83a7dcc56db979c00e10acf"}, @@ -3362,7 +3362,7 @@ mkdocs = ">=1.2" name = "mkdocstrings" version = "0.23.0" description = "Automatic documentation from sources, for MkDocs." -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "mkdocstrings-0.23.0-py3-none-any.whl", hash = "sha256:051fa4014dfcd9ed90254ae91de2dbb4f24e166347dae7be9a997fe16316c65e"}, @@ -3388,7 +3388,7 @@ python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] name = "mkdocstrings-python" version = "1.6.2" description = "A Python handler for mkdocstrings." 
-optional = true +optional = false python-versions = ">=3.8" files = [ {file = "mkdocstrings_python-1.6.2-py3-none-any.whl", hash = "sha256:cf560df975faf712808e44c1c2e52b8267f17bc89c8b23e7b9bfe679561adf4d"}, @@ -3505,7 +3505,7 @@ files = [ name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" -optional = true +optional = false python-versions = "*" files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, @@ -3685,7 +3685,7 @@ yaml = ["PyYAML (>=5.1.0)"] name = "mypy" version = "1.0.1" description = "Optional static typing for Python" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, @@ -3731,7 +3731,7 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -optional = true +optional = false python-versions = ">=3.5" files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, @@ -3760,7 +3760,7 @@ test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] name = "nh3" version = "0.2.15" description = "Python bindings to the ammonia HTML sanitization library." -optional = true +optional = false python-versions = "*" files = [ {file = "nh3-0.2.15-cp37-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:9c0d415f6b7f2338f93035bba5c0d8c1b464e538bfbb1d598acd47d7969284f0"}, @@ -3810,7 +3810,7 @@ twitter = ["twython"] name = "nodeenv" version = "1.8.0" description = "Node.js virtual environment builder" -optional = true +optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" files = [ {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, @@ -3865,6 +3865,115 @@ files = [ {file = "numpy-1.26.3.tar.gz", hash = "sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4"}, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." 
+optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + [[package]] name = "nvidia-ml-py" version = "11.515.75" @@ -3876,6 +3985,38 @@ files = [ {file = "nvidia_ml_py-11.515.75-py3-none-any.whl", hash = "sha256:5bf5f5240f5a242689c1d1129135a0bd79c8b93d2a282c7229fe32ab63e7999b"}, ] +[[package]] +name = "nvidia-nccl-cu12" +version = "2.18.1" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.18.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:1a6c4acefcbebfa6de320f412bf7866de856e786e0462326ba1bac40de0b5e71"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.3.101" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"}, + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, +] + [[package]] name = "nvitop" version = "0.11.0" @@ -3940,67 +4081,60 @@ PyYAML = ">=5.1.0" [[package]] name = "onnx" -version = "1.14.0" +version = "1.15.0" description = "Open Neural Network Exchange" optional = true -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "onnx-1.14.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:fb35c2c347486416f87f41557242c05d7ee804d3676c6c8c98eef6f5b1889e7b"}, - {file = "onnx-1.14.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd683d4aa6d55365582055a6c1e10a55d6c08a59e9216cbb67e37ad3a5b2b980"}, - {file = "onnx-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00b0d2620c10dcb9ec33441e807dc5851d2843d445e0faab5e22c8ad6874a67a"}, - {file = "onnx-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01893a4a2d70b68e8ee20269ccde4069a6fd243dc9e296643e2afeb0050527bc"}, - {file = "onnx-1.14.0-cp310-cp310-win32.whl", hash = "sha256:0753b0f118be71ff109dd994a3d6769e5871e9feaddfada77931c63f9de534b3"}, - {file = "onnx-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c3a2354d9d997c7a4a5e467b5373c98dc549d4a33c77d5723e1eda7e87559c"}, - {file = "onnx-1.14.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:5e780fd1ed25493596a141e93303d0b2897acb9ebfdee7047a916d8f8e525ab3"}, - {file = "onnx-1.14.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9d28d64cbac3ebdc0c9761a300340c60ec60316099906e354e5059e90335fb3b"}, - {file = "onnx-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba92fed1aa27cba385bc3890fbbe6484603e837e67c957b22899f93c70990cc4"}, - {file = "onnx-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2fab7e6e1c2d9d6479edad8e9088cdfd87ea293cb08f31565adabfb33c6e5789"}, - {file = "onnx-1.14.0-cp311-cp311-win32.whl", hash = "sha256:6e966f5ef38a0521595cad6a1d14d9ae205c593d2824d8c1fa044fa5ba15370d"}, - {file = "onnx-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:1fe8ba794d261d722018bd1385f02f966aace0fcb5448881ab5dd55ab0ebb81b"}, - {file = "onnx-1.14.0-cp37-cp37m-macosx_10_12_universal2.whl", hash = "sha256:c16dacf577700ff9cb076c61c880d1a4bc612eed96280396a54ee1e1bd7e2d68"}, - {file = "onnx-1.14.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:bbdca51da9fa9ec43eebd8c640bf71c05daa2afbeaa2c6478466470e28e41111"}, - {file = "onnx-1.14.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3315c304d23a06ebd07fffe2456ab7f1e0a8dba317393d5c17a671ae2da6645e"}, - {file = "onnx-1.14.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1545159f2e7fbc5b4a3ae032cd4d9ddeafc62c4f27fe22cbc3ecff49338992"}, - {file = "onnx-1.14.0-cp37-cp37m-win32.whl", hash = "sha256:18cd98f7e234e268cb60c47a1f8ea5f6ffba50fe11de924b17498b1571d0cd2c"}, - {file = "onnx-1.14.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a8f7454acded506b6359ee0837c8527c64964973d7d25ed6b16b7d4314599502"}, - {file = "onnx-1.14.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:a9702e7dd120bca421a820020151cbb1003077e17ded29cc8d44ff32a9a57ad8"}, - {file = "onnx-1.14.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:369c3ecace7e8c7df6efbcbc712b262626796ae4a83decd29111afafa025a30c"}, - {file = "onnx-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fbcdc1a0c1057785bc5f7254aca0cf0b49d19c74696f1ade107638054157315"}, - {file = "onnx-1.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed099fbdada4accead109a4479d5f73fb974566cce8d3c6fca94774f9645934c"}, - {file = "onnx-1.14.0-cp38-cp38-win32.whl", hash = "sha256:296e689aa54a9ae4e560b2bb149a64e96775699a0624af5f631665b9cda90482"}, - {file = "onnx-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:e1607f97007515df303c1f40b77363545af99a1f32d2f73240c8aa526cdbd109"}, - {file = "onnx-1.14.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:7800b6ec74b1fe3fbb3bf4a2380e2f4007c1a7f2d6927599ad40eead6eae5e19"}, - {file = "onnx-1.14.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:45d3effe59e20d0a9fdc51f5bb8f38299086c79576b894ed945e6a058c4b210a"}, - {file = "onnx-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a593b46015326feb949781d030cb1d0d5d388cca52bff2e2995badf55d56b38d"}, - {file = "onnx-1.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54614942574415ef3f0bce0800c6f41ecea8201f8042754e204ee8c0a8e473e1"}, - {file = "onnx-1.14.0-cp39-cp39-win32.whl", hash = "sha256:dcfaeb2d15e93c456003fac13ffa35144ba9d2666a83e2cef650dd5c90a2b768"}, - {file = "onnx-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:0639427ac61e5a0181f4f7c89f9fc82b3c9715c95071f9c3de79bbe303a4ae65"}, - {file = "onnx-1.14.0.tar.gz", hash = "sha256:43b85087c6b919de66872a043c7f4899fe6f840e11ffca7e662b2ce9e4cc2927"}, + {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:51cacb6aafba308aaf462252ced562111f6991cdc7bc57a6c554c3519453a8ff"}, + {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:0aee26b6f7f7da7e840de75ad9195a77a147d0662c94eaa6483be13ba468ffc1"}, + {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:baf6ef6c93b3b843edb97a8d5b3d229a1301984f3f8dee859c29634d2083e6f9"}, + {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ed899fe6000edc05bb2828863d3841cfddd5a7cf04c1a771f112e94de75d9f"}, + {file = "onnx-1.15.0-cp310-cp310-win32.whl", hash = "sha256:f1ad3d77fc2f4b4296f0ac2c8cadd8c1dcf765fc586b737462d3a0fe8f7c696a"}, + {file = "onnx-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:ca4ebc4f47109bfb12c8c9e83dd99ec5c9f07d2e5f05976356c6ccdce3552010"}, + {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:233ffdb5ca8cc2d960b10965a763910c0830b64b450376da59207f454701f343"}, + {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:51fa79c9ea9af033638ec51f9177b8e76c55fad65bb83ea96ee88fafade18ee7"}, + {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f277d4861729f5253a51fa41ce91bfec1c4574ee41b5637056b43500917295ce"}, + {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8a7c94d2ebead8f739fdb70d1ce5a71726f4e17b3e5b8ad64455ea1b2801a85"}, + {file = "onnx-1.15.0-cp311-cp311-win32.whl", hash = "sha256:17dcfb86a8c6bdc3971443c29b023dd9c90ff1d15d8baecee0747a6b7f74e650"}, + {file = "onnx-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:60a3e28747e305cd2e766e6a53a0a6d952cf9e72005ec6023ce5e07666676a4e"}, + {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6b5c798d9e0907eaf319e3d3e7c89a2ed9a854bcb83da5fefb6d4c12d5e90721"}, + {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:a4f774ff50092fe19bd8f46b2c9b27b1d30fbd700c22abde48a478142d464322"}, + {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2b0e7f3938f2d994c34616bfb8b4b1cebbc4a0398483344fe5e9f2fe95175e6"}, + {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49cebebd0020a4b12c1dd0909d426631212ef28606d7e4d49463d36abe7639ad"}, + {file = "onnx-1.15.0-cp38-cp38-win32.whl", hash = "sha256:1fdf8a3ff75abc2b32c83bf27fb7c18d6b976c9c537263fadd82b9560fe186fa"}, + {file = "onnx-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:763e55c26e8de3a2dce008d55ae81b27fa8fb4acbb01a29b9f3c01f200c4d676"}, + {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:b2d5e802837629fc9c86f19448d19dd04d206578328bce202aeb3d4bedab43c4"}, + {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9a9cfbb5e5d5d88f89d0dfc9df5fb858899db874e1d5ed21e76c481f3cafc90d"}, + {file = "onnx-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f472bbe5cb670a0a4a4db08f41fde69b187a009d0cb628f964840d3f83524e9"}, + {file = "onnx-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf2de9bef64792e5b8080c678023ac7d2b9e05d79a3e17e92cf6a4a624831d2"}, + {file = "onnx-1.15.0-cp39-cp39-win32.whl", hash = "sha256:ef4d9eb44b111e69e4534f3233fc2c13d1e26920d24ae4359d513bd54694bc6d"}, + {file = "onnx-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:95d7a3e2d79d371e272e39ae3f7547e0b116d0c7f774a4004e97febe6c93507f"}, + {file = "onnx-1.15.0.tar.gz", hash = "sha256:b18461a7d38f286618ca2a6e78062a2a9c634ce498e631e708a8041b00094825"}, ] [package.dependencies] numpy = "*" protobuf = ">=3.20.2" -typing-extensions = ">=3.6.2.1" [package.extras] -lint = ["lintrunner (>=0.10.0)", "lintrunner-adapters (>=0.3)"] +reference = ["Pillow", "google-re2"] [[package]] name = "onnxruntime-gpu" -version = "1.15.0" +version = 
"1.17.0" description = "ONNX Runtime is a runtime accelerator for Machine Learning models" optional = true python-versions = "*" files = [ - {file = "onnxruntime_gpu-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:287f8f542c7ec4234ee823c46f19ae661376cac06b2276f65c44cf88371b9c0a"}, - {file = "onnxruntime_gpu-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:12c0d5c3f3339b025fdcc8d416512ab53394a13f77528292d0a6d061e3dc9a01"}, - {file = "onnxruntime_gpu-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e4e2f1bb421ec3d9e69663ab8fffe99cba0b17456c8ff237965fcdacf71588d"}, - {file = "onnxruntime_gpu-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:1f2f508348ce91dd61607d3d86aaad8663178a17e9661722873ef3804f678345"}, - {file = "onnxruntime_gpu-1.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c05d061f20f005cf0a185f26f0e50042b6aa17eff9f94f768a59e5063013bebd"}, - {file = "onnxruntime_gpu-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:35d2b52610d9f3087c6550c85674bd085bfa04b547bf3d4942f651bf8dcaa908"}, - {file = "onnxruntime_gpu-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a43153ff1dc706d3cc39af8d77d3b44a2dcb176a8498f0e02d0804db358973b"}, - {file = "onnxruntime_gpu-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:87abc8461b43c0194d911e64e625e192b809ee105ddfde706c7c691157a0d203"}, + {file = "onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:11e7f7f781fef16c09ec8d03bfb6da84cf61c54fc59e8a4ea047a90c4a24e88f"}, + {file = "onnxruntime_gpu-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:00288d6a152752dc74d3aa9ef7e411c608fc9ec5db5672492c2b0b1104709645"}, + {file = "onnxruntime_gpu-1.17.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:b26cd73a47f640cb363f670526597d5770c997fd9fd53de24c13febfa4b99489"}, + {file = "onnxruntime_gpu-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:5217a1609361114d966607f5328e2af5088272015a3bc01bcacf447a98dd10a5"}, + {file = "onnxruntime_gpu-1.17.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:b3bb47112a36af2660d9ad7cf4987dbf1c54b1b1522cb7b5ee2f20e61a0e1ed2"}, + {file = "onnxruntime_gpu-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:1f567ffeb252be6b2a75015b487bee023b57e48f2cccd84f3e0208aa1623d41d"}, + {file = "onnxruntime_gpu-1.17.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7d721e967259ee81831011a36756728cdf9c9c23f7187faa4cd1010ea263a3ce"}, + {file = "onnxruntime_gpu-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:b10564ee6c160d32fefa75469968fe4d8e901c144892c0facd9036c498e53922"}, ] [package.dependencies] @@ -4011,6 +4145,11 @@ packaging = "*" protobuf = "*" sympy = "*" +[package.source] +type = "legacy" +url = "https://pkgs.dev.azure.com/onnxruntime/onnxruntime/_packaging/onnxruntime-cuda-12/pypi/simple" +reference = "onnx_cu12" + [[package]] name = "onnxsim" version = "0.4.28" @@ -4057,10 +4196,10 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, - {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, {version = 
">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""}, + {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, + {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, ] [[package]] @@ -4081,10 +4220,10 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, - {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""}, + {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, + {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, ] [[package]] @@ -4196,7 +4335,7 @@ files = [ name = "paginate" version = "0.5.6" description = "Divides large result sets into pages for easier browsing" -optional = true +optional = false python-versions = "*" files = [ {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, @@ -4240,8 +4379,8 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.20.3", markers = "python_version < \"3.10\""}, {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, ] python-dateutil = ">=2.8.1" pytz = ">=2020.1" @@ -4253,7 +4392,7 @@ test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] name = "pandas-stubs" version = "1.5.3.230321" description = "Type annotations for pandas" -optional = true +optional = false python-versions = ">=3.8,<3.12" files = [ {file = "pandas_stubs-1.5.3.230321-py3-none-any.whl", hash = "sha256:4bf36b3071dd55f0e558ac8efe07676a120f2ed89e7a3df0fb78ddf2733bf247"}, @@ -4288,7 +4427,7 @@ invoke = ["invoke (>=2.0)"] name = "pathspec" version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." 
-optional = true +optional = false python-versions = ">=3.8" files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, @@ -4437,7 +4576,7 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co name = "pluggy" version = "1.3.0" description = "plugin and hook calling mechanisms for python" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, @@ -4489,7 +4628,7 @@ xattr = {version = ">=0.10.0,<0.11.0", markers = "sys_platform == \"darwin\""} name = "poetry-bumpversion" version = "0.3.2" description = "Poetry plugin to update __version__ in __init__ file and other files containing version strings" -optional = true +optional = false python-versions = ">=3.8,<4.0" files = [ {file = "poetry_bumpversion-0.3.2-py3-none-any.whl", hash = "sha256:df51b3c27330820ad036e83b6ab6db083dcaeb13bab3f320b781c69a38773f81"}, @@ -4531,7 +4670,7 @@ poetry-core = ">=1.7.0,<2.0.0" name = "pre-commit" version = "3.0.4" description = "A framework for managing and maintaining multi-language pre-commit hooks." -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "pre_commit-3.0.4-py2.py3-none-any.whl", hash = "sha256:9e3255edb0c9e7fe9b4f328cb3dc86069f8fdc38026f1bf521018a05eaf4d67b"}, @@ -4668,7 +4807,7 @@ files = [ name = "py" version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" -optional = true +optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, @@ -4888,7 +5027,7 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] name = "pylint" version = "2.16.4" description = "python code static checker" -optional = true +optional = false python-versions = ">=3.7.2" files = [ {file = "pylint-2.16.4-py3-none-any.whl", hash = "sha256:4a770bb74fde0550fa0ab4248a2ad04e7887462f9f425baa0cd8d3c1d098eaee"}, @@ -4914,7 +5053,7 @@ testutils = ["gitpython (>3)"] name = "pymdown-extensions" version = "10.7" description = "Extension pack for Python Markdown." -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "pymdown_extensions-10.7-py3-none-any.whl", hash = "sha256:6ca215bc57bc12bf32b414887a68b810637d039124ed9b2e5bd3325cbb2c050c"}, @@ -5007,7 +5146,7 @@ files = [ name = "pytest" version = "7.4.4" description = "pytest: simple powerful testing with Python" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, @@ -5029,7 +5168,7 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "pytest-cov" version = "4.0.0" description = "Pytest plugin for measuring coverage." -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "pytest-cov-4.0.0.tar.gz", hash = "sha256:996b79efde6433cdbd0088872dbc5fb3ed7fe1578b68cdbba634f14bb8dd0470"}, @@ -5047,7 +5186,7 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "pytest-env" version = "1.1.3" description = "pytest plugin that allows you to add environment variables." 
-optional = true +optional = false python-versions = ">=3.8" files = [ {file = "pytest_env-1.1.3-py3-none-any.whl", hash = "sha256:aada77e6d09fcfb04540a6e462c58533c37df35fa853da78707b17ec04d17dfc"}, @@ -5065,7 +5204,7 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "pytest-mock (>=3.12)"] name = "pytest-lazy-fixture" version = "0.6.3" description = "It helps to use fixtures in pytest.mark.parametrize" -optional = true +optional = false python-versions = "*" files = [ {file = "pytest-lazy-fixture-0.6.3.tar.gz", hash = "sha256:0e7d0c7f74ba33e6e80905e9bfd81f9d15ef9a790de97993e34213deb5ad10ac"}, @@ -5079,7 +5218,7 @@ pytest = ">=3.2.5" name = "pytest-mock" version = "3.11.1" description = "Thin-wrapper around the mock package for easier use with pytest" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "pytest-mock-3.11.1.tar.gz", hash = "sha256:7f6b125602ac6d743e523ae0bfa71e1a697a2f5534064528c6ff84c2f7c2fc7f"}, @@ -5136,38 +5275,34 @@ dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatc [[package]] name = "pytorch-lightning" -version = "1.9.5" +version = "2.1.3" description = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytorch-lightning-1.9.5.tar.gz", hash = "sha256:925fe7b80ddf04859fa385aa493b260be4000b11a2f22447afb4a932d1f07d26"}, - {file = "pytorch_lightning-1.9.5-py3-none-any.whl", hash = "sha256:06821558158623c5d2ecf5d3d0374dc8bd661e0acd3acf54a6d6f71737c156c5"}, + {file = "pytorch-lightning-2.1.3.tar.gz", hash = "sha256:2500b002fa09cb37b0e12f879876bf30a2d260b0f04783d33264dab175f0c966"}, + {file = "pytorch_lightning-2.1.3-py3-none-any.whl", hash = "sha256:03ed186035a230b161130e0d8ecf1dd6657ff7e3f1520e9257b0db7650f9aeea"}, ] [package.dependencies] -fsspec = {version = ">2021.06.0", extras = ["http"]} -lightning-utilities = ">=0.6.0.post0" +fsspec = {version = ">=2022.5.0", extras = ["http"]} +lightning-utilities = ">=0.8.0" numpy = ">=1.17.2" -packaging = ">=17.1" +packaging = ">=20.0" PyYAML = ">=5.4" -torch = ">=1.10.0" +torch = ">=1.12.0" torchmetrics = ">=0.7.0" tqdm = ">=4.57.0" typing-extensions = ">=4.0.0" [package.extras] -all = ["colossalai (>=0.2.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=10.14.0,!=10.15.0.a)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.11.1)"] -colossalai = ["colossalai (>=0.2.0)"] -deepspeed = ["deepspeed (>=0.6.0)"] -dev = ["cloudpickle (>=1.3)", "codecov (==2.1.12)", "colossalai (>=0.2.0)", "coverage (==6.5.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "fastapi (<0.87.0)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnx (<1.14.0)", "onnxruntime (<1.14.0)", "pandas (>1.0)", "pre-commit (==2.20.0)", "protobuf (<=3.20.1)", "psutil (<5.9.5)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-forked (==1.4.0)", "pytest-rerunfailures (==10.3)", "rich (>=10.14.0,!=10.15.0.a)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision 
(>=0.11.1)", "uvicorn (<0.19.1)"] -examples = ["gym[classic-control] (>=0.17.0)", "ipython[all] (<8.7.1)", "torchmetrics (>=0.10.0)", "torchvision (>=0.11.1)"] -extra = ["hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=10.14.0,!=10.15.0.a)", "tensorboardX (>=2.2)"] -fairscale = ["fairscale (>=0.4.5)"] -hivemind = ["hivemind (==1.1.5)"] -horovod = ["horovod (>=0.21.2,!=0.24.0)"] -strategies = ["colossalai (>=0.2.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)"] -test = ["cloudpickle (>=1.3)", "codecov (==2.1.12)", "coverage (==6.5.0)", "fastapi (<0.87.0)", "onnx (<1.14.0)", "onnxruntime (<1.14.0)", "pandas (>1.0)", "pre-commit (==2.20.0)", "protobuf (<=3.20.1)", "psutil (<5.9.5)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-forked (==1.4.0)", "pytest-rerunfailures (==10.3)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "uvicorn (<0.19.1)"] +all = ["bitsandbytes (<=0.41.1)", "deepspeed (>=0.8.2,<=0.9.3)", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.26.1)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] +deepspeed = ["deepspeed (>=0.8.2,<=0.9.3)"] +dev = ["bitsandbytes (<=0.41.1)", "cloudpickle (>=1.3)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "fastapi", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.26.1)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "rich (>=12.3.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)", "uvicorn"] +examples = ["gym[classic-control] (>=0.17.0)", "ipython[all] (<8.15.0)", "lightning-utilities (>=0.8.0)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] +extra = ["bitsandbytes (<=0.41.1)", "hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.26.1)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", "tensorboardX (>=2.2)"] +strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] +test = ["cloudpickle (>=1.3)", "coverage (==7.3.1)", "fastapi", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "uvicorn"] [[package]] name = "pytz" @@ -5239,6 +5374,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = 
"PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -5277,7 +5413,7 @@ files = [ name = "pyyaml-env-tag" version = "0.1" description = "A custom YAML tag for referencing environment variables in YAML files. " -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, @@ -5424,7 +5560,7 @@ full = ["numpy"] name = "readme-renderer" version = "42.0" description = "readme_renderer is a library for rendering readme descriptions for Warehouse" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "readme_renderer-42.0-py3-none-any.whl", hash = "sha256:13d039515c1f24de668e2c93f2e877b9dbe6c6c32328b90a40a49d8b2b85f36d"}, @@ -5598,7 +5734,7 @@ requests = ">=2.0.1,<3.0.0" name = "rfc3986" version = "2.0.0" description = "Validating URI References per RFC 3986" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, @@ -5644,7 +5780,7 @@ pyasn1 = ">=0.1.3" name = "ruff" version = "0.0.257" description = "An extremely fast Python linter, written in Rust." -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "ruff-0.0.257-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:7280640690c1d0046b20e0eb924319a89d8e22925d7d232180ce31196e7478f8"}, @@ -6434,7 +6570,7 @@ pbr = ">=2.0.0,<2.1.0 || >2.1.0" name = "sympy" version = "1.12" description = "Computer algebra system (CAS) in Python" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, @@ -6569,7 +6705,7 @@ torchvision = "*" name = "tinycss2" version = "1.2.1" description = "A tiny CSS parser" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, @@ -6587,7 +6723,7 @@ test = ["flake8", "isort", "pytest"] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -optional = true +optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, @@ -6618,32 +6754,131 @@ files = [ [[package]] name = "torch" -version = "1.13.1+cu116" +version = "2.1.2+cu121" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "torch-1.13.1+cu116-cp310-cp310-linux_x86_64.whl", hash = "sha256:51d5870cdf05b6208b1c739fe0ba511b977eca37f9507829675596acc11b6ca4"}, - {file = "torch-1.13.1+cu116-cp310-cp310-win_amd64.whl", hash = "sha256:6d59b73bbd83eee53e7978925168fe068709e1344050fdabf4043695084b2ccc"}, - {file = "torch-1.13.1+cu116-cp311-cp311-linux_x86_64.whl", hash = 
"sha256:8cbfbb27f44fcc246d298f3812f3f7963622b4c1f2823670ae549416199f5ef7"}, - {file = "torch-1.13.1+cu116-cp37-cp37m-linux_x86_64.whl", hash = "sha256:20d7c6e00804b6bea6f69b77240c4fcdf244cce2f6b1ff73beff7c0df6553d9d"}, - {file = "torch-1.13.1+cu116-cp37-cp37m-win_amd64.whl", hash = "sha256:c2493a30d0c5ff426fad3d60d9d535c24678b45cc4ce1cffca0f1044d408cb96"}, - {file = "torch-1.13.1+cu116-cp38-cp38-linux_x86_64.whl", hash = "sha256:9338faa0a5a0eb625e17e39729f06fb0a574098d7ab88d856bbda4cb76a0b665"}, - {file = "torch-1.13.1+cu116-cp38-cp38-win_amd64.whl", hash = "sha256:1c33942d411d4dee25e56755cfd09538f53a497a6f0453d54ce96a5ca341627b"}, - {file = "torch-1.13.1+cu116-cp39-cp39-linux_x86_64.whl", hash = "sha256:db457a822d736013b6ffe509053001bc918bdd78fe68967b605f53984a9afac5"}, - {file = "torch-1.13.1+cu116-cp39-cp39-win_amd64.whl", hash = "sha256:80a6b55915ac72c087ab85122289431fde5c5a4c85ca83a38c6d11a7ecbfdb35"}, + {file = "torch-2.1.2+cu121-cp310-cp310-linux_x86_64.whl", hash = "sha256:b2184b7729ef3b9b10065c074a37c1e603fd99f91e38376e25cb7ed6e1d54696"}, ] [package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} typing-extensions = "*" [package.extras] +dynamo = ["jinja2"] opt-einsum = ["opt-einsum (>=3.3)"] [package.source] -type = "legacy" -url = "https://download.pytorch.org/whl/cu116" -reference = "torch_cu116" +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-linux_x86_64.whl" + +[[package]] +name = "torch" +version = "2.1.2+cu121" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.1.2+cu121-cp310-cp310-win_amd64.whl", hash = "sha256:9925143dece0e63c5404a72d59eb668ef78795418e96b576f94d75dcea6030b9"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.18.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] 
+opt-einsum = ["opt-einsum (>=3.3)"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-win_amd64.whl" + +[[package]] +name = "torch" +version = "2.1.2+cu121" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.1.2+cu121-cp39-cp39-linux_x86_64.whl", hash = "sha256:eaaf6907e3723c0ca6a91df5e01a7eef8cabec93120e9a50739f5a5f14a2aa46"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] +dynamo = ["jinja2"] +opt-einsum = ["opt-einsum (>=3.3)"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-linux_x86_64.whl" + +[[package]] +name = "torch" +version = "2.1.2+cu121" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.1.2+cu121-cp39-cp39-win_amd64.whl", hash = "sha256:2d287804328dfb950ae6d418c9d8561d8f379237cf0710566d80efb96b6cd744"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.18.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-win_amd64.whl" [[package]] name = "torchinfo" @@ -6695,35 +6930,34 @@ files = [ [[package]] name = "torchvision" -version = "0.14.1+cu116" +version = "0.16.2+cu121" description = "image and video datasets and models for torch deep learning" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "torchvision-0.14.1+cu116-cp310-cp310-linux_x86_64.whl", hash = "sha256:fcb58d41bf95dd8b85d388fa1969d1dcad55eec055e317981d653a05c4ca6d8b"}, - {file = 
"torchvision-0.14.1+cu116-cp310-cp310-win_amd64.whl", hash = "sha256:0481f119c1ca5bf3704d0848161be956db04d26c01ffb95b3b3b08cbdfa301c2"}, - {file = "torchvision-0.14.1+cu116-cp37-cp37m-linux_x86_64.whl", hash = "sha256:4985719c66c9612ff4bb2d3a53c3faafdd934152b2a87414d27bdc7874a4360f"}, - {file = "torchvision-0.14.1+cu116-cp37-cp37m-win_amd64.whl", hash = "sha256:5debbbda105c5e0ce8d0b40718c435a88e61b2c2c64d3d0d80ca89e4e9b3f6c7"}, - {file = "torchvision-0.14.1+cu116-cp38-cp38-linux_x86_64.whl", hash = "sha256:47593585e971c3e0c980fabfbfb885eb5ff054716baa555660c5b0304c9ea39d"}, - {file = "torchvision-0.14.1+cu116-cp38-cp38-win_amd64.whl", hash = "sha256:fefa6bee4c3019723320c6e554e400d6781ccecce99c1772a165efd0696b3462"}, - {file = "torchvision-0.14.1+cu116-cp39-cp39-linux_x86_64.whl", hash = "sha256:a9fc38040e133d1779f131b4497caef830e9e699faf89cd323cd58794ffb305b"}, - {file = "torchvision-0.14.1+cu116-cp39-cp39-win_amd64.whl", hash = "sha256:4b75cfe80d1e778f252fce94a7dd4ea35bc66a10efd53c4c63910ee95425face"}, + {file = "torchvision-0.16.2+cu121-cp310-cp310-linux_x86_64.whl", hash = "sha256:baa7970c6b5437312e5dd0bd0f2571a20b786c3e285bafd6ed3e4f62a5c3c76e"}, + {file = "torchvision-0.16.2+cu121-cp310-cp310-win_amd64.whl", hash = "sha256:6bad36a0b958873e340bc250d9e76eade2a6e82414ca7228298b23641b14dd38"}, + {file = "torchvision-0.16.2+cu121-cp311-cp311-linux_x86_64.whl", hash = "sha256:092d59110a9fecf7c3b44d18b1b5aa10ba898e2541e07b674fe59268521eb8cb"}, + {file = "torchvision-0.16.2+cu121-cp311-cp311-win_amd64.whl", hash = "sha256:55def0091f7079cfbfa83dbf2f91860bf59257c9566eb67a64fb856287079586"}, + {file = "torchvision-0.16.2+cu121-cp38-cp38-linux_x86_64.whl", hash = "sha256:0266727d089f512aa900aead2908530f54d9077784bde1e7ca46f66b8f567e98"}, + {file = "torchvision-0.16.2+cu121-cp38-cp38-win_amd64.whl", hash = "sha256:5e6ed34e045cc01cc4481ab7c3e932ded00b24b537fd5457cea53efa721e4ad1"}, + {file = "torchvision-0.16.2+cu121-cp39-cp39-linux_x86_64.whl", hash = "sha256:aa1325a6e041603deecefc569e64b8c75a6c9a1b876e29a2def298ba8456474d"}, + {file = "torchvision-0.16.2+cu121-cp39-cp39-win_amd64.whl", hash = "sha256:dbeb2cd41956db95b08f7848dcb8740e04bbafb1f42cf69b214849bf0615f11a"}, ] [package.dependencies] numpy = "*" pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" requests = "*" -torch = "1.13.1" -typing-extensions = "*" +torch = "2.1.2" [package.extras] scipy = ["scipy"] [package.source] type = "legacy" -url = "https://download.pytorch.org/whl/cu116" -reference = "torch_cu116" +url = "https://download.pytorch.org/whl/cu121" +reference = "torch_cu121" [[package]] name = "tqdm" @@ -6756,6 +6990,31 @@ files = [ {file = "tripy-1.0.0-py3-none-any.whl", hash = "sha256:13f861a47a2f6626137b55cf06b2a15d147bbfac7d80c53d59c5b557a59b24ec"}, ] +[[package]] +name = "triton" +version = "2.1.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +files = [ + {file = "triton-2.1.0-0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:66439923a30d5d48399b08a9eae10370f6c261a5ec864a64983bae63152d39d7"}, + {file = "triton-2.1.0-0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:919b06453f0033ea52c13eaf7833de0e57db3178d23d4e04f9fc71c4f2c32bf8"}, + {file = "triton-2.1.0-0-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ae4bb8a91de790e1866405211c4d618379781188f40d5c4c399766914e84cd94"}, + {file = "triton-2.1.0-0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:39f6fb6bdccb3e98f3152e3fbea724f1aeae7d749412bbb1fa9c441d474eba26"}, + {file = "triton-2.1.0-0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:21544e522c02005a626c8ad63d39bdff2f31d41069592919ef281e964ed26446"}, + {file = "triton-2.1.0-0-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:143582ca31dd89cd982bd3bf53666bab1c7527d41e185f9e3d8a3051ce1b663b"}, + {file = "triton-2.1.0-0-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82fc5aeeedf6e36be4e4530cbdcba81a09d65c18e02f52dc298696d45721f3bd"}, + {file = "triton-2.1.0-0-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:81a96d110a738ff63339fc892ded095b31bd0d205e3aace262af8400d40b6fa8"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.18)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"] +tutorials = ["matplotlib", "pandas", "tabulate"] + [[package]] name = "trove-classifiers" version = "2024.1.8" @@ -6785,7 +7044,7 @@ test = ["pytest"] name = "twine" version = "4.0.2" description = "Collection of utilities for publishing packages on PyPI" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"}, @@ -6807,7 +7066,7 @@ urllib3 = ">=1.26.0" name = "types-pytz" version = "2023.3.1.1" description = "Typing stubs for pytz" -optional = true +optional = false python-versions = "*" files = [ {file = "types-pytz-2023.3.1.1.tar.gz", hash = "sha256:cc23d0192cd49c8f6bba44ee0c81e4586a8f30204970fc0894d209a6b08dab9a"}, @@ -6818,7 +7077,7 @@ files = [ name = "types-pyyaml" version = "6.0.12.12" description = "Typing stubs for PyYAML" -optional = true +optional = false python-versions = "*" files = [ {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"}, @@ -6963,7 +7222,7 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", name = "verspec" version = "0.1.0" description = "Flexible version handling" -optional = true +optional = false python-versions = "*" files = [ {file = "verspec-0.1.0-py3-none-any.whl", hash = "sha256:741877d5633cc9464c45a469ae2a31e801e6dbbaa85b9675d481cda100f11c31"}, @@ -7050,7 +7309,7 @@ sweeps = ["PyYAML", "jsonref (>=0.2)", "jsonschema (>=3.2.0)", "numpy (>=1.15,<1 name = "watchdog" version = "3.0.0" description = "Filesystem events monitoring" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, @@ -7100,7 +7359,7 @@ files = [ name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" -optional = true +optional = false python-versions = "*" files = [ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, @@ -7181,7 +7440,7 @@ files = [ name = "wrapt" version = "1.16.0" description = "Module for decorators, wrappers and monkey patching." 
-optional = true +optional = false python-versions = ">=3.6" files = [ {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, @@ -7566,12 +7825,9 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [extras] -dev = ["black", "cairosvg", "hydra-plugins", "interrogate", "isort", "mike", "mkdocs", "mkdocs_autorefs", "mkdocs_gen_files", "mkdocs_literate_nav", "mkdocs_material", "mkdocs_material_extensions", "mkdocs_section_index", "mkdocstrings", "mkdocstrings_python", "mypy", "pandas_stubs", "poetry-bumpversion", "pre_commit", "pylint", "pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_mock", "ruff", "twine", "types_pyyaml"] -docs = ["cairosvg", "mike", "mkdocs", "mkdocs_autorefs", "mkdocs_gen_files", "mkdocs_literate_nav", "mkdocs_material", "mkdocs_material_extensions", "mkdocs_section_index", "mkdocstrings", "mkdocstrings_python"] onnx = ["onnx", "onnxruntime_gpu", "onnxsim"] -test = ["pytest", "pytest_cov", "pytest_env", "pytest_lazy_fixture", "pytest_mock"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.11" -content-hash = "5f9091afc0e09128749ceb173b259219da0cdb0f0feea3ffb8d7d0eaff20250c" +content-hash = "71b257f752974e3c002c0a4027a9faf5c5a99ae2eac9704e668393b6c2c53299" diff --git a/pyproject.toml b/pyproject.toml index 35ae02e6..1214415f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "quadra" -version = "1.5.8" +version = "2.0.0" description = "Deep Learning experiment orchestration library" authors = [ "Federico Belotti ", @@ -40,15 +40,16 @@ quadra = "quadra.main:main" [tool.poetry.dependencies] python = ">=3.9,<3.11" poetry = "1.7.1" -# This will make hydra-plugins available also when running with poetry install -hydra-plugins = { path = "quadra_hydra_plugin", optional = true } -# TODO: We could support previous torch version using mutually exclusive python version but it's bad... 
-# TODO: Right now it seems that poetry will download every kind of possible dependency from cu116 -# To make it faster we could hardcode the correct version of the dependencies -torch = { version = "1.13.1", source = "torch_cu116" } -torchvision = { version = "0.14.1", source = "torch_cu116" } -pytorch_lightning = "1.9.5" +torch = [ + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-linux_x86_64.whl", markers = "sys_platform == 'linux' and python_version == '3.10'" }, + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp310-cp310-win_amd64.whl", markers = "sys_platform == 'win32' and python_version == '3.10'" }, + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-linux_x86_64.whl", markers = "sys_platform == 'linux' and python_version == '3.9'" }, + { url = "https://download.pytorch.org/whl/cu121/torch-2.1.2%2Bcu121-cp39-cp39-win_amd64.whl", markers = "sys_platform == 'win32' and python_version == '3.9'" }, +] +torchvision = { version = "~0.16", source = "torch_cu121" } + +pytorch_lightning = "~2.1" torchsummary = "~1.5" torchmetrics = "~0.10" hydra_core = "~1.3" @@ -78,108 +79,72 @@ h5py = "~3.8" timm = "0.9.12" # Right now only this ref supports timm 0.9.12 segmentation_models_pytorch = { git = "https://github.com/qubvel/segmentation_models.pytorch", rev = "7b381f899ed472a477a89d381689caf535b5d0a6" } -anomalib = { git = "https://github.com/orobix/anomalib.git", tag = "v0.7.0+obx.1.2.10" } +anomalib = { git = "https://github.com/orobix/anomalib.git", tag = "v0.7.0+obx.1.3.0" } xxhash = "~3.2" torchinfo = "~1.8" -# Test dependencies -pytest = { version = "^7", optional = true } -pytest_cov = { version = "~4.0", optional = true } -pytest_lazy_fixture = { version = "~0.6", optional = true } -pytest_mock = { version = "~3.11", optional = true } -pytest_env = { version = "~1.1", optional = true } - -# Dev dependencies -interrogate = { version = "~1.5", optional = true } -black = { version = "~22.12", optional = true } -isort = { version = "~5.11", optional = true } -pre_commit = { version = "~3.0", optional = true } -pylint = { version = "~2.16", optional = true } -types_pyyaml = { version = "~6.0.12", optional = true } -mypy = { version = "~1.0", optional = true } -ruff = { version = "0.0.257", optional = true } -pandas_stubs = { version = "~1.5.3", optional = true } -twine = { version = "~4.0", optional = true } -poetry-bumpversion = { version = "~0.3", optional = true } - -# Documentation dependencies -mkdocs = { version = "1.5.2", optional = true } -mkdocs_literate_nav = { version = "0.6.0", optional = true } -mkdocs_section_index = { version = "0.3.6", optional = true } -mkdocstrings = { version = "0.23.0", optional = true } -mkdocs_autorefs = { version = "0.5.0", optional = true } -mkdocs_gen_files = { version = "0.5.0", optional = true } -mkdocs_material = { version = "9.2.8", optional = true } -mkdocstrings_python = { version = "1.6.2", optional = true } -mkdocs_material_extensions = { version = "1.1.1", optional = true } -mike = { version = "1.1.2", optional = true } -cairosvg = { version = "2.7.0", optional = true } - # ONNX dependencies -onnx = { version = "1.14.0", optional = true } +onnx = { version = "1.15.0", optional = true } onnxsim = { version = "0.4.28", optional = true } -onnxruntime_gpu = { version = "1.15.0", optional = true } +onnxruntime_gpu = { version = "1.17.0", optional = true, source = "onnx_cu12" } [[tool.poetry.source]] -name = "torch_cu116" -url = 
"https://download.pytorch.org/whl/cu116" +name = "torch_cu121" +url = "https://download.pytorch.org/whl/cu121" priority = "explicit" -[tool.poetry.extras] -dev = [ - "black", - "isort", - "pre_commit", - "pylint", - "bump2version", - "types_pyyaml", - "mypy", - "ruff", - "pandas_stubs", - "twine", - "pytest", - "pytest_cov", - "pytest_lazy_fixture", - "pytest_mock", - "pytest_env", - "interrogate", - "mkdocs", - "mkdocs_literate_nav", - "mkdocs_section_index", - "mkdocstrings", - "mkdocs_autorefs", - "mkdocs_gen_files", - "mkdocs_material", - "mkdocstrings_python", - "mkdocs_material_extensions", - "mike", - "cairosvg", - "poetry-bumpversion", - "hydra-plugins", -] +[[tool.poetry.source]] +name = "onnx_cu12" +url = "https://pkgs.dev.azure.com/onnxruntime/onnxruntime/_packaging/onnxruntime-cuda-12/pypi/simple/" +priority = "explicit" -test = [ - "pytest", - "pytest_cov", - "pytest_lazy_fixture", - "pytest_mock", - "pytest_env", -] +[tool.poetry.group.dev] +optional = true -docs = [ - "mkdocs", - "mkdocs_literate_nav", - "mkdocs_section_index", - "mkdocstrings", - "mkdocs_autorefs", - "mkdocs_gen_files", - "mkdocs_material", - "mkdocstrings_python", - "mkdocs_material_extensions", - "mike", - "cairosvg", -] +[tool.poetry.group.dev.dependencies] +hydra-plugins = { path = "quadra_hydra_plugin" } +# Dev dependencies +interrogate = "~1.5" +black = "~22.12" +isort = "~5.11" +pre_commit = "~3.0" +pylint = "~2.16" +types_pyyaml = "~6.0.12" +mypy = "~1.0" +ruff = "0.0.257" +pandas_stubs = "~1.5.3" +twine = "~4.0" +poetry-bumpversion = "~0.3" + +# Test dependencies +[tool.poetry.group.test] +optional = true + +[tool.poetry.group.test.dependencies] +pytest = "^7" +pytest_cov = "~4.0" +pytest_lazy_fixture = "~0.6" +pytest_mock = "~3.11" +pytest_env = "~1.1" +# Documentation dependencies +[tool.poetry.group.docs] +optional = true + +[tool.poetry.group.docs.dependencies] +mkdocs = "1.5.2" +mkdocs_literate_nav = "0.6.0" +mkdocs_section_index = "0.3.6" +mkdocstrings = "0.23.0" +mkdocs_autorefs = "0.5.0" +mkdocs_gen_files = "0.5.0" +mkdocs_material = "9.2.8" +mkdocstrings_python = "1.6.2" +mkdocs_material_extensions = "1.1.1" +mike = "1.1.2" +cairosvg = "2.7.0" + +[tool.poetry.extras] onnx = ["onnx", "onnxsim", "onnxruntime_gpu"] [tool.poetry_bumpversion.file."quadra/__init__.py"] @@ -188,7 +153,7 @@ replace = '__version__ = "{new_version}"' # Black formatting [tool.black] -line_length = 120 +line-length = 120 include = '\.pyi?$' exclude = ''' /( diff --git a/quadra/__init__.py b/quadra/__init__.py index 40d71e12..b8154772 100644 --- a/quadra/__init__.py +++ b/quadra/__init__.py @@ -1,4 +1,4 @@ -__version__ = "1.5.8" +__version__ = "2.0.0" def get_version(): diff --git a/quadra/configs/experiment/base/anomaly/cfa.yaml b/quadra/configs/experiment/base/anomaly/cfa.yaml index 9d0176d0..1fd25197 100644 --- a/quadra/configs/experiment/base/anomaly/cfa.yaml +++ b/quadra/configs/experiment/base/anomaly/cfa.yaml @@ -42,43 +42,6 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null - gradient_clip_val: 0 - gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. 
- fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 30 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - strategy: null - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false - replace_sampler_ddp: true - detect_anomaly: false - auto_scale_batch_size: false - plugins: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/cflow.yaml b/quadra/configs/experiment/base/anomaly/cflow.yaml index 67df900d..87279bfd 100644 --- a/quadra/configs/experiment/base/anomaly/cflow.yaml +++ b/quadra/configs/experiment/base/anomaly/cflow.yaml @@ -41,43 +41,7 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null - gradient_clip_val: 0 - gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 50 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - strategy: null - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false - replace_sampler_ddp: true - detect_anomaly: false - auto_scale_batch_size: false - plugins: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle + max_epochs: 50 diff --git a/quadra/configs/experiment/base/anomaly/csflow.yaml b/quadra/configs/experiment/base/anomaly/csflow.yaml index faa6f256..a1bf91e8 100644 --- a/quadra/configs/experiment/base/anomaly/csflow.yaml +++ b/quadra/configs/experiment/base/anomaly/csflow.yaml @@ -40,43 +40,9 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 1 # Grad clip value set based on the official implementation gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 240 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. 
- log_every_n_steps: 50 - strategy: null - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false - replace_sampler_ddp: true - detect_anomaly: false - auto_scale_batch_size: false - plugins: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle + max_epochs: 240 diff --git a/quadra/configs/experiment/base/anomaly/draem.yaml b/quadra/configs/experiment/base/anomaly/draem.yaml index c696c621..b7e4174e 100644 --- a/quadra/configs/experiment/base/anomaly/draem.yaml +++ b/quadra/configs/experiment/base/anomaly/draem.yaml @@ -43,43 +43,9 @@ logger: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 0 gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 max_epochs: 700 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - strategy: null - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false - replace_sampler_ddp: true - detect_anomaly: false - auto_scale_batch_size: false - plugins: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml index 4d1fd57a..e98c47c7 100644 --- a/quadra/configs/experiment/base/anomaly/efficient_ad.yaml +++ b/quadra/configs/experiment/base/anomaly/efficient_ad.yaml @@ -33,44 +33,11 @@ logger: trainer: devices: [2] - accelerator: auto - strategy: - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} - default_root_dir: null - detect_anomaly: false - deterministic: false - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 max_epochs: 20 max_steps: 20000 - max_time: null - min_epochs: null - min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - replace_sampler_ddp: true - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 val_check_interval: 1.0 # Don't validate before extracting features. 
+ # This will avoid issues with ModelSignatureWrapper + # As the default forward for EfficientAD is performed with a None attribute + # Which we currently can't handle + num_sanity_val_steps: 0 diff --git a/quadra/configs/experiment/base/anomaly/fastflow.yaml b/quadra/configs/experiment/base/anomaly/fastflow.yaml index 61eedf64..69027764 100644 --- a/quadra/configs/experiment/base/anomaly/fastflow.yaml +++ b/quadra/configs/experiment/base/anomaly/fastflow.yaml @@ -37,45 +37,10 @@ logger: experiment_name: run_name: ${core.name} -# PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> devices: [0] - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 0 gradient_clip_algorithm: norm - num_nodes: 1 - enable_progress_bar: true - overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 - max_epochs: 500 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - strategy: null - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false - replace_sampler_ddp: true - detect_anomaly: false - auto_scale_batch_size: false - plugins: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle + max_epochs: 500 diff --git a/quadra/configs/experiment/base/anomaly/padim.yaml b/quadra/configs/experiment/base/anomaly/padim.yaml index 1b05f7a2..bd251ca5 100644 --- a/quadra/configs/experiment/base/anomaly/padim.yaml +++ b/quadra/configs/experiment/base/anomaly/padim.yaml @@ -33,44 +33,5 @@ logger: trainer: devices: [2] - accelerator: auto - strategy: - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. - default_root_dir: null - detect_anomaly: false - deterministic: false - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 max_epochs: 1 - max_steps: -1 - max_time: null - min_epochs: null - min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - replace_sampler_ddp: true - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. 
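The anomaly trainer YAMLs above are now reduced to the handful of keys they actually override; every flag dropped by Lightning 2 (`resume_from_checkpoint`, `auto_lr_find`, `track_grad_norm`, `amp_backend`, `tpu_cores`, `replace_sampler_ddp`, ...) is removed, and anything not listed falls through to the `pytorch_lightning.Trainer` defaults. Below is a minimal sketch, not part of the patch, of how such a slimmed-down config is expected to resolve through the same Hydra instantiation path quadra already uses (`_target_: pytorch_lightning.Trainer`); resuming now goes through `Trainer.fit(ckpt_path=...)` rather than the removed `resume_from_checkpoint` key.

```python
# Illustrative sketch only: a slimmed-down trainer config (as in
# lightning_cpu.yaml) resolves against pytorch_lightning 2.x defaults.
import hydra
from omegaconf import OmegaConf

trainer_cfg = OmegaConf.create(
    {
        "_target_": "pytorch_lightning.Trainer",
        "accelerator": "cpu",
        "devices": 1,
        "min_epochs": 1,
        "max_epochs": 10,
        "log_every_n_steps": 10,
    }
)

# Keys not present in the YAML are simply left to the Trainer defaults.
trainer = hydra.utils.instantiate(trainer_cfg)

# Lightning 2 moved checkpoint resuming out of the Trainer constructor;
# `module` and `datamodule` below are hypothetical names:
# trainer.fit(module, datamodule=datamodule, ckpt_path="path/to/last.ckpt")
```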
diff --git a/quadra/configs/experiment/base/anomaly/patchcore.yaml b/quadra/configs/experiment/base/anomaly/patchcore.yaml index f71ec1e2..1c0767f7 100644 --- a/quadra/configs/experiment/base/anomaly/patchcore.yaml +++ b/quadra/configs/experiment/base/anomaly/patchcore.yaml @@ -33,44 +33,5 @@ logger: trainer: devices: [2] - accelerator: auto - strategy: - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false check_val_every_n_epoch: ${trainer.max_epochs} # Don't validate before extracting features. - default_root_dir: null - detect_anomaly: false - deterministic: false - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 max_epochs: 1 - max_steps: -1 - max_time: null - min_epochs: null - min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - replace_sampler_ddp: true - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. diff --git a/quadra/configs/experiment/base/ssl/linear_eval.yaml b/quadra/configs/experiment/base/ssl/linear_eval.yaml index f8ac93a8..50ab0f07 100644 --- a/quadra/configs/experiment/base/ssl/linear_eval.yaml +++ b/quadra/configs/experiment/base/ssl/linear_eval.yaml @@ -18,7 +18,6 @@ core: backbone: model: num_classes: 2 - ckpt_path: ??? trainer: devices: [2, 3] diff --git a/quadra/configs/model/anomalib/cfa.yaml b/quadra/configs/model/anomalib/cfa.yaml index bed23d7f..afacc73d 100644 --- a/quadra/configs/model/anomalib/cfa.yaml +++ b/quadra/configs/model/anomalib/cfa.yaml @@ -27,43 +27,9 @@ metrics: # PL Trainer Args. Don't add extra parameter here. trainer: - enable_checkpointing: true - default_root_dir: null gradient_clip_val: 0 gradient_clip_algorithm: norm - num_nodes: 1 devices: [0] - enable_progress_bar: true - overfit_batches: 0.0 - track_grad_norm: -1 check_val_every_n_epoch: 1 # Don't validate before extracting features. - fast_dev_run: false - accumulate_grad_batches: 1 max_epochs: 30 - min_epochs: null - max_steps: -1 - min_steps: null - max_time: null - limit_train_batches: 1.0 - limit_val_batches: 1.0 - limit_test_batches: 1.0 - limit_predict_batches: 1.0 val_check_interval: 1.0 # Don't validate before extracting features. - log_every_n_steps: 50 - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - strategy: null - sync_batchnorm: false - precision: 32 - enable_model_summary: true - num_sanity_val_steps: 0 - profiler: null - benchmark: false - deterministic: false - reload_dataloaders_every_n_epochs: 0 - auto_lr_find: false - replace_sampler_ddp: true - detect_anomaly: false - auto_scale_batch_size: false - plugins: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle diff --git a/quadra/configs/model/anomalib/csflow.yaml b/quadra/configs/model/anomalib/csflow.yaml index 85c78f82..3146d7f1 100644 --- a/quadra/configs/model/anomalib/csflow.yaml +++ b/quadra/configs/model/anomalib/csflow.yaml @@ -32,49 +32,3 @@ metrics: method: adaptive #options: [adaptive, manual] manual_image: null manual_pixel: null - -# PL Trainer Args. Don't add extra parameter here. 
-trainer: - accelerator: gpu # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 - default_root_dir: null - detect_anomaly: false - deterministic: false - devices: [0] - enable_checkpointing: true - enable_model_summary: true - enable_progress_bar: true - fast_dev_run: false - gradient_clip_val: 1 # Grad clip value set based on the official implementation - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - max_epochs: 240 - max_steps: -1 - max_time: null - min_epochs: null - min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null - precision: 32 - profiler: null - reload_dataloaders_every_n_epochs: 0 - replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 diff --git a/quadra/configs/trainer/lightning_cpu.yaml b/quadra/configs/trainer/lightning_cpu.yaml index 3236010b..90a71872 100644 --- a/quadra/configs/trainer/lightning_cpu.yaml +++ b/quadra/configs/trainer/lightning_cpu.yaml @@ -3,7 +3,34 @@ devices: 1 accelerator: cpu min_epochs: 1 max_epochs: 10 -strategy: null -resume_from_checkpoint: null +strategy: auto log_every_n_steps: 10 precision: 32 +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_gpu.yaml b/quadra/configs/trainer/lightning_gpu.yaml index 66e52054..1e366b6f 100644 --- a/quadra/configs/trainer/lightning_gpu.yaml +++ b/quadra/configs/trainer/lightning_gpu.yaml @@ -3,5 +3,33 @@ devices: [0] accelerator: gpu min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 +strategy: auto +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_gpu_bf16.yaml b/quadra/configs/trainer/lightning_gpu_bf16.yaml index 868770e7..9507d2bf 100644 --- a/quadra/configs/trainer/lightning_gpu_bf16.yaml +++ b/quadra/configs/trainer/lightning_gpu_bf16.yaml @@ -1,8 +1,36 @@ _target_: pytorch_lightning.Trainer -devices: [0] accelerator: gpu +devices: [0] min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 precision: bf16 +strategy: auto +num_nodes: 1 
+fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_gpu_fp16.yaml b/quadra/configs/trainer/lightning_gpu_fp16.yaml index 081f08d9..02814b80 100644 --- a/quadra/configs/trainer/lightning_gpu_fp16.yaml +++ b/quadra/configs/trainer/lightning_gpu_fp16.yaml @@ -3,6 +3,34 @@ devices: [0] accelerator: gpu min_epochs: 1 max_epochs: 10 -resume_from_checkpoint: null log_every_n_steps: 10 precision: 16 +strategy: auto +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/configs/trainer/lightning_multigpu.yaml b/quadra/configs/trainer/lightning_multigpu.yaml index bdcc0ee4..5c2d7643 100644 --- a/quadra/configs/trainer/lightning_multigpu.yaml +++ b/quadra/configs/trainer/lightning_multigpu.yaml @@ -6,5 +6,32 @@ max_epochs: 10 strategy: _target_: pytorch_lightning.strategies.DDPStrategy find_unused_parameters: false -resume_from_checkpoint: null log_every_n_steps: 10 +num_nodes: 1 +fast_dev_run: false +max_time: +limit_train_batches: +limit_val_batches: +limit_test_batches: +limit_predict_batches: +overfit_batches: 0.0 +val_check_interval: +check_val_every_n_epoch: 1 +num_sanity_val_steps: +enable_checkpointing: +enable_progress_bar: +enable_model_summary: +accumulate_grad_batches: 1 +gradient_clip_val: +gradient_clip_algorithm: +deterministic: +benchmark: +inference_mode: True +use_distributed_sampler: True +profiler: +detect_anomaly: False +barebones: False +plugins: +sync_batchnorm: False +reload_dataloaders_every_n_epochs: 0 +default_root_dir: diff --git a/quadra/models/base.py b/quadra/models/base.py index f08a4d4b..c0bab73a 100644 --- a/quadra/models/base.py +++ b/quadra/models/base.py @@ -141,6 +141,9 @@ def __getattribute__(self, __name: str) -> Any: "to", "half", "cpu", + "call_super_init", + "_call_impl", + "_compiled_call_impl", ]: return super().__getattribute__(__name) diff --git a/quadra/modules/base.py b/quadra/modules/base.py index ae030d26..36cb28e4 100644 --- a/quadra/modules/base.py +++ b/quadra/modules/base.py @@ -67,7 +67,8 @@ def configure_optimizers(self) -> Tuple[List[Any], List[Dict[str, Any]]]: } return [self.optimizer], [lr_scheduler_conf] - def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx): + # pylint: disable=unused-argument + def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx: int = 0): """Redefine optimizer zero grad.""" optimizer.zero_grad(set_to_none=True) diff --git a/quadra/modules/ssl/byol.py 
b/quadra/modules/ssl/byol.py index a439b4f8..64fceb4a 100644 --- a/quadra/modules/ssl/byol.py +++ b/quadra/modules/ssl/byol.py @@ -138,20 +138,14 @@ def optimizer_step( epoch: int, batch_idx: int, optimizer: Union[Optimizer, LightningOptimizer], - optimizer_idx: int = 0, optimizer_closure: Optional[Callable[[], Any]] = None, - on_tpu: bool = False, - using_lbfgs: bool = False, ) -> None: """Override optimizer step to update the teacher parameters.""" super().optimizer_step( epoch, batch_idx, optimizer, - optimizer_idx=optimizer_idx, optimizer_closure=optimizer_closure, - on_tpu=on_tpu, - using_lbfgs=using_lbfgs, ) self.update_teacher() diff --git a/quadra/modules/ssl/dino.py b/quadra/modules/ssl/dino.py index 59c71a30..59555e8c 100644 --- a/quadra/modules/ssl/dino.py +++ b/quadra/modules/ssl/dino.py @@ -157,13 +157,11 @@ def training_step(self, batch: Tuple[List[torch.Tensor], torch.Tensor], *args: A def configure_gradient_clipping( self, optimizer: Optimizer, - optimizer_idx: int, gradient_clip_val: Optional[Union[int, float]] = None, gradient_clip_algorithm: Optional[str] = None, ): """Configure gradient clipping for the optimizer.""" if gradient_clip_algorithm is not None and gradient_clip_val is not None: - clip_gradients(self.model, gradient_clip_val) clip_gradients(self.student_projection_mlp, gradient_clip_val) self.cancel_gradients_last_layer(self.current_epoch, self.freeze_last_layer) @@ -173,19 +171,13 @@ def optimizer_step( epoch: int, batch_idx: int, optimizer: Union[Optimizer, LightningOptimizer], - optimizer_idx: int = 0, optimizer_closure: Optional[Callable[[], Any]] = None, - on_tpu: bool = False, - using_lbfgs: bool = False, ) -> None: - """Override optimizer_step to update the teacher model.""" + """Override optimizer step to update the teacher parameters.""" super().optimizer_step( epoch, batch_idx, optimizer, - optimizer_idx=optimizer_idx, optimizer_closure=optimizer_closure, - on_tpu=on_tpu, - using_lbfgs=using_lbfgs, ) self.update_teacher() diff --git a/quadra/modules/ssl/hyperspherical.py b/quadra/modules/ssl/hyperspherical.py index b5186594..b7242cac 100644 --- a/quadra/modules/ssl/hyperspherical.py +++ b/quadra/modules/ssl/hyperspherical.py @@ -199,7 +199,7 @@ def validation_step(self, batch, batch_idx): ) return {"val_loss": total_loss} - def validation_epoch_end(self, outputs): + def on_validation_epoch_end(self, outputs): avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() return {"val_loss": avg_loss} diff --git a/quadra/tasks/base.py b/quadra/tasks/base.py index 40c45590..72a1edcc 100644 --- a/quadra/tasks/base.py +++ b/quadra/tasks/base.py @@ -6,10 +6,10 @@ import hydra import torch from hydra.core.hydra_config import HydraConfig +from lightning_fabric.utilities.device_parser import _parse_gpu_ids from omegaconf import DictConfig, OmegaConf, open_dict from pytorch_lightning import Callback, LightningModule, Trainer from pytorch_lightning.loggers import Logger, MLFlowLogger -from pytorch_lightning.utilities.device_parser import parse_gpu_ids from pytorch_lightning.utilities.exceptions import MisconfigurationException from quadra import get_version @@ -231,7 +231,7 @@ def devices(self, devices) -> None: return try: - self._devices = parse_gpu_ids(devices, include_cuda=True) + self._devices = _parse_gpu_ids(devices, include_cuda=True) except MisconfigurationException: self._devices = 1 self.config.trainer["accelerator"] = "cpu" diff --git a/quadra/tasks/classification.py b/quadra/tasks/classification.py index 05b943da..eff845b3 100644 --- 
a/quadra/tasks/classification.py +++ b/quadra/tasks/classification.py @@ -4,6 +4,7 @@ import json import os import typing +from copy import deepcopy from pathlib import Path from typing import Any, Dict, Generic, List, Optional, cast @@ -178,7 +179,7 @@ def module(self, module_config): ) if self.checkpoint_path is not None: log.info("Loading model from lightning checkpoint: %s", self.checkpoint_path) - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, model=self.model, optimizer=self.optimizer, @@ -286,7 +287,7 @@ def export(self) -> None: if self.best_model_path is not None: log.info("Saving deployment model for %s checkpoint", self.best_model_path) - module = self.module.load_from_checkpoint( + module = self.module.__class__.load_from_checkpoint( self.best_model_path, model=self.module.model, optimizer=self.optimizer, @@ -332,6 +333,8 @@ def generate_report(self) -> None: if not self.run_test or self.config.trainer.get("fast_dev_run"): self.datamodule.setup(stage="test") + # Deepcopy to remove the inference mode from gradients causing issues when loading checkpoints + self.module.model = deepcopy(self.module.model) if "16" in self.trainer.precision: log.warning("Gradcam is currently not supported with half precision, it will be disabled") self.module.gradcam = False @@ -1130,11 +1133,7 @@ def prepare_gradcam(self) -> None: return if isinstance(self.deployment_model.model.features_extractor, timm.models.resnet.ResNet): - target_layers = [ - cast(BaseNetworkBuilder, self.deployment_model.model).features_extractor.layer4[ - -1 - ] # type: ignore[index] - ] + target_layers = [cast(BaseNetworkBuilder, self.deployment_model.model).features_extractor.layer4[-1]] self.cam = GradCAM( model=self.deployment_model.model, target_layers=target_layers, diff --git a/quadra/tasks/segmentation.py b/quadra/tasks/segmentation.py index a61d840f..18974475 100644 --- a/quadra/tasks/segmentation.py +++ b/quadra/tasks/segmentation.py @@ -107,7 +107,9 @@ def module(self, module_config) -> None: log.info("Instantiating module <%s>", module_config.module["_target_"]) module = hydra.utils.instantiate(module_config.module, model=model, optimizer=optimizer, lr_scheduler=scheduler) if self.checkpoint_path is not None: - module.load_from_checkpoint(self.checkpoint_path, model=model, optimizer=optimizer, lr_scheduler=scheduler) + module.__class__.load_from_checkpoint( + self.checkpoint_path, model=model, optimizer=optimizer, lr_scheduler=scheduler + ) self._module = module def prepare(self) -> None: @@ -129,7 +131,7 @@ def export(self) -> None: best_model_path = self.trainer.checkpoint_callback.best_model_path log.info("Loaded best model from %s", best_model_path) - module = self.module.load_from_checkpoint( + module = self.module.__class__.load_from_checkpoint( best_model_path, model=self.module.model, loss_fun=None, diff --git a/quadra/tasks/ssl.py b/quadra/tasks/ssl.py index 0804154b..4638a2c6 100644 --- a/quadra/tasks/ssl.py +++ b/quadra/tasks/ssl.py @@ -170,7 +170,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, model=self.backbone, projection_mlp=self.projection_mlp, @@ -231,7 +231,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( 
self.checkpoint_path, model=self.backbone, projection_mlp=self.projection_mlp, @@ -349,7 +349,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, student=self.student_model, teacher=self.teacher_model, @@ -420,7 +420,7 @@ def module(self, module_config): lr_scheduler=self.scheduler, ) if self.checkpoint_path is not None: - module = module.load_from_checkpoint( + module = module.__class__.load_from_checkpoint( self.checkpoint_path, student=self.student_model, teacher=self.teacher_model, diff --git a/quadra/utils/utils.py b/quadra/utils/utils.py index c9ac89b1..91170c57 100644 --- a/quadra/utils/utils.py +++ b/quadra/utils/utils.py @@ -20,10 +20,10 @@ import torch from hydra.core.hydra_config import HydraConfig from hydra.utils import get_original_cwd +from lightning_fabric.utilities.device_parser import _parse_gpu_ids from omegaconf import DictConfig, OmegaConf from pytorch_lightning.loggers import TensorBoardLogger from pytorch_lightning.utilities import rank_zero_only -from pytorch_lightning.utilities.device_parser import parse_gpu_ids import quadra import quadra.utils.export as quadra_export @@ -263,7 +263,7 @@ def finish( tensorboard_logger = get_tensorboard_logger(trainer=trainer) file_names = ["config.yaml", "config_resolved.yaml", "config_tree.txt", "data/dataset.csv"] if "16" in str(trainer.precision): - index = parse_gpu_ids(config.trainer.devices, include_cuda=True)[0] + index = _parse_gpu_ids(config.trainer.devices, include_cuda=True)[0] device = "cuda:" + str(index) half_precision = True else: diff --git a/tests/conftest.py b/tests/conftest.py index 85922195..44b0b87e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -44,7 +44,9 @@ def limit_torch_threads(): @pytest.fixture(autouse=True) def setup_devices(device: str): """Set the device to run tests on.""" - torch_device = torch.device(device) + # torch_device = torch.device(device) os.environ["QUADRA_TEST_DEVICE"] = device - if torch_device.type != "cuda": - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + + # TODO: If we use this lightning crashes because it sees gpus but no gpu are available!! + # if torch_device.type != "cuda": + # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
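Taken together, the Python-side changes in this patch follow two recurring Lightning 2 patterns: GPU id parsing moves from `pytorch_lightning.utilities.device_parser.parse_gpu_ids` to `lightning_fabric.utilities.device_parser._parse_gpu_ids`, and `load_from_checkpoint` is invoked on the class (`module.__class__.load_from_checkpoint(...)`) instead of on an already-built instance. The sketch below mirrors the devices setter in `quadra/tasks/base.py`; it is illustrative only, and the constructor kwargs in the commented call are assumptions, not part of the patch.

```python
# Sketch of the two migration patterns used throughout the diffs above.
from lightning_fabric.utilities.device_parser import _parse_gpu_ids
from pytorch_lightning.utilities.exceptions import MisconfigurationException


def resolve_devices(devices):
    """Return the parsed CUDA device indices, falling back to a single CPU device."""
    try:
        return _parse_gpu_ids(devices, include_cuda=True)
    except MisconfigurationException:
        # No usable GPU configuration: run on a single CPU device instead.
        return 1


# Lightning 2 treats load_from_checkpoint as a classmethod, so an existing
# instance reloads through its class (kwargs here are illustrative):
# module = module.__class__.load_from_checkpoint(
#     checkpoint_path, model=model, optimizer=optimizer, lr_scheduler=scheduler
# )
```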