From 7e716d19729fed73ce6d93c4e9e7d2e49e012263 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jun 2024 05:07:44 +0000
Subject: [PATCH] build(deps): bump optimum from 1.19.1 to 1.20.0 in /runtimes/huggingface

Bumps [optimum](https://github.com/huggingface/optimum) from 1.19.1 to 1.20.0.
- [Release notes](https://github.com/huggingface/optimum/releases)
- [Commits](https://github.com/huggingface/optimum/compare/v1.19.1...v1.20.0)

---
updated-dependencies:
- dependency-name: optimum
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 runtimes/huggingface/poetry.lock | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/runtimes/huggingface/poetry.lock b/runtimes/huggingface/poetry.lock
index 745b9648f..7d7bc0f32 100644
--- a/runtimes/huggingface/poetry.lock
+++ b/runtimes/huggingface/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
 
 [[package]]
 name = "absl-py"
@@ -2371,13 +2371,13 @@ tests = ["pytest", "pytest-cov", "pytest-pep8"]
 
 [[package]]
 name = "optimum"
-version = "1.19.1"
+version = "1.20.0"
 description = "Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to integrate third-party libraries from Hardware Partners and interface with their specific functionality."
 optional = false
 python-versions = ">=3.7.0"
 files = [
-    {file = "optimum-1.19.1-py3-none-any.whl", hash = "sha256:ca474589682fe10f7827c85260d116603a5823d5c251c453620584cbc06fa5f6"},
-    {file = "optimum-1.19.1.tar.gz", hash = "sha256:fd723b723bb7fe57d98b4afbd5f9e1d923d31a9843eff32331725162994ec849"},
+    {file = "optimum-1.20.0-py3-none-any.whl", hash = "sha256:0c0d0746043c95e22cf3586946d7408d353f10c0486f1c7d2d11084a5cfc0ede"},
+    {file = "optimum-1.20.0.tar.gz", hash = "sha256:b64c7536fe738db9b56605105efe72006401ad2aa00cb499ae407f2e06f3043b"},
 ]
 
 [package.dependencies]
@@ -2395,7 +2395,7 @@ packaging = "*"
 protobuf = {version = ">=3.20.1", optional = true, markers = "extra == \"onnxruntime\""}
 sympy = "*"
 torch = ">=1.11"
-transformers = {version = ">=4.26.0,<4.41.0", extras = ["sentencepiece"]}
+transformers = {version = ">=4.26.0,<4.42.0", extras = ["sentencepiece"]}
 
 [package.extras]
 amd = ["optimum-amd"]
@@ -2408,15 +2408,15 @@ exporters-gpu = ["onnx", "onnxruntime-gpu", "timm"]
 exporters-tf = ["h5py", "numpy (<1.24.0)", "onnx", "onnxruntime", "tensorflow (>=2.4,<=2.12.1)", "tf2onnx", "timm", "transformers[sentencepiece] (>=4.26.0,<4.38.0)"]
 furiosa = ["optimum-furiosa"]
 graphcore = ["optimum-graphcore"]
-habana = ["optimum-habana", "transformers (>=4.37.0,<4.38.0)"]
-intel = ["optimum-intel (>=1.15.0)"]
-neural-compressor = ["optimum-intel[neural-compressor] (>=1.15.0)"]
-neuron = ["optimum-neuron[neuron] (>=0.0.20)", "transformers (==4.36.2)"]
-neuronx = ["optimum-neuron[neuronx] (>=0.0.20)", "transformers (==4.36.2)"]
-nncf = ["optimum-intel[nncf] (>=1.15.0)"]
+habana = ["optimum-habana", "transformers (>=4.38.0,<4.39.0)"]
+intel = ["optimum-intel (>=1.16.0)"]
+neural-compressor = ["optimum-intel[neural-compressor] (>=1.16.0)"]
+neuron = ["optimum-neuron[neuron] (>=0.0.20)", "transformers (>=4.36.2,<4.42.0)"]
+neuronx = ["optimum-neuron[neuronx] (>=0.0.20)", "transformers (>=4.36.2,<4.42.0)"]
+nncf = ["optimum-intel[nncf] (>=1.16.0)"]
 onnxruntime = ["datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime (>=1.11.0)", "protobuf (>=3.20.1)"]
 onnxruntime-gpu = ["accelerate", "datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime-gpu (>=1.11.0)", "protobuf (>=3.20.1)"]
-openvino = ["optimum-intel[openvino] (>=1.15.0)"]
+openvino = ["optimum-intel[openvino] (>=1.16.0)"]
 quality = ["black (>=23.1,<24.0)", "ruff (==0.1.5)"]
 tests = ["Pillow", "accelerate", "diffusers (>=0.17.0)", "einops", "invisible-watermark", "parameterized", "pytest (<=8.0.0)", "pytest-xdist", "requests", "rjieba", "sacremoses", "scikit-learn", "timm", "torchaudio", "torchvision"]
 