diff --git a/requirements-gpu.txt b/requirements-gpu.txt
index 617e84d..77d02b1 100644
--- a/requirements-gpu.txt
+++ b/requirements-gpu.txt
@@ -1 +1,2 @@
-onnxruntime-gpu
+onnxruntime-gpu<1.18
+numpy<2
diff --git a/requirements-test.txt b/requirements-test.txt
index 2bf7f76..3c8f4e1 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -17,4 +17,4 @@ matplotlib
 natsort
 torchaudio
 torch
-transformers
\ No newline at end of file
+transformers
diff --git a/requirements-zoo.txt b/requirements-zoo.txt
index 32e572b..5b95131 100644
--- a/requirements-zoo.txt
+++ b/requirements-zoo.txt
@@ -21,4 +21,4 @@ transformers
 click
 datasets
 evaluate
-pyannote.audio
\ No newline at end of file
+pyannote.audio
diff --git a/soundutils/langid/silero.py b/soundutils/langid/silero.py
index 0551cd3..ec6ceba 100644
--- a/soundutils/langid/silero.py
+++ b/soundutils/langid/silero.py
@@ -25,7 +25,8 @@ def _lang_group_dict_95():
 
 @lru_cache()
 def _open_model():
-    return open_onnx_model(hf_hub_download(repo_id=_REPO_ID, filename='lang_classifier_95.onnx'))
+    onnx_file = hf_hub_download(repo_id=_REPO_ID, filename='lang_classifier_95.onnx')
+    return open_onnx_model(onnx_file, mode='cpu')
 
 
 def _raw_langid(sound: SoundTyping, top_n: int = 5):
diff --git a/soundutils/utils/onnx.py b/soundutils/utils/onnx.py
index 22a37a1..cd5e595 100644
--- a/soundutils/utils/onnx.py
+++ b/soundutils/utils/onnx.py
@@ -74,7 +74,7 @@ def _open_onnx_model(ckpt: str, provider: str, use_cpu: bool = True) -> InferenceSession:
         providers.append("CPUExecutionProvider")
 
     logging.info(f'Model {ckpt!r} loaded with provider {provider!r}')
-    return InferenceSession(ckpt, options, providers=providers)
+    return InferenceSession(ckpt, sess_options=options, providers=providers)
 
 
 def open_onnx_model(ckpt: str, mode: str = None) -> InferenceSession:
@@ -93,4 +93,4 @@ def open_onnx_model(ckpt: str, mode: str = None) -> InferenceSession:
    on Linux, executing ``export ONNX_MODE=cpu`` will ignore any existing CUDA
    and force the model inference to run on CPU.
    """
-    return _open_onnx_model(ckpt, get_onnx_provider(mode or os.environ.get('ONNX_MODE', None)))
+    return _open_onnx_model(ckpt, provider=get_onnx_provider(mode or os.environ.get('ONNX_MODE', None)))
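
Usage sketch for the changed call paths (hypothetical snippet, not part of the patch; 'model.onnx' is a placeholder path, and the import assumes the soundutils layout shown above):

    import os
    from soundutils.utils.onnx import open_onnx_model

    # Pass mode='cpu' to pin inference to the CPUExecutionProvider,
    # mirroring what _open_model() in soundutils/langid/silero.py now does:
    session = open_onnx_model('model.onnx', mode='cpu')

    # Alternatively, set the environment variable documented in the
    # open_onnx_model docstring; it is consulted only when mode is None:
    os.environ['ONNX_MODE'] = 'cpu'
    session = open_onnx_model('model.onnx')
    print([inp.name for inp in session.get_inputs()])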