dev(narugo): force silero-langid model to use CPU
narugo1992 committed Sep 2, 2024
1 parent bfd36d4 commit 7ee4085
Showing 5 changed files with 8 additions and 6 deletions.
3 changes: 2 additions & 1 deletion requirements-gpu.txt
@@ -1 +1,2 @@
-onnxruntime-gpu
+onnxruntime-gpu<1.18
+numpy<2
2 changes: 1 addition & 1 deletion requirements-test.txt
@@ -17,4 +17,4 @@ matplotlib
 natsort
 torchaudio
 torch
-transformers
+transformers
2 changes: 1 addition & 1 deletion requirements-zoo.txt
@@ -21,4 +21,4 @@ transformers
 click
 datasets
 evaluate
-pyannote.audio
+pyannote.audio
3 changes: 2 additions & 1 deletion soundutils/langid/silero.py
@@ -25,7 +25,8 @@ def _lang_group_dict_95():
 
 @lru_cache()
 def _open_model():
-    return open_onnx_model(hf_hub_download(repo_id=_REPO_ID, filename='lang_classifier_95.onnx'))
+    onnx_file = hf_hub_download(repo_id=_REPO_ID, filename='lang_classifier_95.onnx')
+    return open_onnx_model(onnx_file, mode='cpu')
 
 
 def _raw_langid(sound: SoundTyping, top_n: int = 5):
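
For context, a minimal sketch (not from the repository) of what mode='cpu' amounts to when expressed directly against the onnxruntime API; 'model.onnx' is a placeholder for the file hf_hub_download returns:

import onnxruntime as ort

# Pin the session to the CPU execution provider so inference never runs on
# CUDA; this is the effect mode='cpu' has in the diff above.
options = ort.SessionOptions()
session = ort.InferenceSession(
    'model.onnx',  # placeholder path for illustration
    sess_options=options,
    providers=['CPUExecutionProvider'],
)
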
4 changes: 2 additions & 2 deletions soundutils/utils/onnx.py
@@ -74,7 +74,7 @@ def _open_onnx_model(ckpt: str, provider: str, use_cpu: bool = True) -> InferenceSession:
         providers.append("CPUExecutionProvider")
 
     logging.info(f'Model {ckpt!r} loaded with provider {provider!r}')
-    return InferenceSession(ckpt, options, providers=providers)
+    return InferenceSession(ckpt, sess_options=options, providers=providers)
 
@@ -93,4 +93,4 @@ def open_onnx_model(ckpt: str, mode: str = None) -> InferenceSession:
     on Linux, executing ``export ONNX_MODE=cpu`` will ignore any existing CUDA and force the model inference
     to run on CPU.
     """
-    return _open_onnx_model(ckpt, get_onnx_provider(mode or os.environ.get('ONNX_MODE', None)))
+    return _open_onnx_model(ckpt, provider=get_onnx_provider(mode or os.environ.get('ONNX_MODE', None)))
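
As the docstring above notes, the provider can also be forced through the environment. A short usage sketch, assuming soundutils is importable; 'model.onnx' is again a placeholder path:

import os

# Mirror ``export ONNX_MODE=cpu`` from the docstring: set the variable before
# the model is opened so get_onnx_provider resolves to the CPU provider.
os.environ['ONNX_MODE'] = 'cpu'

from soundutils.utils.onnx import open_onnx_model

session = open_onnx_model('model.onnx')  # placeholder path; runs on CPU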
