Fix gpu dependency and only leverage onnx when GPU is unavailable (#157)
* replace write with append

* fix eetq dependency

* gpu guard required eetq

* fix bug when gpu is available

* fix for gpu device

* reverse

* fix

* replace gpu -> cuda
cotran2 authored Oct 9, 2024
1 parent 5c4a6bc commit 8b5db45
Showing 3 changed files with 18 additions and 14 deletions.
4 changes: 2 additions & 2 deletions arch/src/consts.rs
@@ -1,5 +1,5 @@
-pub const DEFAULT_EMBEDDING_MODEL: &str = "katanemo/bge-large-en-v1.5-onnx";
-pub const DEFAULT_INTENT_MODEL: &str = "katanemo/deberta-base-nli-onnx";
+pub const DEFAULT_EMBEDDING_MODEL: &str = "katanemo/bge-large-en-v1.5";
+pub const DEFAULT_INTENT_MODEL: &str = "katanemo/deberta-base-nli";
 pub const DEFAULT_PROMPT_TARGET_THRESHOLD: f64 = 0.8;
 pub const DEFAULT_HALLUCINATED_THRESHOLD: f64 = 0.1;
 pub const RATELIMIT_SELECTOR_HEADER_KEY: &str = "x-arch-ratelimit-selector";
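Both Rust defaults drop the "-onnx" suffix: the embedding and intent models now point at the base Katanemo repos, and the ONNX weights are resolved from those repos at load time instead (see the file_name="onnx/model.onnx" branches in model_server/app/load_models.py below).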
2 changes: 1 addition & 1 deletion model_server/app/guard_model_config.yaml
@@ -1,3 +1,3 @@
 jailbreak:
   cpu: "katanemo/Arch-Guard-cpu"
-  gpu: "katanemo/Arch-Guard-gpu"
+  gpu: "katanemo/Arch-Guard"
26 changes: 15 additions & 11 deletions model_server/app/load_models.py
@@ -1,6 +1,6 @@
 import os
 import sentence_transformers
-from transformers import AutoTokenizer, pipeline
+from transformers import AutoTokenizer, AutoModel, pipeline
 import sqlite3
 import torch
 from optimum.onnxruntime import ORTModelForFeatureExtraction, ORTModelForSequenceClassification  # type: ignore
@@ -18,16 +18,17 @@ def get_device():
     return device


-def load_transformers(
-    model_name=os.getenv("MODELS", "katanemo/bge-large-en-v1.5-onnx")
-):
+def load_transformers(model_name=os.getenv("MODELS", "katanemo/bge-large-en-v1.5")):
     print("Loading Embedding Model")
     transformers = {}
     device = get_device()
     transformers["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
-    transformers["model"] = ORTModelForFeatureExtraction.from_pretrained(
-        model_name, device_map=device
-    )
+    if device != "cuda":
+        transformers["model"] = ORTModelForFeatureExtraction.from_pretrained(
+            model_name, file_name="onnx/model.onnx"
+        )
+    else:
+        transformers["model"] = AutoModel.from_pretrained(model_name, device_map=device)
     transformers["model_name"] = model_name

     return transformers
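The new branch loads ONNX Runtime weights only when no CUDA device is present and falls back to a plain transformers AutoModel on GPU. The diff truncates get_device(); the sketch below assumes it simply wraps torch.cuda.is_available(), and that the base repo ships its ONNX export at onnx/model.onnx (as the file_name argument implies):

import torch
from transformers import AutoTokenizer
from optimum.onnxruntime import ORTModelForFeatureExtraction

def get_device():
    # Assumed body; the diff only shows this function's tail.
    return "cuda" if torch.cuda.is_available() else "cpu"

model_name = "katanemo/bge-large-en-v1.5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
if get_device() != "cuda":
    # file_name points at the ONNX export inside the base repo,
    # replacing the retired "-onnx" mirror repos.
    model = ORTModelForFeatureExtraction.from_pretrained(
        model_name, file_name="onnx/model.onnx"
    )
    inputs = tokenizer("What is the weather today?", return_tensors="pt")
    # BGE models conventionally take the [CLS] token as the sentence embedding.
    embedding = model(**inputs).last_hidden_state[:, 0]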
@@ -64,13 +65,16 @@ def load_guard_model(


 def load_zero_shot_models(
-    model_name=os.getenv("ZERO_SHOT_MODELS", "katanemo/deberta-base-nli-onnx")
+    model_name=os.getenv("ZERO_SHOT_MODELS", "katanemo/deberta-base-nli")
 ):
     zero_shot_model = {}
     device = get_device()
-    zero_shot_model["model"] = ORTModelForSequenceClassification.from_pretrained(
-        model_name
-    )
+    if device != "cuda":
+        zero_shot_model["model"] = ORTModelForSequenceClassification.from_pretrained(
+            model_name, file_name="onnx/model.onnx"
+        )
+    else:
+        zero_shot_model["model"] = AutoModel.from_pretrained(model_name)
     zero_shot_model["tokenizer"] = AutoTokenizer.from_pretrained(model_name)

     # create pipeline
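The hunk cuts off at the "# create pipeline" comment, so the wiring itself is not shown. A plausible sketch of that step, assuming the dict returned by load_zero_shot_models is handed to a standard transformers zero-shot pipeline (the candidate labels are illustrative):

from transformers import pipeline

# zero_shot_model is the dict built above; optimum's ORT models follow
# the transformers model API, so pipeline() accepts them directly.
classifier = pipeline(
    "zero-shot-classification",
    model=zero_shot_model["model"],
    tokenizer=zero_shot_model["tokenizer"],
)
result = classifier(
    "Turn off the living room lights",
    candidate_labels=["device_control", "small_talk"],
)
print(result["labels"][0], result["scores"][0])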
