From 1086ec59a3b4f0ef74834ab60f9be6ecece2120b Mon Sep 17 00:00:00 2001 From: Luiz Antonio Date: Tue, 5 Mar 2024 15:47:20 -0500 Subject: [PATCH] Bump TGI version to 1.4.2 to support Gemma models --- .../manifest-templates/text-generation-inference.tftpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/inference-server/text-generation-inference/manifest-templates/text-generation-inference.tftpl b/benchmarks/inference-server/text-generation-inference/manifest-templates/text-generation-inference.tftpl index ecb70abd1..34a4531e5 100644 --- a/benchmarks/inference-server/text-generation-inference/manifest-templates/text-generation-inference.tftpl +++ b/benchmarks/inference-server/text-generation-inference/manifest-templates/text-generation-inference.tftpl @@ -50,7 +50,7 @@ spec: - name: text-generation-inference ports: - containerPort: 80 - image: "ghcr.io/huggingface/text-generation-inference:1.1.1" + image: "ghcr.io/huggingface/text-generation-inference:1.4.2" args: ["--model-id", "${model_id}", "--num-shard", "${gpu_count}"] # , "{token}" tensor parallelism, should correspond to number of gpus below %{ for hugging_face_token_secret in hugging_face_token_secret_list ~} env: