From 9566f4e9f7c8c5034365295cfbd615e650d14d65 Mon Sep 17 00:00:00 2001 From: Fangyin Cheng Date: Thu, 21 Nov 2024 13:55:04 +0800 Subject: [PATCH] feat(model): Support Qwen2.5 coder models (#2139) --- dbgpt/configs/model_config.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/dbgpt/configs/model_config.py b/dbgpt/configs/model_config.py index 298a64da7..83fae6190 100644 --- a/dbgpt/configs/model_config.py +++ b/dbgpt/configs/model_config.py @@ -183,7 +183,20 @@ def get_device() -> str: "qwen2.5-coder-1.5b-instruct": os.path.join( MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct" ), + "qwen2.5-coder-32b-instruct": os.path.join( + MODEL_PATH, "Qwen2.5-Coder-32B-Instruct" + ), + "qwen2.5-coder-14b-instruct": os.path.join( + MODEL_PATH, "Qwen2.5-Coder-14B-Instruct" + ), + "qwen2.5-coder-3b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-3B-Instruct"), "qwen2.5-coder-7b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-7B-Instruct"), + "qwen2.5-coder-1.5b-instruct": os.path.join( + MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct" + ), + "qwen2.5-coder-0.5b-instruct": os.path.join( + MODEL_PATH, "Qwen2.5-Coder-0.5B-Instruct" + ), # (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2 "wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"), # wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf
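
For reference, a minimal sketch of what this patch registers: each new Qwen2.5-Coder instruct model name is mapped to an expected checkpoint directory under MODEL_PATH. The dictionary name QWEN25_CODER_PATHS, the "./models" value, and the availability check below are illustrative stand-ins, not part of the patch itself; the real entries extend the larger model-path mapping in dbgpt/configs/model_config.py, whose name is not shown in this hunk.

    # Illustrative sketch only; names below are assumptions, not DB-GPT's actual identifiers.
    import os

    MODEL_PATH = "./models"  # stand-in for the value defined in model_config.py

    QWEN25_CODER_PATHS = {
        "qwen2.5-coder-32b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-32B-Instruct"),
        "qwen2.5-coder-14b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-14B-Instruct"),
        "qwen2.5-coder-3b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-3B-Instruct"),
        "qwen2.5-coder-1.5b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct"),
        "qwen2.5-coder-0.5b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-0.5B-Instruct"),
    }

    # Report which of the newly registered checkpoints are present locally.
    for name, path in QWEN25_CODER_PATHS.items():
        status = "found" if os.path.isdir(path) else "missing"
        print(f"{name}: {path} ({status})")

Given this mapping, selecting one of the new model names should only require placing (or symlinking) the downloaded checkpoint at the matching directory name under the models path, e.g. models/Qwen2.5-Coder-32B-Instruct.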