diff --git a/dbgpt/configs/model_config.py b/dbgpt/configs/model_config.py
index 298a64da7..83fae6190 100644
--- a/dbgpt/configs/model_config.py
+++ b/dbgpt/configs/model_config.py
@@ -183,7 +183,17 @@ def get_device() -> str:
     "qwen2.5-coder-1.5b-instruct": os.path.join(
         MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct"
     ),
+    "qwen2.5-coder-32b-instruct": os.path.join(
+        MODEL_PATH, "Qwen2.5-Coder-32B-Instruct"
+    ),
+    "qwen2.5-coder-14b-instruct": os.path.join(
+        MODEL_PATH, "Qwen2.5-Coder-14B-Instruct"
+    ),
+    "qwen2.5-coder-3b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-3B-Instruct"),
     "qwen2.5-coder-7b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-7B-Instruct"),
+    "qwen2.5-coder-0.5b-instruct": os.path.join(
+        MODEL_PATH, "Qwen2.5-Coder-0.5B-Instruct"
+    ),
     # (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
     "wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
     # wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf