feat(model): Support Qwen2.5 coder models (#2139)
fangyinc authored Nov 21, 2024
1 parent 3ccfa94 · commit 9566f4e
Showing 1 changed file with 13 additions and 0 deletions.
dbgpt/configs/model_config.py
@@ -183,7 +183,20 @@ def get_device() -> str:
    "qwen2.5-coder-1.5b-instruct": os.path.join(
        MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct"
    ),
    "qwen2.5-coder-32b-instruct": os.path.join(
        MODEL_PATH, "Qwen2.5-Coder-32B-Instruct"
    ),
    "qwen2.5-coder-14b-instruct": os.path.join(
        MODEL_PATH, "Qwen2.5-Coder-14B-Instruct"
    ),
    "qwen2.5-coder-3b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-3B-Instruct"),
    "qwen2.5-coder-7b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-7B-Instruct"),
    "qwen2.5-coder-1.5b-instruct": os.path.join(
        MODEL_PATH, "Qwen2.5-Coder-1.5B-Instruct"
    ),
    "qwen2.5-coder-0.5b-instruct": os.path.join(
        MODEL_PATH, "Qwen2.5-Coder-0.5B-Instruct"
    ),
    # (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
    "wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
    # wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf
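For context, each entry above maps a served model alias to the local directory where its checkpoint is expected under MODEL_PATH. The sketch below shows one way such a mapping can be consumed to resolve an alias to a path; the dict name LLM_MODEL_CONFIG and the helper resolve_model_path are illustrative assumptions for this example, not details confirmed by the diff above.

# Minimal sketch, assuming a mapping shaped like the one edited above.
# LLM_MODEL_CONFIG and resolve_model_path are hypothetical names, not part of the commit.
import os

MODEL_PATH = os.getenv("MODEL_PATH", "./models")  # assumption: configurable checkpoint root

LLM_MODEL_CONFIG = {  # subset of the aliases added in this commit
    "qwen2.5-coder-32b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-32B-Instruct"),
    "qwen2.5-coder-7b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-7B-Instruct"),
    "qwen2.5-coder-0.5b-instruct": os.path.join(MODEL_PATH, "Qwen2.5-Coder-0.5B-Instruct"),
}


def resolve_model_path(model_name: str) -> str:
    """Return the configured checkpoint directory for a model alias."""
    try:
        return LLM_MODEL_CONFIG[model_name]
    except KeyError:
        raise ValueError(f"Unknown model alias: {model_name!r}") from None


if __name__ == "__main__":
    # Prints something like ./models/Qwen2.5-Coder-7B-Instruct
    print(resolve_model_path("qwen2.5-coder-7b-instruct"))

Actually loading the checkpoint from the resolved directory (for example with transformers' AutoModelForCausalLM.from_pretrained) is intentionally left out of the sketch.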
