From dce03862d582dab50ba8be583d97b667006f96fd Mon Sep 17 00:00:00 2001 From: yyhhyy <95077259+yyhhyyyyyy@users.noreply.github.com> Date: Sat, 20 Apr 2024 21:03:10 +0800 Subject: [PATCH] feat(model): Support Qwen2MoE (#1439) Co-authored-by: Fangyin Cheng --- README.md | 1 + README.zh.md | 1 + dbgpt/configs/model_config.py | 2 ++ dbgpt/model/adapter/hf_adapter.py | 28 ++++++++++++++++++++++++++++ 4 files changed, 32 insertions(+) diff --git a/README.md b/README.md index 706043aa9..a09b441b9 100644 --- a/README.md +++ b/README.md @@ -158,6 +158,7 @@ At present, we have introduced several key features to showcase our current capa We offer extensive model support, including dozens of large language models (LLMs) from both open-source and API agents, such as LLaMA/LLaMA2, Baichuan, ChatGLM, Wenxin, Tongyi, Zhipu, and many more. - News + - πŸ”₯πŸ”₯πŸ”₯ [Qwen1.5-MoE-A2.7B-Chat](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B-Chat) - πŸ”₯πŸ”₯πŸ”₯ [Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) - πŸ”₯πŸ”₯πŸ”₯ [Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) - πŸ”₯πŸ”₯πŸ”₯ [CodeQwen1.5-7B-Chat](https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat) diff --git a/README.zh.md b/README.zh.md index 4775af49b..aba033e91 100644 --- a/README.zh.md +++ b/README.zh.md @@ -152,6 +152,7 @@ ζ΅·ι‡ζ¨‘εž‹ζ”―ζŒοΌŒεŒ…ζ‹¬εΌ€ζΊγ€APIδ»£η†η­‰ε‡ εη§ε€§θ―­θ¨€ζ¨‘εž‹γ€‚ε¦‚LLaMA/LLaMA2、Baichuan、ChatGLMγ€ζ–‡εΏƒγ€ι€šδΉ‰γ€ζ™Ίθ°±η­‰γ€‚ε½“ε‰ε·²ζ”―ζŒε¦‚δΈ‹ζ¨‘εž‹: - ζ–°ε’žζ”―ζŒζ¨‘εž‹ + - πŸ”₯πŸ”₯πŸ”₯ [Qwen1.5-MoE-A2.7B-Chat](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B-Chat) - πŸ”₯πŸ”₯πŸ”₯ [Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) - πŸ”₯πŸ”₯πŸ”₯ [Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) - πŸ”₯πŸ”₯πŸ”₯ [CodeQwen1.5-7B-Chat](https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat) diff --git a/dbgpt/configs/model_config.py 
b/dbgpt/configs/model_config.py index 4da990a42..a9f75bb98 100644 --- a/dbgpt/configs/model_config.py +++ b/dbgpt/configs/model_config.py @@ -114,6 +114,8 @@ def get_device() -> str: "qwen1.5-72b-chat": os.path.join(MODEL_PATH, "Qwen1.5-72B-Chat"), # https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat "codeqwen1.5-7b-chat": os.path.join(MODEL_PATH, "CodeQwen1.5-7B-Chat"), + # https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B-Chat + "qwen1.5-moe-a2.7b-chat": os.path.join(MODEL_PATH, "Qwen1.5-MoE-A2.7B-Chat"), # (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2 "wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"), # wget https://huggingface.co/TheBloke/vicuna-13B-v1.5-GGUF/resolve/main/vicuna-13b-v1.5.Q4_K_M.gguf -O models/ggml-model-q4_0.gguf diff --git a/dbgpt/model/adapter/hf_adapter.py b/dbgpt/model/adapter/hf_adapter.py index 60ed6a043..d93f00cee 100644 --- a/dbgpt/model/adapter/hf_adapter.py +++ b/dbgpt/model/adapter/hf_adapter.py @@ -267,6 +267,33 @@ def do_match(self, lower_model_name_or_path: Optional[str] = None): lower_model_name_or_path and "qwen" in lower_model_name_or_path and "1.5" in lower_model_name_or_path + and "moe" not in lower_model_name_or_path + ) + + +class QwenMoeAdapter(NewHFChatModelAdapter): + """ + https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B + + TODO: There are problems with quantization. + """ + + support_4bit: bool = False + support_8bit: bool = False + + def check_transformer_version(self, current_version: str) -> None: + logger.info("Checking transformers version: %s", current_version) + if tuple(map(int, current_version.split(".")[:2])) < (4, 40): + raise ValueError( + "Qwen 1.5 MoE requires transformers.__version__>=4.40.0, please upgrade your transformers package." 
+ ) + + def do_match(self, lower_model_name_or_path: Optional[str] = None): + return ( + lower_model_name_or_path + and "qwen" in lower_model_name_or_path + and "1.5" in lower_model_name_or_path + and "moe" in lower_model_name_or_path ) @@ -314,4 +341,5 @@ def get_str_prompt( register_model_adapter(GemmaAdapter) register_model_adapter(StarlingLMAdapter) register_model_adapter(QwenAdapter) +register_model_adapter(QwenMoeAdapter) register_model_adapter(Llama3Adapter)