diff --git a/README.ja.md b/README.ja.md index 355dd615d..c11a6647e 100644 --- a/README.ja.md +++ b/README.ja.md @@ -154,6 +154,7 @@ DB-GPTのアーキテクチャは以下の図に示されています: 私たちは、LLaMA/LLaMA2、Baichuan、ChatGLM、Wenxin、Tongyi、Zhipuなど、オープンソースおよびAPIエージェントからの数十の大規模言語モデル(LLM)を含む幅広いモデルをサポートしています。 - ニュース + - 🔥🔥🔥 [glm-4-9b-chat](https://huggingface.co/THUDM/glm-4-9b-chat) - 🔥🔥🔥 [Phi-3](https://huggingface.co/collections/microsoft/phi-3-6626e15e9585a200d2d761e3) - 🔥🔥🔥 [Yi-1.5-34B-Chat](https://huggingface.co/01-ai/Yi-1.5-34B-Chat) - 🔥🔥🔥 [Yi-1.5-9B-Chat](https://huggingface.co/01-ai/Yi-1.5-9B-Chat) diff --git a/README.md b/README.md index f4ef78d7a..561df833d 100644 --- a/README.md +++ b/README.md @@ -158,6 +158,7 @@ At present, we have introduced several key features to showcase our current capa We offer extensive model support, including dozens of large language models (LLMs) from both open-source and API agents, such as LLaMA/LLaMA2, Baichuan, ChatGLM, Wenxin, Tongyi, Zhipu, and many more. - News + - 🔥🔥🔥 [glm-4-9b-chat](https://huggingface.co/THUDM/glm-4-9b-chat) - 🔥🔥🔥 [Phi-3](https://huggingface.co/collections/microsoft/phi-3-6626e15e9585a200d2d761e3) - 🔥🔥🔥 [Yi-1.5-34B-Chat](https://huggingface.co/01-ai/Yi-1.5-34B-Chat) - 🔥🔥🔥 [Yi-1.5-9B-Chat](https://huggingface.co/01-ai/Yi-1.5-9B-Chat) diff --git a/README.zh.md b/README.zh.md index 12a0806b1..5b78f9097 100644 --- a/README.zh.md +++ b/README.zh.md @@ -152,6 +152,7 @@ 海量模型支持,包括开源、API代理等几十种大语言模型。如LLaMA/LLaMA2、Baichuan、ChatGLM、文心、通义、智谱等。当前已支持如下模型: - 新增支持模型 + - 🔥🔥🔥 [glm-4-9b-chat](https://huggingface.co/THUDM/glm-4-9b-chat) - 🔥🔥🔥 [Phi-3](https://huggingface.co/collections/microsoft/phi-3-6626e15e9585a200d2d761e3) - 🔥🔥🔥 [Yi-1.5-34B-Chat](https://huggingface.co/01-ai/Yi-1.5-34B-Chat) - 🔥🔥🔥 [Yi-1.5-9B-Chat](https://huggingface.co/01-ai/Yi-1.5-9B-Chat) diff --git a/dbgpt/configs/model_config.py b/dbgpt/configs/model_config.py index 6f49242f7..7a0e20102 100644 --- a/dbgpt/configs/model_config.py +++ b/dbgpt/configs/model_config.py @@ -52,6 
+52,8 @@ def get_device() -> str: "chatglm2-6b-int4": os.path.join(MODEL_PATH, "chatglm2-6b-int4"), # https://huggingface.co/THUDM/chatglm3-6b "chatglm3-6b": os.path.join(MODEL_PATH, "chatglm3-6b"), + # https://huggingface.co/THUDM/glm-4-9b-chat + "glm-4-9b-chat": os.path.join(MODEL_PATH, "glm-4-9b-chat"), "guanaco-33b-merged": os.path.join(MODEL_PATH, "guanaco-33b-merged"), "falcon-40b": os.path.join(MODEL_PATH, "falcon-40b"), "gorilla-7b": os.path.join(MODEL_PATH, "gorilla-7b"), diff --git a/dbgpt/model/adapter/hf_adapter.py b/dbgpt/model/adapter/hf_adapter.py index ec24965fc..a24031136 100644 --- a/dbgpt/model/adapter/hf_adapter.py +++ b/dbgpt/model/adapter/hf_adapter.py @@ -18,6 +18,8 @@ class NewHFChatModelAdapter(LLMModelAdapter, ABC): prompt template for this model """ + trust_remote_code: bool = True + def new_adapter(self, **kwargs) -> "NewHFChatModelAdapter": return self.__class__() @@ -77,13 +79,18 @@ def load(self, model_path: str, from_pretrained_kwargs: dict): model_path, use_fast=self.use_fast_tokenizer(), revision=revision, - trust_remote_code=True, + trust_remote_code=self.trust_remote_code, ) except TypeError: tokenizer = AutoTokenizer.from_pretrained( - model_path, use_fast=False, revision=revision, trust_remote_code=True + model_path, + use_fast=False, + revision=revision, + trust_remote_code=self.trust_remote_code, ) try: + if "trust_remote_code" not in from_pretrained_kwargs: + from_pretrained_kwargs["trust_remote_code"] = self.trust_remote_code model = AutoModelForCausalLM.from_pretrained( model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs ) @@ -480,6 +487,19 @@ def do_match(self, lower_model_name_or_path: Optional[str] = None): ) +class GLM4Adapter(NewHFChatModelAdapter): + """ + https://huggingface.co/THUDM/glm-4-9b-chat + """ + + def do_match(self, lower_model_name_or_path: Optional[str] = None): + return ( + lower_model_name_or_path + and "glm-4" in lower_model_name_or_path + and "chat" in lower_model_name_or_path + ) + + # The 
following code is used to register the model adapter # The last registered model adapter is matched first register_model_adapter(YiAdapter) @@ -496,3 +516,4 @@ def do_match(self, lower_model_name_or_path: Optional[str] = None): register_model_adapter(PhiAdapter) register_model_adapter(SQLCoderAdapter) register_model_adapter(OpenChatAdapter) +register_model_adapter(GLM4Adapter)