diff --git a/README.md b/README.md
index 2743b20ae6..9a7e68b6a6 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ ______________________________________________________________________

 ## News 🎉

+- \[2023/09\] TurboMind supports Qwen-14B
 - \[2023/09\] TurboMind supports InternLM-20B
 - \[2023/09\] TurboMind supports all features of Code Llama: code completion, infilling, chat / instruct, and python specialist. Click [here](./docs/en/supported_models/codellama.md) for deployment guide
 - \[2023/09\] TurboMind supports Baichuan2-7B
diff --git a/README_zh-CN.md b/README_zh-CN.md
index b87e4eac71..f1986b2aaa 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -20,6 +20,7 @@ ______________________________________________________________________

 ## 更新 🎉

+- \[2023/09\] TurboMind 支持 Qwen-14B
 - \[2023/09\] TurboMind 支持 InternLM-20B 模型
 - \[2023/09\] TurboMind 支持 Code Llama 所有功能:代码续写、填空、对话、Python专项。点击[这里](./docs/zh_cn/supported_models/codellama.md)阅读部署方法
 - \[2023/09\] TurboMind 支持 Baichuan2-7B
diff --git a/lmdeploy/model.py b/lmdeploy/model.py
index fbdc5a4337..3bfc59aefa 100644
--- a/lmdeploy/model.py
+++ b/lmdeploy/model.py
@@ -448,6 +448,7 @@ def messages2prompt(self, messages, sequence_start=True):
         return ret


+@MODELS.register_module(name='qwen-14b')
 @MODELS.register_module(name='qwen-7b')
 class Qwen7BChat(BaseModel):
     """Chat template for Qwen-7B-Chat."""
@@ -508,11 +509,6 @@ def messages2prompt(self, messages, sequence_start=True):
         return ret


-@MODELS.register_module(name='qwen-14b')
-class Qwen14BChat(Qwen7BChat):
-    pass
-
-
 @MODELS.register_module(name='codellama')
 class CodeLlama(Llama2):
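For context on the `model.py` change: stacking a second `register_module` decorator maps both model names to the same chat-template class, which is why the trivial pass-through `Qwen14BChat` subclass can be deleted. Below is a minimal, self-contained sketch of that behavior; the `Registry` class here is a simplified stand-in written for illustration, not lmdeploy's actual `MODELS` implementation.

```python
# Simplified stand-in for lmdeploy's MODELS registry (illustrative only).
class Registry:
    """Toy name -> class registry."""

    def __init__(self):
        self._modules = {}

    def register_module(self, name):
        """Return a decorator that records the decorated class under `name`."""
        def decorator(cls):
            self._modules[name] = cls
            return cls
        return decorator

    def get(self, name):
        return self._modules[name]


MODELS = Registry()


# Stacked decorators: the inner one registers 'qwen-7b', the outer one
# registers 'qwen-14b', and both entries point at the same class.
@MODELS.register_module(name='qwen-14b')
@MODELS.register_module(name='qwen-7b')
class Qwen7BChat:
    """Chat template shared by Qwen-7B-Chat and Qwen-14B-Chat."""


assert MODELS.get('qwen-14b') is MODELS.get('qwen-7b')
```

One behavioral difference from the subclass approach: looking up `'qwen-14b'` now yields `Qwen7BChat` itself rather than a distinct `Qwen14BChat` class, which should only matter to code that inspects the class name.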