From b7eb9aba497934a8d523ffdf662d97aabd91c566 Mon Sep 17 00:00:00 2001
From: awwaawwa <8493196+awwaawwa@users.noreply.github.com>
Date: Fri, 17 May 2024 11:15:23 +0800
Subject: [PATCH] [Feature]: allow model mutex override in core_functional.py
 (#1708)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* allow_core_func_specify_model

* change arg name

* support hot reload of the model override & raise an error when the override points to a model that does not exist

* allow model mutex override

---------

Co-authored-by: binary-husky
---
 core_functional.py         |  2 ++
 request_llms/bridge_all.py | 42 +++++++++++++++++++++++++++++++-------
 2 files changed, 37 insertions(+), 7 deletions(-)

diff --git a/core_functional.py b/core_functional.py
index 594113502..9cc00e6f6 100644
--- a/core_functional.py
+++ b/core_functional.py
@@ -33,6 +33,8 @@ def get_core_functions():
         "AutoClearHistory": False,
         # [6] Text preprocessing (optional; default None; e.g. a function that removes all newline characters)
         "PreProcess": None,
+        # [7] Model selection (optional; if unset, the current global model is used; if set, the specified model overrides the global one.)
+        # "ModelOverride": "gpt-3.5-turbo", # Main use: force a specific model whenever this core-function button is clicked.
     },
 
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 2cde4a064..9a12f9845 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -906,6 +906,13 @@ def decode(self, *args, **kwargs):
         AVAIL_LLM_MODELS += [azure_model_name]
 
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=-=-=-=- ☝️ model routing above -=-=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=-= 👇 multi-model dispatch functions below -=-=-=-=-=-=-=
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
 
 def LLM_CATCH_EXCEPTION(f):
@@ -942,13 +949,11 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
     model = llm_kwargs['llm_model']
     n_model = 1
     if '&' not in model:
-
-        # Querying only 1 LLM:
+        # Querying only "one" LLM (the common case):
         method = model_info[model]["fn_without_ui"]
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
-
-        # Querying multiple LLMs at once; slightly more verbose, but the idea is the same. You need not read this else branch.
+        # Querying "multiple" LLMs at once; slightly more verbose, but the idea is the same. You need not read this else branch.
         executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)
@@ -1001,8 +1006,26 @@ def mutex_manager(window_mutex, observe_window):
         res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
         return res
 
-
-def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
+# Adjust the model according to the ModelOverride parameter of the core-function area; used in `predict`
+import importlib
+import core_functional
+def execute_model_override(llm_kwargs, additional_fn, method):
+    functional = core_functional.get_core_functions()
+    if 'ModelOverride' in functional[additional_fn]:
+        # Hot-reload the prompt & ModelOverride
+        importlib.reload(core_functional)
+        functional = core_functional.get_core_functions()
+        model_override = functional[additional_fn]['ModelOverride']
+        if model_override not in model_info:
+            raise ValueError(f"The model override parameter '{model_override}' points to a model that is not currently supported; please check the configuration file.")
+        method = model_info[model_override]["fn_with_ui"]
+        llm_kwargs['llm_model'] = model_override
+        return llm_kwargs, additional_fn, method
+    # By default, return the original arguments unchanged
+    return llm_kwargs, additional_fn, method
+
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
     """
     Send to the LLM and fetch the output as a stream.
     Used for basic conversation functionality.
@@ -1021,6 +1044,11 @@ def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
     """
     inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
+
     method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]  # If this line errors, check the AVAIL_LLM_MODELS option in config
-    yield from method(inputs, llm_kwargs, *args, **kwargs)
+
+    if additional_fn: # Adjust the model according to the ModelOverride parameter of the core-function area
+        llm_kwargs, additional_fn, method = execute_model_override(llm_kwargs, additional_fn, method)
+
+    yield from method(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, stream, additional_fn)
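
As a reading aid for reviewers: the bridge_all.py hunks above route a core-function
button to a fixed model by looking the override up in model_info and failing fast on
unknown names. Below is a minimal, self-contained Python sketch of that dispatch
pattern. All names in it (MODEL_INFO, CORE_FUNCTIONS, resolve_model) are hypothetical
stand-ins for model_info, core_functional.get_core_functions(), and
execute_model_override; they are not part of the repository or this diff.

# Hypothetical registry mapping model names to UI handlers (stands in for model_info).
MODEL_INFO = {
    "gpt-3.5-turbo": {"fn_with_ui": lambda prompt: f"[gpt-3.5-turbo] {prompt}"},
    "gpt-4":         {"fn_with_ui": lambda prompt: f"[gpt-4] {prompt}"},
}

# Hypothetical core-function buttons (stands in for get_core_functions()).
CORE_FUNCTIONS = {
    # This button pins a model via the new "ModelOverride" key.
    "SummarizeCheaply": {"Prefix": "Summarize: ", "ModelOverride": "gpt-3.5-turbo"},
    # This button has no override and falls through to the global model.
    "Polish": {"Prefix": "Polish: "},
}

def resolve_model(global_model: str, button: str) -> str:
    """Return the model a button should use, validating any override."""
    override = CORE_FUNCTIONS[button].get("ModelOverride")
    if override is None:
        return global_model  # no override: keep the global model
    if override not in MODEL_INFO:
        # Same failure mode as the patch: fail loudly on an unknown model.
        raise ValueError(f"ModelOverride '{override}' is not a registered model.")
    return override

if __name__ == "__main__":
    model = resolve_model("gpt-4", "SummarizeCheaply")
    handler = MODEL_INFO[model]["fn_with_ui"]
    print(handler(CORE_FUNCTIONS["SummarizeCheaply"]["Prefix"] + "long text..."))
    # Prints: [gpt-3.5-turbo] Summarize: long text...

The patch additionally calls importlib.reload() on core_functional for every
overridden click, so edits to the button table take effect without restarting the
app; the sketch omits that hot-reload step for brevity.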