From 6f7584014e9d15d3d76f0cb47475a89d83a4648c Mon Sep 17 00:00:00 2001
From: tpoisonooo
Date: Thu, 4 Jan 2024 04:09:20 +0000
Subject: [PATCH] update

---
 .gitignore |  1 +
 config.ini | 21 ++++++++-------------
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/.gitignore b/.gitignore
index c882e621..6d575bcf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ service/__pycache__/
 frontend/__pycache__/
 pk/
 badcase.txt
+config.bak
diff --git a/config.ini b/config.ini
index 82569b4e..09427eda 100644
--- a/config.ini
+++ b/config.ini
@@ -1,36 +1,31 @@
-
-
-
-
 [feature_store]
 reject_throttle = 767.0
 # text2vec model path, support local relative path and huggingface model format
-model_path = "../models/text2vec-large-chinese"
+model_path = "shibing624/text2vec-base-chinese"
 work_dir = "workdir"
 
 [web_search]
 # check https://serper.dev/api-key to get a free API key
-x_api_key = "aa3da0cd69c5a2df7c0b664dc8a4c118de532405"
+x_api_key = "${YOUR-API-KEY}"
 domain_partial_order = ["openai.com", "pytorch.org", "readthedocs.io", "nvidia.com", "stackoverflow.com", "juejin.cn", "zhuanlan.zhihu.com", "www.cnblogs.com"]
 save_dir = "logs/web_search_result"
 
 [llm]
-# enable local/remote LLM or not
 enable_local = 1
 enable_remote = 0
 # hybrid llm service address
-# client_url = "http://10.140.24.142:39999/inference"
-client_url = "http://10.140.24.142:39999/inference"
+client_url = "http://127.0.0.1:8888/inference"
 
 [llm.server]
 # local LLM configuration
+# support "internlm2-7B", "internlm2-20B" and "internlm2-70B"
 local_llm_path = "/internlm/ampere_7b_v1_7_0"
 local_llm_max_text_length = 16000
 
 # remote LLM service configuration
 # support any python3 openai interface, such as "gpt", "kimi" and so on
 remote_type = "kimi"
-remote_api_key = "Y2tpMG41dDB0YzExbjRqYW5nN2c6bXNrLTFzVlB2NGJRaDExeWdnNTlZY3dYMm5mcVRpWng="
+remote_api_key = "${YOUR-API-KEY}"
 # max text length for remote LLM. for example, use 128000 for kimi, 192000 for gpt
 remote_llm_max_text_length = 128000
 # openai model type. use "moonshot-v1-128k" for kimi, "gpt-4" for gpt
@@ -48,7 +43,7 @@ has_weekday = 1
 
 [sg_search]
 binary_src_path = "/usr/local/bin/src"
-src_access_token = "sgp_636f79ad2075640f_3ef2a135579615403e29b88d4402f1e6183ad347"
+src_access_token = "${YOUR-SRC-ACCESS-TOKEN}"
 
 # add your repo here, we just take opencompass and lmdeploy as example
 [sg_search.opencompass]
@@ -61,7 +56,7 @@ introduction = "lmdeploy 是一个用于压缩、部署和服务 LLM(Large Lan
 
 [frontend]
 # chat group type, support "lark" and "none"
-# check https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot to add bot
+# check https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot to add lark bot
 type = "none"
 # char group webhook url, send reply to group
-webhook_url = "https://open.feishu.cn/open-apis/bot/v2/hook/7a5d3d98-fdfd-40f8-b8de-851cb7e81e5c"
+webhook_url = "https://open.feishu.cn/open-apis/bot/v2/hook/7a5d3d98-fdfd-40f8-b8de-851cb7e81e5c"
\ No newline at end of file