diff --git a/dist/buildinfo.json b/dist/buildinfo.json index 30ef10b0..a7e47d52 100644 --- a/dist/buildinfo.json +++ b/dist/buildinfo.json @@ -1 +1 @@ -{"sha": "954408a", "timestamp": 1700550928} +{"sha": "9273c89", "timestamp": 1705300786} diff --git a/dist/index.js b/dist/index.js index 3ce11a52..82aeb624 100644 --- a/dist/index.js +++ b/dist/index.js @@ -1,20 +1,38 @@ // src/env.js var Environment = class { + // -- 版本数据 -- + // + // 当前版本 + BUILD_TIMESTAMP = 1705300786; + // 当前版本 commit id + BUILD_VERSION = "9273c89"; + // -- 基础配置 -- /** * @type {I18n | null} */ I18N = null; + // 多语言支持 LANGUAGE = "zh-cn"; - // AI提供商: auto, openai, azure, workers + // 检查更新的分支 + UPDATE_BRANCH = "master"; + // AI提供商: auto, openai, azure, workers, gemini AI_PROVIDER = "auto"; + // -- Telegram 相关 -- + // + // Telegram API Domain + TELEGRAM_API_DOMAIN = "https://api.telegram.org"; // 允许访问的Telegram Token, 设置时以逗号分隔 TELEGRAM_AVAILABLE_TOKENS = []; + // -- 权限相关 -- + // // 允许所有人使用 I_AM_A_GENEROUS_PERSON = false; // 白名单 CHAT_WHITE_LIST = []; // 用户配置 LOCK_USER_CONFIG_KEYS = []; + // -- 群组相关 -- + // // 允许访问的Telegram Token 对应的Bot Name, 设置时以逗号分隔 TELEGRAM_BOT_NAME = []; // 群组白名单 @@ -23,10 +41,8 @@ var Environment = class { GROUP_CHAT_BOT_ENABLE = true; // 群组机器人共享模式,关闭后,一个群组只有一个会话和配置。开启的话群组的每个人都有自己的会话上下文 GROUP_CHAT_BOT_SHARE_MODE = false; - // OpenAI API Key - API_KEY = []; - // OpenAI的模型名称 - CHAT_MODEL = "gpt-3.5-turbo"; + // -- 历史记录相关 -- + // // 为了避免4096字符限制,将消息删减 AUTO_TRIM_HISTORY = true; // 最大历史记录长度 @@ -37,10 +53,24 @@ var Environment = class { GPT3_TOKENS_COUNT = false; // GPT3计数器资源地址 GPT3_TOKENS_COUNT_REPO = "https://raw.githubusercontent.com/tbxark-arc/GPT-3-Encoder/master"; + // -- Prompt 相关 -- + // // 全局默认初始化消息 SYSTEM_INIT_MESSAGE = null; // 全局默认初始化消息角色 SYSTEM_INIT_MESSAGE_ROLE = "system"; + // -- Open AI 配置 -- + // + // OpenAI API Key + API_KEY = []; + // OpenAI的模型名称 + CHAT_MODEL = "gpt-3.5-turbo"; + // OpenAI API Domain 可替换兼容openai api的其他服务商 + OPENAI_API_DOMAIN = "https://api.openai.com"; + // OpenAI API BASE `https://api.openai.com/v1` + OPENAI_API_BASE = null; + // -- DALLE 配置 -- + // // DALL-E的模型名称 DALL_E_MODEL = "dall-e-2"; // DALL-E图片尺寸 @@ -49,18 +79,16 @@ var Environment = class { DALL_E_IMAGE_QUALITY = "standard"; // DALL-E图片风格 DALL_E_IMAGE_STYLE = "vivid"; + // -- 特性开关 -- + // // 是否开启使用统计 ENABLE_USAGE_STATISTICS = false; // 隐藏部分命令按钮 HIDE_COMMAND_BUTTONS = ["/role"]; // 显示快捷回复按钮 SHOW_REPLY_BUTTON = false; - // 检查更新的分支 - UPDATE_BRANCH = "master"; - // 当前版本 - BUILD_TIMESTAMP = 1700550928; - // 当前版本 commit id - BUILD_VERSION = "954408a"; + // -- 模式开关 -- + // // 使用流模式 STREAM_MODE = true; // 安全模式 @@ -69,24 +97,28 @@ var Environment = class { DEBUG_MODE = false; // 开发模式 DEV_MODE = false; - // Telegram API Domain - TELEGRAM_API_DOMAIN = "https://api.telegram.org"; - // OpenAI API Domain 可替换兼容openai api的其他服务商 - OPENAI_API_DOMAIN = "https://api.openai.com"; - // OpenAI API BASE `https://api.openai.com/v1` - OPENAI_API_BASE = null; + // -- AZURE 配置 -- + // // Azure API Key AZURE_API_KEY = null; // Azure Completions API AZURE_COMPLETIONS_API = null; + // Azure DallE API + AZURE_DALLE_API = null; // Cloudflare Account ID CLOUDFLARE_ACCOUNT_ID = null; // Cloudflare Token CLOUDFLARE_TOKEN = null; // Text Generation Model - WORKERS_CHAT_MODEL = "@cf/meta/llama-2-7b-chat-fp16"; + WORKERS_CHAT_MODEL = "@cf/mistral/mistral-7b-instruct-v0.1 "; // Text-to-Image Model WORKERS_IMAGE_MODEL = "@cf/stabilityai/stable-diffusion-xl-base-1.0"; + // Google Gemini API Key + GOOGLE_API_KEY = null; + // Google Gemini API + 
GOOGLE_COMPLETIONS_API = "https://generativelanguage.googleapis.com/v1beta/models/"; + // Google Gemini Model + GOOGLE_COMPLETIONS_MODEL = "gemini.js-pro"; }; var ENV = new Environment(); var DATABASE = null; @@ -105,8 +137,10 @@ function initEnv(env, i18n2) { OPENAI_API_BASE: "string", AZURE_API_KEY: "string", AZURE_COMPLETIONS_API: "string", + AZURE_DALLE_API: "string", CLOUDFLARE_ACCOUNT_ID: "string", - CLOUDFLARE_TOKEN: "string" + CLOUDFLARE_TOKEN: "string", + GOOGLE_API_KEY: "string" }; const customCommandPrefix = "CUSTOM_COMMAND_"; for (const key of Object.keys(env)) { @@ -203,10 +237,18 @@ var Context = class { AZURE_API_KEY: ENV.AZURE_API_KEY, // Azure Completions API AZURE_COMPLETIONS_API: ENV.AZURE_COMPLETIONS_API, + // Azure DALL-E API + AZURE_DALLE_API: ENV.AZURE_DALLE_API, // WorkersAI聊天记录模型 WORKERS_CHAT_MODEL: ENV.WORKERS_CHAT_MODEL, // WorkersAI图片模型 - WORKER_IMAGE_MODEL: ENV.WORKERS_IMAGE_MODEL + WORKER_IMAGE_MODEL: ENV.WORKERS_IMAGE_MODEL, + // Google Gemini API Key + GOOGLE_API_KEY: ENV.GOOGLE_API_KEY, + // Google Gemini API + GOOGLE_COMPLETIONS_API: ENV.GOOGLE_COMPLETIONS_API, + // Google Gemini Model + GOOGLE_COMPLETIONS_MODEL: ENV.GOOGLE_COMPLETIONS_MODEL }; USER_DEFINE = { // 自定义角色 @@ -282,7 +324,7 @@ var Context = class { console.error(e); } { - const aiProvider = new Set("auto,openai,azure,workers".split(",")); + const aiProvider = new Set("auto,openai,azure,workers,gemini".split(",")); if (!aiProvider.has(this.USER_CONFIG.AI_PROVIDER)) { this.USER_CONFIG.AI_PROVIDER = "auto"; } @@ -1049,9 +1091,8 @@ function isOpenAIEnable(context) { return context.USER_CONFIG.OPENAI_API_KEY || ENV.API_KEY.length > 0; } function isAzureEnable(context) { - const api = context.USER_CONFIG.AZURE_COMPLETIONS_API || ENV.AZURE_COMPLETIONS_API; const key = context.USER_CONFIG.AZURE_API_KEY || ENV.AZURE_API_KEY; - return api !== null && key !== null; + return key !== null; } async function requestCompletionsFromOpenAI(message, history, context, onStream) { const body = { @@ -1071,8 +1112,8 @@ async function requestCompletionsFromOpenAI(message, history, context, onStream) }; { const provider = context.USER_CONFIG.AI_PROVIDER; - if (provider === "azure" || provider === "auto" && isAzureEnable(context)) { - url = ENV.AZURE_COMPLETIONS_API; + if (provider === "azure" || provider === "auto" && isAzureEnable(context) && context.USER_CONFIG.AZURE_COMPLETIONS_API !== null) { + url = context.USER_CONFIG.AZURE_COMPLETIONS_API; header["api-key"] = azureKeyFromContext(context); delete header["Authorization"]; delete body.model; @@ -1119,10 +1160,21 @@ Body: ${JSON.stringify(body)}`); } } setTimeout(() => updateBotUsage(result.usage, context).catch(console.error), 0); - return result.choices[0].message.content; + try { + return result.choices[0].message.content; + } catch (e) { + if (!result) { + throw new Error("Empty response"); + } + throw Error(result?.error?.message || JSON.stringify(result)); + } } async function requestImageFromOpenAI(prompt, context) { - const key = openAIKeyFromContext(context); + let url = `${ENV.OPENAI_API_BASE}/images/generations`; + const header = { + "Content-Type": "application/json", + "Authorization": `Bearer ${openAIKeyFromContext(context)}` + }; const body = { prompt, n: 1, @@ -1133,12 +1185,22 @@ async function requestImageFromOpenAI(prompt, context) { body.quality = context.USER_CONFIG.DALL_E_IMAGE_QUALITY; body.style = context.USER_CONFIG.DALL_E_IMAGE_STYLE; } - const resp = await fetch(`${ENV.OPENAI_API_BASE}/images/generations`, { + { + const provider = 
context.USER_CONFIG.AI_PROVIDER; + if (provider === "azure" || provider === "auto" && isAzureEnable(context) && context.USER_CONFIG.AZURE_DALLE_API !== null) { + url = context.USER_CONFIG.AZURE_DALLE_API; + const vaildSize = ["1792x1024", "1024x1024", "1024x1792"]; + if (!vaildSize.includes(body.size)) { + body.size = "1024x1024"; + } + header["api-key"] = azureKeyFromContext(context); + delete header["Authorization"]; + delete body.model; + } + } + const resp = await fetch(url, { method: "POST", - headers: { - "Content-Type": "application/json", - "Authorization": `Bearer ${key}` - }, + headers: header, body: JSON.stringify(body) }).then((res) => res.json()); if (resp.error?.message) { @@ -1221,7 +1283,14 @@ ${ENV.I18N.message.loading}...`); return contentFull; } else { const data = await resp.json(); - return data.result.response; + try { + return data.result.response; + } catch (e) { + if (!data) { + throw new Error("Empty response"); + } + throw new Error(data?.errors?.[0]?.message || JSON.stringify(data)); + } } } async function requestImageFromWorkersAI(prompt, context) { @@ -1229,6 +1298,60 @@ async function requestImageFromWorkersAI(prompt, context) { return await raw.blob(); } +// src/gemini.js +function isGeminiAIEnable(context) { + return !!context.USER_CONFIG.GOOGLE_API_KEY; +} +async function requestCompletionsFromGeminiAI(message, history, context, onStream) { + const url = `${context.USER_CONFIG.GOOGLE_COMPLETIONS_API}${context.USER_CONFIG.GOOGLE_COMPLETIONS_MODEL}:${// 暂时不支持stream模式 + // onStream ? 'streamGenerateContent' : 'generateContent' + "generateContent"}?key=${context.USER_CONFIG.GOOGLE_API_KEY}`; + const contentsTemp = [...history || [], { role: "user", content: message }]; + const contents = []; + for (const msg of contentsTemp) { + switch (msg.role) { + case "assistant": + msg.role = "model"; + break; + case "system": + case "user": + msg.role = "user"; + break; + default: + continue; + } + if (contents.length === 0 || contents[contents.length - 1].role !== msg.role) { + contents.push({ + "role": msg.role, + "parts": [ + { + "text": msg.content + } + ] + }); + } else { + contents[contents.length - 1].parts[0].text += msg.content; + } + } + const resp = await fetch(url, { + method: "POST", + headers: { + "Content-Type": "application/json", + "User-Agent": CONST.USER_AGENT + }, + body: JSON.stringify({ contents }) + }); + const data = await resp.json(); + try { + return data.candidates[0].content.parts[0].text; + } catch (e) { + if (!data) { + throw new Error("Empty response"); + } + throw new Error(data?.error?.message || JSON.stringify(data)); + } +} + // src/llm.js async function loadHistory(key, context) { const initMessage = { role: "system", content: context.USER_CONFIG.SYSTEM_INIT_MESSAGE }; @@ -1301,6 +1424,8 @@ function loadChatLLM(context) { return requestCompletionsFromOpenAI; case "workers": return requestCompletionsFromWorkersAI; + case "gemini": + return requestCompletionsFromGeminiAI; default: if (isOpenAIEnable(context) || isAzureEnable(context)) { return requestCompletionsFromOpenAI; @@ -1308,6 +1433,9 @@ function loadChatLLM(context) { if (isWorkersAIEnable(context)) { return requestCompletionsFromWorkersAI; } + if (isGeminiAIEnable(context)) { + return requestCompletionsFromGeminiAI; + } return null; } } @@ -1316,11 +1444,11 @@ function loadImageGen(context) { case "openai": return requestImageFromOpenAI; case "azure": - return null; + return requestImageFromOpenAI; case "workers": return requestImageFromWorkersAI; default: - if 
(isOpenAIEnable(context)) { + if (isOpenAIEnable(context) || isAzureEnable(context)) { return requestImageFromOpenAI; } if (isWorkersAIEnable(context)) { @@ -1705,6 +1833,8 @@ async function commandSystem(message, command, subcommand, context) { context.USER_CONFIG.OPENAI_API_KEY = "******"; context.USER_CONFIG.AZURE_API_KEY = "******"; context.USER_CONFIG.AZURE_COMPLETIONS_API = "******"; + context.USER_CONFIG.AZURE_DALLE_API = "******"; + context.USER_CONFIG.GOOGLE_API_KEY = "******"; msg = "
\n" + msg; msg += `USER_CONFIG: ${JSON.stringify(context.USER_CONFIG, null, 2)} `; diff --git a/dist/timestamp b/dist/timestamp index bac9aae7..6e157c85 100644 --- a/dist/timestamp +++ b/dist/timestamp @@ -1 +1 @@ -1700550928 +1705300786 diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md index d47f8212..a76c3845 100644 --- a/doc/cn/CONFIG.md +++ b/doc/cn/CONFIG.md @@ -14,54 +14,55 @@ 为每个用户通用的配置,通常在workers配置界面填写 -| KEY | 说明 | 默认值 | 特殊说明 | -|:--------------------------|------------------------|------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------| -| AI_PROVIDER | AI提供商 | `auto` | AI提供商: auto, azure, openai, workers; auto为自动选择一个有效的配置,判断优先级为 azure > openai > workers | -| API_KEY | OpenAI API Key | `null` | 可以同时使用多个key,使用的时候会随机选择一个 | -| CHAT_MODEL | open ai 模型选择 | `gpt-3.5-turbo` | | -| - | - | - | - | -| TELEGRAM_AVAILABLE_TOKENS | 支持多个Telegram Bot Token | `null` | 多个Token用`,`分隔 | -| - | - | - | - | -| CHAT_WHITE_LIST | 聊天ID白名单 | `null` | 多个ID用`,`分隔,不知道ID,和机器人聊一句就能返回 | -| I_AM_A_GENEROUS_PERSON | 关闭白名单,允许所有人访问 | `false` | 鉴于很多人不想设置白名单,或者不知道怎么获取ID,所以设置这个选项就能允许所有人访问, 值为`true`时生效 | -| LOCK_USER_CONFIG_KEYS | 锁定自定义用户配置 | `[]` | 可以锁定某些字段。比如设置为`CHAT_MODEL`就可以防止其他用户通过`/setenv`指令切换模型,多个字段用`,`分隔 | -| - | - | - | - | -| AUTO_TRIM_HISTORY | 自动清理历史记录 | `true` | 为了避免4096字符限制,将消息删减 | -| MAX_HISTORY_LENGTH | 最大历史记录长度 | `20` | `AUTO_TRIM_HISTORY开启后` 为了避免4096字符限制,将消息删减 | -| MAX_TOKEN_LENGTH | 最大历史token数量 | 2048 | 过长容易超时建议设定在一个合适的数字 | -| GPT3_TOKENS_COUNT | GTP计数模式 | `false` | 使用更加精准的token计数模式替代单纯判断字符串长度,但是容易超时 | -| - | - | - | - | -| SYSTEM_INIT_MESSAGE | 系统初始化信息 | `你是一个得力的助手` | 默认机器人设定 | -| SYSTEM_INIT_MESSAGE_ROLE | 系统初始化信息角色 | `system` | 默认机器人设定 | -| - | - | - | - | -| ENABLE_USAGE_STATISTICS | 开启使用统计 | `false` | 开启后,每次调用API都会记录到KV,可以通过`/usage`查看 | -| HIDE_COMMAND_BUTTONS | 隐藏指令按钮 | `null` | 把想要隐藏的按钮写入用逗号分开`/start,/system`, 记得带上斜杠,修改之后得重新`init` | -| SHOW_REPLY_BUTTON | 显示快捷回复按钮 | `false` | 显示快捷回复按钮 | -| - | - | - | - | -| UPDATE_BRANCH | 分支 | `master` | 版本检测所在分支 | -| - | - | - | - | -| DEBUG_MODE | 调试模式 | `false` | 目前可以把最新一条消息保存到KV方便调试,非常消耗KV写入量,正式环境务必关闭 | -| DEV_MODE | 开发模式 | `false` | 开发测试用 | -| STREAM_MODE | 流模式 | `true` | 得到类似ChatGPT Web一样的打字机输出模式 | -| SAFE_MODE | 安全模式 | `true` | 安全模式,会增加KV写损耗,但是能避免Workers超时导致的Telegram死亡循环重试,减少Token的浪费,不建议关闭。 | -| - | - | - | - | -| LANGUAGE | 语言 | `zh-CN` | `zh-CN`,`zh-TW`和`en` | -| - | - | - | - | -| TELEGRAM_API_DOMAIN | Telegram | `https://api.telegram.org` | 可以自定义Telegram服务器 | -| OPENAI_API_DOMAIN | OpenAI | `https://api.openai.com` | 可以替换为其他与OpenAI API兼容的其他服务商的域名 | -| - | - | - | - | -| AZURE_API_KEY | azure api key | `null` | 支持azure的API,两个密钥随便选一个就可以。如果你要默认使用azure,你可以设置`AI_PROVIDER`为`azure` | -| AZURE_COMPLETIONS_API | azure api url | `null` | 格式`https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions?api-version=2023-05-15` | -| - | - | - | - | -| CLOUDFLARE_ACCOUNT_ID | Cloudflare 的 用户ID | `null` | 你可以在workers首页的右侧信息栏中找到这个信息。如果你要默认使用workers ai,你可以设置`AI_PROVIDER`为`workers` | -| CLOUDFLARE_TOKEN | Cloudflare的Token | `null` | 你可以在`https://dash.cloudflare.com/profile/api-tokens`中使用`Workers AI (Beta)`模板创建 | -| WORKERS_CHAT_MODEL | 文字生成模型 | `@cf/meta/llama-2-7b-chat-fp16` | 具体模型列表可以查看`https://developers.cloudflare.com/workers-ai/models/llm/` | -| WORKERS_IMAGE_MODEL | 文字生成图片模型 | `@cf/stabilityai/stable-diffusion-xl-base-1.0` | 同上 | -| - | - | - | - | -| DALL_E_MODEL | 生成图像的模型 | `dall-e-2` | 支持 
`dall-e-2` 和 `dall-e-3` | -| DALL_E_IMAGE_SIZE | 生成图像的尺寸 | `512x512` | 生成图像的尺寸。对于 dall-e-2,必须是256x256、512x512或1024x1024之一。对于 dall-e-3 模型,必须是1024x1024、1792x1024或1024x1792之一。 | -| DALL_E_IMAGE_QUALITY | 生成图像的质量 | `standard` | 将要生成的图片质量。hd会创建具有更精细细节和整体一致性的图片。此参数仅支持dall-e-3. | -| DALL_E_IMAGE_STYLE | 生成图像的风格 | `vivid` | 生成图像的风格。必须是 vivid 或 natural 中的一个。vivid使模型倾向于产生超现实和戏剧化的图片。natural使模型产生更自然、不那么超现实外观的图片。此参数仅支持dall-e-3. | +| KEY | 说明 | 默认值 | 特殊说明 | +|:--------------------------|--------------------------------------------|---------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------| +| LANGUAGE | 语言 | `zh-CN` | `zh-CN`,`zh-TW`和`en` | +| AI_PROVIDER | AI提供商 | `auto` | AI提供商: auto, azure, openai, workers; auto为自动选择一个有效的配置,判断优先级为 azure > openai > workers | +| UPDATE_BRANCH | 分支 | `master` | 版本检测所在分支 | +| - | - | - | - | +| TELEGRAM_API_DOMAIN | Telegram | `https://api.telegram.org` | 可以自定义Telegram服务器 | +| TELEGRAM_AVAILABLE_TOKENS | 支持多个Telegram Bot Token | `null` | 多个Token用`,`分隔 | +| - | - | - | - | +| CHAT_WHITE_LIST | 聊天ID白名单 | `null` | 多个ID用`,`分隔,不知道ID,和机器人聊一句就能返回 | +| I_AM_A_GENEROUS_PERSON | 关闭白名单,允许所有人访问 | `false` | 鉴于很多人不想设置白名单,或者不知道怎么获取ID,所以设置这个选项就能允许所有人访问, 值为`true`时生效 | +| LOCK_USER_CONFIG_KEYS | 锁定自定义用户配置 | `[]` | 可以锁定某些字段。比如设置为`CHAT_MODEL`就可以防止其他用户通过`/setenv`指令切换模型,多个字段用`,`分隔 | +| - | - | - | - | +| AUTO_TRIM_HISTORY | 自动清理历史记录 | `true` | 为了避免4096字符限制,将消息删减 | +| MAX_HISTORY_LENGTH | 最大历史记录长度 | `20` | `AUTO_TRIM_HISTORY开启后` 为了避免4096字符限制,将消息删减 | +| MAX_TOKEN_LENGTH | 最大历史token数量 | 2048 | 过长容易超时建议设定在一个合适的数字 | +| GPT3_TOKENS_COUNT | GTP计数模式 | `false` | 使用更加精准的token计数模式替代单纯判断字符串长度,但是容易超时 | +| GPT3_TOKENS_COUNT_REPO | GPT3计数器资源所在Repo | `https://raw.githubusercontent.com/tbxark-arc/GPT-3-Encoder/master` | 加载 GPT3 Token 计数配置的资源文件 | +| - | - | - | - | +| SYSTEM_INIT_MESSAGE | 系统初始化信息 | `你是一个得力的助手` | 默认机器人设定 | +| SYSTEM_INIT_MESSAGE_ROLE | 系统初始化信息角色 | `system` | 默认机器人设定 | +| - | - | - | - | +| ENABLE_USAGE_STATISTICS | 开启使用统计 | `false` | 开启后,每次调用API都会记录到KV,可以通过`/usage`查看 | +| HIDE_COMMAND_BUTTONS | 隐藏指令按钮 | `null` | 把想要隐藏的按钮写入用逗号分开`/start,/system`, 记得带上斜杠,修改之后得重新`init` | +| SHOW_REPLY_BUTTON | 显示快捷回复按钮 | `false` | 显示快捷回复按钮 | +| - | - | - | - | +| DEBUG_MODE | 调试模式 | `false` | 目前可以把最新一条消息保存到KV方便调试,非常消耗KV写入量,正式环境务必关闭 | +| DEV_MODE | 开发模式 | `false` | 开发测试用 | +| STREAM_MODE | 流模式 | `true` | 得到类似ChatGPT Web一样的打字机输出模式 | +| SAFE_MODE | 安全模式 | `true` | 安全模式,会增加KV写损耗,但是能避免Workers超时导致的Telegram死亡循环重试,减少Token的浪费,不建议关闭。 | +| - | - | - | - | +| API_KEY | OpenAI API Key | `null` | 可以同时使用多个key,使用的时候会随机选择一个 | +| CHAT_MODEL | open ai 模型选择 | `gpt-3.5-turbo` | | +| OPENAI_API_DOMAIN | OPENAI API Domain [废弃: 使用 OPENAI_API_BASE] | `https://api.openai.com` | 可以替换为其他与OpenAI API兼容的其他服务商的域名 | +| OPENAI_API_DOMAIN | OPENAI API Base URL | `https://api.openai.com/v1` | 兼容Cloudflare AI 网关 | +| - | - | - | - | +| AZURE_API_KEY | azure api key | `null` | 支持azure的API,两个密钥随便选一个就可以。如果你要默认使用azure,你可以设置`AI_PROVIDER`为`azure` | +| AZURE_COMPLETIONS_API | azure api url | `null` | 格式`https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions?api-version=2023-05-15` | +| AZURE_DALLE_API | azure dalle api url | `null` | 格式`https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2023-12-01-preview` | +| - | - | - | - | +| CLOUDFLARE_ACCOUNT_ID | Cloudflare 的 用户ID | `null` | 
你可以在workers首页的右侧信息栏中找到这个信息。如果你要默认使用workers ai,你可以设置`AI_PROVIDER`为`workers` | +| CLOUDFLARE_TOKEN | Cloudflare的Token | `null` | 你可以在`https://dash.cloudflare.com/profile/api-tokens`中使用`Workers AI (Beta)`模板创建 | +| WORKERS_CHAT_MODEL | 文字生成模型 | `@cf/meta/llama-2-7b-chat-fp16` | 具体模型列表可以查看`https://developers.cloudflare.com/workers-ai/models/llm/` | +| WORKERS_IMAGE_MODEL | 文字生成图片模型 | `@cf/stabilityai/stable-diffusion-xl-base-1.0` | 同上 | +| - | - | - | - | +| DALL_E_MODEL | 生成图像的模型 | `dall-e-2` | 支持 `dall-e-2` 和 `dall-e-3` | +| DALL_E_IMAGE_SIZE | 生成图像的尺寸 | `512x512` | 生成图像的尺寸。对于 dall-e-2,必须是256x256、512x512或1024x1024之一。对于 dall-e-3 模型,必须是1024x1024、1792x1024或1024x1792之一。 | +| DALL_E_IMAGE_QUALITY | 生成图像的质量 | `standard` | 将要生成的图片质量。hd会创建具有更精细细节和整体一致性的图片。此参数仅支持dall-e-3. | +| DALL_E_IMAGE_STYLE | 生成图像的风格 | `vivid` | 生成图像的风格。必须是 vivid 或 natural 中的一个。vivid使模型倾向于产生超现实和戏剧化的图片。natural使模型产生更自然、不那么超现实外观的图片。此参数仅支持dall-e-3. | @@ -88,22 +89,25 @@ 每个用户的自定义配置,只能通过Telegram发送消息来修改,消息格式为`/setenv KEY=VALUE`, 用户配置的优先级比系统配置的更高。如果想删除配置,请使用`/delenv KEY`。 批量设置变量请使用`/setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}` -| KEY | 说明 | -|:------------------------|:-----------------------------------------------------------------------------------------------------------------| -| AI_PROVIDER | 配置与ENV相同 | -| CHAT_MODEL | 配置与ENV相同 | -| OPENAI_API_KEY | 设置该值之后将不会调用系统配置的KEY | -| OPENAI_API_EXTRA_PARAMS | OpenAI API额外参数,设定后每次调用API都会带上,可以用来调整温度等参数, `/setenv OPENAI_API_EXTRA_PARAMS={"temperature": 0.5}` 每次修改必须为完整JSON | -| SYSTEM_INIT_MESSAGE | 配置与ENV相同 | -| DALL_E_MODEL | 配置与ENV相同 | -| DALL_E_IMAGE_SIZE | 配置与ENV相同 | -| DALL_E_IMAGE_QUALITY | 配置与ENV相同 | -| DALL_E_IMAGE_STYLE | 配置与ENV相同 | -| AZURE_API_KEY | 配置与ENV相同 | -| AZURE_COMPLETIONS_API | 配置与ENV相同 | -| WORKERS_CHAT_MODEL | 配置与ENV相同 | -| WORKER_IMAGE_MODEL | 配置与ENV相同 | - +| KEY | 说明 | +|:-------------------------|:-----------------------------------------------------------------------------------------------------------------| +| AI_PROVIDER | 配置与 `ENV.AI_PROVIDER` 相同 | +| CHAT_MODEL | 配置与 `ENV.CHAT_MODEL` 相同 | +| OPENAI_API_KEY | 设置该值之后将不会调用系统配置的KEY | +| OPENAI_API_EXTRA_PARAMS | OpenAI API额外参数,设定后每次调用API都会带上,可以用来调整温度等参数, `/setenv OPENAI_API_EXTRA_PARAMS={"temperature": 0.5}` 每次修改必须为完整JSON | +| SYSTEM_INIT_MESSAGE | 配置与 `ENV.SYSTEM_INIT_MESSAGE` 相同 | +| DALL_E_MODEL | 配置与 `ENV.DALL_E_MODEL` 相同 | +| DALL_E_IMAGE_SIZE | 配置与 `ENV.DALL_E_IMAGE_SIZE` 相同 | +| DALL_E_IMAGE_QUALITY | 配置与 `ENV.DALL_E_IMAGE_QUALITY` 相同 | +| DALL_E_IMAGE_STYLE | 配置与 `ENV.DALL_E_IMAGE_STYLE` 相同 | +| AZURE_API_KEY | 配置与 `ENV.AZURE_API_KEY` 相同 | +| AZURE_COMPLETIONS_API | 配置与 `ENV.AZURE_COMPLETIONS_API` 相同 | +| AZURE_DALLE_API | 配置与 `ENV.AZURE_DALLE_API` 相同 | +| WORKERS_CHAT_MODEL | 配置与 `ENV.WORKERS_CHAT_MODEL` 相同 | +| WORKER_IMAGE_MODEL | 配置与 `ENV.WORKER_IMAGE_MODEL` 相同 | +| GOOGLE_API_KEY | 配置与 `ENV.GOOGLE_API_KEY` 相同 | +| GOOGLE_COMPLETIONS_API | 配置与 `ENV.GOOGLE_COMPLETIONS_API` 相同 | +| GOOGLE_COMPLETIONS_MODEL | 配置与 `ENV.GOOGLE_COMPLETIONS_MODEL` 相同 | ### 支持命令 diff --git a/doc/en/CONFIG.md b/doc/en/CONFIG.md index ac31d3b2..e8804736 100644 --- a/doc/en/CONFIG.md +++ b/doc/en/CONFIG.md @@ -15,54 +15,55 @@ It is recommended to fill in environment variables in the Workers configuration ### System Configuration Configuration that is common to each user, usually filled in the Workers configuration interface. 
-| KEY | Description | Default Value | Special Description | -|:--------------------------|---------------------------------------------|------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| AI_PROVIDER | AI provider | `auto` | AI providers: auto, azure, openai, workers; auto automatically selects a valid configuration. The priority order is azure > openai > workers. | -| API_KEY | OpenAI API Key | `null` | Multiple keys can be used at the same time, and one will be randomly selected when using the | -| CHAT_MODEL | Open AI model | `gpt-3.5-turbo` | | -| - | - | - | - | -| TELEGRAM_AVAILABLE_TOKENS | Support for multiple Telegram Bot Token | `null` | Multiple Token separated by `,` | -| - | - | - | - | -| CHAT_WHITE_LIST | Chat ID Whitelisting | `null` | Multiple IDs are separated by `,`, not knowing the IDs, talking to the bot for a sentence returns | -| I_AM_A_GENEROUS_PERSON | Close the whitelist and allow access to all | `false` | Since many people don't want to whitelist, or don't know how to get an ID, setting this option will allow everyone to access it, with a value of `true`. | -| LOCK_USER_CONFIG_KEYS | Lock custom user configurations | `[]` | You can lock certain fields. For example, setting it to `CHAT_MODEL` can prevent other users from switching models through the `/setenv` command. Multiple fields are separated by `,`. | -| - | - | - | - | -| AUTO_TRIM_HISTORY | Automatically trim history | `true` | To avoid the 4096 character limit, truncate the message | -| MAX_HISTORY_LENGTH | Maximum history length | `20` | `When AUTO_TRIM_HISTORY is turned on` To avoid the 4096 character limit, truncate the message | -| MAX_TOKEN_LENGTH | Maximum number of historical tokens | 2048 | Too long and easy to timeout suggest setting at a suitable number | -| GPT3_TOKENS_COUNT | GTP counting mode | `false` | Use more accurate token counting mode instead of just judging string length, but it's easy to time out | -| - | - | - | - | -| SYSTEM_INIT_MESSAGE | System initialization message | `You are a useful assistant.` | Default robot init message | -| SYSTEM_INIT_MESSAGE_ROLE | System initialization message Role | `system` | Default robot init role | -| - | - | - | - | -| ENABLE_USAGE_STATISTICS | Enable usage statistics | `false` | After enabling, each API call will be recorded in KV and can be viewed through `/usage`. | -| HIDE_COMMAND_BUTTONS | Hide command buttons | `null` | Write the buttons you want to hide separated by commas `/start,/system`, remember to include slashes, and after modifying, reinitialize `init`. | -| SHOW_REPLY_BUTTON | Show Quick Reply button | `false` | Display quick reply buttons. | -| - | - | - | - | -| UPDATE_BRANCH | Git branch | `master` | Branch where version detection is located | -| - | - | - | - | -| DEBUG_MODE | Debug mode | `false` | Currently, the latest message can be saved to KV for convenient debugging. It consumes a lot of KV write volume and must be turned off in the production environment. | -| DEV_MODE | Developer mode | `false` | Development testing | -| STREAM_MODE | Stream mode | `true` | Get a typewriter output mode similar to ChatGPT Web. 
| -| SAFE_MODE | Safe mode | `true` | Safe mode will increase KV write overhead, but it can avoid Telegram's death loop retry caused by Workers timeout, reduce token waste, and it is not recommended to disable. | -| - | - | - | - | -| LANGUAGE | Language | `zh-CN` | `zh-CN`,`zh-TW`, `en` | -| - | - | - | - | -| TELEGRAM_API_DOMAIN | Telegram | `https://api.telegram.org` | Customization of Telegram API server. | -| OPENAI_API_DOMAIN | OpenAI | `https://api.openai.com` | Can be replaced with the domain name of other service providers compatible with OpenAI API. | -| - | - | - | - | -| AZURE_API_KEY | azure api key | `null` | Support Azure API, choose either of the two keys. If you want to use Azure by default, you can set `AI_PROVIDER` to `azure`. | -| AZURE_COMPLETIONS_API | azure api url | `null` | `https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions?api-version=2023-05-15` | -| - | - | - | - | -| CLOUDFLARE_ACCOUNT_ID | Cloudflare User ID | `null` | You can find this information in the right sidebar of the workers homepage, If you want to use Azure by default, you can set `AI_PROVIDER` to `workers`. | -| CLOUDFLARE_TOKEN | Cloudflare Token | `null` | You can create using the `Workers AI (Beta)` template at `https://dash.cloudflare.com/profile/api-tokens`. | -| WORKERS_CHAT_MODEL | Text generation model | `@cf/meta/llama-2-7b-chat-fp16` | You can check the specific model list at `https://developers.cloudflare.com/workers-ai/models/llm/`. | -| WORKERS_IMAGE_MODEL | Text-to-image generation model | `@cf/stabilityai/stable-diffusion-xl-base-1.0` | Same as above. | -| - | - | - | - | -| DALL_E_MODEL | The model of the generated image | `dall-e-2` | Support `dall-e-2` and `dall-e-3` | -| DALL_E_IMAGE_SIZE | The size of the generated image | `512x512` | The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. | -| DALL_E_IMAGE_QUALITY | The image quality of the generated image | `standard` | The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3. | -| DALL_E_IMAGE_STYLE | The image style of the generated image | `vivid` | The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3. | +| KEY | Description | Default Value | Special Description | +|:--------------------------|-------------------------------------------------------|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| LANGUAGE | Language | `zh-CN` | `zh-CN`,`zh-TW`, `en` | +| AI_PROVIDER | AI provider | `auto` | AI providers: `auto, azure, openai, workers`; auto automatically selects a valid configuration. The priority order is azure > openai > workers. | +| UPDATE_BRANCH | Git branch | `master` | Branch where version detection is located | +| - | - | - | - | +| TELEGRAM_API_DOMAIN | Telegram | `https://api.telegram.org` | Customization of Telegram API server. 
| +| TELEGRAM_AVAILABLE_TOKENS | Support for multiple Telegram Bot Token | `null` | Multiple Token separated by `,` | +| - | - | - | - | +| CHAT_WHITE_LIST | Chat ID Whitelisting | `null` | Multiple IDs are separated by `,`, not knowing the IDs, talking to the bot for a sentence returns | +| I_AM_A_GENEROUS_PERSON | Close the whitelist and allow access to all | `false` | Since many people don't want to whitelist, or don't know how to get an ID, setting this option will allow everyone to access it, with a value of `true`. | +| LOCK_USER_CONFIG_KEYS | Lock custom user configurations | `[]` | You can lock certain fields. For example, setting it to `CHAT_MODEL` can prevent other users from switching models through the `/setenv` command. Multiple fields are separated by `,`. | +| - | - | - | - | +| AUTO_TRIM_HISTORY | Automatically trim history | `true` | To avoid the 4096 character limit, truncate the message | +| MAX_HISTORY_LENGTH | Maximum history length | `20` | `When AUTO_TRIM_HISTORY is turned on` To avoid the 4096 character limit, truncate the message | +| MAX_TOKEN_LENGTH | Maximum number of historical tokens | 2048 | Too long and easy to timeout suggest setting at a suitable number | +| GPT3_TOKENS_COUNT | GTP counting mode | `false` | Use more accurate token counting mode instead of just judging string length, but it's easy to time out | +| GPT3_TOKENS_COUNT_REPO | GPT3 Counter Resource Repo | `https://raw.githubusercontent.com/tbxark-arc/GPT-3-Encoder/master` | Resource file for loading GPT3 Token counting configurations | +| - | - | - | - | +| SYSTEM_INIT_MESSAGE | System initialization message | `You are a useful assistant.` | Default robot init message | +| SYSTEM_INIT_MESSAGE_ROLE | System initialization message Role | `system` | Default robot init role | +| - | - | - | - | +| ENABLE_USAGE_STATISTICS | Enable usage statistics | `false` | After enabling, each API call will be recorded in KV and can be viewed through `/usage`. | +| HIDE_COMMAND_BUTTONS | Hide command buttons | `null` | Write the buttons you want to hide separated by commas `/start,/system`, remember to include slashes, and after modifying, reinitialize `init`. | +| SHOW_REPLY_BUTTON | Show Quick Reply button | `false` | Display quick reply buttons. | +| - | - | - | - | +| DEBUG_MODE | Debug mode | `false` | Currently, the latest message can be saved to KV for convenient debugging. It consumes a lot of KV write volume and must be turned off in the production environment. | +| DEV_MODE | Developer mode | `false` | Development testing | +| STREAM_MODE | Stream mode | `true` | Get a typewriter output mode similar to ChatGPT Web. | +| SAFE_MODE | Safe mode | `true` | Safe mode will increase KV write overhead, but it can avoid Telegram's death loop retry caused by Workers timeout, reduce token waste, and it is not recommended to disable. | +| - | - | - | - | +| API_KEY | OpenAI API Key | `null` | Multiple keys can be used at the same time, and one will be randomly selected when using the | +| CHAT_MODEL | Open AI model | `gpt-3.5-turbo` | | +| OPENAI_API_DOMAIN | OpenAI API Domain [Deprecated: use OPENAI_API_BASE] | `https://api.openai.com` | Can be replaced with the domain name of other service providers compatible with OpenAI API. | +| OPENAI_API_BASE | OPENAI API Base URL | `https://api.openai.com/v1` | Compatible with Cloudflare AI Gateway | +| - | - | - | - | +| AZURE_API_KEY | azure api key | `null` | Support Azure API, choose either of the two keys. 
If you want to use Azure by default, you can set `AI_PROVIDER` to `azure`. | +| AZURE_COMPLETIONS_API | azure completions api url | `null` | `https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions?api-version=2023-05-15` | +| AZURE_DALLE_API | azure dalle api url | `null` | `https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2023-12-01-preview` | +| - | - | - | - | +| CLOUDFLARE_ACCOUNT_ID | Cloudflare User ID | `null` | You can find this information in the right sidebar of the workers homepage, If you want to use Azure by default, you can set `AI_PROVIDER` to `workers`. | +| CLOUDFLARE_TOKEN | Cloudflare Token | `null` | You can create using the `Workers AI (Beta)` template at `https://dash.cloudflare.com/profile/api-tokens`. | +| WORKERS_CHAT_MODEL | Text generation model | `@cf/mistral/mistral-7b-instruct-v0.1 ` | You can check the specific model list at `https://developers.cloudflare.com/workers-ai/models/llm/`. | +| WORKERS_IMAGE_MODEL | Text-to-image generation model | `@cf/stabilityai/stable-diffusion-xl-base-1.0` | Same as above. | +| - | - | - | - | +| DALL_E_MODEL | The model of the generated image | `dall-e-2` | Support `dall-e-2` and `dall-e-3` | +| DALL_E_IMAGE_SIZE | The size of the generated image | `512x512` | The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. | +| DALL_E_IMAGE_QUALITY | The image quality of the generated image | `standard` | The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3. | +| DALL_E_IMAGE_STYLE | The image style of the generated image | `vivid` | The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3. | @@ -89,23 +90,25 @@ You can add the bot to a group, and then everyone in the group can chat with the Each user's custom configuration can only be modified by sending a message through Telegram. The message format is `/setenv KEY=VALUE`. User configurations have higher priority than system configurations. If you want to delete a configuration, please use `/delenv KEY`. To set variables in bulk, please use `/setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}`. -| KEY | Description | -|:------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| AI_PROVIDER | Configuration same as ENV | -| CHAT_MODEL | Configuration same as ENV | -| OPENAI_API_KEY | After setting this value, the system-configured KEY will not be called. | -| OPENAI_API_EXTRA_PARAMS | OpenAI API additional parameters, once set, will be included in every API call and can be used to adjust temperature and other parameters. `/setenv OPENAI_API_EXTRA_PARAMS={"temperature": 0.5}` Each modification must be a complete JSON. 
| -| SYSTEM_INIT_MESSAGE | Configuration same as ENV | -| DALL_E_MODEL | Configuration same as ENV | -| DALL_E_IMAGE_SIZE | Configuration same as ENV | -| DALL_E_IMAGE_QUALITY | Configuration same as ENV | -| DALL_E_IMAGE_STYLE | Configuration same as ENV | -| AZURE_API_KEY | Configuration same as ENV | -| AZURE_COMPLETIONS_API | Configuration same as ENV | -| WORKERS_CHAT_MODEL | Configuration same as ENV | -| WORKER_IMAGE_MODEL | Configuration same as ENV | - - +| KEY | Description | +|:-------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| AI_PROVIDER | Configuration same as `ENV.AI_PROVIDER` | +| CHAT_MODEL | Configuration same as `ENV.CHAT_MODEL` | +| OPENAI_API_KEY | After setting this value, the system-configured KEY will not be called. | +| OPENAI_API_EXTRA_PARAMS | OpenAI API additional parameters, once set, will be included in every API call and can be used to adjust temperature and other parameters. `/setenv OPENAI_API_EXTRA_PARAMS={"temperature": 0.5}` Each modification must be a complete JSON. | +| SYSTEM_INIT_MESSAGE | Configuration same as `ENV.SYSTEM_INIT_MESSAGE` | +| DALL_E_MODEL | Configuration same as `ENV.DALL_E_MODEL` | +| DALL_E_IMAGE_SIZE | Configuration same as `ENV.DALL_E_IMAGE_SIZE` | +| DALL_E_IMAGE_QUALITY | Configuration same as `ENV.DALL_E_IMAGE_QUALITY` | +| DALL_E_IMAGE_STYLE | Configuration same as `ENV.DALL_E_IMAGE_STYLE` | +| AZURE_API_KEY | Configuration same as `ENV.AZURE_API_KEY` | +| AZURE_COMPLETIONS_API | Configuration same as `ENV.AZURE_COMPLETIONS_API` | +| AZURE_DALLE_API | Configuration same as `ENV.AZURE_DALLE_API` | +| WORKERS_CHAT_MODEL | Configuration same as `ENV.WORKERS_CHAT_MODEL` | +| WORKER_IMAGE_MODEL | Configuration same as `ENV.WORKER_IMAGE_MODEL` | +| GOOGLE_API_KEY | Configuration same as `ENV.GOOGLE_API_KEY` | +| GOOGLE_COMPLETIONS_API | Configuration same as `ENV.GOOGLE_COMPLETIONS_API` | +| GOOGLE_COMPLETIONS_MODEL | Configuration same as `ENV.GOOGLE_COMPLETIONS_MODEL` | ### Support command diff --git a/src/command.js b/src/command.js index 6233aa79..f06c4c8d 100644 --- a/src/command.js +++ b/src/command.js @@ -439,6 +439,8 @@ async function commandSystem(message, command, subcommand, context) { context.USER_CONFIG.OPENAI_API_KEY = '******'; context.USER_CONFIG.AZURE_API_KEY = '******'; context.USER_CONFIG.AZURE_COMPLETIONS_API = '******'; + context.USER_CONFIG.AZURE_DALLE_API = '******'; + context.USER_CONFIG.GOOGLE_API_KEY = '******'; msg = '\n' + msg; msg += `USER_CONFIG: ${JSON.stringify(context.USER_CONFIG, null, 2)}\n`; diff --git a/src/context.js b/src/context.js index 358db249..328e1417 100644 --- a/src/context.js +++ b/src/context.js @@ -48,11 +48,21 @@ export class Context { AZURE_API_KEY: ENV.AZURE_API_KEY, // Azure Completions API AZURE_COMPLETIONS_API: ENV.AZURE_COMPLETIONS_API, + // Azure DALL-E API + AZURE_DALLE_API: ENV.AZURE_DALLE_API, // WorkersAI聊天记录模型 WORKERS_CHAT_MODEL: ENV.WORKERS_CHAT_MODEL, // WorkersAI图片模型 WORKER_IMAGE_MODEL: ENV.WORKERS_IMAGE_MODEL, + + + // Google Gemini API Key + GOOGLE_API_KEY: ENV.GOOGLE_API_KEY, + // Google Gemini API + GOOGLE_COMPLETIONS_API: ENV.GOOGLE_COMPLETIONS_API, + // Google Gemini Model + GOOGLE_COMPLETIONS_MODEL: ENV.GOOGLE_COMPLETIONS_MODEL, }; USER_DEFINE = { @@ -118,7 +128,7 @@ export class Context { console.error(e); } { - const aiProvider = new 
Set('auto,openai,azure,workers'.split(',')); + const aiProvider = new Set('auto,openai,azure,workers,gemini'.split(',')); if (!aiProvider.has(this.USER_CONFIG.AI_PROVIDER)) { this.USER_CONFIG.AI_PROVIDER = 'auto'; } diff --git a/src/env.js b/src/env.js index 9fa4ea68..51c5d879 100644 --- a/src/env.js +++ b/src/env.js @@ -4,19 +4,35 @@ import './i18n/type.js'; * @class Environment */ class Environment { + // -- 版本数据 -- + // + // 当前版本 + BUILD_TIMESTAMP = process?.env?.BUILD_TIMESTAMP || 0; + // 当前版本 commit id + BUILD_VERSION = process?.env?.BUILD_VERSION || ''; + + + // -- 基础配置 -- /** * @type {I18n | null} */ I18N = null; + // 多语言支持 LANGUAGE = 'zh-cn'; - - - // AI提供商: auto, openai, azure, workers + // 检查更新的分支 + UPDATE_BRANCH = 'master'; + // AI提供商: auto, openai, azure, workers, gemini AI_PROVIDER = 'auto'; + // -- Telegram 相关 -- + // + // Telegram API Domain + TELEGRAM_API_DOMAIN = 'https://api.telegram.org'; // 允许访问的Telegram Token, 设置时以逗号分隔 TELEGRAM_AVAILABLE_TOKENS = []; + // -- 权限相关 -- + // // 允许所有人使用 I_AM_A_GENEROUS_PERSON = false; // 白名单 @@ -24,6 +40,8 @@ class Environment { // 用户配置 LOCK_USER_CONFIG_KEYS = []; + // -- 群组相关 -- + // // 允许访问的Telegram Token 对应的Bot Name, 设置时以逗号分隔 TELEGRAM_BOT_NAME = []; // 群组白名单 @@ -33,10 +51,8 @@ class Environment { // 群组机器人共享模式,关闭后,一个群组只有一个会话和配置。开启的话群组的每个人都有自己的会话上下文 GROUP_CHAT_BOT_SHARE_MODE = false; - // OpenAI API Key - API_KEY = []; - // OpenAI的模型名称 - CHAT_MODEL = 'gpt-3.5-turbo'; + // -- 历史记录相关 -- + // // 为了避免4096字符限制,将消息删减 AUTO_TRIM_HISTORY = true; // 最大历史记录长度 @@ -47,11 +63,27 @@ class Environment { GPT3_TOKENS_COUNT = false; // GPT3计数器资源地址 GPT3_TOKENS_COUNT_REPO = 'https://raw.githubusercontent.com/tbxark-arc/GPT-3-Encoder/master'; + + // -- Prompt 相关 -- + // // 全局默认初始化消息 SYSTEM_INIT_MESSAGE = null; // 全局默认初始化消息角色 SYSTEM_INIT_MESSAGE_ROLE = 'system'; + // -- Open AI 配置 -- + // + // OpenAI API Key + API_KEY = []; + // OpenAI的模型名称 + CHAT_MODEL = 'gpt-3.5-turbo'; + // OpenAI API Domain 可替换兼容openai api的其他服务商 + OPENAI_API_DOMAIN = 'https://api.openai.com'; + // OpenAI API BASE `https://api.openai.com/v1` + OPENAI_API_BASE = null; + + // -- DALLE 配置 -- + // // DALL-E的模型名称 DALL_E_MODEL = 'dall-e-2'; // DALL-E图片尺寸 @@ -61,6 +93,8 @@ class Environment { // DALL-E图片风格 DALL_E_IMAGE_STYLE = 'vivid'; + // -- 特性开关 -- + // // 是否开启使用统计 ENABLE_USAGE_STATISTICS = false; // 隐藏部分命令按钮 @@ -68,14 +102,8 @@ class Environment { // 显示快捷回复按钮 SHOW_REPLY_BUTTON = false; - - // 检查更新的分支 - UPDATE_BRANCH = 'master'; - // 当前版本 - BUILD_TIMESTAMP = process?.env?.BUILD_TIMESTAMP || 0; - // 当前版本 commit id - BUILD_VERSION = process?.env?.BUILD_VERSION || ''; - + // -- 模式开关 -- + // // 使用流模式 STREAM_MODE = true; // 安全模式 @@ -85,26 +113,30 @@ class Environment { // 开发模式 DEV_MODE = false; - // Telegram API Domain - TELEGRAM_API_DOMAIN = 'https://api.telegram.org'; - // OpenAI API Domain 可替换兼容openai api的其他服务商 - OPENAI_API_DOMAIN = 'https://api.openai.com'; - // OpenAI API BASE `https://api.openai.com/v1` - OPENAI_API_BASE = null; - + // -- AZURE 配置 -- + // // Azure API Key AZURE_API_KEY = null; // Azure Completions API AZURE_COMPLETIONS_API = null; + // Azure DallE API + AZURE_DALLE_API = null; // Cloudflare Account ID CLOUDFLARE_ACCOUNT_ID = null; // Cloudflare Token CLOUDFLARE_TOKEN = null; // Text Generation Model - WORKERS_CHAT_MODEL = '@cf/meta/llama-2-7b-chat-fp16'; + WORKERS_CHAT_MODEL = '@cf/mistral/mistral-7b-instruct-v0.1 '; // Text-to-Image Model WORKERS_IMAGE_MODEL = '@cf/stabilityai/stable-diffusion-xl-base-1.0'; + + // Google Gemini API Key + GOOGLE_API_KEY = null; + // Google 
Gemini API + GOOGLE_COMPLETIONS_API = 'https://generativelanguage.googleapis.com/v1beta/models/'; + // Google Gemini Model + GOOGLE_COMPLETIONS_MODEL = 'gemini.js-pro'; } @@ -137,8 +169,10 @@ export function initEnv(env, i18n) { OPENAI_API_BASE: 'string', AZURE_API_KEY: 'string', AZURE_COMPLETIONS_API: 'string', + AZURE_DALLE_API: 'string', CLOUDFLARE_ACCOUNT_ID: 'string', CLOUDFLARE_TOKEN: 'string', + GOOGLE_API_KEY: 'string', }; diff --git a/src/gemini.js b/src/gemini.js new file mode 100644 index 00000000..6f2e57ef --- /dev/null +++ b/src/gemini.js @@ -0,0 +1,77 @@ +/* eslint-disable no-unused-vars */ +import {Context} from './context.js'; +import {CONST} from './env.js'; + +/** + * @param {Context} context + * @return {boolean} + */ +export function isGeminiAIEnable(context) { + return !!(context.USER_CONFIG.GOOGLE_API_KEY); +} + +/** + * 发送消息到Gemini + * + * @param {string} message + * @param {Array} history + * @param {Context} context + * @param {function} onStream + * @return {Promise} + */ +export async function requestCompletionsFromGeminiAI(message, history, context, onStream) { + const url = `${context.USER_CONFIG.GOOGLE_COMPLETIONS_API}${context.USER_CONFIG.GOOGLE_COMPLETIONS_MODEL}:${ + // 暂时不支持stream模式 + // onStream ? 'streamGenerateContent' : 'generateContent' + 'generateContent' + }?key=${context.USER_CONFIG.GOOGLE_API_KEY}`; + + const contentsTemp = [...history || [], {role: 'user', content: message}]; + const contents = []; + // role必须是 model,user 而且不能连续两个一样 + for (const msg of contentsTemp) { + switch (msg.role) { + case 'assistant': + msg.role = 'model'; + break; + case 'system': + case 'user': + msg.role = 'user'; + break; + default: + continue; + } + // 如果存在最后一个元素或role不一样则插入 + if (contents.length === 0 || contents[contents.length - 1].role !== msg.role) { + contents.push({ + 'role': msg.role, + 'parts': [ + { + 'text': msg.content, + }, + ], + }); + } else { + // 否则合并 + contents[contents.length - 1].parts[0].text += msg.content; + } + } + + const resp = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'User-Agent': CONST.USER_AGENT, + }, + body: JSON.stringify({contents}), + }); + const data = await resp.json(); + try { + return data.candidates[0].content.parts[0].text; + } catch (e) { + if (!data) { + throw new Error('Empty response'); + } + throw new Error(data?.error?.message || JSON.stringify(data)); + } +} diff --git a/src/llm.js b/src/llm.js index 34622034..c7543dd2 100644 --- a/src/llm.js +++ b/src/llm.js @@ -9,6 +9,7 @@ import {Context} from './context.js'; import {isAzureEnable, isOpenAIEnable, requestCompletionsFromOpenAI, requestImageFromOpenAI} from './openai.js'; import {tokensCounter} from './utils.js'; import {isWorkersAIEnable, requestCompletionsFromWorkersAI, requestImageFromWorkersAI} from './workers-ai.js'; +import {isGeminiAIEnable, requestCompletionsFromGeminiAI} from './gemini.js'; /** @@ -117,6 +118,8 @@ export function loadChatLLM(context) { return requestCompletionsFromOpenAI; case 'workers': return requestCompletionsFromWorkersAI; + case 'gemini': + return requestCompletionsFromGeminiAI; default: if (isOpenAIEnable(context) || isAzureEnable(context)) { return requestCompletionsFromOpenAI; @@ -124,6 +127,9 @@ export function loadChatLLM(context) { if (isWorkersAIEnable(context)) { return requestCompletionsFromWorkersAI; } + if (isGeminiAIEnable(context)) { + return requestCompletionsFromGeminiAI; + } return null; } } @@ -138,11 +144,11 @@ export function loadImageGen(context) { case 'openai': return 
requestImageFromOpenAI; case 'azure': - return null; + return requestImageFromOpenAI; case 'workers': return requestImageFromWorkersAI; default: - if (isOpenAIEnable(context)) { + if (isOpenAIEnable(context) || isAzureEnable(context)) { return requestImageFromOpenAI; } if (isWorkersAIEnable(context)) { diff --git a/src/message.js b/src/message.js index a9e1c42a..da12478c 100644 --- a/src/message.js +++ b/src/message.js @@ -257,7 +257,7 @@ async function msgHandleRole(message, context) { for (const key in roleConfig) { if ( context.USER_CONFIG.hasOwnProperty(key) && typeof context.USER_CONFIG[key] === typeof roleConfig[key] ) { if (ENV.LOCK_USER_CONFIG_KEYS.includes(key)) { - continue; + continue; } context.USER_CONFIG[key] = roleConfig[key]; } diff --git a/src/openai.js b/src/openai.js index 797b8ed7..aafc0ab0 100644 --- a/src/openai.js +++ b/src/openai.js @@ -40,9 +40,9 @@ export function isOpenAIEnable(context) { * @return {boolean} */ export function isAzureEnable(context) { - const api = context.USER_CONFIG.AZURE_COMPLETIONS_API || ENV.AZURE_COMPLETIONS_API; + // const api = context.USER_CONFIG.AZURE_COMPLETIONS_API || ENV.AZURE_COMPLETIONS_API; const key = context.USER_CONFIG.AZURE_API_KEY || ENV.AZURE_API_KEY; - return api !== null && key !== null; + return key !== null; } @@ -75,8 +75,8 @@ export async function requestCompletionsFromOpenAI(message, history, context, on }; { const provider = context.USER_CONFIG.AI_PROVIDER; - if (provider === 'azure' || (provider === 'auto' && isAzureEnable(context)) ) { - url = ENV.AZURE_COMPLETIONS_API; + if (provider === 'azure' || (provider === 'auto' && isAzureEnable(context) && context.USER_CONFIG.AZURE_COMPLETIONS_API !== null) ) { + url = context.USER_CONFIG.AZURE_COMPLETIONS_API; header['api-key'] = azureKeyFromContext(context); delete header['Authorization']; delete body.model; @@ -120,7 +120,14 @@ export async function requestCompletionsFromOpenAI(message, history, context, on } } setTimeout(() => updateBotUsage(result.usage, context).catch(console.error), 0); - return result.choices[0].message.content; + try { + return result.choices[0].message.content; + } catch (e) { + if (!result) { + throw new Error('Empty response'); + } + throw Error(result?.error?.message || JSON.stringify(result)); + } } @@ -131,7 +138,11 @@ export async function requestCompletionsFromOpenAI(message, history, context, on * @return {Promise } */ export async function requestImageFromOpenAI(prompt, context) { - const key = openAIKeyFromContext(context); + let url = `${ENV.OPENAI_API_BASE}/images/generations`; + const header = { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${openAIKeyFromContext(context)}`, + }; const body = { prompt: prompt, n: 1, @@ -142,12 +153,23 @@ export async function requestImageFromOpenAI(prompt, context) { body.quality = context.USER_CONFIG.DALL_E_IMAGE_QUALITY; body.style = context.USER_CONFIG.DALL_E_IMAGE_STYLE; } - const resp = await fetch(`${ENV.OPENAI_API_BASE}/images/generations`, { + { + const provider = context.USER_CONFIG.AI_PROVIDER; + if (provider === 'azure' || (provider === 'auto' && isAzureEnable(context) && context.USER_CONFIG.AZURE_DALLE_API !== null) ) { + url = context.USER_CONFIG.AZURE_DALLE_API; + // 1792x1024, 1024x1024, or 1024x1792. 
+ const vaildSize = ['1792x1024', '1024x1024', '1024x1792']; + if (!vaildSize.includes(body.size)) { + body.size = '1024x1024'; + } + header['api-key'] = azureKeyFromContext(context); + delete header['Authorization']; + delete body.model; + } + } + const resp = await fetch(url, { method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'Authorization': `Bearer ${key}`, - }, + headers: header, body: JSON.stringify(body), }).then((res) => res.json()); if (resp.error?.message) { diff --git a/src/workers-ai.js b/src/workers-ai.js index d24607ab..93c6e632 100644 --- a/src/workers-ai.js +++ b/src/workers-ai.js @@ -77,7 +77,14 @@ export async function requestCompletionsFromWorkersAI(message, history, context, return contentFull; } else { const data = await resp.json(); - return data.result.response; + try { + return data.result.response; + } catch (e) { + if (!data) { + throw new Error('Empty response'); + } + throw new Error(data?.errors?.[0]?.message || JSON.stringify(data)); + } } }
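
---

For reference, below is a minimal standalone sketch of the request flow that the new `src/gemini.js` performs: remapping chat history into Gemini `contents` (Gemini only accepts alternating `user` / `model` roles) and then calling `generateContent`. This is a sketch, not the project's code: it assumes a runtime with global `fetch` (Node 18+ or Workers), and the model name and `process.env.GOOGLE_API_KEY` lookup are placeholders, whereas the bot itself reads `GOOGLE_COMPLETIONS_API`, `GOOGLE_COMPLETIONS_MODEL`, and `GOOGLE_API_KEY` from its configuration. Note that this change does not use streaming for Gemini; the code always calls `generateContent`.

```js
// Standalone sketch only. The endpoint shape mirrors src/gemini.js; the model
// name and the env-var lookup are placeholders, not the bot's defaults.
const API_BASE = 'https://generativelanguage.googleapis.com/v1beta/models/';
const MODEL = 'gemini-pro'; // placeholder for GOOGLE_COMPLETIONS_MODEL
const KEY = process.env.GOOGLE_API_KEY; // placeholder for GOOGLE_API_KEY

// Remap roles (assistant -> model, system -> user) and merge consecutive
// messages that end up with the same role, as the new gemini.js does.
function toGeminiContents(history, message) {
  const contents = [];
  for (const msg of [...(history || []), {role: 'user', content: message}]) {
    let role;
    if (msg.role === 'assistant') {
      role = 'model';
    } else if (msg.role === 'system' || msg.role === 'user') {
      role = 'user';
    } else {
      continue; // drop unknown roles
    }
    if (contents.length === 0 || contents[contents.length - 1].role !== role) {
      contents.push({role, parts: [{text: msg.content}]});
    } else {
      contents[contents.length - 1].parts[0].text += msg.content;
    }
  }
  return contents;
}

async function generateContent(history, message) {
  const url = `${API_BASE}${MODEL}:generateContent?key=${KEY}`;
  const resp = await fetch(url, {
    method: 'POST',
    headers: {'Content-Type': 'application/json'},
    body: JSON.stringify({contents: toGeminiContents(history, message)}),
  });
  const data = await resp.json();
  const text = data?.candidates?.[0]?.content?.parts?.[0]?.text;
  if (text === undefined) {
    throw new Error(data?.error?.message || JSON.stringify(data));
  }
  return text;
}

// Example usage:
// generateContent([{role: 'system', content: 'You are helpful.'}], 'Hi').then(console.log);
```

In the deployed bot, routing chats through this path should only require setting `GOOGLE_API_KEY`; if another provider is also configured, `/setenv AI_PROVIDER=gemini` selects it explicitly, since the `auto` provider order checks OpenAI/Azure and Workers AI before Gemini.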