diff --git a/README.md b/README.md
index 74c2462723d0..81398a6e6e49 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,7 @@ We have implemented support for the following model service providers:
- **Minimax**: Integrated the Minimax models, including the MoE model **abab6**, offering a broader range of choices. [Learn more](https://www.minimaxi.com/)
- **DeepSeek**: Integrated the DeepSeek series models from an innovative AI startup in China, designed to balance performance with price. [Learn more](https://www.deepseek.com/)
- **Qwen**: Integrated the Qwen series models, including the latest **qwen-turbo**, **qwen-plus** and **qwen-max**. [Learn more](https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction)
+- **Novita AI**: Access **Llama**, **Mistral**, and other leading open-source models at the lowest prices. Engage in uncensored role-play, spark creative discussions, and foster unrestricted innovation. **Pay For What You Use.** [Learn more](https://novita.ai/llm-api?utm_source=lobechat&utm_medium=ch&utm_campaign=api)
At the same time, we are also planning to support more model service providers, such as Replicate and Perplexity, to further enrich our service provider library. If you would like LobeChat to support your favorite service provider, feel free to join our [community discussion](https://github.com/lobehub/lobe-chat/discussions/1284).
diff --git a/docs/usage/providers/novita.mdx b/docs/usage/providers/novita.mdx
new file mode 100644
index 000000000000..4040567f93ce
--- /dev/null
+++ b/docs/usage/providers/novita.mdx
@@ -0,0 +1,80 @@
+---
+title: Using Novita AI API Key in LobeChat
+description: >-
+  Learn how to integrate Novita AI's language model APIs into LobeChat. Follow the steps to register, create a
+  Novita AI API key, configure settings, and chat with a variety of AI models.
+
+tags:
+ - Novita AI
+ - Llama3
+ - Mistral
+ - uncensored
+ - API key
+ - Web UI
+---
+
+# Using Novita AI in LobeChat
+
+
+
+[Novita AI](https://novita.ai/) is an AI API platform that provides a variety of LLM and image generation APIs, supporting Llama3 (8B, 70B), Mistral, and many other cutting-edge models. It offers both censored and uncensored models to meet different needs.
+
+This document will guide you through integrating Novita AI into LobeChat:
+
+
+
+### Step 1: Register and Log in to Novita AI
+
+- Visit [Novita.ai](https://novita.ai/) and create an account
+- You can log in with your Google or GitHub account
+- Upon registration, Novita AI will provide a $0.5 credit
+
+
+
+### Step 2: Obtain the API Key
+
+- Visit Novita AI's [key management page](https://novita.ai/dashboard/key), create and copy an API Key.
+
+
+
+### Step 3: Configure Novita AI in LobeChat
+
+- Visit the `Settings` interface in LobeChat
+- Find the setting for `novita.ai` under `Language Model`
+
+
+
+- Turn on novita.ai and enter the API key you obtained
+- Choose a Novita AI model for your assistant to start the conversation
+
+
+
+
+  During usage, you may need to pay the API service provider; please refer to Novita AI's pricing
+  policy.
+
+
+
+
+You can now engage in conversations using the models provided by Novita AI in LobeChat.
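+
+Novita AI's API is OpenAI-compatible, which is what lets LobeChat integrate it through its standard OpenAI-style runtime. If you want to verify your key outside LobeChat, here is a minimal sketch using the official `openai` SDK; the endpoint and model id come from the integration above, while the `NOVITA_API_KEY` environment variable name is just an assumption for this example:
+
+```ts
+import OpenAI from 'openai';
+
+// Novita AI exposes an OpenAI-compatible endpoint, so the official SDK
+// only needs the baseURL and API key swapped out.
+const client = new OpenAI({
+  apiKey: process.env.NOVITA_API_KEY, // assumed: the key created in Step 2
+  baseURL: 'https://api.novita.ai/v3/openai',
+});
+
+async function main() {
+  const completion = await client.chat.completions.create({
+    messages: [{ content: 'Hello', role: 'user' }],
+    model: 'meta-llama/llama-3-8b-instruct',
+  });
+  console.log(completion.choices[0].message.content);
+}
+
+main();
+```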
diff --git a/docs/usage/providers/novita.zh-CN.mdx b/docs/usage/providers/novita.zh-CN.mdx
new file mode 100644
index 000000000000..cd72b8dd61af
--- /dev/null
+++ b/docs/usage/providers/novita.zh-CN.mdx
@@ -0,0 +1,76 @@
+---
+title: 在 LobeChat 中使用 Novita AI API Key
+description: 学习如何将 Novita AI 的大语言模型 API 集成到 LobeChat 中。跟随以下步骤注册 Novita AI 账号、创建 API Key、充值信用额度并在 LobeChat 中进行设置。并与我们的多种 AI 模型交谈。
+tags:
+ - Novita AI
+ - Llama3
+ - Mistral
+ - uncensored
+ - API key
+ - Web UI
+---
+
+# 在 LobeChat 中使用 Novita AI
+
+
+
+[Novita AI](https://novita.ai/) 是一个 AI API 平台,它提供多种大语言模型与 AI 图像生成的 API 服务。支持 Llama3 (8B, 70B),Mistral 和其他最新的模型。
+
+本文档将指导你如何在 LobeChat 中使用 Novita AI:
+
+
+
+### 步骤一:注册 Novita AI 账号并登录
+
+- 访问 [Novita.ai](https://novita.ai/) 并创建账号
+- 你可以使用 Google 或者 Github 账号登录
+- 注册后,Novita AI 会赠送 0.5 美元的使用额度
+
+
+
+### 步骤二:创建 API 密钥
+
+- 访问 Novita AI 的 [密钥管理页面](https://novita.ai/dashboard/key),创建并复制一个 API 密钥。
+
+
+
+### 步骤三:在 LobeChat 中配置 Novita AI
+
+- 访问 LobeChat 的 `设置` 界面
+- 在 `语言模型` 下找到 `novita.ai` 的设置项
+- 打开 novita.ai 并填入获得的 API 密钥
+
+
+
+- 为你的助手选择一个 Novita AI 模型即可开始对话
+
+
+
+
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考 Novita AI 的相关费用政策。
+
+
+
+
+至此你已经可以在 LobeChat 中使用 Novita AI 提供的模型进行对话了。
diff --git a/src/app/(main)/settings/llm/ProviderList/providers.tsx b/src/app/(main)/settings/llm/ProviderList/providers.tsx
index 110e42e78f4f..30aa944167b1 100644
--- a/src/app/(main)/settings/llm/ProviderList/providers.tsx
+++ b/src/app/(main)/settings/llm/ProviderList/providers.tsx
@@ -11,6 +11,7 @@ import {
Minimax,
Mistral,
Moonshot,
+ Novita,
OpenRouter,
Perplexity,
Stepfun,
@@ -35,6 +36,7 @@ import {
MinimaxProviderCard,
MistralProviderCard,
MoonshotProviderCard,
+ NovitaProviderCard,
OpenRouterProviderCard,
PerplexityProviderCard,
QwenProviderCard,
@@ -126,6 +128,11 @@ export const useProviderList = (): ProviderItem[] => {
docUrl: urlJoin(BASE_DOC_URL, 'openrouter'),
title: ,
},
+ {
+ ...NovitaProviderCard,
+ docUrl: urlJoin(BASE_DOC_URL, 'novita'),
+ title: ,
+ },
{
...TogetherAIProviderCard,
docUrl: urlJoin(BASE_DOC_URL, 'togetherai'),
@@ -179,12 +186,12 @@ export const useProviderList = (): ProviderItem[] => {
{
...BaichuanProviderCard,
docUrl: urlJoin(BASE_DOC_URL, 'baichuan'),
- title: ,
+ title: ,
},
{
...TaichuProviderCard,
docUrl: urlJoin(BASE_DOC_URL, 'taichu'),
- title: ,
+ title: ,
},
{
...Ai360ProviderCard,
diff --git a/src/app/api/chat/agentRuntime.ts b/src/app/api/chat/agentRuntime.ts
index 45da6157e600..e5ff047ef3fc 100644
--- a/src/app/api/chat/agentRuntime.ts
+++ b/src/app/api/chat/agentRuntime.ts
@@ -172,6 +172,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
return { apiKey };
}
+ case ModelProvider.Novita: {
+ const { NOVITA_API_KEY } = getLLMConfig();
+
+ const apiKey = apiKeyManager.pick(payload?.apiKey || NOVITA_API_KEY);
+
+ return { apiKey };
+ }
case ModelProvider.Baichuan: {
const { BAICHUAN_API_KEY } = getLLMConfig();
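Like the neighboring cases, the Novita branch lets a user-supplied key from the JWT payload take precedence over the server-side `NOVITA_API_KEY`. A simplified sketch of the assumed `apiKeyManager.pick` behavior (the comma-separated key pool and random selection are assumptions for illustration, not shown in this diff):

```ts
// Hypothetical, simplified model of the key resolution above: the
// client-supplied key wins, otherwise the server env key is used, and a
// comma-separated pool is assumed to be sampled at random.
const pickApiKey = (userKey?: string, serverKey?: string): string => {
  const pool = (userKey || serverKey || '')
    .split(',')
    .map((key) => key.trim())
    .filter(Boolean);
  return pool.length > 0 ? pool[Math.floor(Math.random() * pool.length)] : '';
};

pickApiKey(undefined, 'key-a,key-b'); // one of the two server keys
pickApiKey('user-key', 'key-a'); // always 'user-key'
```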
diff --git a/src/components/ModelProviderIcon/index.tsx b/src/components/ModelProviderIcon/index.tsx
index a4edfe5ecac4..de6270991684 100644
--- a/src/components/ModelProviderIcon/index.tsx
+++ b/src/components/ModelProviderIcon/index.tsx
@@ -12,6 +12,7 @@ import {
Minimax,
Mistral,
Moonshot,
+ Novita,
Ollama,
OpenAI,
OpenRouter,
@@ -117,6 +118,10 @@ const ModelProviderIcon = memo(({ provider }) => {
return ;
}
+ case ModelProvider.Novita: {
+ return ;
+ }
+
case ModelProvider.Baichuan: {
return ;
}
diff --git a/src/config/llm.ts b/src/config/llm.ts
index 0a226be16777..b745e7a235bc 100644
--- a/src/config/llm.ts
+++ b/src/config/llm.ts
@@ -76,6 +76,9 @@ export const getLLMConfig = () => {
ENABLED_STEPFUN: z.boolean(),
STEPFUN_API_KEY: z.string().optional(),
+ ENABLED_NOVITA: z.boolean(),
+ NOVITA_API_KEY: z.string().optional(),
+
ENABLED_BAICHUAN: z.boolean(),
BAICHUAN_API_KEY: z.string().optional(),
@@ -157,6 +160,9 @@ export const getLLMConfig = () => {
ENABLED_STEPFUN: !!process.env.STEPFUN_API_KEY,
STEPFUN_API_KEY: process.env.STEPFUN_API_KEY,
+ ENABLED_NOVITA: !!process.env.NOVITA_API_KEY,
+ NOVITA_API_KEY: process.env.NOVITA_API_KEY,
+
ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
diff --git a/src/config/modelProviders/index.ts b/src/config/modelProviders/index.ts
index d4dc405e903c..db5ae1446e8b 100644
--- a/src/config/modelProviders/index.ts
+++ b/src/config/modelProviders/index.ts
@@ -11,6 +11,7 @@ import GroqProvider from './groq';
import MinimaxProvider from './minimax';
import MistralProvider from './mistral';
import MoonshotProvider from './moonshot';
+import NovitaProvider from './novita';
import OllamaProvider from './ollama';
import OpenAIProvider from './openai';
import OpenRouterProvider from './openrouter';
@@ -40,6 +41,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
AnthropicProvider.chatModels,
ZeroOneProvider.chatModels,
StepfunProvider.chatModels,
+ NovitaProvider.chatModels,
BaichuanProvider.chatModels,
TaichuProvider.chatModels,
Ai360Provider.chatModels,
@@ -64,6 +66,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
ZeroOneProvider,
ZhiPuProvider,
StepfunProvider,
+ NovitaProvider,
BaichuanProvider,
TaichuProvider,
Ai360Provider,
@@ -89,6 +92,7 @@ export { default as GroqProviderCard } from './groq';
export { default as MinimaxProviderCard } from './minimax';
export { default as MistralProviderCard } from './mistral';
export { default as MoonshotProviderCard } from './moonshot';
+export { default as NovitaProviderCard } from './novita';
export { default as OllamaProviderCard } from './ollama';
export { default as OpenAIProviderCard } from './openai';
export { default as OpenRouterProviderCard } from './openrouter';
diff --git a/src/config/modelProviders/novita.ts b/src/config/modelProviders/novita.ts
new file mode 100644
index 000000000000..586a95cb4713
--- /dev/null
+++ b/src/config/modelProviders/novita.ts
@@ -0,0 +1,91 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const Novita: ModelProviderCard = {
+ chatModels: [
+ {
+ displayName: 'Llama3 8B Instruct',
+ enabled: true,
+ id: 'meta-llama/llama-3-8b-instruct',
+ tokens: 8192,
+ },
+ {
+ displayName: 'Llama3 70B Instruct',
+ enabled: true,
+ id: 'meta-llama/llama-3-70b-instruct',
+ tokens: 8192,
+ },
+ {
+ displayName: 'Nous Hermes 2 Pro - Llama3 8B',
+ enabled: true,
+ id: 'nousresearch/hermes-2-pro-llama-3-8b',
+ tokens: 8192,
+ },
+ {
+      displayName: 'Nous Hermes - Llama2 13B',
+ enabled: true,
+ id: 'nousresearch/nous-hermes-llama2-13b',
+ tokens: 4096,
+ },
+ {
+ displayName: 'Mistral 7B Instruct',
+ enabled: true,
+ id: 'mistralai/mistral-7b-instruct',
+ tokens: 32_768,
+ },
+ {
+ displayName: 'Dolphin Mixtral 8x22B',
+ enabled: true,
+ id: 'cognitivecomputations/dolphin-mixtral-8x22b',
+ tokens: 16_000,
+ },
+ {
+      displayName: 'L3-70B-Euryale-v2.1',
+ enabled: true,
+ id: 'sao10k/l3-70b-euryale-v2.1',
+ tokens: 16_000,
+ },
+ {
+ displayName: 'Midnight Rose 70B',
+ enabled: true,
+ id: 'sophosympatheia/midnight-rose-70b',
+ tokens: 4096,
+ },
+ {
+      displayName: 'MythoMax L2 13B',
+ enabled: true,
+ id: 'gryphe/mythomax-l2-13b',
+ tokens: 4096,
+ },
+ {
+ displayName: 'Nous Hermes 2 - Mixtral 8x7B-DPO',
+ enabled: true,
+ id: 'Nous-Hermes-2-Mixtral-8x7B-DPO',
+ tokens: 32_768,
+ },
+ {
+      displayName: 'Lzlv 70B',
+ enabled: true,
+ id: 'lzlv_70b',
+ tokens: 4096,
+ },
+ {
+ displayName: 'Open Hermes 2.5 Mistral 7B',
+ enabled: true,
+ id: 'teknium/openhermes-2.5-mistral-7b',
+ tokens: 4096,
+ },
+ {
+      displayName: 'WizardLM-2 8x22B',
+ enabled: true,
+ id: 'microsoft/wizardlm-2-8x22b',
+ tokens: 65_535,
+ },
+ ],
+ checkModel: 'meta-llama/llama-3-70b-instruct',
+ disableBrowserRequest: true,
+ id: 'novita',
+ modelList: { showModelFetcher: true },
+ name: 'Novita',
+};
+
+export default Novita;
diff --git a/src/const/settings/llm.ts b/src/const/settings/llm.ts
index c6446df684a9..1cd98e069ae8 100644
--- a/src/const/settings/llm.ts
+++ b/src/const/settings/llm.ts
@@ -9,6 +9,7 @@ import {
MinimaxProviderCard,
MistralProviderCard,
MoonshotProviderCard,
+ NovitaProviderCard,
OllamaProviderCard,
OpenAIProviderCard,
OpenRouterProviderCard,
@@ -68,6 +69,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
enabled: false,
enabledModels: filterEnabledModels(MoonshotProviderCard),
},
+ novita: {
+ enabled: false,
+ enabledModels: filterEnabledModels(NovitaProviderCard),
+ },
ollama: {
enabled: true,
enabledModels: filterEnabledModels(OllamaProviderCard),
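For context, `filterEnabledModels` turns a provider card into the default list of enabled model ids. A minimal sketch of its assumed behavior, applied to the new Novita card (every model in the card above is flagged `enabled: true`, so all ids would be returned):

```ts
import NovitaProviderCard from '@/config/modelProviders/novita';

// assumed behavior: keep the card's models flagged `enabled` and return their ids
const filterEnabledModels = (card: { chatModels: { enabled?: boolean; id: string }[] }) =>
  card.chatModels.filter((model) => model.enabled).map((model) => model.id);

filterEnabledModels(NovitaProviderCard);
// -> ['meta-llama/llama-3-8b-instruct', 'meta-llama/llama-3-70b-instruct', ...]
```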
diff --git a/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx b/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
index d1145d464d0f..35dfda03f61c 100644
--- a/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
+++ b/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
@@ -9,6 +9,7 @@ import {
Minimax,
Mistral,
Moonshot,
+ Novita,
OpenAI,
OpenRouter,
Perplexity,
@@ -94,6 +95,9 @@ const ProviderAvatar = memo(({ provider }) => {
case ModelProvider.ZeroOne: {
return ;
}
+ case ModelProvider.Novita: {
+ return ;
+ }
case ModelProvider.Ai360: {
return ;
diff --git a/src/libs/agent-runtime/AgentRuntime.ts b/src/libs/agent-runtime/AgentRuntime.ts
index 7aba68f37f97..fdb28eb25b3f 100644
--- a/src/libs/agent-runtime/AgentRuntime.ts
+++ b/src/libs/agent-runtime/AgentRuntime.ts
@@ -14,6 +14,7 @@ import { LobeGroq } from './groq';
import { LobeMinimaxAI } from './minimax';
import { LobeMistralAI } from './mistral';
import { LobeMoonshotAI } from './moonshot';
+import { LobeNovitaAI } from './novita';
import { LobeOllamaAI } from './ollama';
import { LobeOpenAI } from './openai';
import { LobeOpenRouterAI } from './openrouter';
@@ -115,6 +116,7 @@ class AgentRuntime {
minimax: Partial<ClientOptions>;
mistral: Partial<ClientOptions>;
moonshot: Partial<ClientOptions>;
+ novita: Partial<ClientOptions>;
ollama: Partial<ClientOptions>;
openai: Partial<ClientOptions>;
openrouter: Partial<ClientOptions>;
@@ -226,8 +228,13 @@ class AgentRuntime {
break;
}
+ case ModelProvider.Novita: {
+ runtimeModel = new LobeNovitaAI(params.novita ?? {});
+ break;
+ }
+
case ModelProvider.Baichuan: {
- runtimeModel = new LobeBaichuanAI(params.baichuan);
+ runtimeModel = new LobeBaichuanAI(params.baichuan ?? {});
break;
}
diff --git a/src/libs/agent-runtime/novita/index.test.ts b/src/libs/agent-runtime/novita/index.test.ts
new file mode 100644
index 000000000000..d96df2da83d6
--- /dev/null
+++ b/src/libs/agent-runtime/novita/index.test.ts
@@ -0,0 +1,251 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
+import { ModelProvider } from '@/libs/agent-runtime';
+import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeNovitaAI } from './index';
+
+const provider = ModelProvider.Novita;
+const defaultBaseURL = 'https://api.novita.ai/v3/openai';
+const bizErrorType = AgentRuntimeErrorType.ProviderBizError;
+const invalidErrorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+ instance = new LobeNovitaAI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+ vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+ new ReadableStream() as any,
+ );
+});
+
+afterEach(() => {
+ vi.clearAllMocks();
+});
+
+describe('NovitaAI', () => {
+ describe('init', () => {
+ it('should correctly initialize with an API key', async () => {
+ const instance = new LobeNovitaAI({ apiKey: 'test_api_key' });
+ expect(instance).toBeInstanceOf(LobeNovitaAI);
+ expect(instance.baseURL).toEqual(defaultBaseURL);
+ });
+ });
+
+ describe('chat', () => {
+ describe('Error', () => {
+ it('should return Error with an openai error response when OpenAI.APIError is thrown', async () => {
+ // Arrange
+ const apiError = new OpenAI.APIError(
+ 400,
+ {
+ status: 400,
+ error: {
+ message: 'Bad Request',
+ },
+ },
+ 'Error message',
+ {},
+ );
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'meta-llama/llama-3-8b-instruct',
+ temperature: 0.999,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: defaultBaseURL,
+ error: {
+ error: { message: 'Bad Request' },
+ status: 400,
+ },
+ errorType: bizErrorType,
+ provider,
+ });
+ }
+ });
+
+ it('should throw AgentRuntimeError if no apiKey is provided', async () => {
+ try {
+ new LobeNovitaAI({});
+ } catch (e) {
+ expect(e).toEqual({ errorType: invalidErrorType });
+ }
+ });
+
+ it('should return Error with the cause when OpenAI.APIError is thrown with cause', async () => {
+ // Arrange
+ const errorInfo = {
+ stack: 'abc',
+ cause: {
+ message: 'api is undefined',
+ },
+ };
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'meta-llama/llama-3-8b-instruct',
+ temperature: 0.999,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: defaultBaseURL,
+ error: {
+ cause: { message: 'api is undefined' },
+ stack: 'abc',
+ },
+ errorType: bizErrorType,
+ provider,
+ });
+ }
+ });
+
+      it('should return Error with a cause response with a desensitized URL', async () => {
+ // Arrange
+ const errorInfo = {
+ stack: 'abc',
+ cause: { message: 'api is undefined' },
+ };
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+ instance = new LobeNovitaAI({
+ apiKey: 'test',
+
+ baseURL: 'https://api.abc.com/v1',
+ });
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'meta-llama/llama-3-8b-instruct',
+ temperature: 0.999,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: 'https://api.***.com/v1',
+ error: {
+ cause: { message: 'api is undefined' },
+ stack: 'abc',
+ },
+ errorType: bizErrorType,
+ provider,
+ });
+ }
+ });
+
+ it('should throw an error type on 401 status code', async () => {
+ // Mock the API call to simulate a 401 error
+ const error = new Error('InvalidApiKey') as any;
+ error.status = 401;
+ vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'meta-llama/llama-3-8b-instruct',
+ temperature: 0.999,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: defaultBaseURL,
+ error: new Error('InvalidApiKey'),
+ errorType: invalidErrorType,
+ provider,
+ });
+ }
+ });
+
+ it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+ // Arrange
+ const genericError = new Error('Generic Error');
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'meta-llama/llama-3-8b-instruct',
+ temperature: 0.999,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: defaultBaseURL,
+ errorType: 'AgentRuntimeError',
+ provider,
+ error: {
+ name: genericError.name,
+ cause: genericError.cause,
+ message: genericError.message,
+ stack: genericError.stack,
+ },
+ });
+ }
+ });
+ });
+
+ describe('DEBUG', () => {
+ it('should call debugStream and return StreamingTextResponse when DEBUG_NOVITA_CHAT_COMPLETION is 1', async () => {
+ // Arrange
+      const mockProdStream = new ReadableStream() as any; // mocked production stream
+ const mockDebugStream = new ReadableStream({
+ start(controller) {
+ controller.enqueue('Debug stream content');
+ controller.close();
+ },
+ }) as any;
+      mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+      // Mock the return value of chat.completions.create, including a mocked tee method
+ (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+ tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+ });
+
+      // Save the original environment variable value
+ const originalDebugValue = process.env.DEBUG_NOVITA_CHAT_COMPLETION;
+
+      // Mock the environment variable
+ process.env.DEBUG_NOVITA_CHAT_COMPLETION = '1';
+ vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+      // Run the test: the chat call below should trigger debugStream
+      // when the debug flag is set; adjust as needed for your actual setup
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'meta-llama/llama-3-8b-instruct',
+ stream: true,
+ temperature: 0.999,
+ });
+
+      // Verify that debugStream was called
+ expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+      // Restore the original environment variable value
+ process.env.DEBUG_NOVITA_CHAT_COMPLETION = originalDebugValue;
+ });
+ });
+ });
+});
diff --git a/src/libs/agent-runtime/novita/index.ts b/src/libs/agent-runtime/novita/index.ts
new file mode 100644
index 000000000000..6a61c60577a6
--- /dev/null
+++ b/src/libs/agent-runtime/novita/index.ts
@@ -0,0 +1,15 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeNovitaAI = LobeOpenAICompatibleFactory({
+ baseURL: 'https://api.novita.ai/v3/openai',
+ constructorOptions: {
+ defaultHeaders: {
+ 'X-Novita-Source': 'lobechat',
+ },
+ },
+ debug: {
+ chatCompletion: () => process.env.DEBUG_NOVITA_CHAT_COMPLETION === '1',
+ },
+ provider: ModelProvider.Novita,
+});
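The factory produces a complete OpenAI-compatible runtime class. A minimal usage sketch, mirroring the call shape exercised by the test file above (the API key value is a placeholder):

```ts
import { LobeNovitaAI } from '@/libs/agent-runtime/novita';

async function demo() {
  // placeholder key; in LobeChat this is resolved from the user vault or NOVITA_API_KEY
  const runtime = new LobeNovitaAI({ apiKey: 'your-novita-api-key' });

  // same call shape as the tests above; returns a streaming response
  return runtime.chat({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'meta-llama/llama-3-8b-instruct',
    stream: true,
    temperature: 0.7,
  });
}

demo();
```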
diff --git a/src/libs/agent-runtime/types/type.ts b/src/libs/agent-runtime/types/type.ts
index 7d40b71ad59e..b2ebbc83e82f 100644
--- a/src/libs/agent-runtime/types/type.ts
+++ b/src/libs/agent-runtime/types/type.ts
@@ -33,6 +33,7 @@ export enum ModelProvider {
Minimax = 'minimax',
Mistral = 'mistral',
Moonshot = 'moonshot',
+ Novita = 'novita',
Ollama = 'ollama',
OpenAI = 'openai',
OpenRouter = 'openrouter',
diff --git a/src/server/globalConfig/index.ts b/src/server/globalConfig/index.ts
index ddc2aa009fa6..a12fa0d94f03 100644
--- a/src/server/globalConfig/index.ts
+++ b/src/server/globalConfig/index.ts
@@ -32,6 +32,7 @@ export const getServerGlobalConfig = () => {
ENABLED_ANTHROPIC,
ENABLED_MINIMAX,
ENABLED_MISTRAL,
+ ENABLED_NOVITA,
ENABLED_QWEN,
ENABLED_STEPFUN,
ENABLED_BAICHUAN,
@@ -82,6 +83,7 @@ export const getServerGlobalConfig = () => {
minimax: { enabled: ENABLED_MINIMAX },
mistral: { enabled: ENABLED_MISTRAL },
moonshot: { enabled: ENABLED_MOONSHOT },
+ novita: { enabled: ENABLED_NOVITA },
ollama: {
enabled: ENABLED_OLLAMA,
fetchOnClient: !OLLAMA_PROXY_URL,
diff --git a/src/server/routers/edge/config/__snapshots__/index.test.ts.snap b/src/server/routers/edge/config/__snapshots__/index.test.ts.snap
index ba73e371274f..14926665383e 100644
--- a/src/server/routers/edge/config/__snapshots__/index.test.ts.snap
+++ b/src/server/routers/edge/config/__snapshots__/index.test.ts.snap
@@ -93,19 +93,25 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_
{
"enabled": false,
"enabledModels": [
- "google/gemma-7b-it",
- "mistralai/mistral-7b-instruct",
+ "google/gemma-7b-it:free",
+ "mistralai/mistral-7b-instruct:free",
],
"serverModelCards": [
{
- "displayName": "google/gemma-7b-it",
+ "displayName": "Google: Gemma 7B (free)",
"enabled": true,
- "id": "google/gemma-7b-it",
+ "functionCall": false,
+ "id": "google/gemma-7b-it:free",
+ "tokens": 8192,
+ "vision": false,
},
{
- "displayName": "Mistral-7B-Instruct",
+ "displayName": "Mistral 7B Instruct (free)",
"enabled": true,
- "id": "mistralai/mistral-7b-instruct",
+ "functionCall": false,
+ "id": "mistralai/mistral-7b-instruct:free",
+ "tokens": 32768,
+ "vision": false,
},
],
}
diff --git a/src/server/routers/edge/config/index.test.ts b/src/server/routers/edge/config/index.test.ts
index 2e89555e9ea7..89e028b49b76 100644
--- a/src/server/routers/edge/config/index.test.ts
+++ b/src/server/routers/edge/config/index.test.ts
@@ -130,7 +130,7 @@ describe('configRouter', () => {
describe('OPENROUTER_MODEL_LIST', () => {
it('custom deletion, addition, and renaming of models', async () => {
process.env.OPENROUTER_MODEL_LIST =
- '-all,+google/gemma-7b-it,+mistralai/mistral-7b-instruct=Mistral-7B-Instruct';
+ '-all,+google/gemma-7b-it:free,+mistralai/mistral-7b-instruct:free';
const response = await router.getGlobalConfig();
diff --git a/src/types/user/settings/keyVaults.ts b/src/types/user/settings/keyVaults.ts
index 641f5119ff6c..46fc0db51254 100644
--- a/src/types/user/settings/keyVaults.ts
+++ b/src/types/user/settings/keyVaults.ts
@@ -28,6 +28,7 @@ export interface UserKeyVaults {
minimax?: OpenAICompatibleKeyVault;
mistral?: OpenAICompatibleKeyVault;
moonshot?: OpenAICompatibleKeyVault;
+ novita?: OpenAICompatibleKeyVault;
ollama?: OpenAICompatibleKeyVault;
openai?: OpenAICompatibleKeyVault;
openrouter?: OpenAICompatibleKeyVault;