Skip to content

Commit

Permalink
✨ feat: wenxin agent-runtime
Browse files Browse the repository at this point in the history
  • Loading branch information
arvinxx committed Jun 17, 2024
1 parent e815c47 commit 232c72b
Show file tree
Hide file tree
Showing 19 changed files with 516 additions and 1 deletion.
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@
"@aws-sdk/client-s3": "^3.583.0",
"@aws-sdk/s3-request-presigner": "^3.583.0",
"@azure/openai": "1.0.0-beta.12",
"@baiducloud/qianfan": "^0.1.0",
"@cfworker/json-schema": "^1.12.8",
"@clerk/localizations": "2.0.0",
"@clerk/nextjs": "^5.1.2",
Expand Down
46 changes: 46 additions & 0 deletions src/app/(main)/settings/llm/ProviderList/Wenxin/index.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
'use client';

import { Wenxin } from '@lobehub/icons';
import { Input } from 'antd';
import { useTranslation } from 'react-i18next';

import { WenxinProviderCard } from '@/config/modelProviders';
import { GlobalLLMProviderKey } from '@/types/user/settings';

import { KeyVaultsConfigKey } from '../../const';
import { ProviderItem } from '../../type';

const providerKey: GlobalLLMProviderKey = 'wenxin';

export const useWenxinProvider = (): ProviderItem => {
const { t } = useTranslation('modelProvider');

return {
...WenxinProviderCard,
apiKeyItems: [
{
children: (
<Input.Password
autoComplete={'new-password'}
placeholder={t(`${providerKey}.accessKey.placeholder`)}
/>
),
desc: t(`${providerKey}.accessKey.desc`),
label: t(`${providerKey}.accessKey.title`),
name: [KeyVaultsConfigKey, providerKey, 'accessKey'],
},
{
children: (
<Input.Password
autoComplete={'new-password'}
placeholder={t(`${providerKey}.secretKey.placeholder`)}
/>
),
desc: t(`${providerKey}.secretKey.desc`),
label: t(`${providerKey}.secretKey.title`),
name: [KeyVaultsConfigKey, providerKey, 'secretKey'],
},
],
title: <Wenxin.Combine size={32} type={'color'} />,
};
};
5 changes: 4 additions & 1 deletion src/app/(main)/settings/llm/ProviderList/providers.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ import { useAzureProvider } from './Azure';
import { useBedrockProvider } from './Bedrock';
import { useOllamaProvider } from './Ollama';
import { useOpenAIProvider } from './OpenAI';
import { useWenxinProvider } from './Wenxin';

const AnthropicBrand = () => {
const { isDarkMode } = useTheme();
Expand Down Expand Up @@ -78,6 +79,7 @@ export const useProviderList = (): ProviderItem[] => {
const ollamaProvider = useOllamaProvider();
const openAIProvider = useOpenAIProvider();
const bedrockProvider = useBedrockProvider();
const wenxinProvider = useWenxinProvider();

return useMemo(
() => [
Expand Down Expand Up @@ -129,7 +131,8 @@ export const useProviderList = (): ProviderItem[] => {
...PerplexityProviderCard,
title: <Perplexity.Combine size={24} type={'color'} />,
},
{
wenxinProvider,
{
...ZhiPuProviderCard,
title: <Zhipu.Combine size={32} type={'color'} />,
},
Expand Down
21 changes: 21 additions & 0 deletions src/app/api/chat/agentRuntime.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import {
} from '@/libs/agent-runtime';
import { AgentRuntime } from '@/libs/agent-runtime';
import { LobeStepfunAI } from '@/libs/agent-runtime/stepfun';
import LobeWenxinAI from '@/libs/agent-runtime/wenxin';

import { initAgentRuntimeWithUserPayload } from './agentRuntime';

Expand Down Expand Up @@ -53,6 +54,9 @@ vi.mock('@/config/llm', () => ({
TOGETHERAI_API_KEY: 'test-togetherai-key',
QWEN_API_KEY: 'test-qwen-key',
STEPFUN_API_KEY: 'test-stepfun-key',

WENXIN_ACCESS_KEY: 'test-wenxin-access-key',
WENXIN_SECRET_KEY: 'test-wenxin-secret-key',
})),
}));

Expand Down Expand Up @@ -201,6 +205,16 @@ describe('initAgentRuntimeWithUserPayload method', () => {
expect(runtime['_runtime']).toBeInstanceOf(LobeStepfunAI);
});

it.skip('Wenxin AI provider: with apikey', async () => {
const jwtPayload: JWTPayload = {
wenxinAccessKey: 'user-wenxin-accessKey',
wenxinSecretKey: 'wenxin-secret-key',
};
const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Wenxin, jwtPayload);
expect(runtime).toBeInstanceOf(AgentRuntime);
expect(runtime['_runtime']).toBeInstanceOf(LobeWenxinAI);
});

it('Unknown Provider: with apikey and endpoint, should initialize to OpenAi', async () => {
const jwtPayload: JWTPayload = {
apiKey: 'user-unknown-key',
Expand Down Expand Up @@ -338,6 +352,13 @@ describe('initAgentRuntimeWithUserPayload method', () => {
expect(runtime['_runtime']).toBeInstanceOf(LobeTogetherAI);
});

it.skip('Wenxin AI provider: without apikey', async () => {
const jwtPayload = {};
const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Wenxin, jwtPayload);
expect(runtime).toBeInstanceOf(AgentRuntime);
expect(runtime['_runtime']).toBeInstanceOf(LobeWenxinAI);
});

it('Unknown Provider', async () => {
const jwtPayload = {};
const runtime = await initAgentRuntimeWithUserPayload('unknown', jwtPayload);
Expand Down
24 changes: 24 additions & 0 deletions src/app/api/chat/wenxin/route.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
// @vitest-environment edge-runtime
import { describe, expect, it, vi } from 'vitest';

import { POST as UniverseRoute } from '../[provider]/route';
import { POST, runtime } from './route';

// 模拟 '../[provider]/route'
vi.mock('../[provider]/route', () => ({
POST: vi.fn().mockResolvedValue('mocked response'),
}));

describe('Configuration tests', () => {
  // The wenxin route declares `runtime = 'nodejs'` (see ./route.ts); the
  // original test name claimed "edge" while asserting 'nodejs'.
  it('should have runtime set to "nodejs"', () => {
    expect(runtime).toBe('nodejs');
  });
});

describe('Wenxin POST function tests', () => {
  it('should call UniverseRoute with correct parameters', async () => {
    const mockRequest = new Request('https://example.com', { method: 'POST' });
    await POST(mockRequest);
    // The route forwards to the universal handler tagged as 'wenxin' (the
    // original assertion was copy-pasted from the minimax route and also
    // used exact equality, which the `createRuntime` option would break).
    expect(UniverseRoute).toHaveBeenCalledWith(
      mockRequest,
      expect.objectContaining({ params: { provider: 'wenxin' } }),
    );
  });
});
30 changes: 30 additions & 0 deletions src/app/api/chat/wenxin/route.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import { getLLMConfig } from '@/config/llm';
import { AgentRuntime } from '@/libs/agent-runtime';
import LobeWenxinAI from '@/libs/agent-runtime/wenxin';

import { POST as UniverseRoute } from '../[provider]/route';

export const runtime = 'nodejs';

export const maxDuration = 30;

export const POST = async (req: Request) =>
  UniverseRoute(req, {
    createRuntime: (payload) => {
      const { WENXIN_ACCESS_KEY, WENXIN_SECRET_KEY } = getLLMConfig();

      // Default to server-side credentials from the environment.
      let accessKey: string | undefined = WENXIN_ACCESS_KEY;
      let secretKey: string | undefined = WENXIN_SECRET_KEY;

      // Wenxin authenticates with an access/secret key pair rather than a
      // single `apiKey`, and the wenxin JWT payload carries
      // `wenxinAccessKey`/`wenxinSecretKey` (no `apiKey`), so the original
      // `if (payload.apiKey)` gate silently ignored user-provided keys.
      // Require both halves of the pair so we never mix a user key with a
      // server key.
      if (payload?.wenxinAccessKey && payload?.wenxinSecretKey) {
        accessKey = payload.wenxinAccessKey;
        secretKey = payload.wenxinSecretKey;
      }

      const instance = new LobeWenxinAI({ accessKey, secretKey });

      return new AgentRuntime(instance);
    },
    params: { provider: 'wenxin' },
  });
6 changes: 6 additions & 0 deletions src/config/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,9 @@ export const getLLMConfig = () => {
AWS_ACCESS_KEY_ID: z.string().optional(),
AWS_SECRET_ACCESS_KEY: z.string().optional(),

WENXIN_ACCESS_KEY: z.string().optional(),
WENXIN_SECRET_KEY: z.string().optional(),

ENABLED_OLLAMA: z.boolean(),
OLLAMA_PROXY_URL: z.string().optional(),
OLLAMA_MODEL_LIST: z.string().optional(),
Expand Down Expand Up @@ -185,6 +188,9 @@ export const getLLMConfig = () => {
AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY,

WENXIN_ACCESS_KEY: process.env.WENXIN_ACCESS_KEY,
WENXIN_SECRET_KEY: process.env.WENXIN_SECRET_KEY,

ENABLED_OLLAMA: process.env.ENABLED_OLLAMA !== '0',
OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
OLLAMA_MODEL_LIST: process.env.OLLAMA_MODEL_LIST || process.env.OLLAMA_CUSTOM_MODELS,
Expand Down
1 change: 1 addition & 0 deletions src/config/modelProviders/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -82,3 +82,4 @@ export { default as StepfunProviderCard } from './stepfun';
export { default as TogetherAIProviderCard } from './togetherai';
export { default as ZeroOneProviderCard } from './zeroone';
export { default as ZhiPuProviderCard } from './zhipu';
export { default as WenxinProviderCard } from './wenxin';
11 changes: 11 additions & 0 deletions src/config/modelProviders/wenxin.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import { ModelProviderCard } from '@/types/llm';

// Provider card for Baidu Wenxin (ERNIE), accessed through the Qianfan SDK
// (`@baiducloud/qianfan`, added in this commit).
// NOTE(review): the original `// ref` here pointed at the AWS Bedrock
// model-id docs — copy-pasted from another provider card. The relevant
// reference is Baidu's Qianfan / Wenxin Workshop model documentation
// (https://cloud.baidu.com/doc/WENXINWORKSHOP) — TODO confirm exact page.
const Wenxin: ModelProviderCard = {
  // No model list shipped yet — presumably populated in a follow-up.
  chatModels: [],
  // NOTE(review): looks like the model used for the provider connectivity
  // check — verify against how `checkModel` is consumed elsewhere.
  checkModel: 'ERNIE-4.0-8K',
  id: 'wenxin',
  name: 'Wenxin',
};

export default Wenxin;
4 changes: 4 additions & 0 deletions src/const/auth.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,10 @@ export interface JWTPayload {
awsAccessKeyId?: string;
awsRegion?: string;
awsSecretAccessKey?: string;

wenxinAccessKey?: string;
wenxinSecretKey?: string;

/**
* user id
* in client db mode it's a uuid
Expand Down
5 changes: 5 additions & 0 deletions src/const/settings/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import {
QwenProviderCard,
StepfunProviderCard,
TogetherAIProviderCard,
WenxinProviderCard,
ZeroOneProviderCard,
ZhiPuProviderCard,
filterEnabledModels,
Expand Down Expand Up @@ -86,6 +87,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
enabled: false,
enabledModels: filterEnabledModels(TogetherAIProviderCard),
},
wenxin: {
enabled: false,
enabledModels: filterEnabledModels(WenxinProviderCard),
},
zeroone: {
enabled: false,
enabledModels: filterEnabledModels(ZeroOneProviderCard),
Expand Down
1 change: 1 addition & 0 deletions src/libs/agent-runtime/AgentRuntime.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import {
ModelProvider,
} from '@/libs/agent-runtime';
import { LobeStepfunAI } from '@/libs/agent-runtime/stepfun';
import LobeWenxinAI from '@/libs/agent-runtime/wenxin';

import { AgentChatOptions } from './AgentRuntime';
import { LobeBedrockAIParams } from './bedrock';
Expand Down
1 change: 1 addition & 0 deletions src/libs/agent-runtime/types/type.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ export enum ModelProvider {
Qwen = 'qwen',
Stepfun = 'stepfun',
TogetherAI = 'togetherai',
Wenxin = 'wenxin',
ZeroOne = 'zeroone',
ZhiPu = 'zhipu',
}
Expand Down
Loading

0 comments on commit 232c72b

Please sign in to comment.