
Commit ee4dd1b

berry-13 and danny-avila authored Jul 19, 2024

🚀 feat: gpt-4o-mini (danny-avila#3384)

* feat: `gpt-4o-mini`
* feat: retrieval
* fix: Update order of model token values for 'gpt-4o' and 'gpt-4o-mini'
* fix: add jsdoc

Co-authored-by: Danny Avila <[email protected]>

1 parent f6125cc, commit ee4dd1b

File tree: 8 files changed (+99, -65 lines)
 

.env.example

+3, -3

@@ -144,7 +144,7 @@ GOOGLE_KEY=user_provided
 #============#
 
 OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
+# OPENAI_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
 
 DEBUG_OPENAI=false
 
@@ -166,7 +166,7 @@ DEBUG_OPENAI=false
 
 ASSISTANTS_API_KEY=user_provided
 # ASSISTANTS_BASE_URL=
-# ASSISTANTS_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
+# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
 
 #==========================#
 # Azure Assistants API #
@@ -188,7 +188,7 @@ ASSISTANTS_API_KEY=user_provided
 # Plugins #
 #============#
 
-# PLUGIN_MODELS=gpt-4o,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
+# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
 
 DEBUG_PLUGINS=true
 
README.md

+1, -1

@@ -50,7 +50,7 @@
 - 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
 - 🌿 Fork Messages & Conversations for Advanced Context control
 - 💬 Multimodal Chat:
-  - Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o`), and Gemini Vision 📸
+  - Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o` and `gpt-4o-mini`), and Gemini Vision 📸
   - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
   - Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
     - Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️

api/models/tx.js

+3

@@ -12,6 +12,7 @@ const tokenValues = {
   '4k': { prompt: 1.5, completion: 2 },
   '16k': { prompt: 3, completion: 4 },
   'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
+  'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
   'gpt-4o': { prompt: 5, completion: 15 },
   'gpt-4-1106': { prompt: 10, completion: 30 },
   'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
@@ -54,6 +55,8 @@ const getValueKey = (model, endpoint) => {
     return 'gpt-3.5-turbo-1106';
   } else if (modelName.includes('gpt-3.5')) {
     return '4k';
+  } else if (modelName.includes('gpt-4o-mini')) {
+    return 'gpt-4o-mini';
   } else if (modelName.includes('gpt-4o')) {
     return 'gpt-4o';
   } else if (modelName.includes('gpt-4-vision')) {
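A note on why the new branch sits above the existing `gpt-4o` check (the "order of model token values" fix in the commit message): `getValueKey` matches by substring, and every `gpt-4o-mini` model name also contains `gpt-4o`, so the broader pattern would otherwise capture the mini models and bill them at `gpt-4o` rates. A minimal standalone sketch of that pitfall (plain Node, not the project's code):

```js
// Standalone illustration: substring matching makes check order significant.
const correctOrder = ['gpt-4o-mini', 'gpt-4o']; // most specific pattern first
const buggyOrder = ['gpt-4o', 'gpt-4o-mini'];   // broad pattern first

const firstMatch = (model, patterns) => patterns.find((p) => model.includes(p));

console.log(firstMatch('gpt-4o-mini-2024-07-18', correctOrder)); // 'gpt-4o-mini'
console.log(firstMatch('gpt-4o-mini-2024-07-18', buggyOrder));   // 'gpt-4o' -> wrong token rate
```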

api/models/tx.spec.js

+19

@@ -49,6 +49,12 @@ describe('getValueKey', () => {
     expect(getValueKey('gpt-4o-0125')).toBe('gpt-4o');
   });
 
+  it('should return "gpt-4o-mini" for model type of "gpt-4o-mini"', () => {
+    expect(getValueKey('gpt-4o-mini-2024-07-18')).toBe('gpt-4o-mini');
+    expect(getValueKey('openai/gpt-4o-mini')).toBe('gpt-4o-mini');
+    expect(getValueKey('gpt-4o-mini-0718')).toBe('gpt-4o-mini');
+  });
+
   it('should return "claude-3-5-sonnet" for model type of "claude-3-5-sonnet-"', () => {
     expect(getValueKey('claude-3-5-sonnet-20240620')).toBe('claude-3-5-sonnet');
     expect(getValueKey('anthropic/claude-3-5-sonnet')).toBe('claude-3-5-sonnet');
@@ -109,6 +115,19 @@ describe('getMultiplier', () => {
     );
   });
 
+  it('should return the correct multiplier for gpt-4o-mini', () => {
+    const valueKey = getValueKey('gpt-4o-mini-2024-07-18');
+    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
+      tokenValues['gpt-4o-mini'].prompt,
+    );
+    expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
+      tokenValues['gpt-4o-mini'].completion,
+    );
+    expect(getMultiplier({ valueKey, tokenType: 'completion' })).not.toBe(
+      tokenValues['gpt-4-1106'].completion,
+    );
+  });
+
   it('should derive the valueKey from the model if not provided for new models', () => {
     expect(
       getMultiplier({ tokenType: 'prompt', model: 'gpt-3.5-turbo-1106-some-other-info' }),
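For context, the new multipliers (`prompt: 0.15`, `completion: 0.6`) line up with OpenAI's published list prices for `gpt-4o-mini` in USD per million tokens. Assuming that interpretation (how LibreChat then converts multipliers into its own token-credit balance is outside this diff), a rough cost estimate looks like the sketch below; `estimateCostUSD` is a hypothetical helper, not part of the codebase:

```js
// Hypothetical helper: rough USD cost from prompt/completion multipliers,
// assuming the multipliers are USD per 1,000,000 tokens.
const tokenValues = { 'gpt-4o-mini': { prompt: 0.15, completion: 0.6 } };

function estimateCostUSD(model, promptTokens, completionTokens) {
  const { prompt, completion } = tokenValues[model];
  return (promptTokens * prompt + completionTokens * completion) / 1e6;
}

console.log(estimateCostUSD('gpt-4o-mini', 10000, 2000)); // 0.0027, i.e. roughly $0.003
```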

api/utils/tokens.js

+66, -56

@@ -1,45 +1,6 @@
 const z = require('zod');
 const { EModelEndpoint } = require('librechat-data-provider');
 
-const models = [
-  'text-davinci-003',
-  'text-davinci-002',
-  'text-davinci-001',
-  'text-curie-001',
-  'text-babbage-001',
-  'text-ada-001',
-  'davinci',
-  'curie',
-  'babbage',
-  'ada',
-  'code-davinci-002',
-  'code-davinci-001',
-  'code-cushman-002',
-  'code-cushman-001',
-  'davinci-codex',
-  'cushman-codex',
-  'text-davinci-edit-001',
-  'code-davinci-edit-001',
-  'text-embedding-ada-002',
-  'text-similarity-davinci-001',
-  'text-similarity-curie-001',
-  'text-similarity-babbage-001',
-  'text-similarity-ada-001',
-  'text-search-davinci-doc-001',
-  'text-search-curie-doc-001',
-  'text-search-babbage-doc-001',
-  'text-search-ada-doc-001',
-  'code-search-babbage-code-001',
-  'code-search-ada-code-001',
-  'gpt2',
-  'gpt-4',
-  'gpt-4-0314',
-  'gpt-4-32k',
-  'gpt-4-32k-0314',
-  'gpt-3.5-turbo',
-  'gpt-3.5-turbo-0301',
-];
-
 const openAIModels = {
   'gpt-4': 8187, // -5 from max
   'gpt-4-0613': 8187, // -5 from max
@@ -49,6 +10,7 @@ const openAIModels = {
   'gpt-4-1106': 127990, // -10 from max
   'gpt-4-0125': 127990, // -10 from max
   'gpt-4o': 127990, // -10 from max
+  'gpt-4o-mini': 127990, // -10 from max
   'gpt-4-turbo': 127990, // -10 from max
   'gpt-4-vision': 127990, // -10 from max
   'gpt-3.5-turbo': 16375, // -10 from max
@@ -101,7 +63,6 @@ const anthropicModels = {
 
 const aggregateModels = { ...openAIModels, ...googleModels, ...anthropicModels, ...cohereModels };
 
-// Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)
 const maxTokensMap = {
   [EModelEndpoint.azureOpenAI]: openAIModels,
   [EModelEndpoint.openAI]: aggregateModels,
@@ -110,6 +71,24 @@ const maxTokensMap = {
   [EModelEndpoint.anthropic]: anthropicModels,
 };
 
+/**
+ * Finds the first matching pattern in the tokens map.
+ * @param {string} modelName
+ * @param {Record<string, number>} tokensMap
+ * @returns {string|null}
+ */
+function findMatchingPattern(modelName, tokensMap) {
+  const keys = Object.keys(tokensMap);
+  for (let i = keys.length - 1; i >= 0; i--) {
+    const modelKey = keys[i];
+    if (modelName.includes(modelKey)) {
+      return modelKey;
+    }
+  }
+
+  return null;
+}
+
 /**
  * Retrieves the maximum tokens for a given model name. If the exact model name isn't found,
  * it searches for partial matches within the model name, checking keys in reverse order.
@@ -143,12 +122,11 @@ function getModelMaxTokens(modelName, endpoint = EModelEndpoint.openAI, endpoint
     return tokensMap[modelName];
   }
 
-  const keys = Object.keys(tokensMap);
-  for (let i = keys.length - 1; i >= 0; i--) {
-    if (modelName.includes(keys[i])) {
-      const result = tokensMap[keys[i]];
-      return result?.context ?? result;
-    }
+  const matchedPattern = findMatchingPattern(modelName, tokensMap);
+
+  if (matchedPattern) {
+    const result = tokensMap[matchedPattern];
+    return result?.context ?? result;
   }
 
   return undefined;
@@ -181,15 +159,8 @@ function matchModelName(modelName, endpoint = EModelEndpoint.openAI) {
     return modelName;
   }
 
-  const keys = Object.keys(tokensMap);
-  for (let i = keys.length - 1; i >= 0; i--) {
-    const modelKey = keys[i];
-    if (modelName.includes(modelKey)) {
-      return modelKey;
-    }
-  }
-
-  return modelName;
+  const matchedPattern = findMatchingPattern(modelName, tokensMap);
+  return matchedPattern || modelName;
}
 
 const modelSchema = z.object({
@@ -241,8 +212,47 @@ function processModelData(input) {
   return tokenConfig;
 }
 
+const tiktokenModels = new Set([
+  'text-davinci-003',
+  'text-davinci-002',
+  'text-davinci-001',
+  'text-curie-001',
+  'text-babbage-001',
+  'text-ada-001',
+  'davinci',
+  'curie',
+  'babbage',
+  'ada',
+  'code-davinci-002',
+  'code-davinci-001',
+  'code-cushman-002',
+  'code-cushman-001',
+  'davinci-codex',
+  'cushman-codex',
+  'text-davinci-edit-001',
+  'code-davinci-edit-001',
+  'text-embedding-ada-002',
+  'text-similarity-davinci-001',
+  'text-similarity-curie-001',
+  'text-similarity-babbage-001',
+  'text-similarity-ada-001',
+  'text-search-davinci-doc-001',
+  'text-search-curie-doc-001',
+  'text-search-babbage-doc-001',
+  'text-search-ada-doc-001',
+  'code-search-babbage-code-001',
+  'code-search-ada-code-001',
+  'gpt2',
+  'gpt-4',
+  'gpt-4-0314',
+  'gpt-4-32k',
+  'gpt-4-32k-0314',
+  'gpt-3.5-turbo',
+  'gpt-3.5-turbo-0301',
+]);
+
 module.exports = {
-  tiktokenModels: new Set(models),
+  tiktokenModels,
   maxTokensMap,
   inputSchema,
   modelSchema,
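The refactor above deduplicates the reverse-order partial-match loop that `getModelMaxTokens` and `matchModelName` previously each carried, and the new `gpt-4o-mini` entry means dated or vendor-prefixed variants now resolve through that same path. A small usage sketch, assuming both functions remain exported from this module as before (the require path is illustrative only):

```js
// Illustrative require path; adjust to however the module is resolved in your code.
const { getModelMaxTokens, matchModelName } = require('./api/utils/tokens');

getModelMaxTokens('gpt-4o-mini');            // 127990 (exact key in openAIModels)
getModelMaxTokens('gpt-4o-mini-2024-07-18'); // 127990 (partial match via findMatchingPattern)
matchModelName('openai/gpt-4o-mini');        // 'gpt-4o-mini'

getModelMaxTokens('totally-unknown-model');  // undefined
matchModelName('totally-unknown-model');     // 'totally-unknown-model' (falls back to the input)
```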

packages/data-provider/package.json

+1, -1

@@ -1,6 +1,6 @@
 {
   "name": "librechat-data-provider",
-  "version": "0.7.2",
+  "version": "0.7.3",
   "description": "data services for librechat apps",
   "main": "dist/index.js",
   "module": "dist/index.es.js",

packages/data-provider/src/config.ts

+5, -3

@@ -12,6 +12,8 @@ export const defaultSocialLogins = ['google', 'facebook', 'openid', 'github', 'd
 export const defaultRetrievalModels = [
   'gpt-4o',
   'gpt-4o-2024-05-13',
+  'gpt-4o-mini',
+  'gpt-4o-mini-2024-07-18',
   'gpt-4-turbo-preview',
   'gpt-3.5-turbo-0125',
   'gpt-4-0125-preview',
@@ -530,7 +532,7 @@ const sharedOpenAIModels = [
 
 export const defaultModels = {
   [EModelEndpoint.azureAssistants]: sharedOpenAIModels,
-  [EModelEndpoint.assistants]: ['gpt-4o', ...sharedOpenAIModels],
+  [EModelEndpoint.assistants]: ['gpt-4o-mini', 'gpt-4o', ...sharedOpenAIModels],
   [EModelEndpoint.google]: [
     'gemini-pro',
     'gemini-pro-vision',
@@ -559,13 +561,12 @@ export const defaultModels = {
     'claude-instant-1-100k',
   ],
   [EModelEndpoint.openAI]: [
+    'gpt-4o-mini',
     'gpt-4o',
     ...sharedOpenAIModels,
     'gpt-4-vision-preview',
     'gpt-3.5-turbo-instruct-0914',
-    'gpt-3.5-turbo-0301',
     'gpt-3.5-turbo-instruct',
-    'text-davinci-003',
   ],
 };
 
@@ -621,6 +622,7 @@ export const supportsBalanceCheck = {
 
 export const visionModels = [
   'gpt-4o',
+  'gpt-4o-mini',
   'gpt-4-turbo',
   'gpt-4-vision',
   'llava',
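These data-provider changes back the "retrieval" part of the commit: `gpt-4o-mini` (and its dated variant) joins `defaultRetrievalModels`, and the base name is added to `visionModels`, so downstream checks that consult these lists treat the model as retrieval- and vision-capable. A hedged sketch of such a consumer, assuming both arrays are re-exported from the package root like the other config exports (the real gating logic in LibreChat may differ from this):

```js
// Illustrative consumer of the updated lists; not the project's actual gating code.
const { defaultRetrievalModels, visionModels } = require('librechat-data-provider');

const supportsRetrieval = (model) => defaultRetrievalModels.includes(model);

// Vision routing typically tolerates dated variants, so match by substring here.
const isVisionCapable = (model) => visionModels.some((visionModel) => model.includes(visionModel));

console.log(supportsRetrieval('gpt-4o-mini'));            // true (added in this commit)
console.log(isVisionCapable('gpt-4o-mini-2024-07-18'));   // true
```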

packages/data-provider/src/schemas.ts

+1, -1

@@ -219,7 +219,7 @@ export enum EAgent {
 
 export const agentOptionSettings = {
   model: {
-    default: 'gpt-4o',
+    default: 'gpt-4o-mini',
   },
   temperature: {
     min: 0,
