Skip to content

Commit 876e254

Browse files
danny-avila and MichielMAnalytics
authored and committed
✨ feat: GPT-5 Token Limits, Rates, Icon, Reasoning Support
1 parent 78acb16 commit 876e254

5 files changed

Lines changed: 151 additions & 4 deletions

File tree

api/models/tx.js

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,9 @@ const tokenValues = Object.assign(
8787
'gpt-4.1': { prompt: 2, completion: 8 },
8888
'gpt-4.5': { prompt: 75, completion: 150 },
8989
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
90+
'gpt-5': { prompt: 1.25, completion: 10 },
91+
'gpt-5-mini': { prompt: 0.25, completion: 2 },
92+
'gpt-5-nano': { prompt: 0.05, completion: 0.4 },
9093
'gpt-4o': { prompt: 2.5, completion: 10 },
9194
'gpt-4o-2024-05-13': { prompt: 5, completion: 15 },
9295
'gpt-4-1106': { prompt: 10, completion: 30 },
@@ -217,6 +220,12 @@ const getValueKey = (model, endpoint) => {
217220
return 'gpt-4.1';
218221
} else if (modelName.includes('gpt-4o-2024-05-13')) {
219222
return 'gpt-4o-2024-05-13';
223+
} else if (modelName.includes('gpt-5-nano')) {
224+
return 'gpt-5-nano';
225+
} else if (modelName.includes('gpt-5-mini')) {
226+
return 'gpt-5-mini';
227+
} else if (modelName.includes('gpt-5')) {
228+
return 'gpt-5';
220229
} else if (modelName.includes('gpt-4o-mini')) {
221230
return 'gpt-4o-mini';
222231
} else if (modelName.includes('gpt-4o')) {

api/models/tx.spec.js

Lines changed: 74 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,14 @@ describe('getValueKey', () => {
2525
expect(getValueKey('gpt-4-some-other-info')).toBe('8k');
2626
});
2727

28-
it('should return undefined for model names that do not match any known patterns', () => {
29-
expect(getValueKey('gpt-5-some-other-info')).toBeUndefined();
28+
it('should return "gpt-5" for model name containing "gpt-5"', () => {
29+
expect(getValueKey('gpt-5-some-other-info')).toBe('gpt-5');
30+
expect(getValueKey('gpt-5-2025-01-30')).toBe('gpt-5');
31+
expect(getValueKey('gpt-5-2025-01-30-0130')).toBe('gpt-5');
32+
expect(getValueKey('openai/gpt-5')).toBe('gpt-5');
33+
expect(getValueKey('openai/gpt-5-2025-01-30')).toBe('gpt-5');
34+
expect(getValueKey('gpt-5-turbo')).toBe('gpt-5');
35+
expect(getValueKey('gpt-5-0130')).toBe('gpt-5');
3036
});
3137

3238
it('should return "gpt-3.5-turbo-1106" for model name containing "gpt-3.5-turbo-1106"', () => {
@@ -84,6 +90,29 @@ describe('getValueKey', () => {
8490
expect(getValueKey('gpt-4.1-nano-0125')).toBe('gpt-4.1-nano');
8591
});
8692

93+
it('should return "gpt-5" for model type of "gpt-5"', () => {
94+
expect(getValueKey('gpt-5-2025-01-30')).toBe('gpt-5');
95+
expect(getValueKey('gpt-5-2025-01-30-0130')).toBe('gpt-5');
96+
expect(getValueKey('openai/gpt-5')).toBe('gpt-5');
97+
expect(getValueKey('openai/gpt-5-2025-01-30')).toBe('gpt-5');
98+
expect(getValueKey('gpt-5-turbo')).toBe('gpt-5');
99+
expect(getValueKey('gpt-5-0130')).toBe('gpt-5');
100+
});
101+
102+
it('should return "gpt-5-mini" for model type of "gpt-5-mini"', () => {
103+
expect(getValueKey('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
104+
expect(getValueKey('openai/gpt-5-mini')).toBe('gpt-5-mini');
105+
expect(getValueKey('gpt-5-mini-0130')).toBe('gpt-5-mini');
106+
expect(getValueKey('gpt-5-mini-2025-01-30-0130')).toBe('gpt-5-mini');
107+
});
108+
109+
it('should return "gpt-5-nano" for model type of "gpt-5-nano"', () => {
110+
expect(getValueKey('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
111+
expect(getValueKey('openai/gpt-5-nano')).toBe('gpt-5-nano');
112+
expect(getValueKey('gpt-5-nano-0130')).toBe('gpt-5-nano');
113+
expect(getValueKey('gpt-5-nano-2025-01-30-0130')).toBe('gpt-5-nano');
114+
});
115+
87116
it('should return "gpt-4o" for model type of "gpt-4o"', () => {
88117
expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
89118
expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');
@@ -207,6 +236,48 @@ describe('getMultiplier', () => {
207236
);
208237
});
209238

239+
it('should return the correct multiplier for gpt-5', () => {
240+
const valueKey = getValueKey('gpt-5-2025-01-30');
241+
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5'].prompt);
242+
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
243+
tokenValues['gpt-5'].completion,
244+
);
245+
expect(getMultiplier({ model: 'gpt-5-preview', tokenType: 'prompt' })).toBe(
246+
tokenValues['gpt-5'].prompt,
247+
);
248+
expect(getMultiplier({ model: 'openai/gpt-5', tokenType: 'completion' })).toBe(
249+
tokenValues['gpt-5'].completion,
250+
);
251+
});
252+
253+
it('should return the correct multiplier for gpt-5-mini', () => {
254+
const valueKey = getValueKey('gpt-5-mini-2025-01-30');
255+
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-mini'].prompt);
256+
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
257+
tokenValues['gpt-5-mini'].completion,
258+
);
259+
expect(getMultiplier({ model: 'gpt-5-mini-preview', tokenType: 'prompt' })).toBe(
260+
tokenValues['gpt-5-mini'].prompt,
261+
);
262+
expect(getMultiplier({ model: 'openai/gpt-5-mini', tokenType: 'completion' })).toBe(
263+
tokenValues['gpt-5-mini'].completion,
264+
);
265+
});
266+
267+
it('should return the correct multiplier for gpt-5-nano', () => {
268+
const valueKey = getValueKey('gpt-5-nano-2025-01-30');
269+
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-nano'].prompt);
270+
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
271+
tokenValues['gpt-5-nano'].completion,
272+
);
273+
expect(getMultiplier({ model: 'gpt-5-nano-preview', tokenType: 'prompt' })).toBe(
274+
tokenValues['gpt-5-nano'].prompt,
275+
);
276+
expect(getMultiplier({ model: 'openai/gpt-5-nano', tokenType: 'completion' })).toBe(
277+
tokenValues['gpt-5-nano'].completion,
278+
);
279+
});
280+
210281
it('should return the correct multiplier for gpt-4o', () => {
211282
const valueKey = getValueKey('gpt-4o-2024-08-06');
212283
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
@@ -307,7 +378,7 @@ describe('getMultiplier', () => {
307378
});
308379

309380
it('should return defaultRate if derived valueKey does not match any known patterns', () => {
310-
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-5-some-other-info' })).toBe(
381+
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-10-some-other-info' })).toBe(
311382
defaultRate,
312383
);
313384
});

api/utils/tokens.js

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@ const openAIModels = {
1919
'gpt-4.1': 1047576,
2020
'gpt-4.1-mini': 1047576,
2121
'gpt-4.1-nano': 1047576,
22+
'gpt-5': 400000,
23+
'gpt-5-mini': 400000,
24+
'gpt-5-nano': 400000,
2225
'gpt-4o': 127500, // -500 from max
2326
'gpt-4o-mini': 127500, // -500 from max
2427
'gpt-4o-2024-05-13': 127500, // -500 from max
@@ -252,6 +255,9 @@ const modelMaxOutputs = {
252255
o1: 32268, // -500 from max: 32,768
253256
'o1-mini': 65136, // -500 from max: 65,536
254257
'o1-preview': 32268, // -500 from max: 32,768
258+
'gpt-5': 128000,
259+
'gpt-5-mini': 128000,
260+
'gpt-5-nano': 128000,
255261
'gpt-oss-20b': 131000,
256262
'gpt-oss-120b': 131000,
257263
system_default: 1024,

api/utils/tokens.spec.js

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -156,6 +156,35 @@ describe('getModelMaxTokens', () => {
156156
);
157157
});
158158

159+
test('should return correct tokens for gpt-5 matches', () => {
160+
expect(getModelMaxTokens('gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
161+
expect(getModelMaxTokens('gpt-5-preview')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
162+
expect(getModelMaxTokens('openai/gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
163+
expect(getModelMaxTokens('gpt-5-2025-01-30')).toBe(
164+
maxTokensMap[EModelEndpoint.openAI]['gpt-5'],
165+
);
166+
});
167+
168+
test('should return correct tokens for gpt-5-mini matches', () => {
169+
expect(getModelMaxTokens('gpt-5-mini')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini']);
170+
expect(getModelMaxTokens('gpt-5-mini-preview')).toBe(
171+
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
172+
);
173+
expect(getModelMaxTokens('openai/gpt-5-mini')).toBe(
174+
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
175+
);
176+
});
177+
178+
test('should return correct tokens for gpt-5-nano matches', () => {
179+
expect(getModelMaxTokens('gpt-5-nano')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano']);
180+
expect(getModelMaxTokens('gpt-5-nano-preview')).toBe(
181+
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
182+
);
183+
expect(getModelMaxTokens('openai/gpt-5-nano')).toBe(
184+
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
185+
);
186+
});
187+
159188
test('should return correct tokens for Anthropic models', () => {
160189
const models = [
161190
'claude-2.1',
@@ -363,6 +392,19 @@ describe('getModelMaxTokens', () => {
363392
});
364393
});
365394

395+
test('should return correct max output tokens for GPT-5 models', () => {
396+
const { getModelMaxOutputTokens } = require('./tokens');
397+
['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
398+
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
399+
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
400+
maxOutputTokensMap[EModelEndpoint.openAI][model],
401+
);
402+
expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
403+
maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
404+
);
405+
});
406+
});
407+
366408
test('should return correct max output tokens for GPT-OSS models', () => {
367409
const { getModelMaxOutputTokens } = require('./tokens');
368410
['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
@@ -446,6 +488,25 @@ describe('matchModelName', () => {
446488
expect(matchModelName('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
447489
});
448490

491+
it('should return the closest matching key for gpt-5 matches', () => {
492+
expect(matchModelName('openai/gpt-5')).toBe('gpt-5');
493+
expect(matchModelName('gpt-5-preview')).toBe('gpt-5');
494+
expect(matchModelName('gpt-5-2025-01-30')).toBe('gpt-5');
495+
expect(matchModelName('gpt-5-2025-01-30-0130')).toBe('gpt-5');
496+
});
497+
498+
it('should return the closest matching key for gpt-5-mini matches', () => {
499+
expect(matchModelName('openai/gpt-5-mini')).toBe('gpt-5-mini');
500+
expect(matchModelName('gpt-5-mini-preview')).toBe('gpt-5-mini');
501+
expect(matchModelName('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
502+
});
503+
504+
it('should return the closest matching key for gpt-5-nano matches', () => {
505+
expect(matchModelName('openai/gpt-5-nano')).toBe('gpt-5-nano');
506+
expect(matchModelName('gpt-5-nano-preview')).toBe('gpt-5-nano');
507+
expect(matchModelName('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
508+
});
509+
449510
// Tests for Google models
450511
it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
451512
expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');

client/src/components/Endpoints/MessageEndpointIcon.tsx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ type EndpointIcon = {
2424

2525
function getOpenAIColor(_model: string | null | undefined) {
2626
const model = _model?.toLowerCase() ?? '';
27-
if (model && /\b(o\d)\b/i.test(model)) {
27+
if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9]\b/i.test(model))) {
2828
return '#000000';
2929
}
3030
return model.includes('gpt-4') ? '#AB68FF' : '#19C37D';

0 commit comments

Comments (0)