
♻️ refactor: update gpt-3.5-turbo model card (lobehub#1449)
* ♻️ refactor: update gpt-3.5 model info

* ✅ test: update test
arvinxx committed Mar 3, 2024
1 parent 61bf0ec commit d0be0c7
Showing 10 changed files with 76 additions and 90 deletions.
5 changes: 2 additions & 3 deletions src/chains/__tests__/summaryTitle.test.ts
@@ -2,7 +2,6 @@ import { Mock, describe, expect, it, vi } from 'vitest';

import { chatHelpers } from '@/store/chat/helpers';
import { globalHelpers } from '@/store/global/helpers';
import { LanguageModel } from '@/types/llm';
import { OpenAIChatMessage } from '@/types/openai/chat';

import { chainSummaryTitle } from '../summaryTitle';
@@ -50,7 +49,7 @@ describe('chainSummaryTitle', () => {
role: 'user',
},
],
model: LanguageModel.GPT3_5_16K,
model: 'gpt-4-turbo-preview',
});

// Verify that getMessagesTokenCount was called with the correct messages
@@ -96,7 +95,7 @@ describe('chainSummaryTitle', () => {
role: 'user',
},
],
model: 'gpt-3.5-turbo-16k',
// No model specified since the token count is below the limit
});

// Verify that getMessagesTokenCount was called with the correct messages
9 changes: 4 additions & 5 deletions src/chains/summaryTitle.ts
@@ -1,6 +1,5 @@
import { chatHelpers } from '@/store/chat/helpers';
import { globalHelpers } from '@/store/global/helpers';
import { LanguageModel } from '@/types/llm';
import { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';

export const chainSummaryTitle = async (
@@ -20,11 +19,11 @@ export const chainSummaryTitle = async (
role: 'user',
},
];
// If the token count exceeds 4k, use the GPT-3.5 16K model
// If the token count exceeds 16k, use the GPT-4 Turbo model
const tokens = await chatHelpers.getMessagesTokenCount(finalMessages);
let model: LanguageModel | undefined = undefined;
if (tokens > 4000) {
model = LanguageModel.GPT3_5_16K;
let model: string | undefined = undefined;
if (tokens > 16_000) {
model = 'gpt-4-turbo-preview';
}

return {
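For orientation, here is a minimal sketch of the model-selection logic in `chainSummaryTitle` after this change. The helper name `buildSummaryPayload` and any payload fields beyond `messages`/`model` are illustrative assumptions; only the 16k threshold, the fall-back-to-no-model behaviour, and the `gpt-4-turbo-preview` id come from the hunk above.

```ts
import { chatHelpers } from '@/store/chat/helpers';
import { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';

// Sketch: only pin a model when the prompt is too large for the default one.
// Under 16k tokens the payload carries no `model`, so the caller's default applies.
const buildSummaryPayload = async (
  finalMessages: OpenAIChatMessage[],
): Promise<Partial<ChatStreamPayload>> => {
  const tokens = await chatHelpers.getMessagesTokenCount(finalMessages);

  let model: string | undefined = undefined;
  if (tokens > 16_000) {
    model = 'gpt-4-turbo-preview';
  }

  return { messages: finalMessages, model };
};
```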
3 changes: 2 additions & 1 deletion src/config/modelProviders/openai.ts
@@ -8,11 +8,12 @@ const OpenAI: ModelProviderCard = {
displayName: 'GPT-3.5 Turbo',
functionCall: true,
id: 'gpt-3.5-turbo',
tokens: 4096,
tokens: 16_385,
},
{
displayName: 'GPT-3.5 Turbo (0125)',
functionCall: true,
hidden: true,
id: 'gpt-3.5-turbo-0125',
tokens: 16_385,
},
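As a consolidated view, the two affected entries in the OpenAI provider card now read roughly as below. The `chatModels` wrapper and the `ModelProviderCard` import path follow the structure visible in this file and the tests; anything outside the hunk is an assumption.

```ts
import { ModelProviderCard } from '@/types/llm';

const OpenAI: Partial<ModelProviderCard> = {
  chatModels: [
    {
      displayName: 'GPT-3.5 Turbo',
      functionCall: true,
      id: 'gpt-3.5-turbo',
      // The alias now resolves to the 16k-context snapshot, so the card
      // advertises 16,385 tokens instead of the old 4,096.
      tokens: 16_385,
    },
    {
      displayName: 'GPT-3.5 Turbo (0125)',
      functionCall: true,
      // Newly hidden (assumption: hidden cards are omitted from the default model picker).
      hidden: true,
      id: 'gpt-3.5-turbo-0125',
      tokens: 16_385,
    },
    // ...remaining OpenAI entries are unchanged
  ],
};
```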
3 changes: 1 addition & 2 deletions src/features/ChatInput/ActionBar/Token/TokenTag.tsx
@@ -13,7 +13,6 @@ import { useSessionStore } from '@/store/session';
import { agentSelectors } from '@/store/session/selectors';
import { useToolStore } from '@/store/tool';
import { toolSelectors } from '@/store/tool/selectors';
import { LanguageModel } from '@/types/llm';

const format = (number: number) => numeral(number).format('0,0');

@@ -27,7 +26,7 @@ const Token = memo(() => {

const [systemRole, model] = useSessionStore((s) => [
agentSelectors.currentAgentSystemRole(s),
agentSelectors.currentAgentModel(s) as LanguageModel,
agentSelectors.currentAgentModel(s) as string,
]);

const maxTokens = useGlobalStore(modelProviderSelectors.modelMaxToken(model));
55 changes: 55 additions & 0 deletions src/store/global/slices/settings/selectors/__snapshots__/modelProvider.test.ts.snap
@@ -31,3 +31,58 @@ exports[`modelProviderSelectors > CUSTOM_MODELS > custom deletion, addition, and
},
]
`;

exports[`modelProviderSelectors > CUSTOM_MODELS > should work correct with gpt-4 1`] = `
[
{
"displayName": "GPT-3.5 Turbo (1106)",
"functionCall": true,
"hidden": undefined,
"id": "gpt-3.5-turbo-1106",
"tokens": 16385,
},
{
"description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务",
"displayName": "GPT-3.5 Turbo",
"functionCall": true,
"hidden": undefined,
"id": "gpt-3.5-turbo",
"tokens": 16385,
},
{
"displayName": "GPT-3.5 Turbo 16K",
"hidden": undefined,
"id": "gpt-3.5-turbo-16k",
"tokens": 16385,
},
{
"displayName": "GPT-4",
"functionCall": true,
"hidden": undefined,
"id": "gpt-4",
"tokens": 8192,
},
{
"displayName": "gpt-4-32k",
"functionCall": true,
"hidden": undefined,
"id": "gpt-4-32k",
"tokens": 32768,
},
{
"displayName": "GPT-4 Turbo Preview (1106)",
"functionCall": true,
"hidden": undefined,
"id": "gpt-4-1106-preview",
"tokens": 128000,
},
{
"description": "GPT-4 视觉预览版,支持视觉任务",
"displayName": "GPT-4 Turbo Vision (Preview)",
"hidden": undefined,
"id": "gpt-4-vision-preview",
"tokens": 128000,
"vision": true,
},
]
`;
46 changes: 1 addition & 45 deletions src/store/global/slices/settings/selectors/modelProvider.test.ts
@@ -33,51 +33,7 @@ describe('modelProviderSelectors', () => {

const result = modelProviderSelectors.modelSelectList(s).filter((r) => r.enabled);

expect(result[0].chatModels).toEqual([
{
displayName: 'GPT-3.5 Turbo (1106)',
functionCall: true,
id: 'gpt-3.5-turbo-1106',
tokens: 16385,
},
{
description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
displayName: 'GPT-3.5 Turbo',
functionCall: true,
id: 'gpt-3.5-turbo',
tokens: 4096,
},
{
displayName: 'GPT-3.5 Turbo 16K',
id: 'gpt-3.5-turbo-16k',
tokens: 16385,
},
{
displayName: 'GPT-4',
functionCall: true,
id: 'gpt-4',
tokens: 8192,
},
{
displayName: 'gpt-4-32k',
functionCall: true,
id: 'gpt-4-32k',
tokens: 32768,
},
{
displayName: 'GPT-4 Turbo Preview (1106)',
functionCall: true,
id: 'gpt-4-1106-preview',
tokens: 128000,
},
{
description: 'GPT-4 视觉预览版,支持视觉任务',
displayName: 'GPT-4 Turbo Vision (Preview)',
id: 'gpt-4-vision-preview',
tokens: 128000,
vision: true,
},
]);
expect(result[0].chatModels).toMatchSnapshot();
});
it('duplicate naming model', () => {
const s = merge(initialSettingsState, {
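The inline `toEqual` list above is replaced with a snapshot assertion, so the expected model list now lives in the generated `.snap` file shown earlier. A minimal sketch of the pattern, with illustrative data standing in for the real selector output:

```ts
import { describe, expect, it } from 'vitest';

describe('modelProviderSelectors', () => {
  it('keeps the enabled provider model list stable', () => {
    // Illustrative stand-in; the real test derives `result` from the merged settings state.
    const result = [{ chatModels: [{ id: 'gpt-3.5-turbo', tokens: 16_385 }] }];

    // Vitest serializes the value into __snapshots__/*.snap on first run
    // and compares against it afterwards (update with `vitest -u`).
    expect(result[0].chatModels).toMatchSnapshot();
  });
});
```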
8 changes: 3 additions & 5 deletions src/store/global/slices/settings/selectors/selectors.test.ts
@@ -1,5 +1,3 @@
import { LanguageModel } from '@/types/llm';

import { GlobalStore } from '../../../store';
import { settingsSelectors } from './settings';

@@ -18,7 +16,7 @@ describe('settingsSelectors', () => {
defaultAgent: {
config: {
systemRole: '',
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {},
tts: {
showAllLocaleVoice: false,
@@ -65,7 +63,7 @@ describe('settingsSelectors', () => {
defaultAgent: {
config: {
systemRole: 'user',
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
},
meta: {
avatar: 'agent-avatar.jpg',
@@ -88,7 +86,7 @@ describe('settingsSelectors', () => {
defaultAgent: {
config: {
systemRole: 'user',
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {
temperature: 0.7,
},
5 changes: 2 additions & 3 deletions src/store/session/slices/agent/selectors.ts
@@ -7,7 +7,6 @@ import { useGlobalStore } from '@/store/global';
import { settingsSelectors } from '@/store/global/selectors';
import { SessionStore } from '@/store/session';
import { LobeAgentTTSConfig } from '@/types/agent';
import { LanguageModel } from '@/types/llm';
import { MetaData } from '@/types/meta';
import { merge } from '@/utils/merge';

@@ -29,10 +28,10 @@ const currentAgentSystemRole = (s: SessionStore) => {
return currentAgentConfig(s).systemRole;
};

const currentAgentModel = (s: SessionStore): LanguageModel | string => {
const currentAgentModel = (s: SessionStore): string => {
const config = currentAgentConfig(s);

return config?.model || LanguageModel.GPT3_5;
return config?.model || 'gpt-3.5-turbo';
};

const currentAgentModelProvider = (s: SessionStore) => {
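A short usage sketch of the selector after the enum removal; the hook wrapper below is illustrative, while the store and selector imports match those visible in TokenTag.tsx above:

```ts
import { useSessionStore } from '@/store/session';
import { agentSelectors } from '@/store/session/selectors';

// The model id is now a plain string; agents without an explicit model
// fall back to 'gpt-3.5-turbo' inside currentAgentModel.
const useCurrentAgentModelId = (): string =>
  useSessionStore((s) => agentSelectors.currentAgentModel(s));
```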
13 changes: 6 additions & 7 deletions src/store/session/slices/session/selectors/list.test.ts
@@ -1,5 +1,4 @@
import type { SessionStore } from '@/store/session';
import { LanguageModel } from '@/types/llm';
import { LobeAgentSession, LobeSessionType } from '@/types/session';

import { initLobeSession } from '../initialState';
@@ -12,7 +11,7 @@ describe('currentSession', () => {
{
id: '1',
config: {
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {},
systemRole: 'system-role',
},
@@ -21,7 +20,7 @@ describe('currentSession', () => {
{
id: '2',
config: {
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {},
systemRole: 'system-role',
},
@@ -46,7 +45,7 @@ describe('currentSessionSafe', () => {
{
id: '1',
config: {
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {},
systemRole: 'system-role',
},
@@ -55,7 +54,7 @@ describe('currentSessionSafe', () => {
{
id: '2',
config: {
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {},
systemRole: 'system-role',
},
@@ -80,7 +79,7 @@ describe('getSessionById', () => {
{
id: '1',
config: {
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {},
systemRole: 'system-role',
},
@@ -89,7 +88,7 @@ describe('getSessionById', () => {
{
id: '2',
config: {
model: LanguageModel.GPT3_5,
model: 'gpt-3.5-turbo',
params: {},
systemRole: 'system-role',
},
19 changes: 0 additions & 19 deletions src/types/llm.ts
@@ -1,22 +1,3 @@
/**
* LLM 模型
*/
export enum LanguageModel {
/**
* GPT 3.5 Turbo
*/
GPT3_5 = 'gpt-3.5-turbo',
GPT3_5_1106 = 'gpt-3.5-turbo-1106',
GPT3_5_16K = 'gpt-3.5-turbo-16k',
/**
* GPT 4
*/
GPT4 = 'gpt-4',
GPT4_32K = 'gpt-4-32k',
GPT4_PREVIEW = 'gpt-4-0125-preview',
GPT4_VISION_PREVIEW = 'gpt-4-vision-preview',
}

export interface ChatModelCard {
description?: string;
displayName?: string;
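With the `LanguageModel` enum removed, model ids are plain strings whose metadata lives in `ChatModelCard` entries (id, tokens, displayName, and so on, as in the snapshot above). A hedged sketch of resolving that metadata for an arbitrary id — the helpers are illustrative rather than part of this commit, and the 4,096-token fallback is an assumption:

```ts
import { ChatModelCard } from '@/types/llm';

// Illustrative helpers: look up a card by its string id and read its context window.
const findModelCard = (
  cards: ChatModelCard[],
  modelId: string,
): ChatModelCard | undefined => cards.find((card) => card.id === modelId);

const maxTokensFor = (cards: ChatModelCard[], modelId: string): number =>
  findModelCard(cards, modelId)?.tokens ?? 4096;

// Example: maxTokensFor(openAICards, 'gpt-3.5-turbo') would now report 16_385.
```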
