From 2190a9585b956b0035b9bd253d2c7f8158629412 Mon Sep 17 00:00:00 2001 From: arvinxx Date: Tue, 9 Apr 2024 01:39:39 +0800 Subject: [PATCH] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20refactor:=20refactor=20ope?= =?UTF-8?q?nAI=20to=20openai=20and=20azure?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .env.example | 8 +- src/app/settings/llm/Azure/index.tsx | 47 ++-- src/app/settings/llm/Bedrock/index.tsx | 14 +- src/app/settings/llm/OpenAI/index.tsx | 130 +---------- src/app/settings/llm/OpenRouter/index.tsx | 1 - src/app/settings/llm/TogetherAI/index.tsx | 1 - .../llm/components/ProviderConfig/index.tsx | 63 +++--- .../ProviderModelList/CustomModelOption.tsx | 16 +- .../ProviderModelList/ModelConfigModal.tsx | 26 ++- .../components/ProviderModelList/index.tsx | 164 +++++++------- src/app/settings/llm/const.ts | 6 - src/app/settings/llm/index.tsx | 3 +- src/components/ModelSelect/index.tsx | 18 +- src/config/modelProviders/azure.ts | 79 +++++++ src/const/settings/index.ts | 6 +- .../Conversation/Error/APIKeyForm/OpenAI.tsx | 4 +- src/hooks/_header.ts | 21 +- src/locales/default/setting.ts | 14 +- .../FromV3ToV4/fixtures/input-v3-azure.json | 79 +++++++ .../fixtures/input-v3-open-router.json | 79 +++++++ .../FromV3ToV4/fixtures/openai-input-v3.json | 77 +++++++ .../FromV3ToV4/fixtures/openai-output-v4.json | 71 ++++++ .../fixtures/output-v3-from-v1.json | 203 ++++++++++++++++++ src/migrations/FromV3ToV4/index.ts | 80 +++++++ src/migrations/FromV3ToV4/migrations.test.ts | 45 ++++ src/migrations/FromV3ToV4/types/v3.ts | 59 +++++ src/migrations/FromV3ToV4/types/v4.ts | 34 +++ src/migrations/index.ts | 11 +- src/services/_auth.test.ts | 10 +- src/services/_auth.ts | 60 +----- src/services/_header.ts | 25 +-- .../slices/settings/actions/llm.test.ts | 10 +- .../global/slices/settings/actions/llm.ts | 11 +- .../slices/settings/selectors/modelConfig.ts | 21 +- .../settings/selectors/modelProvider.ts | 8 + src/types/llm.ts | 8 + 
src/types/settings/modelProvider.ts | 2 +- src/utils/parseModels.ts | 3 + 38 files changed, 1065 insertions(+), 452 deletions(-) create mode 100644 src/config/modelProviders/azure.ts create mode 100644 src/migrations/FromV3ToV4/fixtures/input-v3-azure.json create mode 100644 src/migrations/FromV3ToV4/fixtures/input-v3-open-router.json create mode 100644 src/migrations/FromV3ToV4/fixtures/openai-input-v3.json create mode 100644 src/migrations/FromV3ToV4/fixtures/openai-output-v4.json create mode 100644 src/migrations/FromV3ToV4/fixtures/output-v3-from-v1.json create mode 100644 src/migrations/FromV3ToV4/index.ts create mode 100644 src/migrations/FromV3ToV4/migrations.test.ts create mode 100644 src/migrations/FromV3ToV4/types/v3.ts create mode 100644 src/migrations/FromV3ToV4/types/v4.ts diff --git a/.env.example b/.env.example index 0e4908306af8..700446662999 100644 --- a/.env.example +++ b/.env.example @@ -1,13 +1,9 @@ # add a access code to lock your lobe-chat application, you can set a long password to avoid leaking. If this value contains a comma, it is a password array. #ACCESS_CODE=lobe66 -# add your custom model name, multi model separate by comma. for example gpt-3.5-1106,gpt-4-1106 -# CUSTOM_MODELS=model1,model2,model3 - # Specify your API Key selection method, currently supporting `random` and `turn`. # API_KEY_SELECT_MODE=random -# ---- only choose one from OpenAI Service and Azure OpenAI Service ---- # ######################################## ############ OpenAI Service ############ @@ -19,13 +15,15 @@ OPENAI_API_KEY=sk-xxxxxxxxx # use a proxy to connect to the OpenAI API # OPENAI_PROXY_URL=https://api.openai.com/v1 +# add your custom model name, multi model separate by comma. 
for example gpt-3.5-1106,gpt-4-1106 +#OPENAI_ENABLED_MODELS=gpt-3.5-turbo + ######################################## ######### Azure OpenAI Service ######### ######################################## # you can learn azure OpenAI Service on https://learn.microsoft.com/en-us/azure/ai-services/openai/overview # use Azure OpenAI Service by uncomment the following line -# USE_AZURE_OPENAI=1 # The API key you applied for on the Azure OpenAI account page, which can be found in the "Keys and Endpoints" section. # AZURE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/src/app/settings/llm/Azure/index.tsx b/src/app/settings/llm/Azure/index.tsx index c40ec263da3d..8943b6c56b66 100644 --- a/src/app/settings/llm/Azure/index.tsx +++ b/src/app/settings/llm/Azure/index.tsx @@ -8,7 +8,6 @@ import { Flexbox } from 'react-layout-kit'; import { ModelProvider } from '@/libs/agent-runtime'; -import Checker from '../components/Checker'; import ProviderConfig from '../components/ProviderConfig'; import { LLMProviderApiTokenKey, LLMProviderBaseUrlKey, LLMProviderConfigKey } from '../const'; @@ -24,7 +23,7 @@ const useStyles = createStyles(({ css, token }) => ({ `, })); -const providerKey = 'azure'; +const providerKey = ModelProvider.Azure; const AzureOpenAIProvider = memo(() => { const { t } = useTranslation('setting'); @@ -33,7 +32,7 @@ const AzureOpenAIProvider = memo(() => { return ( { children: ( ({ - label: i, - value: i, - }))} + '2023-05-15', + ].map((i) => ({ label: i, value: i }))} placeholder={'20XX-XX-XX'} /> ), @@ -75,30 +72,12 @@ const AzureOpenAIProvider = memo(() => { label: t('llm.azure.azureApiVersion.title'), name: [LLMProviderConfigKey, providerKey, 'apiVersion'], }, - { - children: ( - - ), - desc: ( - - {t('llm.azure.deployments.desc')} - - ), - - label: t('llm.azure.deployments.title'), - name: [LLMProviderConfigKey, providerKey, 'deployments'], - }, - { - children: , - desc: t('llm.checker.desc'), - label: t('llm.checker.title'), - minWidth: undefined, - }, ]} + 
checkModel={'gpt-3.5-turbo'} + modelList={{ + azureDeployName: true, + placeholder: t('llm.azure.modelListPlaceholder'), + }} provider={providerKey} title={ diff --git a/src/app/settings/llm/Bedrock/index.tsx b/src/app/settings/llm/Bedrock/index.tsx index 5dbe26fe8561..e8c6a7a298ef 100644 --- a/src/app/settings/llm/Bedrock/index.tsx +++ b/src/app/settings/llm/Bedrock/index.tsx @@ -7,7 +7,6 @@ import { Flexbox } from 'react-layout-kit'; import { ModelProvider } from '@/libs/agent-runtime'; import { GlobalLLMProviderKey } from '@/types/settings'; -import Checker from '../components/Checker'; import ProviderConfig from '../components/ProviderConfig'; import { LLMProviderConfigKey } from '../const'; @@ -18,7 +17,7 @@ const BedrockProvider = memo(() => { return ( { label: t(`llm.${providerKey}.region.title`), name: [LLMProviderConfigKey, providerKey, 'region'], }, - { - children: ( - - ), - desc: t(`llm.${providerKey}.checker.desc`), - label: t('llm.checker.title'), - minWidth: '100%', - }, ]} - provider={providerKey} + checkModel={'anthropic.claude-instant-v1'} + provider={ModelProvider.Bedrock} title={ diff --git a/src/app/settings/llm/OpenAI/index.tsx b/src/app/settings/llm/OpenAI/index.tsx index 028fff9b34bf..8c452276285b 100644 --- a/src/app/settings/llm/OpenAI/index.tsx +++ b/src/app/settings/llm/OpenAI/index.tsx @@ -1,127 +1,15 @@ import { OpenAI } from '@lobehub/icons'; -import { Markdown } from '@lobehub/ui'; -import { AutoComplete, Input, Switch } from 'antd'; -import { createStyles } from 'antd-style'; import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { ModelProvider } from '@/libs/agent-runtime'; -import { useGlobalStore } from '@/store/global'; -import { modelConfigSelectors } from '@/store/global/selectors'; - -import Checker from '../components/Checker'; import ProviderConfig from '../components/ProviderConfig'; -import ProviderModelListSelect from '../components/ProviderModelList'; -import { LLMProviderConfigKey } 
from '../const'; - -const useStyles = createStyles(({ css, token }) => ({ - markdown: css` - p { - color: ${token.colorTextDescription} !important; - } - `, - tip: css` - font-size: 12px; - color: ${token.colorTextDescription}; - `, -})); -const providerKey = 'openAI'; - -const LLM = memo(() => { - const { t } = useTranslation('setting'); - const { styles } = useStyles(); - - const [useAzure] = useGlobalStore((s) => [modelConfigSelectors.enableAzure(s)]); - return ( - - ), - desc: useAzure ? t('llm.azure.token.desc') : t('llm.openai.token.desc'), - label: useAzure ? t('llm.azure.token.title') : t('llm.openai.token.title'), - name: [LLMProviderConfigKey, providerKey, 'OPENAI_API_KEY'], - }, - { - children: ( - - ), - desc: useAzure ? t('llm.azure.endpoint.desc') : t('llm.openai.endpoint.desc'), - label: useAzure ? t('llm.azure.endpoint.title') : t('llm.openai.endpoint.title'), - name: [LLMProviderConfigKey, providerKey, 'endpoint'], - }, - { - children: ( - - ), - desc: t('llm.openai.customModelName.desc'), - label: t('llm.openai.customModelName.title'), - name: [LLMProviderConfigKey, providerKey, 'enabledModels'], - }, - { - children: , - desc: t('llm.openai.useAzure.desc'), - label: t('llm.openai.useAzure.title'), - minWidth: undefined, - name: [LLMProviderConfigKey, 'openAI', 'useAzure'], - valuePropName: 'checked', - }, - { - children: ( - ({ - label: i, - value: i, - }))} - placeholder={'20XX-XX-XX'} - /> - ), - desc: ( - - {t('llm.openai.azureApiVersion.desc')} - - ), - hidden: !useAzure, - label: t('llm.openai.azureApiVersion.title'), - name: [LLMProviderConfigKey, providerKey, 'azureApiVersion'], - }, - { - children: , - desc: t('llm.checker.desc'), - label: t('llm.checker.title'), - minWidth: '100%', - }, - ]} - provider={providerKey} - title={} - /> - ); -}); +const OpenAIProvider = memo(() => ( + } + /> +)); -export default LLM; +export default OpenAIProvider; diff --git a/src/app/settings/llm/OpenRouter/index.tsx 
b/src/app/settings/llm/OpenRouter/index.tsx index 4c3ef85c56c9..37584a7b0678 100644 --- a/src/app/settings/llm/OpenRouter/index.tsx +++ b/src/app/settings/llm/OpenRouter/index.tsx @@ -13,7 +13,6 @@ const OpenRouterProvider = memo(() => { { ( ({ + apiKeyItems, provider, - showCustomModelName = false, showEndpoint, showApiKey = true, checkModel, canDeactivate = true, title, - configItems, checkerItem, + modelList, }) => { const { t } = useTranslation('setting'); const [form] = AntForm.useForm(); @@ -54,18 +57,24 @@ const ProviderConfig = memo( useSyncSettings(form); - const defaultFormItems = [ - showApiKey && { - children: ( - - ), - desc: t(`llm.${provider}.token.desc` as any), - label: t(`llm.${provider}.token.title` as any), - name: [LLMProviderConfigKey, provider, LLMProviderApiTokenKey], - }, + const apiKeyItem: FormItemProps[] = !showApiKey + ? [] + : apiKeyItems ?? [ + { + children: ( + + ), + desc: t(`llm.${provider}.token.desc` as any), + label: t(`llm.${provider}.token.title` as any), + name: [LLMProviderConfigKey, provider, LLMProviderApiTokenKey], + }, + ]; + + const formItems = [ + ...apiKeyItem, showEndpoint && { children: ( @@ -74,23 +83,13 @@ const ProviderConfig = memo( label: t(`llm.${provider}.endpoint.title` as any), name: [LLMProviderConfigKey, provider, LLMProviderBaseUrlKey], }, - showCustomModelName && { - children: ( - - ), - desc: t(`llm.${provider}.customModelName.desc` as any), - label: t(`llm.${provider}.customModelName.title` as any), - name: [LLMProviderConfigKey, provider, LLMProviderCustomModelKey], - }, { children: ( ), desc: t('llm.modelList.desc'), @@ -101,12 +100,12 @@ const ProviderConfig = memo( children: , desc: t('llm.checker.desc'), label: t('llm.checker.title'), - minWidth: '100%', + minWidth: undefined, }, ].filter(Boolean) as FormItemProps[]; const model: ItemGroup = { - children: configItems ?? defaultFormItems, + children: formItems, defaultActive: canDeactivate ? enabled : undefined, extra: canDeactivate ? 
( diff --git a/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx b/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx index 51fa800f69d7..665cd548db78 100644 --- a/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx +++ b/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx @@ -9,7 +9,7 @@ import { Flexbox } from 'react-layout-kit'; import ModelIcon from '@/components/ModelIcon'; import { ModelInfoTags } from '@/components/ModelSelect'; import { useGlobalStore } from '@/store/global'; -import { modelConfigSelectors } from '@/store/global/slices/settings/selectors'; +import { modelConfigSelectors } from '@/store/global/selectors'; import { GlobalLLMProviderKey } from '@/types/settings'; interface CustomModelOptionProps { @@ -22,10 +22,12 @@ const CustomModelOption = memo(({ id, provider }) => { const { t: s } = useTranslation('setting'); const { modal } = App.useApp(); - const [dispatchCustomModelCards, toggleEditingCustomModelCard] = useGlobalStore((s) => [ - s.dispatchCustomModelCards, - s.toggleEditingCustomModelCard, - ]); + const [dispatchCustomModelCards, toggleEditingCustomModelCard, removeEnabledModels] = + useGlobalStore((s) => [ + s.dispatchCustomModelCards, + s.toggleEditingCustomModelCard, + s.removeEnabledModels, + ]); const modelCard = useGlobalStore( modelConfigSelectors.getCustomModelCardById({ id, provider }), isEqual, @@ -68,8 +70,10 @@ const CustomModelOption = memo(({ id, provider }) => { type: 'warning', }); + // delete model and deactive id if (isConfirm) { - dispatchCustomModelCards(provider, { id, type: 'delete' }); + await dispatchCustomModelCards(provider, { id, type: 'delete' }); + await removeEnabledModels(provider, id); } }} title={t('delete')} diff --git a/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx b/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx index 975c7178002c..cd63f0ae2577 100644 --- 
a/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx +++ b/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx @@ -9,13 +9,17 @@ import { modelConfigSelectors } from '@/store/global/slices/settings/selectors'; import MaxTokenSlider from './MaxTokenSlider'; -const ModelConfigModal = memo(() => { +interface ModelConfigModalProps { + provider?: string; + showAzureDeployName?: boolean; +} +const ModelConfigModal = memo(({ showAzureDeployName, provider }) => { const [formInstance] = Form.useForm(); const { t } = useTranslation('setting'); - const [open, id, provider, dispatchCustomModelCards, toggleEditingCustomModelCard] = + const [open, id, editingProvider, dispatchCustomModelCards, toggleEditingCustomModelCard] = useGlobalStore((s) => [ - !!s.editingCustomCardModel, + !!s.editingCustomCardModel && provider === s.editingCustomCardModel?.provider, s.editingCustomCardModel?.id, s.editingCustomCardModel?.provider, s.dispatchCustomModelCards, @@ -23,7 +27,7 @@ const ModelConfigModal = memo(() => { ]); const modelCard = useGlobalStore( - modelConfigSelectors.getCustomModelCardById({ id, provider }), + modelConfigSelectors.getCustomModelCardById({ id, provider: editingProvider }), isEqual, ); @@ -39,10 +43,10 @@ const ModelConfigModal = memo(() => { closeModal(); }} onOk={() => { - if (!provider || !id) return; + if (!editingProvider || !id) return; const data = formInstance.getFieldsValue(); - dispatchCustomModelCards(provider as any, { id, type: 'update', value: data }); + dispatchCustomModelCards(editingProvider as any, { id, type: 'update', value: data }); closeModal(); }} @@ -68,6 +72,16 @@ const ModelConfigModal = memo(() => { + {showAzureDeployName && ( + + + + )} (({ provider, placeholder }) => { - const { t } = useTranslation('common'); - const { t: transSetting } = useTranslation('setting'); - const chatModelCards = useGlobalStore(modelConfigSelectors.providerModelCards(provider), isEqual); - const [setModelProviderConfig, 
dispatchCustomModelCards] = useGlobalStore((s) => [ - s.setModelProviderConfig, - s.dispatchCustomModelCards, - ]); - const defaultEnableModel = useGlobalStore( - modelProviderSelectors.defaultEnabledProviderModels(provider), - isEqual, - ); - const enabledModels = useGlobalStore( - modelConfigSelectors.providerEnableModels(provider), - isEqual, - ); - const showReset = !!enabledModels && !isEqual(defaultEnableModel, enabledModels); +const ProviderModelListSelect = memo( + ({ provider, showAzureDeployName, notFoundContent, placeholder }) => { + const { t } = useTranslation('common'); + const { t: transSetting } = useTranslation('setting'); + const chatModelCards = useGlobalStore( + modelConfigSelectors.providerModelCards(provider), + isEqual, + ); + const [setModelProviderConfig, dispatchCustomModelCards] = useGlobalStore((s) => [ + s.setModelProviderConfig, + s.dispatchCustomModelCards, + ]); + const defaultEnableModel = useGlobalStore( + modelProviderSelectors.defaultEnabledProviderModels(provider), + isEqual, + ); + const enabledModels = useGlobalStore( + modelConfigSelectors.providerEnableModels(provider), + isEqual, + ); + const showReset = !!enabledModels && !isEqual(defaultEnableModel, enabledModels); - return ( -
-
- {showReset && ( - { - setModelProviderConfig(provider, { enabledModels: null }); - }} - size={'small'} - title={t('reset')} - /> - )} -
- - allowClear - mode="tags" - onChange={(value, options) => { - setModelProviderConfig(provider, { enabledModels: value.filter(Boolean) }); + return ( +
+
+ {showReset && ( + { + setModelProviderConfig(provider, { enabledModels: null }); + }} + size={'small'} + title={t('reset')} + /> + )} +
+ + allowClear + mode="tags" + notFoundContent={notFoundContent} + onChange={(value, options) => { + setModelProviderConfig(provider, { enabledModels: value.filter(Boolean) }); - // if there is a new model, add it to `customModelCards` - options.forEach((option: { label?: string; value?: string }, index: number) => { - // if is a known model, it should have value - // if is an unknown model, the option will be {} - if (option.value) return; + // if there is a new model, add it to `customModelCards` + options.forEach((option: { label?: string; value?: string }, index: number) => { + // if is a known model, it should have value + // if is an unknown model, the option will be {} + if (option.value) return; - const modelId = value[index]; + const modelId = value[index]; - dispatchCustomModelCards(provider, { - modelCard: { id: modelId }, - type: 'add', + dispatchCustomModelCards(provider, { + modelCard: { id: modelId }, + type: 'add', + }); }); - }); - }} - optionFilterProp="label" - optionRender={({ label, value }) => { - // model is in the chatModels - if (chatModelCards.some((c) => c.id === value)) + }} + optionFilterProp="label" + optionRender={({ label, value }) => { + // model is in the chatModels + if (chatModelCards.some((c) => c.id === value)) + return ( + + ); + + // model is defined by user in client return ( - + + {transSetting('llm.customModelCards.addNew', { id: value })} + ); - - // model is defined by user in client - return ( - - {transSetting('llm.customModelCards.addNew', { id: value })} - - ); - }} - options={chatModelCards.map((model) => ({ - label: model.displayName || model.id, - value: model.id, - }))} - placeholder={placeholder} - popupClassName={cx(styles.popup)} - value={enabledModels ?? defaultEnableModel} - /> - -
- ); -}); + }} + options={chatModelCards.map((model) => ({ + label: model.displayName || model.id, + value: model.id, + }))} + placeholder={placeholder} + popupClassName={cx(styles.popup)} + value={enabledModels ?? defaultEnableModel} + /> + +
+ ); + }, +); export default ProviderModelListSelect; diff --git a/src/app/settings/llm/const.ts b/src/app/settings/llm/const.ts index f439d8e0c3db..830926a4f5de 100644 --- a/src/app/settings/llm/const.ts +++ b/src/app/settings/llm/const.ts @@ -12,12 +12,6 @@ export const LLMProviderApiTokenKey = 'apiKey'; */ export const LLMProviderBaseUrlKey = 'endpoint'; -/** - * we use this key to define the custom model name - * equal CUSTOM_MODELS - */ -export const LLMProviderCustomModelKey = 'customModelName'; - /** * we use this key to define the custom model name * equal CUSTOM_MODELS diff --git a/src/app/settings/llm/index.tsx b/src/app/settings/llm/index.tsx index 015028d30e38..599296e62918 100644 --- a/src/app/settings/llm/index.tsx +++ b/src/app/settings/llm/index.tsx @@ -9,6 +9,7 @@ import { MORE_MODEL_PROVIDER_REQUEST_URL } from '@/const/url'; import Footer from '../features/Footer'; import Anthropic from './Anthropic'; +import Azure from './Azure'; import Bedrock from './Bedrock'; import Google from './Google'; import Groq from './Groq'; @@ -29,7 +30,7 @@ export default memo<{ showOllama: boolean }>(({ showOllama }) => { <> - {/**/} + {showOllama && } diff --git a/src/components/ModelSelect/index.tsx b/src/components/ModelSelect/index.tsx index 0ae953d2babe..d7c9e9706dcd 100644 --- a/src/components/ModelSelect/index.tsx +++ b/src/components/ModelSelect/index.tsx @@ -104,15 +104,15 @@ export const ModelInfoTags = memo(
{Math.floor(model.tokens / 1000)}K
)} - {model.isCustom && ( - -
DIY
-
- )} + {/*{model.isCustom && (*/} + {/* */} + {/*
DIY
*/} + {/* */} + {/*)}*/}
); }, diff --git a/src/config/modelProviders/azure.ts b/src/config/modelProviders/azure.ts new file mode 100644 index 000000000000..ab16ffb71710 --- /dev/null +++ b/src/config/modelProviders/azure.ts @@ -0,0 +1,79 @@ +import { ModelProviderCard } from '@/types/llm'; + +const Azure: ModelProviderCard = { + chatModels: [ + { + deploymentName: 'gpt-35-turbo', + description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务', + displayName: 'GPT-3.5 Turbo', + enabled: true, + functionCall: true, + id: 'gpt-35-turbo', + maxOutput: 4096, + tokens: 16_385, + }, + { + deploymentName: 'gpt-35-turbo-16k', + displayName: 'GPT-3.5 Turbo', + functionCall: true, + id: 'gpt-35-turbo-16k', + tokens: 16_384, + }, + { + deploymentName: 'gpt-4', + displayName: 'GPT-4 Turbo Preview', + enabled: true, + functionCall: true, + id: 'gpt-4', + tokens: 128_000, + }, + { + description: 'GPT-4 视觉预览版,支持视觉任务', + displayName: 'GPT-4 Turbo with Vision Preview', + id: 'gpt-4-vision-preview', + tokens: 128_000, + vision: true, + }, + { + displayName: 'GPT-4 Turbo Preview (1106)', + functionCall: true, + id: 'gpt-4-1106-preview', + tokens: 128_000, + }, + { + displayName: 'GPT-4', + functionCall: true, + id: 'gpt-4', + tokens: 8192, + }, + { + displayName: 'GPT-4 (0613)', + functionCall: true, + id: 'gpt-4-0613', + tokens: 8192, + }, + { + displayName: 'GPT-4 32K', + functionCall: true, + id: 'gpt-4-32k', + tokens: 32_768, + }, + { + displayName: 'GPT-4 32K (0613)', + functionCall: true, + id: 'gpt-4-32k-0613', + tokens: 32_768, + }, + { + displayName: 'GPT-4 ALL', + files: true, + functionCall: true, + id: 'gpt-4-all', + tokens: 32_768, + vision: true, + }, + ], + id: 'azure', +}; + +export default Azure; diff --git a/src/const/settings/index.ts b/src/const/settings/index.ts index acf3b274c9e7..c2f86a98836a 100644 --- a/src/const/settings/index.ts +++ b/src/const/settings/index.ts @@ -106,9 +106,9 @@ export const DEFAULT_LLM_CONFIG: GlobalLLMConfig = { enabledModels: filterEnabledModels(OllamaProvider), endpoint: '', 
}, - openAI: { - OPENAI_API_KEY: '', - enabled: true, + openai: { + apiKey: '', + enabled: false, enabledModels: filterEnabledModels(OpenAIProvider), }, openrouter: { diff --git a/src/features/Conversation/Error/APIKeyForm/OpenAI.tsx b/src/features/Conversation/Error/APIKeyForm/OpenAI.tsx index 2cfd8a740fc7..ac520c4e9bbc 100644 --- a/src/features/Conversation/Error/APIKeyForm/OpenAI.tsx +++ b/src/features/Conversation/Error/APIKeyForm/OpenAI.tsx @@ -30,7 +30,7 @@ const OpenAIForm = memo(() => { { - setConfig('openAI', { OPENAI_API_KEY: e.target.value }); + setConfig('openai', { apiKey: e.target.value }); }} placeholder={'sk-*****************************************'} type={'block'} @@ -39,7 +39,7 @@ const OpenAIForm = memo(() => { {showProxy ? ( { - setConfig('openAI', { endpoint: e.target.value }); + setConfig('openai', { endpoint: e.target.value }); }} placeholder={'https://api.openai.com/v1'} type={'block'} diff --git a/src/hooks/_header.ts b/src/hooks/_header.ts index 2ea780eadcb5..96506da02308 100644 --- a/src/hooks/_header.ts +++ b/src/hooks/_header.ts @@ -1,10 +1,4 @@ -import { - AZURE_OPENAI_API_VERSION, - LOBE_CHAT_ACCESS_CODE, - OPENAI_API_KEY_HEADER_KEY, - OPENAI_END_POINT, - USE_AZURE_OPENAI, -} from '@/const/fetch'; +import { LOBE_CHAT_ACCESS_CODE, OPENAI_API_KEY_HEADER_KEY, OPENAI_END_POINT } from '@/const/fetch'; import { useGlobalStore } from '@/store/global'; import { modelConfigSelectors, settingsSelectors } from '@/store/global/selectors'; @@ -13,23 +7,14 @@ import { modelConfigSelectors, settingsSelectors } from '@/store/global/selector export const createHeaderWithOpenAI = (header?: HeadersInit): HeadersInit => { const openai = modelConfigSelectors.openAIConfig(useGlobalStore.getState()); - const apiKey = openai.OPENAI_API_KEY || ''; + const apiKey = openai.apiKey || ''; const endpoint = openai.endpoint || ''; // eslint-disable-next-line no-undef - const result: HeadersInit = { + return { ...header, [LOBE_CHAT_ACCESS_CODE]: 
settingsSelectors.password(useGlobalStore.getState()), [OPENAI_API_KEY_HEADER_KEY]: apiKey, [OPENAI_END_POINT]: endpoint, }; - - if (openai.useAzure) { - Object.assign(result, { - [AZURE_OPENAI_API_VERSION]: openai.azureApiVersion || '', - [USE_AZURE_OPENAI]: '1', - }); - } - - return result; }; diff --git a/src/locales/default/setting.ts b/src/locales/default/setting.ts index c9e4c3fc4c45..5e776f28ff88 100644 --- a/src/locales/default/setting.ts +++ b/src/locales/default/setting.ts @@ -50,20 +50,12 @@ export default { fetch: '获取列表', title: 'Azure Api Version', }, - deployments: { - desc: '使用 id=name 的语法填写你的部署模型(如部署名和模型同名,可以只填写模型名称),多个模型使用逗号(,) 隔开', - title: 'Azure 部署模型列表', - }, - endpoint: { desc: '从 Azure 门户检查资源时,可在“密钥和终结点”部分中找到此值', placeholder: 'https://docs-test-001.openai.azure.com', title: 'Azure API 地址', }, - models: { - desc: '支持的模型', - title: '模型列表', - }, + modelListPlaceholder: '请选择或添加你的部署模型', title: 'Azure OpenAI', token: { desc: '从 Azure 门户检查资源时,可在“密钥和终结点”部分中找到此值。 可以使用 KEY1 或 KEY2', @@ -103,6 +95,10 @@ export default { config: '配置模型', confirmDelete: '即将删除该自定义模型,删除后将不可恢复,请谨慎操作。', modelConfig: { + azureDeployName: { + placeholder: '请输入 Azure 中的模型部署名称', + title: '模型部署名称', + }, displayName: { placeholder: '请输入模型的展示名称,例如 ChatGPT、GPT-4 等', title: '模型展示名称', diff --git a/src/migrations/FromV3ToV4/fixtures/input-v3-azure.json b/src/migrations/FromV3ToV4/fixtures/input-v3-azure.json new file mode 100644 index 000000000000..05c99a8d25ad --- /dev/null +++ b/src/migrations/FromV3ToV4/fixtures/input-v3-azure.json @@ -0,0 +1,79 @@ +{ + "exportType": "settings", + "state": { + "settings": { + "defaultAgent": { + "config": { + "autoCreateTopicThreshold": 2, + "displayMode": "chat", + "enableAutoCreateTopic": true, + "historyCount": 1, + "model": "gpt-4-vision-preview", + "params": { + "frequency_penalty": 0, + "presence_penalty": 0, + "temperature": 0.6, + "top_p": 1 + }, + "plugins": ["realtime-weather", "steam"], + "provider": "openai", + "systemRole": "", + "tts": { 
+ "showAllLocaleVoice": false, + "sttLocale": "auto", + "ttsService": "openai", + "voice": { "openai": "alloy" } + } + }, + "meta": {} + }, + "languageModel": { + "anthropic": { "apiKey": "", "enabled": false }, + "azure": { "apiKey": "", "deployments": "", "enabled": false, "endpoint": "" }, + "bedrock": { + "accessKeyId": "", + "enabled": false, + "region": "us-east-1", + "secretAccessKey": "" + }, + "google": { "apiKey": "", "enabled": false }, + "groq": { "apiKey": "", "enabled": false }, + "mistral": { "apiKey": "", "enabled": false }, + "moonshot": { "apiKey": "", "enabled": false }, + "ollama": { "enabled": false, "endpoint": "" }, + "openAI": { + "OPENAI_API_KEY": "asbdasd", + "enabled": true, + "models": [], + "endpoint": "https://api.chatanywhere.com.cn", + "customModelName": "deerercsds", + "useAzure": true, + "azureApiVersion": "2024-02-15-preview" + }, + "openrouter": { "apiKey": "", "enabled": false }, + "perplexity": { "apiKey": "", "enabled": false }, + "togetherai": { "apiKey": "", "enabled": false }, + "zeroone": { "apiKey": "", "enabled": false }, + "zhipu": { "apiKey": "", "enabled": false }, + "openai": { + "OPENAI_API_KEY": "", + "models": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"], + "endpoint": "" + } + }, + "sync": { "webrtc": { "enabled": false } }, + "tool": { "dalle": { "autoGenerate": false } }, + "tts": { + "openAI": { "sttModel": "whisper-1", "ttsModel": "tts-1" }, + "sttAutoStop": true, + "sttServer": "openai" + }, + "fontSize": 14, + "language": "auto", + "themeMode": "auto", + "primaryColor": "", + "neutralColor": "" + } + }, + "version": 3 +} diff --git a/src/migrations/FromV3ToV4/fixtures/input-v3-open-router.json b/src/migrations/FromV3ToV4/fixtures/input-v3-open-router.json new file mode 100644 index 000000000000..6c9111e70022 --- /dev/null +++ b/src/migrations/FromV3ToV4/fixtures/input-v3-open-router.json @@ -0,0 +1,79 @@ +{ + "exportType": "settings", + "state": { + "settings": { + "defaultAgent": { + "config": { + 
"autoCreateTopicThreshold": 2, + "displayMode": "chat", + "enableAutoCreateTopic": true, + "historyCount": 1, + "model": "gpt-4-vision-preview", + "params": { + "frequency_penalty": 0, + "presence_penalty": 0, + "temperature": 0.6, + "top_p": 1 + }, + "plugins": ["realtime-weather", "steam"], + "provider": "openai", + "systemRole": "", + "tts": { + "showAllLocaleVoice": false, + "sttLocale": "auto", + "ttsService": "openai", + "voice": { "openai": "alloy" } + } + }, + "meta": {} + }, + "languageModel": { + "anthropic": { "apiKey": "", "enabled": false }, + "azure": { "apiKey": "", "deployments": "", "enabled": false, "endpoint": "" }, + "bedrock": { + "accessKeyId": "", + "enabled": false, + "region": "us-east-1", + "secretAccessKey": "" + }, + "google": { "apiKey": "", "enabled": false }, + "groq": { "apiKey": "", "enabled": false }, + "mistral": { "apiKey": "", "enabled": false }, + "moonshot": { "apiKey": "", "enabled": false }, + "ollama": { "enabled": false, "endpoint": "" }, + "openAI": { + "OPENAI_API_KEY": "asbdasd", + "enabled": true, + "models": [], + "endpoint": "", + "customModelName": "", + "useAzure": false, + "azureApiVersion": "2024-02-15-preview" + }, + "openrouter": { "apiKey": "", "enabled": true, "customModelName": "cohere/command-r-plus" }, + "perplexity": { "apiKey": "", "enabled": false }, + "togetherai": { "apiKey": "", "enabled": false }, + "zeroone": { "apiKey": "", "enabled": false }, + "zhipu": { "apiKey": "", "enabled": false }, + "openai": { + "OPENAI_API_KEY": "", + "models": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"], + "endpoint": "" + } + }, + "sync": { "webrtc": { "enabled": false } }, + "tool": { "dalle": { "autoGenerate": false } }, + "tts": { + "openAI": { "sttModel": "whisper-1", "ttsModel": "tts-1" }, + "sttAutoStop": true, + "sttServer": "openai" + }, + "fontSize": 14, + "language": "auto", + "themeMode": "auto", + "primaryColor": "", + "neutralColor": "" + } + }, + "version": 3 +} diff --git 
a/src/migrations/FromV3ToV4/fixtures/openai-input-v3.json b/src/migrations/FromV3ToV4/fixtures/openai-input-v3.json new file mode 100644 index 000000000000..07321a9c2e92 --- /dev/null +++ b/src/migrations/FromV3ToV4/fixtures/openai-input-v3.json @@ -0,0 +1,77 @@ +{ + "exportType": "settings", + "state": { + "settings": { + "defaultAgent": { + "config": { + "autoCreateTopicThreshold": 2, + "displayMode": "chat", + "enableAutoCreateTopic": true, + "historyCount": 1, + "model": "gpt-4-vision-preview", + "params": { + "frequency_penalty": 0, + "presence_penalty": 0, + "temperature": 0.6, + "top_p": 1 + }, + "plugins": ["realtime-weather", "steam"], + "provider": "openai", + "systemRole": "", + "tts": { + "showAllLocaleVoice": false, + "sttLocale": "auto", + "ttsService": "openai", + "voice": { "openai": "alloy" } + } + }, + "meta": {} + }, + "languageModel": { + "anthropic": { "apiKey": "", "enabled": false }, + "azure": { "apiKey": "", "deployments": "", "enabled": false, "endpoint": "" }, + "bedrock": { + "accessKeyId": "", + "enabled": false, + "region": "us-east-1", + "secretAccessKey": "" + }, + "google": { "apiKey": "", "enabled": false }, + "groq": { "apiKey": "", "enabled": false }, + "mistral": { "apiKey": "", "enabled": false }, + "moonshot": { "apiKey": "", "enabled": false }, + "ollama": { "enabled": false, "endpoint": "" }, + "openAI": { + "OPENAI_API_KEY": "asbdasd", + "enabled": true, + "models": [], + "endpoint": "https://api.chatanywhere.com.cn", + "customModelName": "deerercsds" + }, + "openrouter": { "apiKey": "", "enabled": false }, + "perplexity": { "apiKey": "", "enabled": false }, + "togetherai": { "apiKey": "", "enabled": false }, + "zeroone": { "apiKey": "", "enabled": false }, + "zhipu": { "apiKey": "", "enabled": false }, + "openai": { + "OPENAI_API_KEY": "", + "models": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"], + "endpoint": "" + } + }, + "sync": { "webrtc": { "enabled": false } }, + "tool": { "dalle": { "autoGenerate": false } }, + 
"tts": { + "openAI": { "sttModel": "whisper-1", "ttsModel": "tts-1" }, + "sttAutoStop": true, + "sttServer": "openai" + }, + "fontSize": 14, + "language": "auto", + "themeMode": "auto", + "primaryColor": "", + "neutralColor": "" + } + }, + "version": 3 +} diff --git a/src/migrations/FromV3ToV4/fixtures/openai-output-v4.json b/src/migrations/FromV3ToV4/fixtures/openai-output-v4.json new file mode 100644 index 000000000000..f28b565b3949 --- /dev/null +++ b/src/migrations/FromV3ToV4/fixtures/openai-output-v4.json @@ -0,0 +1,71 @@ +{ + "exportType": "settings", + "state": { + "settings": { + "defaultAgent": { + "config": { + "autoCreateTopicThreshold": 2, + "displayMode": "chat", + "enableAutoCreateTopic": true, + "historyCount": 1, + "model": "gpt-4-vision-preview", + "params": { + "frequency_penalty": 0, + "presence_penalty": 0, + "temperature": 0.6, + "top_p": 1 + }, + "plugins": ["realtime-weather", "steam"], + "provider": "openai", + "systemRole": "", + "tts": { + "showAllLocaleVoice": false, + "sttLocale": "auto", + "ttsService": "openai", + "voice": { "openai": "alloy" } + } + }, + "meta": {} + }, + "languageModel": { + "anthropic": { "apiKey": "", "enabled": false }, + "azure": { "apiKey": "", "enabled": false, "endpoint": "" }, + "bedrock": { + "accessKeyId": "", + "enabled": false, + "region": "us-east-1", + "secretAccessKey": "" + }, + "google": { "apiKey": "", "enabled": false }, + "groq": { "apiKey": "", "enabled": false }, + "mistral": { "apiKey": "", "enabled": false }, + "moonshot": { "apiKey": "", "enabled": false }, + "ollama": { "enabled": false, "endpoint": "" }, + "openrouter": { "apiKey": "", "enabled": false }, + "perplexity": { "apiKey": "", "enabled": false }, + "togetherai": { "apiKey": "", "enabled": false }, + "zeroone": { "apiKey": "", "enabled": false }, + "zhipu": { "apiKey": "", "enabled": false }, + "openai": { + "enabled": true, + "apiKey": "asbdasd", + "endpoint": "https://api.chatanywhere.com.cn", + "customModelName": "deerercsds" + 
} + }, + "sync": { "webrtc": { "enabled": false } }, + "tool": { "dalle": { "autoGenerate": false } }, + "tts": { + "openAI": { "sttModel": "whisper-1", "ttsModel": "tts-1" }, + "sttAutoStop": true, + "sttServer": "openai" + }, + "fontSize": 14, + "language": "auto", + "themeMode": "auto", + "primaryColor": "", + "neutralColor": "" + } + }, + "version": 4 +} diff --git a/src/migrations/FromV3ToV4/fixtures/output-v3-from-v1.json b/src/migrations/FromV3ToV4/fixtures/output-v3-from-v1.json new file mode 100644 index 000000000000..b6b39f4417e0 --- /dev/null +++ b/src/migrations/FromV3ToV4/fixtures/output-v3-from-v1.json @@ -0,0 +1,203 @@ +{ + "exportType": "sessions", + "state": { + "sessions": [ + { + "config": { + "displayMode": "chat", + "historyCount": 1, + "model": "gpt-3.5-turbo", + "params": { + "frequency_penalty": 0, + "presence_penalty": 0, + "temperature": 0.6, + "top_p": 1, + "max_tokens": 2200 + }, + "plugins": [], + "systemRole": "你是一名 Postgresql 的数据库专家。用户是一名数据库小白,你需要使用简单直白的方式告诉用户如何使用 ostgresql 和它相应的 orm 工具 primsa", + "enableCompressThreshold": false, + "enableMaxTokens": false, + "inputTemplate": "" + }, + "createdAt": 1698463356269, + "id": "06cc3e20-e870-4099-a619-c07a849d742d", + "meta": { + "avatar": "🐘", + "backgroundColor": "rgba(0,0,0,0)", + "title": "Postgresql 指南", + "description": "PostgreSQL 数据库和 Prisma ORM 工具的简单使用指南", + "tags": ["数据库", "PostgreSQL", "教程", "ORM", "Prisma"] + }, + "type": "agent", + "updatedAt": 1698463356269, + "group": "default", + "pinned": true + } + ], + "topics": [ + { + "createdAt": 1698463911747, + "id": "CZg3IEdy", + "title": "Prisma中的cuid函数作用是什么", + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "updatedAt": 1698463915716 + }, + { + "createdAt": 1698754463117, + "id": "MNWhBEh5", + "title": "启动 PostgreSQL Docker 镜像方法", + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "updatedAt": 1698754464867 + } + ], + "messages": [ + { + "content": "是的,你可以使用 Prisma 来自定义生成 `id` 的逻辑,例如使用 `nanoid` 
库来生成唯一标识符。\n\n首先,你需要在项目中安装 `nanoid` 库:\n\n```\nnpm install nanoid\n```\n\n然后,你可以在 Prisma 的模型定义中使用 `@default` 指令,并提供一个自定义的默认值函数来生成 `id`。\n\n```prisma\nmodel Message {\n id String @id @default(dbgenerated(\"nanoid(10)\"))\n\n createdAt DateTime @default(now())\n updatedAt DateTime @updatedAt\n}\n```\n\n在上述模型定义中,我们使用 `dbgenerated` 指令来定义一个自定义的默认值函数。函数 `nanoid(10)` 会生成一个长度为 10 的唯一标识符。\n\n请注意,使用自定义的 `id` 生成逻辑可能会对数据库性能产生一些影响,因为每次创建新记录时都需要执行自定义函数。因此,请确保你的自定义逻辑是高效的,并在实际使用中进行测试和评估性能。", + "createdAt": 1698463981313, + "id": "n2Tsl3Dm", + "meta": {}, + "parentId": "JSZaJOGj", + "role": "assistant", + "updatedAt": 1698464004494, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "CZg3IEdy", + "fromModel": "gpt-3.5-turbo" + }, + { + "content": "如何指定 自定义函数的引入位置?", + "createdAt": 1698464173224, + "id": "xPft8Msa", + "meta": {}, + "role": "user", + "updatedAt": 1698464173226, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "CZg3IEdy" + }, + { + "content": "在 Prisma 中,你可以通过在 `schema.prisma` 文件的顶部使用 `generator` 块来指定自定义函数的引入位置。\n\n例如,假设你想使用 `nanoid` 库来生成自定义的 `id`,你可以按照以下方式配置:\n\n```prisma\ngenerator client {\n provider = \"prisma-client-js\"\n output = \"./node_modules/@prisma/client\"\n}\n\ngenerator dbgenerated {\n provider = \"prisma-dbgenerator\"\n output = \"./node_modules/@prisma/client\"\n binaryTargets = [\"native\"]\n previewFeatures = [\"nativeTypes\"]\n config = {\n \"generator\": {\n \"dbgenerated\": {\n \"output\": \"./src/db/generated\",\n \"provider\": \"./src/db/generated/provider.js\"\n }\n }\n }\n}\n```\n\n在上述配置中,我们定义了两个生成器。`client` 生成器用于生成 Prisma 客户端代码,而 `dbgenerated` 生成器用于生成数据库相关的代码。\n\n注意到 `dbgenerated` 生成器的 `config` 部分中有一个 `provider` 字段,它指定了自定义函数的引入位置。在这个示例中,我们将自定义函数放在 `./src/db/generated/provider.js` 文件中。\n\n你可以根据你的项目结构和需要来调整路径和文件名,确保指定的 `provider` 路径正确引入了自定义函数。", + "createdAt": 1698464173228, + "id": "jZc2KKRs", + "meta": {}, + "parentId": "xPft8Msa", + "role": "assistant", + "updatedAt": 1698464181896, + "sessionId": 
"06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "CZg3IEdy", + "fromModel": "gpt-3.5-turbo" + }, + { + "content": "如何启动一个 postgres Docker 镜像?", + "createdAt": 1698754460196, + "id": "LjG9XSuq", + "meta": {}, + "role": "user", + "updatedAt": 1698754463118, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "MNWhBEh5" + }, + { + "content": "要启动一个 PostgreSQL Docker 镜像,你可以按照以下步骤操作:\n\n1. 首先,确保你已经安装了 Docker,可以在终端中运行 `docker --version` 来检查。\n\n2. 在终端中运行以下命令以下载并启动一个 PostgreSQL 镜像:\n\n ```shell\n docker run --name my-postgres -e POSTGRES_PASSWORD=mysecretpassword -p 5432:5432 -d postgres\n ```\n\n 这将从 Docker Hub 下载 PostgreSQL 镜像", + "createdAt": 1698754479751, + "id": "CL7E1Mc5", + "meta": {}, + "parentId": "LjG9XSuq", + "role": "assistant", + "updatedAt": 1698754507690, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "MNWhBEh5", + "fromModel": "gpt-3.5-turbo" + }, + { + "content": "如何指定本地存储的目录到 /www/database/postgres", + "createdAt": 1698754591763, + "id": "UIvHZr7m", + "meta": {}, + "role": "user", + "updatedAt": 1698754591764, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "MNWhBEh5" + }, + { + "content": "要将本地存储的目录映射到容器内的 `/var/lib/postgresql/data` 目录,你可以使用 Docker 的 `-v` 参数。\n\n以下是示例命令:\n\n```shell\ndocker run --name my-postgres -e POSTGRES_PASSWORD=mysecretpassword -p 5432:5432 -v /www/database/postgres:/var/lib/postgresql/data -d postgres\n```\n\n在这个命令中,我们使用了 `-v` 参数来指定本地目录 `/www/database/postgres` 映射到容器内的 `/var/lib/postgresql/data` 目录。这样,所有的 PostgreSQL 数据将会保存在本地指定的目录中。", + "createdAt": 1698754591765, + "id": "XLJOK44r", + "meta": {}, + "parentId": "UIvHZr7m", + "role": "assistant", + "updatedAt": 1698754609275, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "MNWhBEh5", + "fromModel": "gpt-3.5-turbo" + }, + { + "content": "docker run --name postgres -e POSTGRES_PASSWORD=DSLapHYdzNrai5j6 -p 33300:5432 -v /www/database/postgres:/var/lib/postgresql/data -d postgres", + "createdAt": 
1698754700529, + "id": "W3J6jQjm", + "meta": {}, + "role": "user", + "updatedAt": 1698754700530, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "MNWhBEh5" + }, + { + "content": "如何启动 pgadmin", + "createdAt": 1698755749886, + "id": "L7mq0ehV", + "meta": {}, + "role": "user", + "updatedAt": 1698755749888, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "MNWhBEh5" + }, + { + "content": "要启动 pgAdmin,你可以按照以下步骤操作:\n\n1. 首先,确保你已经安装了 Docker,并且已经成功启动了 PostgreSQL Docker 容器。\n\n2. 在终端中运行以下命令以启动 pgAdmin Docker 容器:\n\n ```shell\n docker run --name my-pgadmin -p 5050:80 -e PGADMIN_DEFAULT_EMAIL=myemail@example.com -e PGADMIN_DEFAULT_PASSWORD=mypassword -d dpage/pgadmin4\n ```\n\n 在这个命令中,我们使用了 `-p` 参数来将容器的端口 80 映射到本地的端口 5050。你可以根据需要修改这个端口映射。\n\n 我们还使用了 `-e` 参数来设置 pgAdmin 的默认登录邮箱和密码。你可以根据需要修改这些值。\n\n3. 在浏览器中访问 `http://localhost:5050`,你将看到 pgAdmin 的登录界面。\n\n4. 使用在上一步中设置的邮箱和密码进行登录。\n\n5. 在 pgAdmin 中,你可以添加一个新的服务器连接,以连接到运行在 Docker 容器中的 PostgreSQL 数据库。要添加一个新的服务器连接,你需要提供以下信息:\n\n - 主机名/地址:使用 Docker 容器的 IP 地址或者 `localhost`。\n - 端口:使用 Docker 容器中 PostgreSQL 的映射端口,例如 33300。\n - 用户名和密码:使用在启动 PostgreSQL 容器时设置的用户名和密码。\n\n 完成上述信息的填写后,点击保存并连接到 PostgreSQL 服务器。\n\n现在,你已经成功启动了 pgAdmin 并连接到了你的 PostgreSQL 数据库。你可以使用 pgAdmin 来管理和操作你的数据库。", + "createdAt": 1698755749889, + "id": "d5XTX9EQ", + "meta": {}, + "parentId": "L7mq0ehV", + "role": "assistant", + "updatedAt": 1698755786183, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "topicId": "MNWhBEh5", + "fromModel": "gpt-3.5-turbo" + }, + { + "content": "abcabc", + "createdAt": 1690650544852, + "id": "KPPDiRyW", + "meta": {}, + "parentId": "42k72jMi", + "role": "function", + "updatedAt": 1690650572399, + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + + "fromModel": "gpt-3.5-turbo-16k", + "plugin": { + "apiName": "websiteCrawler", + "arguments": "{\n \"url\": 
\"https://mp.weixin.qq.com/s?__biz=MjM5MTA1MjAxMQ==&mid=2651264337&idx=1&sn=d7d9126578c74c912e1f0d42cb5629da&chksm=bd48ccd58a3f45c3f2cbc7d7b732c820b9e7cd6b547c06acc8170b233710b5fb5ed62f5fd94d&mpshare=1&scene=1&srcid=07294Mpw7C6JCLgtagL1cBDR&sharer_sharetime=1690622712877&sharer_shareid=0358058a42fc25387d28337fc3d22c3c#rd\"\n}", + "identifier": "websiteCrawler", + "type": "default" + } + }, + { + "content": "bbbbb", + "createdAt": 1700065743405, + "id": "9cRjevRQ", + "meta": {}, + "parentId": "3nDXtEKv", + "role": "function", + "sessionId": "06cc3e20-e870-4099-a619-c07a849d742d", + "updatedAt": 1700065751861, + "fromModel": "gpt-3.5-turbo-16k", + "plugin": { + "apiName": "getWebsiteContent", + "arguments": "{\n \"url\": \"https://nodejs.org/api/packages.html#dual-package-hazard\"\n}", + "identifier": "website-crawler", + "type": "default" + } + } + ] + }, + "version": 3 +} diff --git a/src/migrations/FromV3ToV4/index.ts b/src/migrations/FromV3ToV4/index.ts new file mode 100644 index 000000000000..787c3198c573 --- /dev/null +++ b/src/migrations/FromV3ToV4/index.ts @@ -0,0 +1,80 @@ +import type { Migration, MigrationData } from '@/migrations/VersionController'; + +import { V3ConfigState, V3LegacyConfig, V3OpenAIConfig, V3Settings } from './types/v3'; +import { V4ConfigState, V4ProviderConfig, V4Settings } from './types/v4'; + +export class MigrationV3ToV4 implements Migration { + // from this version to start migration + version = 3; + + migrate(data: MigrationData): MigrationData { + const { settings } = data.state; + + return { + ...data, + state: { + ...data.state, + settings: this.migrateSettings(settings), + }, + }; + } + + migrateSettings = (settings: V3Settings): V4Settings => { + const { languageModel } = settings; + const { openAI, togetherai, openrouter, ollama, ...res } = languageModel; + const { openai, azure } = this.migrateOpenAI(openAI); + + return { + ...settings, + languageModel: { + ...res, + azure, + ollama: this.migrateProvider(ollama), + openai, + 
openrouter: this.migrateProvider(openrouter), + togetherai: this.migrateProvider(togetherai), + }, + }; + }; + + migrateOpenAI = ( + openai: V3OpenAIConfig, + ): { azure: V4ProviderConfig; openai: V4ProviderConfig } => { + if (openai.useAzure) { + return { + azure: { + apiKey: openai.OPENAI_API_KEY, + enabled: true, + enabledModels: null, + endpoint: openai.endpoint, + }, + openai: { + enabled: true, + enabledModels: null, + }, + }; + } + + return { + azure: { + enabledModels: null, + }, + openai: { + apiKey: openai.OPENAI_API_KEY, + enabled: true, + enabledModels: null, + endpoint: openai.endpoint, + // customModelCards:openai.customModelName + }, + }; + }; + + migrateProvider = (provider: V3LegacyConfig): V4ProviderConfig => { + return { + apiKey: provider.apiKey, + enabled: provider.enabled, + enabledModels: [], + endpoint: provider.endpoint, + }; + }; +} diff --git a/src/migrations/FromV3ToV4/migrations.test.ts b/src/migrations/FromV3ToV4/migrations.test.ts new file mode 100644 index 000000000000..6a5784474786 --- /dev/null +++ b/src/migrations/FromV3ToV4/migrations.test.ts @@ -0,0 +1,45 @@ +import { describe } from 'vitest'; + +import { MigrationData, VersionController } from '@/migrations/VersionController'; + +import { MigrationV1ToV2 } from '../FromV1ToV2'; +import inputV1Data from '../FromV1ToV2/fixtures/input-v1-session.json'; +import { MigrationV2ToV3 } from '../FromV2ToV3'; +import openaiInputV3 from './fixtures/openai-input-v3.json'; +import openaiOutputV4 from './fixtures/openai-output-v4.json'; +import outputV3DataFromV1 from './fixtures/output-v3-from-v1.json'; +import { MigrationV3ToV4 } from './index'; + +describe('MigrationV3ToV4', () => { + let migrations; + let versionController: VersionController; + + beforeEach(() => { + migrations = [MigrationV3ToV4]; + versionController = new VersionController(migrations, 4); + }); + + describe('should migrate data correctly from previous versions', () => { + it('openai', () => { + const data: MigrationData
= openaiInputV3; + + const migratedData = versionController.migrate(data); + + expect(migratedData.version).toEqual(openaiOutputV4.version); + expect(migratedData.state.settings).toEqual(openaiOutputV4.state.settings); + }); + }); + + it.skip('should work correct from v1 to v4', () => { + const data: MigrationData = inputV1Data; + + versionController = new VersionController([MigrationV2ToV3, MigrationV1ToV2], 3); + + const migratedData = versionController.migrate(data); + + expect(migratedData.version).toEqual(outputV3DataFromV1.version); + expect(migratedData.state.sessions).toEqual(outputV3DataFromV1.state.sessions); + expect(migratedData.state.topics).toEqual(outputV3DataFromV1.state.topics); + expect(migratedData.state.messages).toEqual(outputV3DataFromV1.state.messages); + }); +}); diff --git a/src/migrations/FromV3ToV4/types/v3.ts b/src/migrations/FromV3ToV4/types/v3.ts new file mode 100644 index 000000000000..674df80d0658 --- /dev/null +++ b/src/migrations/FromV3ToV4/types/v3.ts @@ -0,0 +1,59 @@ +interface V3GeneralConfig { + apiKey?: string; + enabled: boolean; + endpoint?: string; +} + +export interface V3OpenAIConfig { + OPENAI_API_KEY: string; + azureApiVersion?: string; + customModelName?: string; + enabled: boolean; + endpoint?: string; + useAzure?: boolean; +} + +export interface V3LegacyConfig { + apiKey?: string; + customModelName?: string; + enabled?: boolean; + enabledModels: string[]; + endpoint?: string; +} + +export interface V3LLMConfig { + anthropic: V3GeneralConfig; + bedrock: any; + google: V3GeneralConfig; + groq: V3GeneralConfig; + mistral: V3GeneralConfig; + moonshot: V3GeneralConfig; + ollama: V3LegacyConfig; + openAI: V3OpenAIConfig; + openrouter: V3LegacyConfig; + perplexity: V3GeneralConfig; + togetherai: V3LegacyConfig; + zeroone: V3GeneralConfig; + zhipu: V3GeneralConfig; +} + +/** + * 配置设置 + */ +export interface V3Settings { + defaultAgent: any; + fontSize: number; + language: string; + languageModel: V3LLMConfig; + neutralColor?: 
string; + password: string; + primaryColor?: string; + sync: any; + themeMode: string; + tool: any; + tts: any; +} + +export interface V3ConfigState { + settings: V3Settings; +} diff --git a/src/migrations/FromV3ToV4/types/v4.ts b/src/migrations/FromV3ToV4/types/v4.ts new file mode 100644 index 000000000000..8bb64fa01697 --- /dev/null +++ b/src/migrations/FromV3ToV4/types/v4.ts @@ -0,0 +1,34 @@ +import { ChatModelCard } from '@/types/llm'; + +import { V3LLMConfig, V3Settings } from './v3'; + +export interface V4ProviderConfig { + apiKey?: string; + customModelCards?: ChatModelCard[]; + enabled?: boolean; + /** + * enabled models id + */ + enabledModels: string[] | null; + endpoint?: string; +} + +export interface V4LLMConfig + extends Omit<V3LLMConfig, 'openAI' | 'ollama' | 'openrouter' | 'togetherai'> { + azure: V4ProviderConfig; + ollama: V4ProviderConfig; + openai: V4ProviderConfig; + openrouter: V4ProviderConfig; + togetherai: V4ProviderConfig; +} + +/** + * 配置设置 + */ +export interface V4Settings extends Omit<V3Settings, 'languageModel'> { + languageModel: V4LLMConfig; +} + +export interface V4ConfigState { + settings: V4Settings; +} diff --git a/src/migrations/index.ts b/src/migrations/index.ts index 7a6b31894433..cf23b07adbd4 100644 --- a/src/migrations/index.ts +++ b/src/migrations/index.ts @@ -5,11 +5,18 @@ import { ConfigStateAll } from '@/types/exportConfig'; import { MigrationV0ToV1 } from './FromV0ToV1'; import { MigrationV1ToV2 } from './FromV1ToV2'; -// 当前最新的版本号 +// Current latest version export const CURRENT_CONFIG_VERSION = 3; -// 历史记录版本升级模块 +// Version migrations module const ConfigMigrations = [ + /** + * 2024.04.09 + * settings migrate the `languageModel` + * - from `openAI` to `openai`, `azure` + * - from customModelName to `enabledModels` and `customModelCards` + */ + // MigrationV3ToV4, /** * 2024.01.22 * from `group = pinned` to `pinned:true` diff --git a/src/services/_auth.test.ts b/src/services/_auth.test.ts index 5964fcaaddb0..4ad2ee339e92 100644 --- a/src/services/_auth.test.ts +++ b/src/services/_auth.test.ts @@ -147,26
+147,24 @@ describe('getProviderAuthPayload', () => { it('should return correct payload for OpenAI provider', () => { // 假设的 OpenAI 配置 const mockOpenAIConfig = { - OPENAI_API_KEY: 'openai-api-key', + apiKey: 'openai-api-key', endpoint: 'openai-endpoint', useAzure: true, azureApiVersion: 'openai-azure-api-version', }; act(() => { - setModelProviderConfig('openAI', mockOpenAIConfig); + setModelProviderConfig('openai', mockOpenAIConfig); }); const payload = getProviderAuthPayload(ModelProvider.OpenAI); expect(payload).toEqual({ - apiKey: mockOpenAIConfig.OPENAI_API_KEY, - azureApiVersion: mockOpenAIConfig.azureApiVersion, + apiKey: mockOpenAIConfig.apiKey, endpoint: mockOpenAIConfig.endpoint, - useAzure: mockOpenAIConfig.useAzure, }); }); it('should return an empty object or throw an error for an unknown provider', () => { const payload = getProviderAuthPayload('UnknownProvider'); - expect(payload).toEqual({ apiKey: '', endpoint: '' }); + expect(payload).toEqual({}); }); }); diff --git a/src/services/_auth.ts b/src/services/_auth.ts index c38201056583..229d2f38b31a 100644 --- a/src/services/_auth.ts +++ b/src/services/_auth.ts @@ -6,18 +6,6 @@ import { createJWT } from '@/utils/jwt'; export const getProviderAuthPayload = (provider: string) => { switch (provider) { - case ModelProvider.ZhiPu: { - return { apiKey: modelConfigSelectors.zhipuAPIKey(useGlobalStore.getState()) }; - } - - case ModelProvider.Moonshot: { - return { apiKey: modelConfigSelectors.moonshotAPIKey(useGlobalStore.getState()) }; - } - - case ModelProvider.Google: { - return { apiKey: modelConfigSelectors.googleAPIKey(useGlobalStore.getState()) }; - } - case ModelProvider.Bedrock: { const { accessKeyId, region, secretAccessKey } = modelConfigSelectors.bedrockConfig( useGlobalStore.getState(), @@ -43,53 +31,13 @@ export const getProviderAuthPayload = (provider: string) => { case ModelProvider.Ollama: { const endpoint = modelConfigSelectors.ollamaProxyUrl(useGlobalStore.getState()); - return { - 
endpoint, - }; - } - - case ModelProvider.Perplexity: { - return { apiKey: modelConfigSelectors.perplexityAPIKey(useGlobalStore.getState()) }; - } - - case ModelProvider.Anthropic: { - const apiKey = modelConfigSelectors.anthropicAPIKey(useGlobalStore.getState()); - const endpoint = modelConfigSelectors.anthropicProxyUrl(useGlobalStore.getState()); - return { apiKey, endpoint }; + return { endpoint }; } - case ModelProvider.Mistral: { - return { apiKey: modelConfigSelectors.mistralAPIKey(useGlobalStore.getState()) }; - } - - case ModelProvider.Groq: { - return { apiKey: modelConfigSelectors.groqAPIKey(useGlobalStore.getState()) }; - } - - case ModelProvider.OpenRouter: { - return { apiKey: modelConfigSelectors.openrouterAPIKey(useGlobalStore.getState()) }; - } - - case ModelProvider.TogetherAI: { - return { apiKey: modelConfigSelectors.togetheraiAPIKey(useGlobalStore.getState()) }; - } - - case ModelProvider.ZeroOne: { - return { apiKey: modelConfigSelectors.zerooneAPIKey(useGlobalStore.getState()) }; - } + default: { + const config = modelConfigSelectors.providerConfig(provider)(useGlobalStore.getState()); - default: - case ModelProvider.OpenAI: { - const openai = modelConfigSelectors.openAIConfig(useGlobalStore.getState()); - const apiKey = openai.OPENAI_API_KEY || ''; - const endpoint = openai.endpoint || ''; - - return { - apiKey, - azureApiVersion: openai.azureApiVersion, - endpoint, - useAzure: openai.useAzure, - }; + return { apiKey: config?.apiKey, endpoint: config?.endpoint }; } } }; diff --git a/src/services/_header.ts b/src/services/_header.ts index 4c552048bf24..11f992e8c2a3 100644 --- a/src/services/_header.ts +++ b/src/services/_header.ts @@ -1,10 +1,4 @@ -import { - AZURE_OPENAI_API_VERSION, - LOBE_CHAT_ACCESS_CODE, - OPENAI_API_KEY_HEADER_KEY, - OPENAI_END_POINT, - USE_AZURE_OPENAI, -} from '@/const/fetch'; +import { LOBE_CHAT_ACCESS_CODE, OPENAI_API_KEY_HEADER_KEY, OPENAI_END_POINT } from '@/const/fetch'; import { useGlobalStore } from 
'@/store/global'; import { modelConfigSelectors, settingsSelectors } from '@/store/global/selectors'; @@ -14,25 +8,14 @@ import { modelConfigSelectors, settingsSelectors } from '@/store/global/selector */ // eslint-disable-next-line no-undef export const createHeaderWithOpenAI = (header?: HeadersInit): HeadersInit => { - const openai = modelConfigSelectors.openAIConfig(useGlobalStore.getState()); - - const apiKey = openai.OPENAI_API_KEY || ''; - const endpoint = openai.endpoint || ''; + const apiKey = modelConfigSelectors.openAIAPIKey(useGlobalStore.getState()) || ''; + const endpoint = modelConfigSelectors.openAIProxyUrl(useGlobalStore.getState()) || ''; // eslint-disable-next-line no-undef - const result: HeadersInit = { + return { ...header, [LOBE_CHAT_ACCESS_CODE]: settingsSelectors.password(useGlobalStore.getState()), [OPENAI_API_KEY_HEADER_KEY]: apiKey, [OPENAI_END_POINT]: endpoint, }; - - if (openai.useAzure) { - Object.assign(result, { - [AZURE_OPENAI_API_VERSION]: openai.azureApiVersion || '', - [USE_AZURE_OPENAI]: '1', - }); - } - - return result; }; diff --git a/src/store/global/slices/settings/actions/llm.test.ts b/src/store/global/slices/settings/actions/llm.test.ts index 2a2a68247975..3196fe3d0d07 100644 --- a/src/store/global/slices/settings/actions/llm.test.ts +++ b/src/store/global/slices/settings/actions/llm.test.ts @@ -1,9 +1,9 @@ -import { act, renderHook, waitFor } from '@testing-library/react'; +import { act, renderHook } from '@testing-library/react'; import { describe, expect, it, vi } from 'vitest'; import { userService } from '@/services/user'; import { useGlobalStore } from '@/store/global'; -import { GlobalSettings, OpenAIConfig } from '@/types/settings'; +import { GeneralModelProviderConfig } from '@/types/settings'; // Mock userService vi.mock('@/services/user', () => ({ @@ -17,17 +17,17 @@ describe('SettingsAction', () => { describe('setModelProviderConfig', () => { it('should set OpenAI configuration', async () => { const { result } 
= renderHook(() => useGlobalStore()); - const openAIConfig: Partial = { OPENAI_API_KEY: 'test-key' }; + const openAIConfig: Partial = { apiKey: 'test-key' }; // Perform the action await act(async () => { - await result.current.setModelProviderConfig('openAI', openAIConfig); + await result.current.setModelProviderConfig('openai', openAIConfig); }); // Assert that updateUserSettings was called with the correct OpenAI configuration expect(userService.updateUserSettings).toHaveBeenCalledWith({ languageModel: { - openAI: openAIConfig, + openai: openAIConfig, }, }); }); diff --git a/src/store/global/slices/settings/actions/llm.ts b/src/store/global/slices/settings/actions/llm.ts index 54c0ce225309..7b6c11615ac1 100644 --- a/src/store/global/slices/settings/actions/llm.ts +++ b/src/store/global/slices/settings/actions/llm.ts @@ -1,6 +1,6 @@ import type { StateCreator } from 'zustand/vanilla'; -import type { GlobalStore } from '@/store/global'; +import { GlobalStore } from '@/store/global'; import { GlobalLLMConfig, GlobalLLMProviderKey } from '@/types/settings'; import { CustomModelCardDispatch, customModelCardsReducer } from '../reducers/customModelCard'; @@ -14,11 +14,11 @@ export interface LLMSettingsAction { provider: GlobalLLMProviderKey, payload: CustomModelCardDispatch, ) => Promise; + removeEnabledModels: (provider: GlobalLLMProviderKey, model: string) => Promise; setModelProviderConfig: ( provider: T, config: Partial, ) => Promise; - toggleEditingCustomModelCard: (params?: { id: string; provider: GlobalLLMProviderKey }) => void; toggleProviderEnabled: (provider: GlobalLLMProviderKey, enabled: boolean) => Promise; } @@ -38,6 +38,13 @@ export const llmSettingsSlice: StateCreator< await get().setModelProviderConfig(provider, { customModelCards: nextState }); }, + removeEnabledModels: async (provider, model) => { + const config = modelConfigSelectors.providerConfig(provider)(get()); + + await get().setModelProviderConfig(provider, { + enabledModels: 
config?.enabledModels?.filter((s) => s !== model).filter(Boolean), + }); + }, setModelProviderConfig: async (provider, config) => { await get().setSettings({ languageModel: { [provider]: config } }); }, diff --git a/src/store/global/slices/settings/selectors/modelConfig.ts b/src/store/global/slices/settings/selectors/modelConfig.ts index 7d753a9a9d5b..0a9d916e5511 100644 --- a/src/store/global/slices/settings/selectors/modelConfig.ts +++ b/src/store/global/slices/settings/selectors/modelConfig.ts @@ -28,20 +28,18 @@ const providerEnableModels = (provider: string) => (s: GlobalStore) => { return providerConfig(provider)(s)?.enabledModels?.filter(Boolean); }; -const openAIConfig = (s: GlobalStore) => modelProvider(s).openAI; +const openAIConfig = (s: GlobalStore) => modelProvider(s).openai; -const openAIAPIKey = (s: GlobalStore) => openAIConfig(s).OPENAI_API_KEY; -const openAIProxyUrl = (s: GlobalStore) => openAIConfig(s).endpoint; +const openAIAPIKey = (s: GlobalStore) => modelProvider(s).openai.apiKey; +const openAIProxyUrl = (s: GlobalStore) => modelProvider(s).openai.endpoint; const zhipuAPIKey = (s: GlobalStore) => modelProvider(s).zhipu.apiKey; -const zhipuProxyUrl = (s: GlobalStore) => modelProvider(s).zhipu.endpoint; const bedrockConfig = (s: GlobalStore) => modelProvider(s).bedrock; const googleAPIKey = (s: GlobalStore) => modelProvider(s).google.apiKey; -const googleProxyUrl = (s: GlobalStore) => modelProvider(s).google.endpoint; -const enableAzure = (s: GlobalStore) => modelProvider(s).openAI.useAzure; +const enableAzure = (s: GlobalStore) => modelProvider(s).azure.enabled; const azureConfig = (s: GlobalStore) => modelProvider(s).azure; const mistralAPIKey = (s: GlobalStore) => modelProvider(s).mistral.apiKey; @@ -63,14 +61,6 @@ const togetheraiAPIKey = (s: GlobalStore) => modelProvider(s).togetherai.apiKey; const zerooneAPIKey = (s: GlobalStore) => modelProvider(s).zeroone.apiKey; -// const azureModelList = (s: GlobalStore): ModelProviderCard => { -// 
const azure = azureConfig(s); -// return { -// chatModels: parseModelString(azure.deployments), -// id: 'azure', -// }; -// }; - const modelSelectList = (s: GlobalStore): ModelProviderCard[] => { return modelProviderSelectors.providerModelList(s).map((list) => ({ ...list, @@ -151,10 +141,9 @@ export const modelConfigSelectors = { azureConfig, // Zhipu zhipuAPIKey, - zhipuProxyUrl, // Google googleAPIKey, - googleProxyUrl, + // Bedrock bedrockConfig, diff --git a/src/store/global/slices/settings/selectors/modelProvider.ts b/src/store/global/slices/settings/selectors/modelProvider.ts index 47d0f7d73128..b26056f7362f 100644 --- a/src/store/global/slices/settings/selectors/modelProvider.ts +++ b/src/store/global/slices/settings/selectors/modelProvider.ts @@ -77,6 +77,14 @@ const processChatModels = ( }); }; +// const azureModelList = (s: GlobalStore): ModelProviderCard => { +// const azure = azureConfig(s); +// return { +// chatModels: parseModelString(azure.deployments), +// id: 'azure', +// }; +// }; + /** * define all the model list of providers * @param s diff --git a/src/types/llm.ts b/src/types/llm.ts index a13d7962edae..98bf8737ffe4 100644 --- a/src/types/llm.ts +++ b/src/types/llm.ts @@ -1,6 +1,14 @@ export interface ChatModelCard { + /** + * only used in azure + */ + deploymentName?: string; description?: string; + /** + * the name show for end user + */ displayName?: string; + /** * whether model is enabled by default */ diff --git a/src/types/settings/modelProvider.ts b/src/types/settings/modelProvider.ts index ed174d8332b2..82df42424d4b 100644 --- a/src/types/settings/modelProvider.ts +++ b/src/types/settings/modelProvider.ts @@ -72,7 +72,7 @@ export interface GlobalLLMConfig { mistral: GeneralModelProviderConfig; moonshot: GeneralModelProviderConfig; ollama: OllamaConfig; - openAI: OpenAIConfig; + openai: GeneralModelProviderConfig; openrouter: OpenRouterConfig; perplexity: GeneralModelProviderConfig; togetherai: TogetherAIConfig; diff --git 
a/src/utils/parseModels.ts b/src/utils/parseModels.ts index 0d067a54a856..7cf9e5015b2e 100644 --- a/src/utils/parseModels.ts +++ b/src/utils/parseModels.ts @@ -1,5 +1,8 @@ import { CustomModels } from '@/types/settings'; +/** + * Parse model string to add or remove models. + */ export const parseModelString = (modelString: string = '') => { let models: CustomModels = []; let removeAll = false;