Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion src/main/presenter/configPresenter/modelConfig.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { ModelType } from '@shared/model'
import { ApiEndpointType, ModelType } from '@shared/model'
import { IModelConfig, ModelConfig, ModelConfigSource } from '@shared/presenter'
import ElectronStore from 'electron-store'
import { providerDbLoader } from './providerDbLoader'
Expand Down Expand Up @@ -380,6 +380,7 @@ export class ModelConfigHelper {
functionCall: false,
reasoning: false,
type: ModelType.Chat,
apiEndpoint: ApiEndpointType.Chat,
thinkingBudget: undefined,
enableSearch: false,
forcedSearch: false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
LLM_EMBEDDING_ATTRS,
IConfigPresenter
} from '@shared/presenter'
import { ApiEndpointType } from '@shared/model'
import { createStreamEvent } from '@shared/types/core/llm-events'
import { BaseLLMProvider, SUMMARY_TITLES_PROMPT } from '../baseProvider'
import OpenAI, { AzureOpenAI } from 'openai'
Expand Down Expand Up @@ -86,6 +87,20 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
return modelCapabilities.supportsVerbosity(this.provider.id, modelId)
}

/**
 * Resolves which API endpoint this model should be routed to.
 *
 * Resolution order:
 * 1. An explicit `apiEndpoint` stored in the model's config wins.
 * 2. Otherwise, models recognized as OpenAI image-generation models map to the image endpoint.
 * 3. Everything else falls back to the chat endpoint.
 *
 * @param modelId - Identifier of the model being invoked.
 * @returns The endpoint type to use for requests to this model.
 */
private getEffectiveApiEndpoint(modelId: string): ApiEndpointType {
  const storedConfig = this.configPresenter.getModelConfig(modelId, this.provider.id)
  const configured = storedConfig?.apiEndpoint
  if (configured) {
    return configured
  }
  // No explicit override: infer from the model id, defaulting to chat.
  return isOpenAIImageGenerationModel(modelId) ? ApiEndpointType.Image : ApiEndpointType.Chat
}

protected createOpenAIClient(): void {
// Get proxy configuration
const proxyUrl = proxyConfig.getProxyUrl()
Expand Down Expand Up @@ -1528,17 +1543,32 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
if (!this.isInitialized) throw new Error('Provider not initialized')
if (!modelId) throw new Error('Model ID is required')

if (isOpenAIImageGenerationModel(modelId)) {
yield* this.handleImgGeneration(messages, modelId)
} else {
yield* this.handleChatCompletion(
messages,
modelId,
modelConfig,
temperature,
maxTokens,
mcpTools
)
const apiEndpoint = this.getEffectiveApiEndpoint(modelId)

switch (apiEndpoint) {
case ApiEndpointType.Image:
yield* this.handleImgGeneration(messages, modelId)
break
case ApiEndpointType.Video:
yield* this.handleChatCompletion(
messages,
modelId,
modelConfig,
temperature,
maxTokens,
mcpTools
)
break
case ApiEndpointType.Chat:
default:
yield* this.handleChatCompletion(
messages,
modelId,
modelConfig,
temperature,
maxTokens,
mcpTools
)
}
}

Expand Down
44 changes: 43 additions & 1 deletion src/renderer/src/components/settings/ModelConfigDialog.vue
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,27 @@
</p>
</div>

<!-- API 端点(仅 OpenAI 兼容 provider 显示) -->
<div v-if="showApiEndpointSelector" class="space-y-2">
<Label for="apiEndpoint">{{ t('settings.model.modelConfig.apiEndpoint.label') }}</Label>
<Select v-model="config.apiEndpoint">
<SelectTrigger>
<SelectValue :placeholder="t('settings.model.modelConfig.apiEndpoint.label')" />
</SelectTrigger>
<SelectContent>
<SelectItem value="chat">
{{ t('settings.model.modelConfig.apiEndpoint.options.chat') }}
</SelectItem>
<SelectItem value="image">
{{ t('settings.model.modelConfig.apiEndpoint.options.image') }}
</SelectItem>
</SelectContent>
</Select>
<p class="text-xs text-muted-foreground">
{{ t('settings.model.modelConfig.apiEndpoint.description') }}
</p>
</div>

<!-- 视觉能力 -->
<div class="flex items-center justify-between">
<div class="space-y-0.5">
Expand Down Expand Up @@ -419,7 +440,7 @@
import { ref, computed, watch, onMounted } from 'vue'
import { storeToRefs } from 'pinia'
import { useI18n } from 'vue-i18n'
import { ModelType } from '@shared/model'
import { ApiEndpointType, ModelType } from '@shared/model'
import type { ModelConfig } from '@shared/presenter'
import { useModelConfigStore } from '@/stores/modelConfigStore'
import { useModelStore } from '@/stores/modelStore'
Expand Down Expand Up @@ -478,6 +499,22 @@ const modelStore = useModelStore()
const { customModels, allProviderModels } = storeToRefs(modelStore)
const configPresenter = usePresenter('configPresenter')

// Providers with their own native (non-OpenAI-compatible) APIs; matched by
// substring against the lowercased provider id.
// NOTE(review): substring matching is intentionally loose — a provider id that
// merely *contains* one of these tokens (e.g. "acp") is treated as excluded.
const isOpenAICompatibleProvider = computed(() => {
  const nonCompatibleTokens = [
    'anthropic',
    'gemini',
    'vertex',
    'aws-bedrock',
    'github-copilot',
    'ollama',
    'acp'
  ]
  const normalizedId = (props.providerId ?? '').toLowerCase()
  return nonCompatibleTokens.every((token) => !normalizedId.includes(token))
})

// The API-endpoint selector is only shown for OpenAI-compatible providers.
const showApiEndpointSelector = computed(() => isOpenAICompatibleProvider.value)

const createDefaultConfig = (): ModelConfig => ({
maxTokens: 4096,
contextLength: 8192,
Expand All @@ -486,6 +523,7 @@ const createDefaultConfig = (): ModelConfig => ({
functionCall: false,
reasoning: false,
type: ModelType.Chat,
apiEndpoint: ApiEndpointType.Chat,
reasoningEffort: 'medium',
verbosity: 'medium',
enableSearch: false,
Expand Down Expand Up @@ -596,6 +634,10 @@ const loadConfig = async () => {
try {
const modelConfig = await modelConfigStore.getModelConfig(props.modelId, props.providerId)
config.value = { ...modelConfig }

if (isOpenAICompatibleProvider.value && !config.value.apiEndpoint) {
config.value.apiEndpoint = ApiEndpointType.Chat
}
} catch (error) {
console.error('Failed to load model config:', error)
config.value = createDefaultConfig()
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/da-DK/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "Søgebegrænsning",
"description": "Modelbegrænsning: Aktivering af websøgning deaktiverer funktionskald til værktøjer"
},
"apiEndpoint": {
"description": "Vælg OpenAI API-endpunktet, som denne model bruger.",
"label": "API-endpunkt",
"options": {
"chat": "Tekstgenerering",
"image": "Billedgenerering"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/en-US/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "Search limitation",
"description": "Model limitation: Enabling web search will disable tool function calls"
},
"apiEndpoint": {
"description": "Select the OpenAI API endpoint used by this model.",
"label": "API endpoint",
"options": {
"chat": "Text Generation",
"image": "Image Generation"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/fa-IR/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "محدودیت جستجو",
"description": "محدودیت مدل: فعال‌سازی جستجوی اینترنتی باعث غیرفعال شدن فراخوانی توابع ابزار می‌شود"
},
"apiEndpoint": {
"description": "انتخاب نقطه پایانی API OpenAI مورد استفاده این مدل.",
"label": "نقطه پایانی API",
"options": {
"chat": "تولید متن",
"image": "تولید تصویر"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/fr-FR/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "Limitation de recherche",
"description": "Limitation du modèle : l'activation de la recherche web désactivera les appels de fonctions d'outils"
},
"apiEndpoint": {
"description": "Sélectionnez le point de terminaison de l’API OpenAI utilisé par ce modèle.",
"label": "Point de terminaison API",
"options": {
"chat": "Génération de texte",
"image": "Génération d'images"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/he-IL/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "מגבלת חיפוש",
"description": "מגבלת מודל: הפעלת חיפוש באינטרנט תשבית קריאות לפונקציות כלים"
},
"apiEndpoint": {
"description": "בחר את נקודת הקצה של API ה-OpenAI שבה משתמש המודל הזה.",
"label": "נקודת קצה API",
"options": {
"chat": "יצירת טקסט",
"image": "יצירת תמונות"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/ja-JP/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "検索制限",
"description": "モデル制限: ネット検索を有効にすると、ツール関数の呼び出しが無効になります"
},
"apiEndpoint": {
"description": "このモデルが使用する OpenAI API エンドポイントを選択。",
"label": "API エンドポイント",
"options": {
"chat": "テキスト生成",
"image": "画像生成"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/ko-KR/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "검색 제한",
"description": "모델 제한: 네트워크 검색을 활성화하면 도구 함수 호출이 비활성화됩니다"
},
"apiEndpoint": {
"description": "이 모델이 사용하는 OpenAI API 엔드포인트를 선택하세요.",
"label": "API 엔드포인트",
"options": {
"chat": "텍스트 생성",
"image": "이미지 생성"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/pt-BR/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "Limitação de busca",
"description": "Limitação do modelo: Habilitar a busca na web desabilitará as chamadas de função de ferramenta"
},
"apiEndpoint": {
"description": "Selecione o endpoint da API OpenAI usado por este modelo.",
"label": "Endpoint da API",
"options": {
"chat": "Geração de texto",
"image": "Geração de Imagens"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/ru-RU/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "Ограничение поиска",
"description": "Ограничение модели: включение веб-поиска отключит вызовы функций инструментов"
},
"apiEndpoint": {
"description": "Выберите конечную точку OpenAI API, используемую этой моделью.",
"label": "Конечная точка API",
"options": {
"chat": "Генерация текста",
"image": "Генерация изображений"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/zh-CN/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,14 @@
"imageGeneration": "图像生成模型"
}
},
"apiEndpoint": {
"label": "API 端点",
"description": "选择此模型使用的 OpenAI API 端点。",
"options": {
"chat": "文本生成",
"image": "图片生成"
}
},
"resetToDefault": "重置为默认",
"saveConfig": "保存配置",
"cancel": "取消",
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/zh-HK/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "搜尋限制",
"description": "模型限制: 啟用網絡搜尋將使工具函數呼叫失效"
},
"apiEndpoint": {
"description": "選擇此模型使用的 OpenAI API 端點。",
"label": "API 端點",
"options": {
"chat": "文本生成",
"image": "圖片生成"
}
}
}
},
Expand Down
8 changes: 8 additions & 0 deletions src/renderer/src/i18n/zh-TW/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,14 @@
"searchLimit": {
"label": "搜尋限制",
"description": "模型限制: 啟用網路搜尋將使工具函數呼叫失效"
},
"apiEndpoint": {
"description": "選擇此模型使用的 OpenAI API 端點。",
"label": "API 端點",
"options": {
"chat": "文字生成",
"image": "圖片生成"
}
}
}
},
Expand Down
6 changes: 6 additions & 0 deletions src/shared/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,9 @@ export enum ModelType {
Rerank = 'rerank',
ImageGeneration = 'imageGeneration'
}

/**
 * OpenAI-style API endpoint a model's requests are routed to.
 * Stored per-model in {@link ModelConfig.apiEndpoint}; `Chat` is the default
 * when no explicit value is configured.
 */
export enum ApiEndpointType {
  Chat = 'chat',
  Image = 'image',
  Video = 'video'
}
3 changes: 2 additions & 1 deletion src/shared/types/presenters/legacy.presenters.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ import { BrowserWindow } from 'electron'
import { MessageFile } from './chat'
import { ShowResponse } from 'ollama'
import { ShortcutKeySetting } from '@/presenter/configPresenter/shortcutKeySettings'
import { ModelType } from '@shared/model'
import { ApiEndpointType, ModelType } from '@shared/model'
import type { NowledgeMemThread, NowledgeMemExportSummary } from '../nowledgeMem'
import { ProviderChange, ProviderBatchUpdate } from './provider-operations'
import type { AgentSessionLifecycleStatus } from './agent-provider'
Expand Down Expand Up @@ -162,6 +162,7 @@ export interface ModelConfig {
verbosity?: 'low' | 'medium' | 'high'
maxCompletionTokens?: number // GPT-5 series uses this parameter to replace maxTokens
conversationId?: string
apiEndpoint?: ApiEndpointType
}

export interface IModelConfig {
Expand Down