src/main/presenter/llmProviderPresenter/baseProvider.ts (9 additions, 0 deletions)
@@ -30,6 +30,7 @@ import { CONFIG_EVENTS } from '@/events'
 export abstract class BaseLLMProvider {
   // Maximum number of tool calls allowed in a single conversation turn
   protected static readonly MAX_TOOL_CALLS = 50
+  protected static readonly DEFAULT_MODEL_FETCH_TIMEOUT = 12000 // Raised to 12 seconds as the general default

   protected provider: LLM_PROVIDER
   protected models: MODEL_META[] = []
@@ -59,6 +60,14 @@ export abstract class BaseLLMProvider {
     return BaseLLMProvider.MAX_TOOL_CALLS
   }

+  /**
+   * Get the timeout configuration for fetching models
+   * @returns Timeout in milliseconds
+   */
+  protected getModelFetchTimeout(): number {
+    return BaseLLMProvider.DEFAULT_MODEL_FETCH_TIMEOUT
+  }
+
   /**
    * Load cached model data from configuration
    * Called in the constructor to avoid re-fetching the model list every time
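The new hook simply returns the shared 12-second default, so a provider whose backend lists models slowly could override it instead of hard-coding its own timeout. A minimal self-contained sketch of that pattern follows; the SlowBackendProvider class and its 30-second value are hypothetical illustrations, not part of this PR.

// Self-contained sketch of the timeout hook added in baseProvider.ts.
// BaseLLMProviderSketch mirrors the PR's base class; SlowBackendProvider
// and its 30 s value are made up for illustration.
abstract class BaseLLMProviderSketch {
  protected static readonly DEFAULT_MODEL_FETCH_TIMEOUT = 12000 // ms

  // Subclasses inherit the 12 s default unless they override this hook.
  protected getModelFetchTimeout(): number {
    return BaseLLMProviderSketch.DEFAULT_MODEL_FETCH_TIMEOUT
  }

  // Stand-in for the check()/fetch logic that consumes the timeout.
  describeTimeout(): string {
    return `model fetch timeout: ${this.getModelFetchTimeout()} ms`
  }
}

// Hypothetical provider that needs a longer window to enumerate models.
class SlowBackendProvider extends BaseLLMProviderSketch {
  protected override getModelFetchTimeout(): number {
    return 30000
  }
}

console.log(new SlowBackendProvider().describeTimeout()) // "model fetch timeout: 30000 ms"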
@@ -1198,8 +1198,8 @@ export class OpenAICompatibleProvider extends BaseLLMProvider
   public async check(): Promise<{ isOk: boolean; errorMsg: string | null }> {
     try {
       if (!this.isNoModelsApi) {
-        // Use a reasonable timeout
-        const models = await this.fetchOpenAIModels({ timeout: 5000 }) // Increased timeout slightly
+        // Use unified timeout configuration from base class
+        const models = await this.fetchOpenAIModels({ timeout: this.getModelFetchTimeout() })
         this.models = models // Store fetched models
       }
       // Potentially add a simple API call test here if needed, e.g., list models even for no-API list to check key/endpoint
@@ -1070,8 +1070,8 @@ export class OpenAIResponsesProvider extends BaseLLMProvider
   public async check(): Promise<{ isOk: boolean; errorMsg: string | null }> {
     try {
       if (!this.isNoModelsApi) {
-        // Use a reasonable timeout
-        const models = await this.fetchOpenAIModels({ timeout: 5000 }) // Increased timeout slightly
+        // Use unified timeout configuration from base class
+        const models = await this.fetchOpenAIModels({ timeout: this.getModelFetchTimeout() })
         this.models = models // Store fetched models
       }
       // Potentially add a simple API call test here if needed, e.g., list models even for no-API list to check key/endpoint
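Both providers now pass getModelFetchTimeout() into fetchOpenAIModels. That method's body is not shown in this diff; the sketch below only illustrates the usual way such a per-request timeout is enforced with AbortController, and the /models endpoint, auth header, and response shape are assumptions rather than the project's actual implementation.

// Generic sketch of enforcing a per-request timeout like the 12 s default above.
// fetchOpenAIModels itself is not part of this diff; endpoint, auth header, and
// response shape below are assumptions for illustration only.
async function fetchModelsWithTimeout(
  baseUrl: string,
  apiKey: string,
  timeoutMs: number
): Promise<unknown[]> {
  const controller = new AbortController()
  const timer = setTimeout(() => controller.abort(), timeoutMs)
  try {
    const resp = await fetch(`${baseUrl}/models`, {
      headers: { Authorization: `Bearer ${apiKey}` },
      signal: controller.signal
    })
    if (!resp.ok) throw new Error(`Model list request failed: ${resp.status}`)
    const body = (await resp.json()) as { data?: unknown[] }
    return body.data ?? []
  } finally {
    clearTimeout(timer) // Always clear the timer, whether the request resolved or aborted
  }
}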
src/renderer/src/views/WelcomeView.vue (3 additions, 3 deletions)
@@ -211,9 +211,9 @@ onMounted(() => {

 const handleModelEnabledChange = async (model: MODEL_META, enabled: boolean) => {
   try {
-    await settingsStore.updateModelStatus(selectedProvider.value, model.id, !enabled)
+    await settingsStore.updateModelStatus(selectedProvider.value, model.id, enabled)
   } catch (error) {
-    console.error('Failed to disable model:', error)
+    console.error('Failed to update model status:', error)
   }
   console.log('handleModelEnabledChange', model, enabled)
 }
@@ -339,7 +339,7 @@ const isFirstStep = computed(() => currentStep.value === 0)

     <div
       v-show="!providerModelLoading"
-      class="flex flex-col w-full border overflow-hidden rounded-lg max-h-80 overflow-y-auto"
+      class="flex flex-col w-full border rounded-lg max-h-80 overflow-y-auto"
     >
       <ModelConfigItem
         v-for="model in settingsStore.allProviderModels.find(
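For context on the first WelcomeView.vue hunk above: the toggle already emits the desired enabled state, so negating it inverted every change (enabling a model actually disabled it). Below is a minimal sketch of the corrected flow with a stubbed store; the provider and model ids, and the stub itself, are placeholders rather than the real Pinia store.

// Stub mirroring the call site; the real settingsStore is a Pinia store and
// its updateModelStatus signature is assumed from the call in WelcomeView.vue.
const settingsStore = {
  async updateModelStatus(providerId: string, modelId: string, enabled: boolean): Promise<void> {
    console.log(`provider=${providerId} model=${modelId} enabled=${enabled}`)
  }
}

// Corrected handler: pass the emitted state through unchanged (was `!enabled`).
async function handleModelEnabledChange(modelId: string, enabled: boolean): Promise<void> {
  try {
    await settingsStore.updateModelStatus('openai', modelId, enabled)
  } catch (error) {
    console.error('Failed to update model status:', error)
  }
}

void handleModelEnabledChange('gpt-4o', true) // Enables the model; before the fix this would have disabled it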