2 changes: 1 addition & 1 deletion src/main/presenter/configPresenter/modelDefaultSettings.ts
@@ -1358,7 +1358,7 @@ export const defaultModelsSettings: DefaultModelSetting[] = [
match: ['minimax-m1-80k'],
vision: false,
functionCall: true,
reasoning: false
reasoning: true
},
{
id: 'minimax-01',
@@ -477,7 +477,7 @@ export const providerModelSettings: Record<string, { models: ProviderModelSettin
match: ['minimaxai/minimax-m1-80k'],
vision: false,
functionCall: true,
reasoning: false
reasoning: true
},
{
id: 'deepseek/deepseek-r1-0528',
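Both settings files flip reasoning to true for the MiniMax M1 80k entries shown above. For orientation, a hypothetical sketch of how a match array could be resolved against a model ID; the project's real lookup lives in configPresenter and is not part of this diff, and substring matching is an assumption here:

// Hypothetical lookup, NOT the project's actual implementation.
interface SettingEntry {
  match: string[]
  vision: boolean
  functionCall: boolean
  reasoning: boolean
}

const entries: SettingEntry[] = [
  { match: ['minimax-m1-80k'], vision: false, functionCall: true, reasoning: true }
]

// Assumption: an entry applies when any of its match strings appears in the model ID.
function findSetting(modelId: string): SettingEntry | undefined {
  const id = modelId.toLowerCase()
  return entries.find((e) => e.match.some((m) => id.includes(m)))
}

console.log(findSetting('minimaxai/minimax-m1-80k')?.reasoning) // true
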
@@ -1,4 +1,4 @@
import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus } from '@shared/presenter'
import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus, MODEL_META } from '@shared/presenter'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { ConfigPresenter } from '../../configPresenter'

@@ -18,6 +18,38 @@ interface OpenRouterKeyResponse {
}
}

// Define interface for OpenRouter model response based on their API documentation
interface OpenRouterModelResponse {
id: string
name: string
description: string
created: number
context_length: number
architecture?: {
input_modalities: string[] // ["file", "image", "text"]
output_modalities: string[] // ["text"]
tokenizer: string
instruct_type: string | null
}
pricing: {
prompt: string
completion: string
request: string
image: string
web_search: string
internal_reasoning: string
input_cache_read: string
input_cache_write: string
}
top_provider?: {
context_length: number
max_completion_tokens: number
is_moderated: boolean
}
per_request_limits: any
supported_parameters?: string[]
}

export class OpenRouterProvider extends OpenAICompatibleProvider {
constructor(provider: LLM_PROVIDER, configPresenter: ConfigPresenter) {
super(provider, configPresenter)
@@ -147,4 +179,107 @@ export class OpenRouterProvider extends OpenAICompatibleProvider {
return { isOk: false, errorMsg: errorMessage }
}
}

/**
* Override fetchOpenAIModels to parse OpenRouter specific model data and update model configurations
* @param options - Request options
* @returns Promise<MODEL_META[]> - Array of model metadata
*/
protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> {
try {
const response = await this.openai.models.list(options)
// console.log('OpenRouter models response:', JSON.stringify(response, null, 2))

const models: MODEL_META[] = []

for (const model of response.data) {
// Type the model as OpenRouter specific response
const openRouterModel = model as unknown as OpenRouterModelResponse

// Extract model information
const modelId = openRouterModel.id
const supportedParameters = openRouterModel.supported_parameters || []
const inputModalities = openRouterModel.architecture?.input_modalities || []

// Check capabilities based on supported parameters and architecture
const hasFunctionCalling = supportedParameters.includes('tools')
const hasVision = inputModalities.includes('image')
const hasReasoning =
supportedParameters.includes('reasoning') ||
supportedParameters.includes('include_reasoning')

// Get existing model configuration first
const existingConfig =
this.configPresenter.getModelConfig(modelId, this.provider.id) ?? ({} as const)

// Extract configuration values with proper fallback priority: API -> existing config -> default
const contextLength =
openRouterModel.context_length ||
openRouterModel.top_provider?.context_length ||
existingConfig.contextLength ||
4096
const maxTokens =
openRouterModel.top_provider?.max_completion_tokens || existingConfig.maxTokens || 2048

// Build new configuration based on API response
const newConfig = {
contextLength: contextLength,
maxTokens: maxTokens,
functionCall: hasFunctionCalling,
vision: hasVision,
reasoning: hasReasoning || existingConfig.reasoning, // Use API info or keep existing
temperature: existingConfig.temperature, // Keep existing temperature
type: existingConfig.type // Keep existing type
}

// Check if configuration has changed
const configChanged =
existingConfig.contextLength !== newConfig.contextLength ||
existingConfig.maxTokens !== newConfig.maxTokens ||
existingConfig.functionCall !== newConfig.functionCall ||
existingConfig.vision !== newConfig.vision ||
existingConfig.reasoning !== newConfig.reasoning

// Update configuration if changed
if (configChanged) {
// console.log(`Updating OpenRouter configuration for model ${modelId}:`, {
// old: {
// contextLength: existingConfig.contextLength,
// maxTokens: existingConfig.maxTokens,
// functionCall: existingConfig.functionCall,
// vision: existingConfig.vision,
// reasoning: existingConfig.reasoning
// },
// new: newConfig
// })

this.configPresenter.setModelConfig(modelId, this.provider.id, newConfig)
}

// Create MODEL_META object
const modelMeta: MODEL_META = {
id: modelId,
name: openRouterModel.name || modelId,
group: 'default',
providerId: this.provider.id,
isCustom: false,
contextLength: contextLength,
maxTokens: maxTokens,
description: openRouterModel.description,
vision: hasVision,
functionCall: hasFunctionCalling,
reasoning: hasReasoning || existingConfig.reasoning || false
}

models.push(modelMeta)
}

console.log(`Processed ${models.length} OpenRouter models with dynamic configuration updates`)
return models
} catch (error) {
console.error('Error fetching OpenRouter models:', error)
// Fallback to parent implementation
return super.fetchOpenAIModels(options)
}
}
}
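
The OpenRouter override above derives each model's capabilities from the /models listing: 'tools' among supported_parameters enables function calling, 'image' among input_modalities enables vision, and 'reasoning' or 'include_reasoning' marks reasoning support, while context length and max tokens fall back from the API value to the stored config to a hard default. A minimal, self-contained sketch of that derivation follows; the sample entry mirrors the OpenRouterModelResponse shape but its values are illustrative, not real OpenRouter data.

// Hypothetical sample shaped like OpenRouterModelResponse; values are illustrative only.
interface SampleModel {
  id: string
  context_length: number
  architecture?: { input_modalities: string[] }
  top_provider?: { context_length: number; max_completion_tokens: number }
  supported_parameters?: string[]
}

const sample: SampleModel = {
  id: 'minimaxai/minimax-m1-80k',
  context_length: 80000,
  architecture: { input_modalities: ['text'] },
  top_provider: { context_length: 80000, max_completion_tokens: 40000 },
  supported_parameters: ['tools', 'include_reasoning', 'temperature']
}

// Capability flags, derived the same way as in fetchOpenAIModels above
const supported = sample.supported_parameters ?? []
const modalities = sample.architecture?.input_modalities ?? []
const functionCall = supported.includes('tools') // true
const vision = modalities.includes('image') // false
const reasoning =
  supported.includes('reasoning') || supported.includes('include_reasoning') // true

// Fallback priority for limits: API value -> existing config -> hard default
const existingConfig: { contextLength?: number; maxTokens?: number } = {}
const contextLength =
  sample.context_length || sample.top_provider?.context_length || existingConfig.contextLength || 4096
const maxTokens =
  sample.top_provider?.max_completion_tokens || existingConfig.maxTokens || 2048

console.log({ functionCall, vision, reasoning, contextLength, maxTokens })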
109 changes: 108 additions & 1 deletion src/main/presenter/llmProviderPresenter/providers/ppioProvider.ts
@@ -1,4 +1,4 @@
import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus } from '@shared/presenter'
import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus, MODEL_META } from '@shared/presenter'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { ConfigPresenter } from '../../configPresenter'

@@ -7,6 +7,21 @@ interface PPIOKeyResponse {
credit_balance: number
}

// Define interface for PPIO model response
interface PPIOModelResponse {
id: string
object: string
owned_by: string
created: number
display_name: string
description: string
context_size: number
max_output_tokens: number
features?: string[]
status: number
model_type: string
}

export class PPIOProvider extends OpenAICompatibleProvider {
constructor(provider: LLM_PROVIDER, configPresenter: ConfigPresenter) {
super(provider, configPresenter)
@@ -120,4 +135,96 @@
return { isOk: false, errorMsg: errorMessage }
}
}

/**
* Override fetchOpenAIModels to parse PPIO specific model data and update model configurations
* @param options - Request options
* @returns Promise<MODEL_META[]> - Array of model metadata
*/
protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> {
try {
const response = await this.openai.models.list(options)
// console.log('PPIO models response:', JSON.stringify(response, null, 2))

const models: MODEL_META[] = []

for (const model of response.data) {
// Type the model as PPIO specific response
const ppioModel = model as unknown as PPIOModelResponse

// Extract model information
const modelId = ppioModel.id
const features = ppioModel.features || []

// Check features for capabilities
const hasFunctionCalling = features.includes('function-calling')
const hasVision = features.includes('vision')
// const hasStructuredOutputs = features.includes('structured-outputs')

// Get existing model configuration first
const existingConfig = this.configPresenter.getModelConfig(modelId, this.provider.id)

// Extract configuration values with proper fallback priority: API -> existing config -> default
const contextLength = ppioModel.context_size || existingConfig.contextLength || 4096
const maxTokens = ppioModel.max_output_tokens || existingConfig.maxTokens || 2048

// Build new configuration based on API response
const newConfig = {
contextLength: contextLength,
maxTokens: maxTokens,
functionCall: hasFunctionCalling,
vision: hasVision,
reasoning: existingConfig.reasoning, // Keep existing reasoning setting
temperature: existingConfig.temperature, // Keep existing temperature
type: existingConfig.type // Keep existing type
}

// Check if configuration has changed
const configChanged =
existingConfig.contextLength !== newConfig.contextLength ||
existingConfig.maxTokens !== newConfig.maxTokens ||
existingConfig.functionCall !== newConfig.functionCall ||
existingConfig.vision !== newConfig.vision

// Update configuration if changed
if (configChanged) {
// console.log(`Updating configuration for model ${modelId}:`, {
// old: {
// contextLength: existingConfig.contextLength,
// maxTokens: existingConfig.maxTokens,
// functionCall: existingConfig.functionCall,
// vision: existingConfig.vision
// },
// new: newConfig
// })

this.configPresenter.setModelConfig(modelId, this.provider.id, newConfig)
}

// Create MODEL_META object
const modelMeta: MODEL_META = {
id: modelId,
name: ppioModel.display_name || modelId,
group: 'default',
providerId: this.provider.id,
isCustom: false,
contextLength: contextLength,
maxTokens: maxTokens,
description: ppioModel.description,
vision: hasVision,
functionCall: hasFunctionCalling,
reasoning: existingConfig.reasoning || false
}

models.push(modelMeta)
}

console.log(`Processed ${models.length} PPIO models with dynamic configuration updates`)
return models
} catch (error) {
console.error('Error fetching PPIO models:', error)
// Fallback to parent implementation
return super.fetchOpenAIModels(options)
}
}
}
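
The PPIO override applies the same pattern but reads capabilities from PPIO's feature strings ('function-calling', 'vision') and only persists a new configuration when one of the compared fields actually changed, so setModelConfig is not called on every refresh. A small sketch of that mapping and change check, using a hypothetical model entry rather than real PPIO data:

// Hypothetical sample shaped like PPIOModelResponse; values are illustrative only.
const ppioSample = {
  id: 'deepseek/deepseek-r1',
  display_name: 'DeepSeek R1',
  description: 'Sample entry',
  context_size: 64000,
  max_output_tokens: 8192,
  features: ['function-calling'] as string[]
}

// Capability flags from PPIO feature strings
const features = ppioSample.features ?? []
const functionCall = features.includes('function-calling') // true
const vision = features.includes('vision') // false

// Compare against the stored config and only flag a change when a field differs
const existing = { contextLength: 32000, maxTokens: 8192, functionCall: false, vision: false }
const next = {
  contextLength: ppioSample.context_size || existing.contextLength || 4096,
  maxTokens: ppioSample.max_output_tokens || existing.maxTokens || 2048,
  functionCall,
  vision
}
const configChanged =
  existing.contextLength !== next.contextLength ||
  existing.maxTokens !== next.maxTokens ||
  existing.functionCall !== next.functionCall ||
  existing.vision !== next.vision

console.log(configChanged) // true: context length grew and tool support was detected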