Merged
2 changes: 1 addition & 1 deletion src/main/presenter/llmProviderPresenter/baseProvider.ts
@@ -601,4 +601,4 @@ ${this.convertToolsToXml(tools)}
}
export const SUMMARY_TITLES_PROMPT = `
You need to summarize the user's conversation into a title of no more than 10 words, with the title language matching the user's primary language, without using punctuation or other special symbols,only output the title,here is the conversation:
`
`
136 changes: 134 additions & 2 deletions src/main/presenter/llmProviderPresenter/providers/_302AIProvider.ts
@@ -1,4 +1,4 @@
import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus } from '@shared/presenter'
import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus, MODEL_META } from '@shared/presenter'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { ConfigPresenter } from '../../configPresenter'

@@ -9,6 +9,28 @@ interface _302AIBalanceResponse {
}
}

// Define interface for 302AI model response based on actual API format
interface _302AIModelResponse {
id: string
object: string
category?: string
category_en?: string
content_length: number // This is the context length
created_on?: string
description?: string
description_en?: string
description_jp?: string
first_byte_req_time?: string
is_moderated: boolean
max_completion_tokens: number // This is the max output tokens
price?: {
input_token?: string
output_token?: string
per_request?: string
}
supported_tools: boolean // This indicates function calling support
}

export class _302AIProvider extends OpenAICompatibleProvider {
constructor(provider: LLM_PROVIDER, configPresenter: ConfigPresenter) {
super(provider, configPresenter)
@@ -83,7 +105,6 @@ export class _302AIProvider extends OpenAICompatibleProvider {
public async check(): Promise<{ isOk: boolean; errorMsg: string | null }> {
try {
const keyStatus = await this.getKeyStatus()

// Check if there's remaining quota
if (keyStatus.remainNum !== undefined && keyStatus.remainNum <= 0) {
return {
@@ -105,4 +126,115 @@ export class _302AIProvider extends OpenAICompatibleProvider {
return { isOk: false, errorMsg: errorMessage }
}
}

/**
* Override fetchOpenAIModels to parse 302AI specific model data and update model configurations
* @param options - Request options
* @returns Promise<MODEL_META[]> - Array of model metadata
*/
protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> {
try {
const response = await this.openai.models.list(options)
// console.log('302AI models response:', JSON.stringify(response, null, 2))

const models: MODEL_META[] = []

for (const model of response.data) {
// Type the model as 302AI specific response
const _302aiModel = model as unknown as _302AIModelResponse

// Extract model information
const modelId = _302aiModel.id

// Check for function calling support using supported_tools field
const hasFunctionCalling = _302aiModel.supported_tools === true

// Check for vision support based on model ID and description patterns
const hasVision =
modelId.includes('vision') ||
modelId.includes('gpt-4o') ||
(_302aiModel.description && _302aiModel.description.includes('vision')) ||
(_302aiModel.description_en &&
_302aiModel.description_en.toLowerCase().includes('vision')) ||
modelId.includes('claude') || // Some Claude models support vision
modelId.includes('gemini') || // Gemini models often support vision
(modelId.includes('qwen') && modelId.includes('vl')) // Qwen VL models

// Get existing model configuration first
const existingConfig = this.configPresenter.getModelConfig(modelId, this.provider.id)

// Extract configuration values with proper fallback priority: API -> existing config -> default
const contextLength = _302aiModel.content_length || existingConfig.contextLength || 4096

// Use max_completion_tokens if available, otherwise fall back to existing config or default
const maxTokens =
_302aiModel.max_completion_tokens > 0
? _302aiModel.max_completion_tokens
: existingConfig.maxTokens || 2048

// Build new configuration based on API response
const newConfig = {
contextLength: contextLength,
maxTokens: maxTokens,
functionCall: hasFunctionCalling,
vision: hasVision,
reasoning: existingConfig.reasoning || false, // Keep existing reasoning setting
temperature: existingConfig.temperature, // Keep existing temperature
type: existingConfig.type // Keep existing type
}

// Check if configuration has changed
const configChanged =
existingConfig.contextLength !== newConfig.contextLength ||
existingConfig.maxTokens !== newConfig.maxTokens ||
existingConfig.functionCall !== newConfig.functionCall ||
existingConfig.vision !== newConfig.vision

// Update configuration if changed
if (configChanged) {
console.log(`Updating configuration for 302AI model ${modelId}:`, {
old: {
contextLength: existingConfig.contextLength,
maxTokens: existingConfig.maxTokens,
functionCall: existingConfig.functionCall,
vision: existingConfig.vision
},
new: newConfig,
apiData: {
content_length: _302aiModel.content_length,
max_completion_tokens: _302aiModel.max_completion_tokens,
supported_tools: _302aiModel.supported_tools,
category: _302aiModel.category,
description: _302aiModel.description
}
})

this.configPresenter.setModelConfig(modelId, this.provider.id, newConfig)
}

// Create MODEL_META object
const modelMeta: MODEL_META = {
id: modelId,
name: modelId,
group: 'default',
providerId: this.provider.id,
isCustom: false,
contextLength: contextLength,
maxTokens: maxTokens,
vision: hasVision,
functionCall: hasFunctionCalling,
reasoning: existingConfig.reasoning || false
}

models.push(modelMeta)
}

console.log(`Processed ${models.length} 302AI models with dynamic configuration updates`)
return models
} catch (error) {
console.error('Error fetching 302AI models:', error)
// Fallback to parent implementation
return super.fetchOpenAIModels(options)
}
}
}
src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider.ts
@@ -1198,10 +1198,8 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
}
}
public async summaryTitles(messages: ChatMessage[], modelId: string): Promise<string> {
const summaryText= `${SUMMARY_TITLES_PROMPT}\n\n${messages.map((m) => `${m.role}: ${m.content}`).join('\n')}`
const fullMessage: ChatMessage[] = [
{ role: 'user', content: summaryText }
]
const summaryText = `${SUMMARY_TITLES_PROMPT}\n\n${messages.map((m) => `${m.role}: ${m.content}`).join('\n')}`

⚠️ Potential issue

Array content is coerced to [object Object], breaking the summary prompt

m.content can be ChatMessageContent[]; string-interpolation blindly coerces it to a string, producing [object Object] and polluting the prompt.
Extract the actual text parts instead.
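For illustration, a minimal sketch of how the coercion corrupts the prompt. The message below is hypothetical and the content-part shape ({ type: 'text', text: string }) is assumed to match ChatMessageContent; it is not taken from this diff.

// Hypothetical multimodal message; the content-part shape and URL are assumptions for illustration only.
const m = {
  role: 'user',
  content: [
    { type: 'text', text: 'What is in this image?' },
    { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }
  ]
}

// Template interpolation falls back to Array.prototype.toString(), which stringifies each element:
console.log(`${m.role}: ${m.content}`)
// -> "user: [object Object],[object Object]" (the actual text is lost)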

-const summaryText = `${SUMMARY_TITLES_PROMPT}\n\n${messages.map((m) => `${m.role}: ${m.content}`).join('\n')}`
+const stringifyMessage = (m: ChatMessage) => {
+  if (typeof m.content === 'string') return `${m.role}: ${m.content}`
+  if (Array.isArray(m.content)) {
+    const text = m.content
+      .filter((p) => p.type === 'text' && 'text' in p && p.text)
+      .map((p) => (p as any).text)
+      .join(' ')
+    return `${m.role}: ${text}`
+  }
+  return `${m.role}:`
+}
+const summaryText = `${SUMMARY_TITLES_PROMPT}\n\n${messages.map(stringifyMessage).join('\n')}`
🤖 Prompt for AI Agents
In src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider.ts
at line 1201, the code uses string interpolation on m.content, which can be an
array of ChatMessageContent objects, causing it to convert to "[object Object]"
and corrupt the summary prompt. To fix this, check if m.content is an array and
extract the text parts from each ChatMessageContent object, concatenating them
into a single string before interpolation. This ensures the summary prompt
contains the actual message text instead of object representations.

const fullMessage: ChatMessage[] = [{ role: 'user', content: summaryText }]
const response = await this.openAICompletion(fullMessage, modelId, 0.5)
return response.content.replace(/["']/g, '').trim()
}
@@ -1053,9 +1053,7 @@ export class OpenAIResponsesProvider extends BaseLLMProvider {

public async summaryTitles(messages: ChatMessage[], modelId: string): Promise<string> {
const summaryText = `${SUMMARY_TITLES_PROMPT}\n\n${messages.map((m) => `${m.role}: ${m.content}`).join('\n')}`
const fullMessage: ChatMessage[] = [
{ role: 'user', content: summaryText }
]
const fullMessage: ChatMessage[] = [{ role: 'user', content: summaryText }]
const response = await this.openAICompletion(fullMessage, modelId, 0.5)
return response.content.replace(/["']/g, '').trim()
}