src/main/presenter/llmProviderPresenter/index.ts (4 additions, 0 deletions)
@@ -35,6 +35,7 @@ import { OpenAIResponsesProvider } from './providers/openAIResponsesProvider'
 import { OpenRouterProvider } from './providers/openRouterProvider'
 import { MinimaxProvider } from './providers/minimaxProvider'
 import { AihubmixProvider } from './providers/aihubmixProvider'
+import { _302AIProvider } from './providers/_302AIProvider'
 // Stream state
 interface StreamState {
   isGenerating: boolean
@@ -93,6 +94,9 @@ export class LLMProviderPresenter implements ILlmProviderPresenter {

   private createProviderInstance(provider: LLM_PROVIDER): BaseLLMProvider | undefined {
     try {
+      if (provider.id === '302ai') {
+        return new _302AIProvider(provider, this.configPresenter)
+      }
       if (provider.id === 'minimax') {
         return new MinimaxProvider(provider, this.configPresenter)
       }
src/main/presenter/llmProviderPresenter/providers/_302AIProvider.ts (108 additions, 0 deletions)
@@ -0,0 +1,108 @@
import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus } from '@shared/presenter'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { ConfigPresenter } from '../../configPresenter'

// Define interface for 302AI API balance response
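// Illustrative payload (assumed, not captured from the live endpoint):
//   { "data": { "balance": "12.34" } }; note that the balance arrives as a string.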
interface _302AIBalanceResponse {
  data: {
    balance: string
  }
}

export class _302AIProvider extends OpenAICompatibleProvider {
  constructor(provider: LLM_PROVIDER, configPresenter: ConfigPresenter) {
    super(provider, configPresenter)
  }

  async completions(
    messages: ChatMessage[],
    modelId: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return this.openAICompletion(messages, modelId, temperature, maxTokens)
  }

  async generateText(
    prompt: string,
    modelId: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return this.openAICompletion(
      [
        {
          role: 'user',
          content: prompt
        }
      ],
      modelId,
      temperature,
      maxTokens
    )
  }

  /**
   * Get current API key status from 302AI
   * @returns Promise<KeyStatus> API key status information
   */
  public async getKeyStatus(): Promise<KeyStatus> {
    if (!this.provider.apiKey) {
      throw new Error('API key is required')
    }

    const response = await fetch('https://api.302.ai/dashboard/balance', {
      method: 'GET',
      headers: {
        Authorization: `Bearer ${this.provider.apiKey}`,
        'Content-Type': 'application/json'
      }
    })

    if (!response.ok) {
      const errorText = await response.text()
      throw new Error(
        `302AI API key check failed: ${response.status} ${response.statusText} - ${errorText}`
      )
    }

    const balanceResponse: _302AIBalanceResponse = await response.json()
    const balance = parseFloat(balanceResponse.data.balance)
    const remaining = '$' + balanceResponse.data.balance

    return {
      limit_remaining: remaining,
      remainNum: balance
    }
  }

  /**
   * Override check method to use 302AI's API key status endpoint
   * @returns Promise<{ isOk: boolean; errorMsg: string | null }>
   */
  public async check(): Promise<{ isOk: boolean; errorMsg: string | null }> {
    try {
      const keyStatus = await this.getKeyStatus()

      // Check if there's remaining quota
      if (keyStatus.remainNum !== undefined && keyStatus.remainNum <= 0) {
        return {
          isOk: false,
          errorMsg: `API key quota exhausted. Remaining: ${keyStatus.limit_remaining}`
        }
      }

      return { isOk: true, errorMsg: null }
    } catch (error: unknown) {
      let errorMessage = 'An unknown error occurred during 302AI API key check.'
      if (error instanceof Error) {
        errorMessage = error.message
      } else if (typeof error === 'string') {
        errorMessage = error
      }

      console.error('302AI API key check failed:', error)
      return { isOk: false, errorMsg: errorMessage }
    }
  }
}
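For reference, a minimal sketch of exercising the new provider on its own; this is not part of the PR. Only the `_302AIProvider` API above is taken from the diff: the env var name, the provider entry's extra fields, and the ambient `configPresenter` are assumptions.

```ts
import { _302AIProvider } from './_302AIProvider'
import { ConfigPresenter } from '../../configPresenter'
import type { LLM_PROVIDER } from '@shared/presenter'

// Assumed to be supplied by the host app; only its type matters here.
declare const configPresenter: ConfigPresenter

async function verify302aiKey(): Promise<void> {
  // Hypothetical provider entry: the diff only reads `id` and `apiKey`,
  // so the remaining LLM_PROVIDER fields are left to the cast.
  const provider = {
    id: '302ai',
    apiKey: process.env.AI302_API_KEY ?? ''
  } as LLM_PROVIDER

  const ai302 = new _302AIProvider(provider, configPresenter)

  // check() wraps getKeyStatus(): it fails when the key is missing or rejected,
  // or when the reported balance (remainNum) is <= 0.
  const { isOk, errorMsg } = await ai302.check()
  if (!isOk) {
    console.error('302AI key check failed:', errorMsg)
    return
  }

  const status = await ai302.getKeyStatus()
  console.log('302AI remaining balance:', status.limit_remaining) // e.g. "$12.34"
}
```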
src/main/presenter/llmProviderPresenter/providers/aihubmixProvider.ts
@@ -41,25 +41,6 @@ export class AihubmixProvider extends OpenAICompatibleProvider {
     return this.openAICompletion(messages, modelId, temperature, maxTokens)
   }
 
-  async summaries(
-    text: string,
-    modelId: string,
-    temperature?: number,
-    maxTokens?: number
-  ): Promise<LLMResponse> {
-    return this.openAICompletion(
-      [
-        {
-          role: 'user',
-          content: `You need to summarize the user's conversation into a title of no more than 10 words, with the title language matching the user's primary language, without using punctuation or other special symbols:\n${text}`
-        }
-      ],
-      modelId,
-      temperature,
-      maxTokens
-    )
-  }
 
   async generateText(
     prompt: string,
     modelId: string,
src/main/presenter/llmProviderPresenter/providers/anthropicProvider.ts
@@ -7,7 +7,7 @@ import {
   MCPToolDefinition,
   ChatMessage
 } from '@shared/presenter'
-import { BaseLLMProvider } from '../baseProvider'
+import { BaseLLMProvider, SUMMARY_TITLES_PROMPT } from '../baseProvider'
 import { ConfigPresenter } from '../../configPresenter'
 import Anthropic from '@anthropic-ai/sdk'
 import { presenter } from '@/presenter'
@@ -497,13 +497,7 @@ export class AnthropicProvider extends BaseLLMProvider {
   }
 
   public async summaryTitles(messages: ChatMessage[], modelId: string): Promise<string> {
-    const prompt = `
-Please generate a short title for the following conversation, no more than six characters:
-
-${messages.map((m) => `${m.role}: ${m.content}`).join('\n')}
-
-Output only the title, without any extra text.
-`
+    const prompt = `${SUMMARY_TITLES_PROMPT}\n\n${messages.map((m) => `${m.role}: ${m.content}`).join('\n')}`
     const response = await this.generateText(prompt, modelId, 0.3, 50)
 
     return response.content.trim()
src/main/presenter/llmProviderPresenter/providers/deepseekProvider.ts
@@ -1,6 +1,7 @@
 import { LLM_PROVIDER, LLMResponse, ChatMessage, KeyStatus } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { ConfigPresenter } from '../../configPresenter'
+import { SUMMARY_TITLES_PROMPT } from '../baseProvider'
 
 // Define interface for DeepSeek API key response
 interface DeepSeekBalanceResponse {
@@ -37,7 +38,7 @@ export class DeepseekProvider extends OpenAICompatibleProvider {
       [
         {
           role: 'user',
-          content: `You need to summarize the user's conversation into a title of no more than 10 words, with the title language matching the user's primary language, without using punctuation or other special symbols:\n${text}`
+          content: `${SUMMARY_TITLES_PROMPT}\n\n${text}`
         }
       ],
       modelId,
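The shared constant itself is not shown in this PR. Judging from the inline string it replaces just above, `SUMMARY_TITLES_PROMPT` is presumably exported from `baseProvider.ts` roughly as follows:

```ts
// Presumed definition; reconstructed from the deleted inline prompt,
// not from the actual baseProvider.ts.
export const SUMMARY_TITLES_PROMPT =
  "You need to summarize the user's conversation into a title of no more than 10 words, with the title language matching the user's primary language, without using punctuation or other special symbols"
```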
src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts
@@ -67,25 +67,6 @@ export class DoubaoProvider extends OpenAICompatibleProvider {
     return this.openAICompletion(messages, modelId, temperature, maxTokens)
   }
 
-  async summaries(
-    text: string,
-    modelId: string,
-    temperature?: number,
-    maxTokens?: number
-  ): Promise<LLMResponse> {
-    return this.openAICompletion(
-      [
-        {
-          role: 'user',
-          content: `Summarize the following content in concise language, highlighting the key points:\n${text}`
-        }
-      ],
-      modelId,
-      temperature,
-      maxTokens
-    )
-  }
 
   async generateText(
     prompt: string,
     modelId: string,
src/main/presenter/llmProviderPresenter/providers/githubProvider.ts
@@ -35,25 +35,6 @@ export class GithubProvider extends OpenAICompatibleProvider {
     return this.openAICompletion(messages, modelId, temperature, maxTokens)
   }
 
-  async summaries(
-    text: string,
-    modelId: string,
-    temperature?: number,
-    maxTokens?: number
-  ): Promise<LLMResponse> {
-    return this.openAICompletion(
-      [
-        {
-          role: 'user',
-          content: `Summarize the following content in concise language, highlighting the key points:\n${text}`
-        }
-      ],
-      modelId,
-      temperature,
-      maxTokens
-    )
-  }
 
   async generateText(
     prompt: string,
     modelId: string,
src/renderer/src/components/settings/ProviderApiConfig.vue (3 additions, 1 deletion)
@@ -178,7 +178,9 @@ const handleOAuthError = (error: string) => {

 const getKeyStatus = async () => {
   if (
-    ['ppio', 'openrouter', 'siliconcloud', 'silicon', 'deepseek'].includes(props.provider.id) &&
+    ['ppio', 'openrouter', 'siliconcloud', 'silicon', 'deepseek', '302ai'].includes(
+      props.provider.id
+    ) &&
     props.provider.apiKey
   ) {
     try {
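For context, a rough sketch of what this guard ultimately surfaces for a `302ai` provider. The `KeyStatus` fields come from `_302AIProvider.getKeyStatus()` earlier in this diff; the presenter handle and call signature are assumptions, since the body of the component's `getKeyStatus` is not shown:

```ts
import type { KeyStatus } from '@shared/presenter'

// Hypothetical handle; the component's actual call path is outside this diff.
declare const llmProviderPresenter: {
  getKeyStatus(providerId: string): Promise<KeyStatus>
}

async function show302aiStatus(providerId: string): Promise<void> {
  const status = await llmProviderPresenter.getKeyStatus(providerId)
  // For 302AI this resolves to something like:
  //   { limit_remaining: '$12.34', remainNum: 12.34 }
  console.log(status.limit_remaining, status.remainNum)
}
```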