feat: add web search support with configurable options for dashscope #851
```diff
@@ -13,6 +13,7 @@ import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 export class DashscopeProvider extends OpenAICompatibleProvider {
   // List of models that support the enable_thinking parameter (dual-mode models)
   private static readonly ENABLE_THINKING_MODELS: string[] = [
+    // Open-source releases
     'qwen3-235b-a22b',
     'qwen3-32b',
     'qwen3-30b-a3b',
```
```diff
@@ -23,6 +24,20 @@ export class DashscopeProvider extends OpenAICompatibleProvider {
     'qwen3-0.6b'
   ]

+  // List of models that support the enable_search parameter (web search)
+  private static readonly ENABLE_SEARCH_MODELS: string[] = [
+    'qwen-max',
+    'qwen-plus',
+    'qwen-plus-latest',
+    'qwen-plus-2025-07-14',
+    'qwen-flash',
+    'qwen-flash-2025-07-28',
+    'qwen-turbo',
+    'qwen-turbo-latest',
+    'qwen-turbo-2025-07-15',
+    'qwq-plus'
+  ]
+
   constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
     super(provider, configPresenter)
   }
```
```diff
@@ -40,7 +55,19 @@ export class DashscopeProvider extends OpenAICompatibleProvider {
   }

   /**
-   * Override coreStream to support DashScope's enable_thinking parameter
+   * Check whether a model supports the enable_search parameter
+   * @param modelId model ID
+   * @returns boolean whether enable_search is supported
+   */
+  private supportsEnableSearch(modelId: string): boolean {
+    const normalizedModelId = modelId.toLowerCase()
+    return DashscopeProvider.ENABLE_SEARCH_MODELS.some((supportedModel) =>
+      normalizedModelId.includes(supportedModel)
+    )
+  }
+
+  /**
+   * Override coreStream to support DashScope's enable_thinking and enable_search parameters
    */
   async *coreStream(
     messages: ChatMessage[],
```
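Worth noting: the check matches by substring on the lowercased ID rather than by exact name, so dated or suffixed variants of a listed model also pass. A standalone sketch of the same logic (the model IDs tested below are illustrative):

```ts
// Standalone sketch of the allow-list check above (case-insensitive substring match)
const ENABLE_SEARCH_MODELS = ['qwen-max', 'qwen-plus', 'qwen-turbo', 'qwq-plus']

function supportsEnableSearch(modelId: string): boolean {
  const normalized = modelId.toLowerCase()
  return ENABLE_SEARCH_MODELS.some((m) => normalized.includes(m))
}

console.log(supportsEnableSearch('Qwen-Plus-Latest'))     // true: contains 'qwen-plus'
console.log(supportsEnableSearch('qwen-plus-2025-12-01')) // true: any dated variant matches
console.log(supportsEnableSearch('qwen3-32b'))            // false: not in the search list
```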
```diff
@@ -54,24 +81,41 @@ export class DashscopeProvider extends OpenAICompatibleProvider {
     if (!modelId) throw new Error('Model ID is required')

     const shouldAddEnableThinking = this.supportsEnableThinking(modelId) && modelConfig?.reasoning
+    const shouldAddEnableSearch = this.supportsEnableSearch(modelId) && modelConfig?.enableSearch

-    if (shouldAddEnableThinking) {
+    if (shouldAddEnableThinking || shouldAddEnableSearch) {
       // Original create method
       const originalCreate = this.openai.chat.completions.create.bind(this.openai.chat.completions)
-      // Replace the create method to add the enable_thinking parameter
+      // Replace the create method to add the enable_thinking and enable_search parameters
       this.openai.chat.completions.create = ((params: any, options?: any) => {
-        const modifiedParams = {
-          ...params,
-          enable_thinking: true
-        }
-        if (modelConfig?.thinkingBudget) {
-          modifiedParams.thinking_budget = modelConfig.thinkingBudget
-        }
+        const modifiedParams = { ...params }
+
+        if (shouldAddEnableThinking) {
+          modifiedParams.enable_thinking = true
+          if (modelConfig?.thinkingBudget) {
+            modifiedParams.thinking_budget = modelConfig.thinkingBudget
+          }
+        }
+
+        if (shouldAddEnableSearch) {
+          modifiedParams.enable_search = true
+          if (modelConfig?.forcedSearch) {
+            modifiedParams.forced_search = true
+          }
+          if (modelConfig?.searchStrategy) {
+            modifiedParams.search_strategy = modelConfig.searchStrategy
+          }
+        }
+
         return originalCreate(modifiedParams, options)
       }) as any
```
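The net effect of the wrapper is on the request body alone. A sketch of the transformation in isolation (the config values are assumed for illustration; the PR passes through whatever `modelConfig` holds):

```ts
// Sketch: what the patched create() adds for a search-enabled call
const params = { model: 'qwen-plus-latest', messages: [{ role: 'user', content: 'hi' }], stream: true }
const modelConfig = { enableSearch: true, forcedSearch: true, searchStrategy: 'standard' } // assumed values

const modifiedParams: Record<string, unknown> = { ...params }
if (modelConfig.enableSearch) {
  modifiedParams.enable_search = true
  if (modelConfig.forcedSearch) modifiedParams.forced_search = true
  if (modelConfig.searchStrategy) modifiedParams.search_strategy = modelConfig.searchStrategy
}
// modifiedParams now carries enable_search / forced_search / search_strategy
// alongside the untouched OpenAI-compatible fields.
```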
Comment on lines +86 to 112 (Contributor):

🛠️ **Refactor suggestion**

**Avoid monkey-patching the SDK method — race condition and cross-request leakage.**

Overriding `this.openai.chat.completions.create` mutates state shared by every request on this provider instance: while one `coreStream` call has the method patched, a concurrent call can install its own wrapper on top and later restore the wrong one, leaking `enable_thinking` / `enable_search` into unrelated requests.
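To make the hazard concrete, here is a runnable miniature of the same patch-and-restore pattern on a shared client object (shapes and names are illustrative, not from the PR):

```ts
// Miniature of the hazard: two overlapping "requests" patch one shared client.
type Create = (params: Record<string, unknown>) => Record<string, unknown>
const client = { create: ((p) => ({ ...p, sentBy: 'sdk' })) as Create }

function patch(extra: Record<string, unknown>): () => void {
  const original = client.create // may already be another request's wrapper!
  client.create = (p) => original({ ...p, ...extra })
  return () => { client.create = original } // the "finally" restore
}

const restoreA = patch({ enable_thinking: true }) // request A patches
const restoreB = patch({ enable_search: true })   // request B patches on top of A's wrapper

console.log(client.create({ model: 'x' }))
// { model: 'x', enable_search: true, enable_thinking: true, sentBy: 'sdk' }
// -> request B leaked request A's flag

restoreA() // A restores the real create...
restoreB() // ...then B "restores" A's wrapper, leaving the client permanently patched
console.log(client.create({ model: 'y' }))
// { model: 'y', enable_thinking: true, sentBy: 'sdk' } -> stale wrapper persists
```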
Proposed safer approach hierarchy (prefer top-most feasible). Diff within this block:

```diff
-    if (shouldAddEnableThinking || shouldAddEnableSearch) {
+    if (shouldAddEnableThinking || shouldAddEnableSearch) {
+      // Prevent concurrent patching on the shared client
+      if ((this as any)._createPatchInProgress) {
+        // Fallback: skip overrides rather than risking cross-request leakage
+        yield* super.coreStream(messages, modelId, modelConfig, temperature, maxTokens, mcpTools)
+        return
+      }
+      ;(this as any)._createPatchInProgress = true
       // Original create method
       const originalCreate = this.openai.chat.completions.create.bind(this.openai.chat.completions)
       // Replace the create method to add the enable_thinking and enable_search parameters
       this.openai.chat.completions.create = ((params: any, options?: any) => {
         const modifiedParams = { ...params }
         if (shouldAddEnableThinking) {
           modifiedParams.enable_thinking = true
           if (modelConfig?.thinkingBudget) {
             modifiedParams.thinking_budget = modelConfig.thinkingBudget
           }
         }
         if (shouldAddEnableSearch) {
           modifiedParams.enable_search = true
           if (modelConfig?.forcedSearch) {
             modifiedParams.forced_search = true
           }
           if (modelConfig?.searchStrategy) {
             modifiedParams.search_strategy = modelConfig.searchStrategy
           }
         }
         return originalCreate(modifiedParams, options)
       }) as any
       try {
         const effectiveModelConfig = {
           ...modelConfig,
           reasoning: false,
           enableSearch: false
         }
-        yield* super.coreStream(
+        try {
+          yield* super.coreStream(
             messages,
             modelId,
             effectiveModelConfig,
             temperature,
             maxTokens,
             mcpTools
-        )
+          )
+        } catch (err: any) {
+          // Structured logging; avoid sensitive values
+          console.error(JSON.stringify({
+            level: 'ERROR',
+            ts: new Date().toISOString(),
+            where: 'DashscopeProvider.coreStream',
+            code: 'DASHSCOPE_SEARCH_PATCH_ERROR',
+            modelId,
+            enableThinking: !!shouldAddEnableThinking,
+            enableSearch: !!shouldAddEnableSearch,
+            message: err?.message || 'unknown error',
+            stack: err?.stack
+          }))
+          throw err
+        }
       } finally {
         this.openai.chat.completions.create = originalCreate
+        ;(this as any)._createPatchInProgress = false
       }
```
And add this class field (outside the patch block, near the other fields) to make the intent explicit:

```ts
// NOTE: serialized to avoid concurrent monkey-patches on shared client
private _createPatchInProgress?: boolean
```

Long-term, please replace the monkey-patch with an overridable param-construction hook in the base provider. I can sketch that change if helpful.

Also applies to: 114-119
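For reference, a minimal sketch of the kind of hook the reviewer suggests, assuming the base provider funnels request construction through one overridable method (the class shapes and the name `buildCompletionParams` are hypothetical):

```ts
// Hypothetical base-provider hook: subclasses shape params per request,
// with no mutation of the shared SDK client.
class OpenAICompatibleProviderSketch {
  /** Called once per request, right before chat.completions.create. */
  protected buildCompletionParams(params: Record<string, unknown>, modelConfig?: any): Record<string, unknown> {
    return params // base behavior: pass through unchanged
  }
}

class DashscopeProviderSketch extends OpenAICompatibleProviderSketch {
  protected override buildCompletionParams(params: Record<string, unknown>, modelConfig?: any) {
    const p: Record<string, unknown> = { ...params }
    if (modelConfig?.reasoning) {
      p.enable_thinking = true
      if (modelConfig.thinkingBudget) p.thinking_budget = modelConfig.thinkingBudget
    }
    if (modelConfig?.enableSearch) {
      p.enable_search = true
      if (modelConfig.forcedSearch) p.forced_search = true
      if (modelConfig.searchStrategy) p.search_strategy = modelConfig.searchStrategy
    }
    return p
  }
}
```

Because the hook receives everything it needs as arguments, there is no shared mutable state left to guard.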
Continuation of the same hunk:

```diff
       try {
-        const effectiveModelConfig = { ...modelConfig, reasoning: false }
+        const effectiveModelConfig = {
+          ...modelConfig,
+          reasoning: false,
+          enableSearch: false
+        }
         yield* super.coreStream(
           messages,
           modelId,
```
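One detail worth spelling out: `effectiveModelConfig` masks `reasoning` and `enableSearch` before delegating to `super.coreStream`, presumably because the patched `create` already injects the raw DashScope parameters, so leaving the flags on could let the base implementation handle them a second time. In isolation (assumed config shape):

```ts
// Sketch: flags already realized as raw params are masked before delegating upward
const modelConfig = { reasoning: true, enableSearch: true, temperature: 0.7 } // assumed shape
const effectiveModelConfig = { ...modelConfig, reasoning: false, enableSearch: false }
// -> { reasoning: false, enableSearch: false, temperature: 0.7 }
```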