
Commit b976464

stronger typing

1 parent 60a9e70 commit b976464

File tree

7 files changed: +270 -195 lines changed

apps/sim/providers/anthropic/core.ts

Lines changed: 129 additions & 112 deletions
Large diffs are not rendered by default.

apps/sim/providers/azure-openai/index.ts

Lines changed: 27 additions & 18 deletions
@@ -1,6 +1,14 @@
 import { createLogger } from '@sim/logger'
 import { AzureOpenAI } from 'openai'
-import type { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions'
+import type {
+  ChatCompletion,
+  ChatCompletionCreateParamsBase,
+  ChatCompletionCreateParamsStreaming,
+  ChatCompletionMessageParam,
+  ChatCompletionTool,
+  ChatCompletionToolChoiceOption,
+} from 'openai/resources/chat/completions'
+import type { ReasoningEffort } from 'openai/resources/shared'
 import { env } from '@/lib/core/config/env'
 import type { StreamingExecution } from '@/executor/types'
 import { MAX_TOOL_ITERATIONS } from '@/providers'
@@ -16,6 +24,7 @@ import {
 import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
 import { executeResponsesProviderRequest } from '@/providers/openai/core'
 import type {
+  FunctionCallResponse,
   ProviderConfig,
   ProviderRequest,
   ProviderResponse,
@@ -59,7 +68,7 @@ async function executeChatCompletionsRequest(
     endpoint: azureEndpoint,
   })

-  const allMessages: any[] = []
+  const allMessages: ChatCompletionMessageParam[] = []

   if (request.systemPrompt) {
     allMessages.push({
@@ -76,12 +85,12 @@
   }

   if (request.messages) {
-    allMessages.push(...request.messages)
+    allMessages.push(...(request.messages as ChatCompletionMessageParam[]))
   }

-  const tools = request.tools?.length
+  const tools: ChatCompletionTool[] | undefined = request.tools?.length
     ? request.tools.map((tool) => ({
-        type: 'function',
+        type: 'function' as const,
         function: {
           name: tool.id,
           description: tool.description,
@@ -90,7 +99,7 @@
       }))
     : undefined

-  const payload: any = {
+  const payload: ChatCompletionCreateParamsBase & { verbosity?: string } = {
     model: deploymentName,
     messages: allMessages,
   }
@@ -99,7 +108,7 @@
   if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

   if (request.reasoningEffort !== undefined && request.reasoningEffort !== 'auto')
-    payload.reasoning_effort = request.reasoningEffort
+    payload.reasoning_effort = request.reasoningEffort as ReasoningEffort
   if (request.verbosity !== undefined && request.verbosity !== 'auto')
     payload.verbosity = request.verbosity

@@ -123,8 +132,8 @@
   const { tools: filteredTools, toolChoice } = preparedTools

   if (filteredTools?.length && toolChoice) {
-    payload.tools = filteredTools
-    payload.tool_choice = toolChoice
+    payload.tools = filteredTools as ChatCompletionTool[]
+    payload.tool_choice = toolChoice as ChatCompletionToolChoiceOption

     logger.info('Azure OpenAI request configuration:', {
       toolCount: filteredTools.length,
@@ -233,7 +242,7 @@
   const forcedTools = preparedTools?.forcedTools || []
   let usedForcedTools: string[] = []

-  let currentResponse = await azureOpenAI.chat.completions.create(payload)
+  let currentResponse = (await azureOpenAI.chat.completions.create(payload)) as ChatCompletion
   const firstResponseTime = Date.now() - initialCallTime

   let content = currentResponse.choices[0]?.message?.content || ''
@@ -242,8 +251,8 @@
     output: currentResponse.usage?.completion_tokens || 0,
     total: currentResponse.usage?.total_tokens || 0,
   }
-  const toolCalls = []
-  const toolResults = []
+  const toolCalls: (FunctionCallResponse & { success: boolean })[] = []
+  const toolResults: Record<string, unknown>[] = []
   const currentMessages = [...allMessages]
   let iterationCount = 0
   let modelTime = firstResponseTime
@@ -262,7 +271,7 @@

   const firstCheckResult = checkForForcedToolUsage(
     currentResponse,
-    originalToolChoice,
+    originalToolChoice ?? 'auto',
     logger,
     forcedTools,
     usedForcedTools
@@ -358,10 +367,10 @@
         duration: duration,
       })

-      let resultContent: any
+      let resultContent: Record<string, unknown>
       if (result.success) {
-        toolResults.push(result.output)
-        resultContent = result.output
+        toolResults.push(result.output as Record<string, unknown>)
+        resultContent = result.output as Record<string, unknown>
       } else {
         resultContent = {
           error: true,
@@ -411,11 +420,11 @@
      }

      const nextModelStartTime = Date.now()
-     currentResponse = await azureOpenAI.chat.completions.create(nextPayload)
+     currentResponse = (await azureOpenAI.chat.completions.create(nextPayload)) as ChatCompletion

      const nextCheckResult = checkForForcedToolUsage(
        currentResponse,
-       nextPayload.tool_choice,
+       nextPayload.tool_choice ?? 'auto',
        logger,
        forcedTools,
        usedForcedTools
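
A note on the type: 'function' as const change above: TypeScript normally widens string properties of object literals to string, so a bare { type: 'function' } is inferred as { type: string } unless a contextual type intervenes. The as const assertion pins the literal type regardless, which is what the function variant of ChatCompletionTool declares. A minimal standalone sketch of the effect (not the repo's code):

  const widened = { type: 'function' }            // inferred as { type: string }
  const narrowed = { type: 'function' as const }  // inferred as { type: 'function' }

  // Only the narrowed form is assignable wherever the literal type
  // 'function' is required, e.g. a ChatCompletionTool-like field.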

apps/sim/providers/azure-openai/utils.ts

Lines changed: 3 additions & 2 deletions
@@ -1,4 +1,5 @@
 import type { Logger } from '@sim/logger'
+import type OpenAI from 'openai'
 import type { ChatCompletionChunk } from 'openai/resources/chat/completions'
 import type { CompletionUsage } from 'openai/resources/completions'
 import type { Stream } from 'openai/streaming'
@@ -20,8 +21,8 @@ export function createReadableStreamFromAzureOpenAIStream(
  * Uses the shared OpenAI-compatible forced tool usage helper.
  */
 export function checkForForcedToolUsage(
-  response: any,
-  toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any },
+  response: OpenAI.Chat.Completions.ChatCompletion,
+  toolChoice: string | { type: string; function?: { name: string }; name?: string },
   _logger: Logger,
   forcedTools: string[],
   usedForcedTools: string[]
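
The dropped any?: any member was dead weight: the remaining union already covers the shapes a chat-completions tool_choice takes. Illustrative values (following the OpenAI chat-completions convention; get_weather is a hypothetical tool name):

  const modelDecides = 'auto'
  const forced = { type: 'function', function: { name: 'get_weather' } }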

apps/sim/providers/bedrock/index.ts

Lines changed: 2 additions & 0 deletions
@@ -416,6 +416,7 @@ export const bedrockProvider: ProviderConfig = {
       input: initialCost.input,
       output: initialCost.output,
       total: initialCost.total,
+      pricing: initialCost.pricing,
     }

     const toolCalls: any[] = []
@@ -867,6 +868,7 @@ export const bedrockProvider: ProviderConfig = {
         input: cost.input,
         output: cost.output,
         total: cost.total,
+        pricing: cost.pricing,
       },
       toolCalls:
         toolCalls.length > 0

apps/sim/providers/openai/core.ts

Lines changed: 31 additions & 19 deletions
@@ -1,4 +1,5 @@
 import type { Logger } from '@sim/logger'
+import type OpenAI from 'openai'
 import type { StreamingExecution } from '@/executor/types'
 import { MAX_TOOL_ITERATIONS } from '@/providers'
 import type { Message, ProviderRequest, ProviderResponse, TimeSegment } from '@/providers/types'
@@ -30,7 +31,7 @@ type ToolChoice = PreparedTools['toolChoice']
  * - Sets additionalProperties: false on all object types.
  * - Ensures required includes ALL property keys.
  */
-function enforceStrictSchema(schema: any): any {
+function enforceStrictSchema(schema: Record<string, unknown>): Record<string, unknown> {
   if (!schema || typeof schema !== 'object') return schema

   const result = { ...schema }
@@ -41,31 +42,37 @@ function enforceStrictSchema(schema: any): any {

     // Recursively process properties and ensure required includes all keys
     if (result.properties && typeof result.properties === 'object') {
-      const propKeys = Object.keys(result.properties)
+      const propKeys = Object.keys(result.properties as Record<string, unknown>)
       result.required = propKeys // Strict mode requires ALL properties
       result.properties = Object.fromEntries(
-        Object.entries(result.properties).map(([key, value]) => [key, enforceStrictSchema(value)])
+        Object.entries(result.properties as Record<string, unknown>).map(([key, value]) => [
+          key,
+          enforceStrictSchema(value as Record<string, unknown>),
+        ])
       )
     }
   }

   // Handle array items
   if (result.type === 'array' && result.items) {
-    result.items = enforceStrictSchema(result.items)
+    result.items = enforceStrictSchema(result.items as Record<string, unknown>)
   }

   // Handle anyOf, oneOf, allOf
   for (const keyword of ['anyOf', 'oneOf', 'allOf']) {
     if (Array.isArray(result[keyword])) {
-      result[keyword] = result[keyword].map(enforceStrictSchema)
+      result[keyword] = (result[keyword] as Record<string, unknown>[]).map(enforceStrictSchema)
     }
   }

   // Handle $defs / definitions
   for (const defKey of ['$defs', 'definitions']) {
     if (result[defKey] && typeof result[defKey] === 'object') {
       result[defKey] = Object.fromEntries(
-        Object.entries(result[defKey]).map(([key, value]) => [key, enforceStrictSchema(value)])
+        Object.entries(result[defKey] as Record<string, unknown>).map(([key, value]) => [
+          key,
+          enforceStrictSchema(value as Record<string, unknown>),
+        ])
       )
     }
   }
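
To make the strict-mode rules concrete: per the function's own doc comment, every object schema gains additionalProperties: false and its required list is overwritten with all property keys. A hand-worked example of the intended transformation (illustrative schema, not from the repo):

  // Before
  const loose = {
    type: 'object',
    properties: { city: { type: 'string' }, units: { type: 'string' } },
    required: ['city'],
  }

  // After enforceStrictSchema(loose): every key becomes required and
  // extra properties are forbidden, as OpenAI strict structured outputs demand.
  const strict = {
    type: 'object',
    additionalProperties: false,
    properties: { city: { type: 'string' }, units: { type: 'string' } },
    required: ['city', 'units'],
  }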
@@ -123,7 +130,7 @@ export async function executeResponsesProviderRequest(

   const initialInput = buildResponsesInputFromMessages(allMessages)

-  const basePayload: Record<string, any> = {
+  const basePayload: Record<string, unknown> = {
     model: config.modelName,
   }

@@ -139,13 +146,13 @@

   if (request.verbosity !== undefined && request.verbosity !== 'auto') {
     basePayload.text = {
-      ...(basePayload.text ?? {}),
+      ...((basePayload.text as Record<string, unknown>) ?? {}),
       verbosity: request.verbosity,
     }
   }

   // Store response format config - for Azure with tools, we defer applying it until after tool calls complete
-  let deferredTextFormat: { type: string; name: string; schema: any; strict: boolean } | undefined
+  let deferredTextFormat: OpenAI.Responses.ResponseFormatTextJSONSchemaConfig | undefined
   const hasTools = !!request.tools?.length
   const isAzure = config.providerId === 'azure-openai'

@@ -171,7 +178,7 @@
     )
   } else {
     basePayload.text = {
-      ...(basePayload.text ?? {}),
+      ...((basePayload.text as Record<string, unknown>) ?? {}),
       format: textFormat,
     }
     logger.info(`Added JSON schema response format to ${config.providerLabel} request`)
@@ -231,7 +238,10 @@
     }
   }

-  const createRequestBody = (input: ResponsesInputItem[], overrides: Record<string, any> = {}) => ({
+  const createRequestBody = (
+    input: ResponsesInputItem[],
+    overrides: Record<string, unknown> = {}
+  ) => ({
     ...basePayload,
     input,
     ...overrides,
@@ -247,7 +257,9 @@
     }
   }

-  const postResponses = async (body: Record<string, any>) => {
+  const postResponses = async (
+    body: Record<string, unknown>
+  ): Promise<OpenAI.Responses.Response> => {
     const response = await fetch(config.endpoint, {
       method: 'POST',
       headers: config.headers,
@@ -496,10 +508,10 @@
           duration: duration,
         })

-        let resultContent: any
+        let resultContent: Record<string, unknown>
         if (result.success) {
           toolResults.push(result.output)
-          resultContent = result.output
+          resultContent = result.output as Record<string, unknown>
         } else {
           resultContent = {
             error: true,
@@ -615,11 +627,11 @@
   }

   // Make final call with the response format - build payload without tools
-  const finalPayload: Record<string, any> = {
+  const finalPayload: Record<string, unknown> = {
     model: config.modelName,
     input: formattedInput,
     text: {
-      ...(basePayload.text ?? {}),
+      ...((basePayload.text as Record<string, unknown>) ?? {}),
       format: deferredTextFormat,
     },
   }
@@ -635,7 +647,7 @@
   }
   if (request.verbosity !== undefined && request.verbosity !== 'auto') {
     finalPayload.text = {
-      ...finalPayload.text,
+      ...((finalPayload.text as Record<string, unknown>) ?? {}),
       verbosity: request.verbosity,
     }
   }
@@ -679,10 +691,10 @@
   const accumulatedCost = calculateCost(request.model, tokens.input, tokens.output)

   // For Azure with deferred format in streaming mode, include the format in the streaming call
-  const streamOverrides: Record<string, any> = { stream: true, tool_choice: 'auto' }
+  const streamOverrides: Record<string, unknown> = { stream: true, tool_choice: 'auto' }
   if (deferredTextFormat) {
     streamOverrides.text = {
-      ...(basePayload.text ?? {}),
+      ...((basePayload.text as Record<string, unknown>) ?? {}),
       format: deferredTextFormat,
     }
   }
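
The recurring Record<string, any> to Record<string, unknown> swap is the heart of this commit: unknown still accepts any value on assignment, but it cannot be dereferenced without narrowing, which is exactly why the diff adds casts such as basePayload.text as Record<string, unknown> at each read site. A minimal standalone illustration:

  const loose: Record<string, any> = {}
  const safe: Record<string, unknown> = {}

  loose.text.verbosity = 'low'    // compiles, but crashes at runtime: text is undefined
  // safe.text.verbosity = 'low'  // compile error: 'safe.text' is of type 'unknown'

  // Reads must be narrowed or cast first, which makes the assumption explicit:
  const text = (safe.text as Record<string, unknown>) ?? {}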
