Skip to content

Commit 6ea24c9

Browse files
committed
feat: request logging
1 parent abbf600 commit 6ea24c9

File tree

5 files changed: +339 additions, −10 deletions

packages/opencode/src/cli/cmd/tui/thread.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,7 @@ export const TuiThreadCommand = cmd({
9494
env: Object.fromEntries(
9595
Object.entries(process.env).filter((entry): entry is [string, string] => entry[1] !== undefined),
9696
),
97+
argv: process.argv,
9798
})
9899
worker.onerror = (e) => {
99100
Log.Default.error(e)

packages/opencode/src/cli/cmd/tui/worker.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ await Log.init({
1818
if (Installation.isLocal()) return "DEBUG"
1919
return "INFO"
2020
})(),
21+
requestLog: process.argv.includes("--request-log"),
2122
})
2223

2324
process.on("unhandledRejection", (e) => {

packages/opencode/src/index.ts

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,10 @@ const cli = yargs(hideBin(process.argv))
5656
type: "string",
5757
choices: ["DEBUG", "INFO", "WARN", "ERROR"],
5858
})
59+
.option("request-log", {
60+
describe: "log provider requests and responses to a separate file",
61+
type: "boolean",
62+
})
5963
.middleware(async (opts) => {
6064
await Log.init({
6165
print: process.argv.includes("--print-logs"),
@@ -65,6 +69,7 @@ const cli = yargs(hideBin(process.argv))
6569
if (Installation.isLocal()) return "DEBUG"
6670
return "INFO"
6771
})(),
72+
requestLog: process.argv.includes("--request-log"),
6873
})
6974

7075
process.env.AGENT = "1"
@@ -151,6 +156,8 @@ try {
151156
}
152157
process.exitCode = 1
153158
} finally {
159+
// Flush any pending request logs before exiting
160+
await Log.flushRequestLog()
154161
// Some subprocesses don't react properly to SIGTERM and similar signals.
155162
// Most notably, some docker-container-based MCP servers don't handle such signals unless
156163
// run using `docker run --init`.

packages/opencode/src/provider/provider.ts

Lines changed: 199 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -991,6 +991,7 @@ export namespace Provider {
991991
// Preserve custom fetch if it exists, wrap it with timeout logic
992992
const fetchFn = customFetch ?? fetch
993993
const opts = init ?? {}
994+
const startTime = Date.now()
994995

995996
if (options["timeout"] !== undefined && options["timeout"] !== null) {
996997
const signals: AbortSignal[] = []
@@ -1002,6 +1003,11 @@ export namespace Provider {
10021003
opts.signal = combined
10031004
}
10041005

1006+
const url = typeof input === "string" ? input : input.url
1007+
1008+
// Generate request ID for correlating request/response
1009+
const requestId = Math.random().toString(36).substring(2, 8)
1010+
10051011
// Strip openai itemId metadata following what codex does
10061012
// Codex uses #[serde(skip_serializing)] on id fields for all item types:
10071013
// Message, Reasoning, FunctionCall, LocalShellCall, CustomToolCall, WebSearchCall
@@ -1020,11 +1026,199 @@ export namespace Provider {
10201026
}
10211027
}
10221028

1023-
return fetchFn(input, {
1024-
...opts,
1025-
// @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682
1026-
timeout: false,
1027-
})
1029+
// Log request if enabled
1030+
if (Log.isRequestLoggingEnabled()) {
1031+
const requestData: any = {
1032+
type: "REQUEST",
1033+
requestId,
1034+
provider: model.providerID,
1035+
model: model.id,
1036+
url,
1037+
method: opts.method ?? "GET",
1038+
}
1039+
1040+
// Parse and filter the request body
1041+
if (opts.body) {
1042+
try {
1043+
const body = typeof opts.body === "string" ? JSON.parse(opts.body) : opts.body
1044+
const filteredBody: any = {}
1045+
1046+
// Extract messages (OpenCode format uses 'input', OpenAI uses 'messages')
1047+
const messages = body.input || body.messages
1048+
if (messages && Array.isArray(messages)) {
1049+
filteredBody.messages = messages.map((msg: any) => {
1050+
const role = msg.role
1051+
let content: string
1052+
1053+
// Handle different content formats
1054+
if (typeof msg.content === "string") {
1055+
content = msg.content
1056+
} else if (Array.isArray(msg.content)) {
1057+
// Multi-part content (text + images, etc)
1058+
const textPart = msg.content.find((p: any) => p.type === "text" || p.text)
1059+
content = textPart ? textPart.text || textPart.content || "[complex content]" : "[complex content]"
1060+
} else {
1061+
content = "[complex content]"
1062+
}
1063+
1064+
return { role, content }
1065+
})
1066+
}
1067+
1068+
// Summarize tools if present
1069+
if (body.tools && Array.isArray(body.tools)) {
1070+
filteredBody.tools_count = body.tools.length
1071+
filteredBody.tools_summary = body.tools
1072+
.map((t: any) => t.function?.name || t.name)
1073+
.filter(Boolean)
1074+
.join(", ")
1075+
}
1076+
1077+
// Keep other useful fields
1078+
if (body.model) filteredBody.model = body.model
1079+
if (body.max_tokens) filteredBody.max_tokens = body.max_tokens
1080+
if (body.max_output_tokens) filteredBody.max_output_tokens = body.max_output_tokens
1081+
if (body.temperature !== undefined) filteredBody.temperature = body.temperature
1082+
if (body.stream !== undefined) filteredBody.stream = body.stream
1083+
1084+
requestData.body = filteredBody
1085+
} catch {
1086+
requestData.body = "[parse error]"
1087+
}
1088+
}
1089+
1090+
await Log.logRequest(requestData)
1091+
}
1092+
1093+
try {
1094+
const response = await fetchFn(input, {
1095+
...opts,
1096+
// @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682
1097+
timeout: false,
1098+
})
1099+
1100+
// Log response if enabled
1101+
if (Log.isRequestLoggingEnabled()) {
1102+
const clonedResponse = response.clone()
1103+
const responseText = await clonedResponse.text().catch(() => "[stream or unreadable]")
1104+
const responseData: any = {
1105+
type: "RESPONSE",
1106+
requestId,
1107+
provider: model.providerID,
1108+
model: model.id,
1109+
url,
1110+
status: response.status,
1111+
duration: Date.now() - startTime,
1112+
}
1113+
1114+
// Parse and extract useful information from response
1115+
let completion = ""
1116+
let tokenUsage: any = null
1117+
1118+
// Try to parse as JSON first (non-streaming response)
1119+
try {
1120+
const jsonResponse = JSON.parse(responseText)
1121+
1122+
// Extract completion text
1123+
if (jsonResponse.choices && Array.isArray(jsonResponse.choices)) {
1124+
const firstChoice = jsonResponse.choices[0]
1125+
completion = firstChoice?.message?.content || firstChoice?.text || ""
1126+
} else if (jsonResponse.content) {
1127+
completion = jsonResponse.content
1128+
} else if (jsonResponse.text) {
1129+
completion = jsonResponse.text
1130+
}
1131+
1132+
// Extract token usage
1133+
if (jsonResponse.usage) {
1134+
tokenUsage = jsonResponse.usage
1135+
}
1136+
} catch {
1137+
// If not JSON, try parsing as SSE stream
1138+
if (responseText.includes("data:")) {
1139+
const lines = responseText.split("\n")
1140+
1141+
for (const line of lines) {
1142+
if (!line.startsWith("data:")) continue
1143+
1144+
const dataStr = line.substring(5).trim()
1145+
if (dataStr === "[DONE]") continue
1146+
1147+
try {
1148+
const data = JSON.parse(dataStr)
1149+
1150+
// OpenAI format: extract delta content
1151+
if (data.choices?.[0]?.delta?.content) {
1152+
completion += data.choices[0].delta.content
1153+
}
1154+
1155+
// OpenAI format: final message
1156+
if (data.choices?.[0]?.message?.content) {
1157+
completion = data.choices[0].message.content
1158+
}
1159+
1160+
// OpenCode format: extract text from various event types
1161+
if (data.type === "response.output_text.done" && data.text) {
1162+
completion = data.text
1163+
}
1164+
if (data.type === "content.delta" && data.delta?.text) {
1165+
completion += data.delta.text
1166+
}
1167+
if (data.type === "response.done" && data.response?.output?.[0]?.content?.[0]?.text) {
1168+
completion = data.response.output[0].content[0].text
1169+
}
1170+
1171+
// Extract token usage from various formats
1172+
if (data.usage) {
1173+
tokenUsage = data.usage
1174+
}
1175+
if (data.response?.usage) {
1176+
tokenUsage = data.response.usage
1177+
}
1178+
} catch {
1179+
// Skip lines that can't be parsed
1180+
}
1181+
}
1182+
}
1183+
}
1184+
1185+
// Add completion text (no truncation)
1186+
if (completion) {
1187+
responseData.completion = completion
1188+
} else {
1189+
responseData.completion = "[no text extracted]"
1190+
}
1191+
1192+
// Add token usage if found
1193+
if (tokenUsage) {
1194+
responseData.input_tokens = tokenUsage.input_tokens || tokenUsage.prompt_tokens
1195+
responseData.output_tokens = tokenUsage.output_tokens || tokenUsage.completion_tokens
1196+
responseData.total_tokens =
1197+
tokenUsage.total_tokens ||
1198+
(tokenUsage.input_tokens || tokenUsage.prompt_tokens || 0) +
1199+
(tokenUsage.output_tokens || tokenUsage.completion_tokens || 0)
1200+
}
1201+
1202+
await Log.logRequest(responseData)
1203+
}
1204+
1205+
return response
1206+
} catch (error) {
1207+
// Log error if enabled
1208+
if (Log.isRequestLoggingEnabled()) {
1209+
const errorData = {
1210+
type: "ERROR",
1211+
requestId,
1212+
provider: model.providerID,
1213+
model: model.id,
1214+
url,
1215+
error: error instanceof Error ? error.message : String(error),
1216+
duration: Date.now() - startTime,
1217+
}
1218+
await Log.logRequest(errorData)
1219+
}
1220+
throw error
1221+
}
10281222
}
10291223

10301224
const bundledFn = BUNDLED_PROVIDERS[model.api.npm]

0 commit comments

Comments (0)