
Commit 54db890

Merge
1 parent 9ec980b commit 54db890

File tree

5 files changed: +175 -3 lines changed

.changeset/brown-hounds-divide.md

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
---
"@langchain/openai": patch
---

fix(openai): Convert OpenAI responses API usage to tracing format
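
For concreteness, here is what that conversion looks like, using the payload shapes exercised by the tests below (the input object itself is illustrative):

const usage = {
  input_tokens: 100,
  output_tokens: 50,
  total_tokens: 150,
  input_tokens_details: { cached_tokens: 75, text_tokens: 25 },
  output_tokens_details: { reasoning_tokens: 10, text_tokens: 40 },
};
// _convertOpenAIResponsesUsageToLangChainUsage(usage) returns the LangChain
// UsageMetadata shape, preserving the raw OpenAI details (until v1, per the
// TODO in the implementation) and adding normalized cache_read / reasoning keys:
// {
//   input_tokens: 100,
//   output_tokens: 50,
//   total_tokens: 150,
//   input_token_details: { cached_tokens: 75, text_tokens: 25, cache_read: 75 },
//   output_token_details: { reasoning_tokens: 10, text_tokens: 40, reasoning: 10 },
// }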
Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
import { OpenAI as OpenAIClient } from "openai";
import {
  StandardImageBlock,
  StandardTextBlock,
  UsageMetadata,
} from "@langchain/core/messages";

/**
 * Handle multi modal response content.
 *
 * @param content The content of the message.
 * @param messages The messages of the response.
 * @returns The new content of the message.
 */
export function handleMultiModalOutput(
  content: string,
  messages: unknown
): (StandardImageBlock | StandardTextBlock)[] | string {
  /**
   * Handle OpenRouter image responses
   * @see https://openrouter.ai/docs/features/multimodal/image-generation#api-usage
   */
  if (
    messages &&
    typeof messages === "object" &&
    "images" in messages &&
    Array.isArray(messages.images)
  ) {
    const images = messages.images
      .filter((image) => typeof image?.image_url?.url === "string")
      .map(
        (image) =>
          ({
            type: "image",
            url: image.image_url.url as string,
            source_type: "url",
          } as const)
      );
    return [{ type: "text", text: content, source_type: "text" }, ...images];
  }

  return content;
}

export function _convertOpenAIResponsesUsageToLangChainUsage(
  usage?: OpenAIClient.Responses.ResponseUsage
): UsageMetadata {
  // TODO: Remove raw OpenAI usage details in v1
  const inputTokenDetails = {
    ...(usage?.input_tokens_details?.cached_tokens != null && {
      ...usage?.input_tokens_details,
      cache_read: usage?.input_tokens_details?.cached_tokens,
    }),
  };
  const outputTokenDetails = {
    ...(usage?.output_tokens_details?.reasoning_tokens != null && {
      ...usage?.output_tokens_details,
      reasoning: usage?.output_tokens_details?.reasoning_tokens,
    }),
  };
  return {
    input_tokens: usage?.input_tokens ?? 0,
    output_tokens: usage?.output_tokens ?? 0,
    total_tokens: usage?.total_tokens ?? 0,
    input_token_details: inputTokenDetails,
    output_token_details: outputTokenDetails,
  };
}
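
A quick sketch of the OpenRouter branch in handleMultiModalOutput above (the payload shape follows the linked OpenRouter docs; the URL is illustrative):

const messages = {
  images: [{ image_url: { url: "https://example.com/generated.png" } }],
};
handleMultiModalOutput("Here is the image", messages);
// => [
//   { type: "text", text: "Here is the image", source_type: "text" },
//   { type: "image", url: "https://example.com/generated.png", source_type: "url" },
// ]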

libs/providers/langchain-openai/src/chat_models.ts

Lines changed: 8 additions & 2 deletions
@@ -99,6 +99,7 @@ import {
   getStructuredOutputMethod,
   interopZodResponseFormat,
   handleMultiModalOutput,
+  _convertOpenAIResponsesUsageToLangChainUsage,
 } from "./utils/output.js";
 import {
   _convertMessagesToOpenAIParams,
@@ -1449,7 +1450,9 @@ export class ChatOpenAIResponses<
       content,
       tool_calls,
       invalid_tool_calls,
-      usage_metadata: response.usage,
+      usage_metadata: _convertOpenAIResponsesUsageToLangChainUsage(
+        response.usage
+      ),
       additional_kwargs,
       response_metadata,
     });
@@ -1527,7 +1530,10 @@ export class ChatOpenAIResponses<
     } else if (chunk.type === "response.completed") {
       const msg = this._convertResponsesMessageToBaseMessage(chunk.response);

-      usage_metadata = chunk.response.usage;
+      usage_metadata = _convertOpenAIResponsesUsageToLangChainUsage(
+        chunk.response.usage
+      );
+
       if (chunk.response.text?.format?.type === "json_schema") {
         additional_kwargs.parsed ??= JSON.parse(msg.text);
       }

libs/providers/langchain-openai/src/tests/chat_models.test.ts

Lines changed: 67 additions & 0 deletions
@@ -5,6 +5,7 @@ import { toJsonSchema } from "@langchain/core/utils/json_schema";
 import { load } from "@langchain/core/load";
 import { tool } from "@langchain/core/tools";
 import { ChatOpenAI } from "../chat_models.js";
+import { _convertOpenAIResponsesUsageToLangChainUsage } from "../utils/output.js";

 describe("ChatOpenAI", () => {
   describe("should initialize with correct values", () => {
@@ -632,4 +633,70 @@ describe("ChatOpenAI", () => {
       ]);
     });
   });
+
+  describe("Responses API usage metadata conversion", () => {
+    it("should convert OpenAI Responses usage to LangChain format with cached tokens", () => {
+      const usage = {
+        input_tokens: 100,
+        output_tokens: 50,
+        total_tokens: 150,
+        input_tokens_details: {
+          cached_tokens: 75,
+          text_tokens: 25,
+        },
+        output_tokens_details: {
+          reasoning_tokens: 10,
+          text_tokens: 40,
+        },
+      };
+
+      const result = _convertOpenAIResponsesUsageToLangChainUsage(usage as any);
+
+      expect(result).toEqual({
+        input_tokens: 100,
+        output_tokens: 50,
+        total_tokens: 150,
+        input_token_details: {
+          cached_tokens: 75,
+          text_tokens: 25,
+          cache_read: 75,
+        },
+        output_token_details: {
+          reasoning_tokens: 10,
+          text_tokens: 40,
+          reasoning: 10,
+        },
+      });
+    });
+
+    it("should handle missing usage details gracefully", () => {
+      const usage = {
+        input_tokens: 100,
+        output_tokens: 50,
+        total_tokens: 150,
+      };
+
+      const result = _convertOpenAIResponsesUsageToLangChainUsage(usage as any);
+
+      expect(result).toEqual({
+        input_tokens: 100,
+        output_tokens: 50,
+        total_tokens: 150,
+        input_token_details: {},
+        output_token_details: {},
+      });
+    });
+
+    it("should handle undefined usage", () => {
+      const result = _convertOpenAIResponsesUsageToLangChainUsage(undefined);
+
+      expect(result).toEqual({
+        input_tokens: 0,
+        output_tokens: 0,
+        total_tokens: 0,
+        input_token_details: {},
+        output_token_details: {},
+      });
+    });
+  });
 });

libs/providers/langchain-openai/src/utils/output.ts

Lines changed: 27 additions & 1 deletion
@@ -1,3 +1,4 @@
+import { OpenAI as OpenAIClient } from "openai";
 import {
   InteropZodType,
   isZodSchemaV3,
@@ -6,7 +7,7 @@ import {
 import { toJSONSchema as toJSONSchemaV4, parse as parseV4 } from "zod/v4/core";
 import { ResponseFormatJSONSchema } from "openai/resources";
 import { zodResponseFormat } from "openai/helpers/zod";
-import { ContentBlock } from "@langchain/core/messages";
+import { ContentBlock, UsageMetadata } from "@langchain/core/messages";

 const SUPPORTED_METHODS = [
   "jsonSchema",
@@ -161,3 +162,28 @@ export function handleMultiModalOutput(

   return content;
 }
+
+export function _convertOpenAIResponsesUsageToLangChainUsage(
+  usage?: OpenAIClient.Responses.ResponseUsage
+): UsageMetadata {
+  // TODO: Remove raw OpenAI usage details in v1
+  const inputTokenDetails = {
+    ...(usage?.input_tokens_details?.cached_tokens != null && {
+      ...usage?.input_tokens_details,
+      cache_read: usage?.input_tokens_details?.cached_tokens,
+    }),
+  };
+  const outputTokenDetails = {
+    ...(usage?.output_tokens_details?.reasoning_tokens != null && {
+      ...usage?.output_tokens_details,
+      reasoning: usage?.output_tokens_details?.reasoning_tokens,
+    }),
+  };
+  return {
+    input_tokens: usage?.input_tokens ?? 0,
+    output_tokens: usage?.output_tokens ?? 0,
+    total_tokens: usage?.total_tokens ?? 0,
+    input_token_details: inputTokenDetails,
+    output_token_details: outputTokenDetails,
+  };
+}
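
One design note on the conditional spreads above: spreading a falsy value into an object literal spreads nothing, so when the guarded token count is absent the details collapse to an empty object instead of carrying undefined keys. A standalone illustration (not from this diff):

const withCache = { ...(75 != null && { cache_read: 75 }) }; // => { cache_read: 75 }
const withoutCache = { ...(undefined != null && { cache_read: 0 }) }; // => {}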
