
Commit c9305d9

fix(ai): properly handle token usage for ai@>=5.0.0 (#6707)

* fix(ai): add support for AI SDK v5 token properties

AI SDK v5 renamed token usage properties from promptTokens/completionTokens to inputTokens/outputTokens/totalTokens. This was causing token metrics to not show up for streaming operations.

The getUsage() function now checks for v5 properties first, then falls back to v4 for backwards compatibility:

- ai.usage.inputTokens (v5) → ai.usage.promptTokens (v4)
- ai.usage.outputTokens (v5) → ai.usage.completionTokens (v4)
- ai.usage.totalTokens (v5) → computed sum (v4)

See: https://ai-sdk.dev/docs/migration-guides/migration-guide-5-0#usage-token-properties

* fix tests

Co-authored-by: Mark Anthony Cianfrani <mark@cianfrani.me>
1 parent 4dec315 commit c9305d9
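For context, a rough sketch of the two tag shapes the plugin now has to handle. The ai.usage.* keys follow the patch below; the token counts are made up purely for illustration.

// AI SDK v4 span tags: prompt/completion naming, no total reported
const v4Tags = {
  'ai.usage.promptTokens': 12,
  'ai.usage.completionTokens': 34
  // the total has to be computed: 12 + 34
}

// AI SDK v5 span tags: input/output naming, total reported directly
const v5Tags = {
  'ai.usage.inputTokens': 12,
  'ai.usage.outputTokens': 34,
  'ai.usage.totalTokens': 46
}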

File tree

2 files changed: +43 -3 lines changed


packages/dd-trace/src/llmobs/plugins/ai/util.js

Lines changed: 8 additions & 3 deletions
@@ -45,19 +45,24 @@ function getOperation (span) {
 
 /**
  * Get the LLM token usage from the span tags
+ * Supports both AI SDK v4 (promptTokens/completionTokens) and v5 (inputTokens/outputTokens)
  * @template T extends {inputTokens: number, outputTokens: number, totalTokens: number}
  * @param {T} tags
  * @returns {Pick<T, 'inputTokens' | 'outputTokens' | 'totalTokens'>}
  */
 function getUsage (tags) {
   const usage = {}
-  const inputTokens = tags['ai.usage.promptTokens']
-  const outputTokens = tags['ai.usage.completionTokens']
+
+  // AI SDK v5 uses inputTokens/outputTokens, v4 uses promptTokens/completionTokens
+  // Check v5 properties first, fall back to v4
+  const inputTokens = tags['ai.usage.inputTokens'] ?? tags['ai.usage.promptTokens']
+  const outputTokens = tags['ai.usage.outputTokens'] ?? tags['ai.usage.completionTokens']
 
   if (inputTokens != null) usage.inputTokens = inputTokens
   if (outputTokens != null) usage.outputTokens = outputTokens
 
-  const totalTokens = inputTokens + outputTokens
+  // v5 provides totalTokens directly, v4 requires computation
+  const totalTokens = tags['ai.usage.totalTokens'] ?? (inputTokens + outputTokens)
   if (!Number.isNaN(totalTokens)) usage.totalTokens = totalTokens
 
   return usage
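A minimal usage sketch of the patched getUsage() against both tag shapes. The inputs are hypothetical, and the expected results follow from the diff above rather than from running the plugin.

// v5-shaped tags: all three values are read directly
getUsage({
  'ai.usage.inputTokens': 10,
  'ai.usage.outputTokens': 5,
  'ai.usage.totalTokens': 15
})
// → { inputTokens: 10, outputTokens: 5, totalTokens: 15 }

// v4-shaped tags: falls back to the old keys and computes the total
getUsage({
  'ai.usage.promptTokens': 10,
  'ai.usage.completionTokens': 5
})
// → { inputTokens: 10, outputTokens: 5, totalTokens: 15 }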

packages/dd-trace/test/llmobs/plugins/ai/index.spec.js

Lines changed: 35 additions & 0 deletions
@@ -17,6 +17,7 @@ const {
   MOCK_NUMBER,
   MOCK_OBJECT
 } = require('../../util')
+const assert = require('node:assert')
 
 chai.Assertion.addMethod('deepEqualWithMockValues', deepEqualWithMockValues)
 
@@ -279,6 +280,15 @@ describe('Plugin', () => {
 
       expect(llmobsSpans[0]).to.deepEqualWithMockValues(expectedWorkflowSpan)
       expect(llmobsSpans[1]).to.deepEqualWithMockValues(expectedLlmSpan)
+
+      // manually asserting the token metrics are set correctly
+      // TODO(MLOB-4234): the llmobs span event assertions are slightly buggy and need to be re-worked
+      assert.ok(typeof llmobsSpans[1].metrics.input_tokens === 'number')
+      assert.ok(llmobsSpans[1].metrics.input_tokens > 0)
+      assert.ok(typeof llmobsSpans[1].metrics.output_tokens === 'number')
+      assert.ok(llmobsSpans[1].metrics.output_tokens > 0)
+      assert.ok(typeof llmobsSpans[1].metrics.total_tokens === 'number')
+      assert.ok(llmobsSpans[1].metrics.total_tokens > 0)
     })
 
     it('creates a span for streamObject', async () => {
@@ -338,6 +348,15 @@ describe('Plugin', () => {
 
      expect(llmobsSpans[0]).to.deepEqualWithMockValues(expectedWorkflowSpan)
      expect(llmobsSpans[1]).to.deepEqualWithMockValues(expectedLlmSpan)
+
+      // manually asserting the token metrics are set correctly
+      // TODO(MLOB-4234): the llmobs span event assertions are slightly buggy and need to be re-worked
+      assert.ok(typeof llmobsSpans[1].metrics.input_tokens === 'number')
+      assert.ok(llmobsSpans[1].metrics.input_tokens > 0)
+      assert.ok(typeof llmobsSpans[1].metrics.output_tokens === 'number')
+      assert.ok(llmobsSpans[1].metrics.output_tokens > 0)
+      assert.ok(typeof llmobsSpans[1].metrics.total_tokens === 'number')
+      assert.ok(llmobsSpans[1].metrics.total_tokens > 0)
     })
 
     it('creates a span for a tool call', async () => {
@@ -692,6 +711,22 @@ describe('Plugin', () => {
       expect(llmSpan).to.deepEqualWithMockValues(expectedLlmSpan)
       expect(toolCallSpan).to.deepEqualWithMockValues(expectedToolCallSpan)
       expect(llmSpan2).to.deepEqualWithMockValues(expectedLlmSpan2)
+
+      // manually asserting the token metrics are set correctly
+      // TODO(MLOB-4234): the llmobs span event assertions are slightly buggy and need to be re-worked
+      assert.ok(typeof llmSpan.metrics.input_tokens === 'number')
+      assert.ok(llmSpan.metrics.input_tokens > 0)
+      assert.ok(typeof llmSpan.metrics.output_tokens === 'number')
+      assert.ok(llmSpan.metrics.output_tokens > 0)
+      assert.ok(typeof llmSpan.metrics.total_tokens === 'number')
+      assert.ok(llmSpan.metrics.total_tokens > 0)
+
+      assert.ok(typeof llmSpan2.metrics.input_tokens === 'number')
+      assert.ok(llmSpan2.metrics.input_tokens > 0)
+      assert.ok(typeof llmSpan2.metrics.output_tokens === 'number')
+      assert.ok(llmSpan2.metrics.output_tokens > 0)
+      assert.ok(typeof llmSpan2.metrics.total_tokens === 'number')
+      assert.ok(llmSpan2.metrics.total_tokens > 0)
     })
 
     it('creates a span that respects the functionId', async () => {
