
Commit a288694

feat(ai): Expose provider metadata as an attribute on exported OTEL spans (#7096)
## Background

Right now there doesn't appear to be a way for tracing providers to get accurate metadata when users are using Anthropic/OpenAI/Gemini's prompt caching features. The tokens used to create/read caches are returned in a providerMetadata field on the response that does not seem to be passed anywhere into the exported spans.

## Summary

Adds a new `ai.response.providerMetadata` span attribute that exposes this response field.

## Verification

Ran `pnpm build` and modified the `anthropic-cache-control.ts` example. Saw the new span attribute present:

```
{
  resource: {
    attributes: {
      'service.name': 'unknown_service:/usr/local/bin/node',
      'telemetry.sdk.language': 'nodejs',
      'telemetry.sdk.name': 'opentelemetry',
      'telemetry.sdk.version': '2.0.1'
    }
  },
  instrumentationScope: { name: 'ai', version: undefined, schemaUrl: undefined },
  ...
  attributes: {
    'operation.name': 'ai.generateText',
    'ai.operationId': 'ai.generateText',
    'ai.model.provider': 'anthropic.messages',
    'ai.model.id': 'claude-3-5-sonnet-20240620',
    'ai.settings.maxRetries': 2,
    'ai.prompt': `...`,
    'ai.response.providerMetadata': '{"anthropic":{"cacheCreationInputTokens":2157}}',
    'ai.usage.promptTokens': 10,
    'ai.usage.completionTokens': 456
  },
  ...
}
```

## Tasks

- [x] Tests have been added / updated (for bug fixes / features)
- [x] Documentation has been added / updated (for bug fixes / features)
- [x] A _patch_ changeset for relevant packages has been added (for bug fixes / features - run `pnpm changeset` in the project root)
- [x] Formatting issues have been fixed (run `pnpm prettier-fix` in the project root)

## Related Issues

Fixes #7079
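As an illustration of how a tracing backend could consume the new attribute, here is a minimal sketch of a custom OTEL span processor that parses it. The `ProviderMetadataProcessor` name and the cache-token lookup are hypothetical, mirroring the Anthropic output above:

```ts
import type { Context } from '@opentelemetry/api';
import type {
  ReadableSpan,
  Span,
  SpanProcessor,
} from '@opentelemetry/sdk-trace-base';

// Hypothetical processor that surfaces provider metadata from AI SDK spans.
class ProviderMetadataProcessor implements SpanProcessor {
  onStart(_span: Span, _context: Context): void {}

  onEnd(span: ReadableSpan): void {
    const raw = span.attributes['ai.response.providerMetadata'];
    if (typeof raw !== 'string') return; // attribute absent when the provider returns no metadata

    const metadata = JSON.parse(raw);
    // e.g. Anthropic prompt-caching token counts, as in the example span above:
    const cacheTokens = metadata.anthropic?.cacheCreationInputTokens;
    if (cacheTokens !== undefined) {
      console.log(`${span.name}: cacheCreationInputTokens=${cacheTokens}`);
    }
  }

  shutdown(): Promise<void> {
    return Promise.resolve();
  }

  forceFlush(): Promise<void> {
    return Promise.resolve();
  }
}
```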
1 parent 382b510 · commit a288694

14 files changed, +106 −12 lines

.changeset/five-pumpkins-joke.md

Lines changed: 5 additions & 0 deletions

```diff
@@ -0,0 +1,5 @@
+---
+'ai': patch
+---
+
+Expose provider metadata as an attribute on exported OTEL spans
```

content/docs/03-ai-sdk-core/60-telemetry.mdx

Lines changed: 2 additions & 0 deletions

```diff
@@ -265,6 +265,7 @@ Many spans that use LLMs (`ai.generateText`, `ai.generateText.doGenerate`, `ai.s
 - `ai.model.id`: the id of the model
 - `ai.model.provider`: the provider of the model
 - `ai.request.headers.*`: the request headers that were passed in through `headers`
+- `ai.response.providerMetadata`: provider specific metadata returned with the generation response
 - `ai.settings.maxRetries`: the maximum number of retries that were set
 - `ai.telemetry.functionId`: the functionId that was set through `telemetry.functionId`
 - `ai.telemetry.metadata.*`: the metadata that was passed in through `telemetry.metadata`
@@ -302,6 +303,7 @@ Many spans that use embedding models (`ai.embed`, `ai.embed.doEmbed`, `ai.embedM
 - `ai.model.id`: the id of the model
 - `ai.model.provider`: the provider of the model
 - `ai.request.headers.*`: the request headers that were passed in through `headers`
+- `ai.response.providerMetadata`: provider specific metadata returned with the generation response
 - `ai.settings.maxRetries`: the maximum number of retries that were set
 - `ai.telemetry.functionId`: the functionId that was set through `telemetry.functionId`
 - `ai.telemetry.metadata.*`: the metadata that was passed in through `telemetry.metadata`
```
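For context, the attribute lands next to the others documented above whenever telemetry is enabled on a call. A usage sketch, assuming the `experimental_telemetry` flag this docs page describes (the model choice is illustrative):

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { generateText } from 'ai';

const { providerMetadata } = await generateText({
  model: anthropic('claude-3-5-sonnet-20240620'),
  prompt: 'Hello, world!',
  // With telemetry enabled, the registered tracer now also receives
  // 'ai.response.providerMetadata' on the ai.generateText span.
  experimental_telemetry: { isEnabled: true },
});
```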

packages/ai/core/generate-object/__snapshots__/generate-object.test.ts.snap

Lines changed: 1 addition & 0 deletions

```diff
@@ -143,6 +143,7 @@ exports[`telemetry > should record telemetry data when enabled with mode "json"
   "ai.response.id": "test-id-from-model",
   "ai.response.model": "test-response-model-id",
   "ai.response.object": "{ "content": "Hello, world!" }",
+  "ai.response.providerMetadata": "{"testprovider":{"test1":"value1"}}",
   "ai.response.timestamp": "1970-01-01T00:00:10.000Z",
   "ai.settings.frequencyPenalty": 0.3,
   "ai.settings.maxRetries": 2,
```

packages/ai/core/generate-object/__snapshots__/stream-object.test.ts.snap

Lines changed: 2 additions & 0 deletions

```diff
@@ -243,6 +243,7 @@ exports[`streamObject > telemetry > should record telemetry data when enabled wi
   "ai.request.headers.header1": "value1",
   "ai.request.headers.header2": "value2",
   "ai.response.object": "{"content":"Hello, world!"}",
+  "ai.response.providerMetadata": "{"testprovider":{"testkey":"testvalue"}}",
   "ai.schema": "{"type":"object","properties":{"content":{"type":"string"}},"required":["content"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}",
   "ai.schema.description": "test description",
   "ai.schema.name": "test-name",
@@ -278,6 +279,7 @@ exports[`streamObject > telemetry > should record telemetry data when enabled wi
   "ai.response.id": "id-0",
   "ai.response.model": "mock-model-id",
   "ai.response.object": "{"content":"Hello, world!"}",
+  "ai.response.providerMetadata": "{"testprovider":{"testkey":"testvalue"}}",
   "ai.response.timestamp": "1970-01-01T00:00:00.000Z",
   "ai.settings.frequencyPenalty": 0.3,
   "ai.settings.maxRetries": 2,
```

packages/ai/core/generate-object/generate-object.test.ts

Lines changed: 5 additions & 0 deletions

```diff
@@ -1166,6 +1166,11 @@ describe('telemetry', () => {
           timestamp: new Date(10000),
           modelId: 'test-response-model-id',
         },
+        providerMetadata: {
+          testprovider: {
+            test1: 'value1',
+          },
+        },
       }),
     }),
     schema: z.object({ content: z.string() }),
```

packages/ai/core/generate-object/generate-object.ts

Lines changed: 6 additions & 2 deletions

```diff
@@ -572,7 +572,9 @@ export async function generateObject<SCHEMA, RESULT>({
             'ai.response.model': responseData.modelId,
             'ai.response.timestamp':
               responseData.timestamp.toISOString(),
-
+            'ai.response.providerMetadata': JSON.stringify(
+              result.providerMetadata,
+            ),
             'ai.usage.promptTokens': result.usage.promptTokens,
             'ai.usage.completionTokens':
               result.usage.completionTokens,
@@ -698,7 +700,9 @@ export async function generateObject<SCHEMA, RESULT>({
             'ai.response.model': responseData.modelId,
             'ai.response.timestamp':
               responseData.timestamp.toISOString(),
-
+            'ai.response.providerMetadata': JSON.stringify(
+              result.providerMetadata,
+            ),
             'ai.usage.promptTokens': result.usage.promptTokens,
             'ai.usage.completionTokens':
               result.usage.completionTokens,
```
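One behavioral note on the serialization above: `JSON.stringify(undefined)` returns `undefined` rather than a string, so when a provider sends no metadata the attribute value is undefined and the OTEL SDK should simply omit it from the span rather than record a literal `"undefined"`. A quick illustration:

```ts
// No metadata: JSON.stringify yields undefined, not a string,
// so no 'ai.response.providerMetadata' attribute is recorded.
JSON.stringify(undefined); // => undefined

// With metadata, the span gets the serialized object, matching
// the example span in the commit message:
JSON.stringify({ anthropic: { cacheCreationInputTokens: 2157 } });
// => '{"anthropic":{"cacheCreationInputTokens":2157}}'
```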

packages/ai/core/generate-object/stream-object.test.ts

Lines changed: 10 additions & 0 deletions

```diff
@@ -64,6 +64,11 @@ describe('streamObject', () => {
             type: 'finish',
             finishReason: 'stop',
             usage: { completionTokens: 10, promptTokens: 3 },
+            providerMetadata: {
+              testprovider: {
+                testkey: 'testvalue',
+              },
+            },
           },
         ]),
         rawCall: { rawPrompt: 'prompt', rawSettings: {} },
@@ -1944,6 +1949,11 @@ describe('streamObject', () => {
             type: 'finish',
             finishReason: 'stop',
             usage: { completionTokens: 10, promptTokens: 3 },
+            providerMetadata: {
+              testprovider: {
+                testkey: 'testvalue',
+              },
+            },
           },
         ]),
         rawCall: { rawPrompt: 'prompt', rawSettings: {} },
```

packages/ai/core/generate-object/stream-object.ts

Lines changed: 4 additions & 0 deletions

```diff
@@ -935,6 +935,8 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
                 'ai.response.model': response.modelId,
                 'ai.response.timestamp':
                   response.timestamp.toISOString(),
+                'ai.response.providerMetadata':
+                  JSON.stringify(providerMetadata),

                 'ai.usage.promptTokens': finalUsage.promptTokens,
                 'ai.usage.completionTokens':
@@ -965,6 +967,8 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
                 'ai.response.object': {
                   output: () => JSON.stringify(object),
                 },
+                'ai.response.providerMetadata':
+                  JSON.stringify(providerMetadata),
               },
             }),
           );
```
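In the streaming path the metadata arrives on the stream's final `finish` part (as the test fixtures above set up) and is also surfaced to callers. A rough consumer-side sketch, assuming the `onFinish` callback shape visible in the stream-text snapshots below:

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { streamObject } from 'ai';
import { z } from 'zod';

const { partialObjectStream } = streamObject({
  model: anthropic('claude-3-5-sonnet-20240620'),
  schema: z.object({ content: z.string() }),
  prompt: 'Say hello.',
  experimental_telemetry: { isEnabled: true },
  onFinish({ providerMetadata }) {
    // The same object that the span attribute serializes via JSON.stringify.
    console.log(providerMetadata);
  },
});

// Drain the stream so the finish part (and the span end) is reached.
for await (const _part of partialObjectStream) {
  // no-op
}
```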

packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap

Lines changed: 2 additions & 0 deletions

```diff
@@ -1745,6 +1745,7 @@ exports[`telemetry > should record telemetry data when enabled 1`] = `
   "ai.request.headers.header1": "value1",
   "ai.request.headers.header2": "value2",
   "ai.response.finishReason": "stop",
+  "ai.response.providerMetadata": "{"testprovider":{"testkey":"testvalue"}}",
   "ai.response.text": "Hello, world!",
   "ai.settings.frequencyPenalty": 0.3,
   "ai.settings.maxRetries": 2,
@@ -1779,6 +1780,7 @@ exports[`telemetry > should record telemetry data when enabled 1`] = `
   "ai.response.finishReason": "stop",
   "ai.response.id": "test-id-from-model",
   "ai.response.model": "test-response-model-id",
+  "ai.response.providerMetadata": "{"testprovider":{"testkey":"testvalue"}}",
   "ai.response.text": "Hello, world!",
   "ai.response.timestamp": "1970-01-01T00:00:10.000Z",
   "ai.settings.frequencyPenalty": 0.3,
```

packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap

Lines changed: 52 additions & 10 deletions

```diff
@@ -2489,11 +2489,19 @@ exports[`streamText > options.onFinish > should send files 1`] = `

 exports[`streamText > options.onFinish > should send sources 1`] = `
 {
-  "experimental_providerMetadata": undefined,
+  "experimental_providerMetadata": {
+    "testprovider": {
+      "testkey": "testvalue",
+    },
+  },
   "files": [],
   "finishReason": "stop",
   "logprobs": undefined,
-  "providerMetadata": undefined,
+  "providerMetadata": {
+    "testprovider": {
+      "testkey": "testvalue",
+    },
+  },
   "reasoning": undefined,
   "reasoningDetails": [],
   "request": {},
@@ -2541,12 +2549,20 @@ exports[`streamText > options.onFinish > should send sources 1`] = `
   ],
   "steps": [
     {
-      "experimental_providerMetadata": undefined,
+      "experimental_providerMetadata": {
+        "testprovider": {
+          "testkey": "testvalue",
+        },
+      },
       "files": [],
       "finishReason": "stop",
       "isContinued": false,
       "logprobs": undefined,
-      "providerMetadata": undefined,
+      "providerMetadata": {
+        "testprovider": {
+          "testkey": "testvalue",
+        },
+      },
       "reasoning": undefined,
       "reasoningDetails": [],
       "request": {},
@@ -3065,6 +3081,7 @@ exports[`streamText > options.transform > with base transformation > telemetry s
   "ai.operationId": "ai.streamText",
   "ai.prompt": "{"prompt":"test-input"}",
   "ai.response.finishReason": "stop",
+  "ai.response.providerMetadata": "{"testProvider":{"testKey":"TEST VALUE"}}",
   "ai.response.text": "HELLO, WORLD!",
   "ai.response.toolCalls": "[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","args":{"value":"VALUE"}}]",
   "ai.settings.maxRetries": 2,
@@ -3093,6 +3110,7 @@
   "ai.response.model": "mock-model-id",
   "ai.response.msToFinish": 500,
   "ai.response.msToFirstChunk": 100,
+  "ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
   "ai.response.text": "Hello, world!",
   "ai.response.timestamp": "1970-01-01T00:00:00.000Z",
   "ai.response.toolCalls": "[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","args":{"value":"VALUE"}}]",
@@ -3583,12 +3601,20 @@ exports[`streamText > result.fullStream > should send sources 1`] = `
     "type": "source",
   },
   {
-    "experimental_providerMetadata": undefined,
+    "experimental_providerMetadata": {
+      "testprovider": {
+        "testkey": "testvalue",
+      },
+    },
     "finishReason": "stop",
     "isContinued": false,
     "logprobs": undefined,
     "messageId": "msg-0",
-    "providerMetadata": undefined,
+    "providerMetadata": {
+      "testprovider": {
+        "testkey": "testvalue",
+      },
+    },
     "request": {},
     "response": {
       "headers": undefined,
@@ -3605,10 +3631,18 @@
     "warnings": undefined,
   },
   {
-    "experimental_providerMetadata": undefined,
+    "experimental_providerMetadata": {
+      "testprovider": {
+        "testkey": "testvalue",
+      },
+    },
     "finishReason": "stop",
     "logprobs": undefined,
-    "providerMetadata": undefined,
+    "providerMetadata": {
+      "testprovider": {
+        "testkey": "testvalue",
+      },
+    },
     "response": {
       "headers": undefined,
       "id": "id-1",
@@ -4475,12 +4509,20 @@ exports[`streamText > result.steps > should add the reasoning from the model res
 exports[`streamText > result.steps > should add the sources from the model response to the step result 1`] = `
 [
   {
-    "experimental_providerMetadata": undefined,
+    "experimental_providerMetadata": {
+      "testprovider": {
+        "testkey": "testvalue",
+      },
+    },
     "files": [],
     "finishReason": "stop",
     "isContinued": false,
     "logprobs": undefined,
-    "providerMetadata": undefined,
+    "providerMetadata": {
+      "testprovider": {
+        "testkey": "testvalue",
+      },
+    },
     "reasoning": undefined,
     "reasoningDetails": [],
     "request": {},
```
