Commit 45088a2

feat(node): Switch to new semantic conventions for Vercel AI (#16476)
Resolves #16453. In getsentry/sentry-conventions#57 we deprecated some of the `ai.X` attributes in favour of OTEL's `gen_ai.X` attributes; this commit updates the Vercel AI integration to address those deprecations. The new attributes are based on https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data, and we created `attributes.ts` to track them as constants. See getsentry/relay#4768 for the corresponding Relay change that switches token-usage measurement to the new attributes.
1 parent b8dd290 commit 45088a2
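
The commit message says `attributes.ts` was created to track the new attribute keys as constants. A minimal sketch of what such a file could contain, inferred from the `gen_ai.X` keys asserted in the updated test below (the constant names are illustrative, not copied from the commit):

```ts
// Hypothetical sketch of attributes.ts: the OTEL gen_ai.* keys the
// integration now emits, tracked as constants. Constant names are
// illustrative; only the string values appear in the test below.
export const GEN_AI_RESPONSE_MODEL_ATTRIBUTE = 'gen_ai.response.model';
export const GEN_AI_RESPONSE_ID_ATTRIBUTE = 'gen_ai.response.id';
export const GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE = 'gen_ai.response.finish_reasons';
export const GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.input_tokens';
export const GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.output_tokens';
export const GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE = 'gen_ai.usage.total_tokens';
```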

File tree

3 files changed (+846, -40 lines)

dev-packages/node-integration-tests/suites/tracing/ai/test.ts

Lines changed: 24 additions & 26 deletions
@@ -12,20 +12,18 @@ describe('ai', () => {
       spans: expect.arrayContaining([
         expect.objectContaining({
           data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
             'ai.model.id': 'mock-model-id',
             'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
             'ai.operationId': 'ai.generateText',
             'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
             'ai.response.finishReason': 'stop',
             'ai.settings.maxRetries': 2,
             'ai.settings.maxSteps': 1,
             'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
+            'gen_ai.response.model': 'mock-model-id',
+            'gen_ai.usage.input_tokens': 10,
+            'gen_ai.usage.output_tokens': 20,
+            'gen_ai.usage.total_tokens': 30,
             'operation.name': 'ai.generateText',
             'sentry.op': 'ai.pipeline.generateText',
             'sentry.origin': 'auto.vercelai.otel',
@@ -47,18 +45,17 @@ describe('ai', () => {
             'gen_ai.system': 'mock-provider',
             'gen_ai.request.model': 'mock-model-id',
             'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
             'ai.streaming': false,
             'ai.response.finishReason': 'stop',
             'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
+            'ai.response.id': expect.any(String),
+            'ai.response.timestamp': expect.any(String),
             'gen_ai.response.finish_reasons': ['stop'],
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
+            'gen_ai.response.id': expect.any(String),
+            'gen_ai.response.model': 'mock-model-id',
+            'gen_ai.usage.total_tokens': 30,
           }),
           description: 'generateText.doGenerate',
           op: 'ai.run.doGenerate',
@@ -67,22 +64,21 @@ describe('ai', () => {
         }),
         expect.objectContaining({
           data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
             'ai.model.id': 'mock-model-id',
             'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
-            'ai.prompt': '{"prompt":"Where is the second span?"}',
             'ai.operationId': 'ai.generateText',
             'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
+            'ai.prompt': '{"prompt":"Where is the second span?"}',
             'ai.response.finishReason': 'stop',
-            'ai.input_messages': '{"prompt":"Where is the second span?"}',
+            'ai.response.text': expect.any(String),
             'ai.settings.maxRetries': 2,
             'ai.settings.maxSteps': 1,
             'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
+            'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
+            'gen_ai.response.model': 'mock-model-id',
+            'gen_ai.usage.input_tokens': 10,
+            'gen_ai.usage.output_tokens': 20,
+            'gen_ai.usage.total_tokens': 30,
             'operation.name': 'ai.generateText',
             'sentry.op': 'ai.pipeline.generateText',
             'sentry.origin': 'auto.vercelai.otel',
@@ -104,18 +100,20 @@ describe('ai', () => {
             'gen_ai.system': 'mock-provider',
             'gen_ai.request.model': 'mock-model-id',
             'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
             'ai.streaming': false,
             'ai.response.finishReason': 'stop',
             'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
+            'ai.response.id': expect.any(String),
+            'ai.response.text': expect.any(String),
+            'ai.response.timestamp': expect.any(String),
+            'ai.prompt.format': expect.any(String),
+            'ai.prompt.messages': expect.any(String),
             'gen_ai.response.finish_reasons': ['stop'],
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
+            'gen_ai.response.id': expect.any(String),
+            'gen_ai.response.model': 'mock-model-id',
+            'gen_ai.usage.total_tokens': 30,
           }),
           description: 'generateText.doGenerate',
           op: 'ai.run.doGenerate',
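
For context on where these spans originate: the AI SDK emits OpenTelemetry spans when per-call telemetry is enabled, and the Sentry integration maps them onto the span attributes asserted above. A minimal sketch of code that would produce such spans, assuming the Node SDK's `vercelAIIntegration` and a real provider in place of the test's mock one (the DSN and model choice are placeholders):

```ts
import * as Sentry from '@sentry/node';
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

Sentry.init({
  dsn: '__YOUR_DSN__',
  tracesSampleRate: 1.0,
  // Maps the AI SDK's telemetry spans onto gen_ai.* attributes.
  integrations: [Sentry.vercelAIIntegration()],
});

// experimental_telemetry is the AI SDK's documented opt-in for
// emitting OpenTelemetry spans (see the ai-sdk.dev link above).
const { text } = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Where is the second span?',
  experimental_telemetry: { isEnabled: true },
});
```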
