Skip to content

Commit a26bdaf

Browse files
authored
feat(node): Update Vercel AI span attributes (#16580)
Resolves #16572
1 parent a744088 commit a26bdaf

File tree

5 files changed

+42
-20
lines changed

5 files changed

+42
-20
lines changed

dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,14 @@ test('should create AI spans with correct attributes', async ({ page }) => {
3434
expect(firstPipelineSpan?.data?.['ai.model.id']).toBe('mock-model-id');
3535
expect(firstPipelineSpan?.data?.['ai.model.provider']).toBe('mock-provider');
3636
expect(firstPipelineSpan?.data?.['ai.prompt']).toContain('Where is the first span?');
37-
expect(firstPipelineSpan?.data?.['ai.response.text']).toBe('First span here!');
37+
expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!');
3838
expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
3939
expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */
4040

4141
// Second AI call - explicitly enabled telemetry
4242
const secondPipelineSpan = aiPipelineSpans[0];
4343
expect(secondPipelineSpan?.data?.['ai.prompt']).toContain('Where is the second span?');
44-
expect(secondPipelineSpan?.data?.['ai.response.text']).toContain('Second span here!');
44+
expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!');
4545

4646
// Third AI call - with tool calls
4747
/* const thirdPipelineSpan = aiPipelineSpans[2];
@@ -50,7 +50,7 @@ test('should create AI spans with correct attributes', async ({ page }) => {
5050
expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */
5151

5252
// Tool call span
53-
/* const toolSpan = toolCallSpans[0];
53+
/* const toolSpan = toolCallSpans[0];
5454
expect(toolSpan?.data?.['ai.toolCall.name']).toBe('getWeather');
5555
expect(toolSpan?.data?.['ai.toolCall.id']).toBe('call-1');
5656
expect(toolSpan?.data?.['ai.toolCall.args']).toContain('San Francisco');

dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,5 +7,5 @@ Sentry.init({
77
tracesSampleRate: 1.0,
88
sendDefaultPii: true,
99
transport: loggingTransport,
10-
integrations: [Sentry.vercelAIIntegration()],
10+
integrations: [Sentry.vercelAIIntegration({ force: true })],
1111
});

dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,5 +6,5 @@ Sentry.init({
66
release: '1.0',
77
tracesSampleRate: 1.0,
88
transport: loggingTransport,
9-
integrations: [Sentry.vercelAIIntegration()],
9+
integrations: [Sentry.vercelAIIntegration({ force: true })],
1010
});

dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ describe('Vercel AI integration', () => {
7373
'ai.pipeline.name': 'generateText',
7474
'ai.prompt': '{"prompt":"Where is the second span?"}',
7575
'ai.response.finishReason': 'stop',
76-
'ai.response.text': expect.any(String),
76+
'gen_ai.response.text': expect.any(String),
7777
'ai.settings.maxRetries': 2,
7878
'ai.settings.maxSteps': 1,
7979
'ai.streaming': false,
@@ -108,10 +108,10 @@ describe('Vercel AI integration', () => {
108108
'ai.response.finishReason': 'stop',
109109
'ai.response.model': 'mock-model-id',
110110
'ai.response.id': expect.any(String),
111-
'ai.response.text': expect.any(String),
111+
'gen_ai.response.text': expect.any(String),
112112
'ai.response.timestamp': expect.any(String),
113113
'ai.prompt.format': expect.any(String),
114-
'ai.prompt.messages': expect.any(String),
114+
'gen_ai.request.messages': expect.any(String),
115115
'gen_ai.response.finish_reasons': ['stop'],
116116
'gen_ai.usage.input_tokens': 10,
117117
'gen_ai.usage.output_tokens': 20,
@@ -210,7 +210,7 @@ describe('Vercel AI integration', () => {
210210
'ai.pipeline.name': 'generateText',
211211
'ai.prompt': '{"prompt":"Where is the first span?"}',
212212
'ai.response.finishReason': 'stop',
213-
'ai.response.text': 'First span here!',
213+
'gen_ai.response.text': 'First span here!',
214214
'ai.settings.maxRetries': 2,
215215
'ai.settings.maxSteps': 1,
216216
'ai.streaming': false,
@@ -236,11 +236,11 @@ describe('Vercel AI integration', () => {
236236
'ai.operationId': 'ai.generateText.doGenerate',
237237
'ai.pipeline.name': 'generateText.doGenerate',
238238
'ai.prompt.format': 'prompt',
239-
'ai.prompt.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
239+
'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
240240
'ai.response.finishReason': 'stop',
241241
'ai.response.id': expect.any(String),
242242
'ai.response.model': 'mock-model-id',
243-
'ai.response.text': 'First span here!',
243+
'gen_ai.response.text': 'First span here!',
244244
'ai.response.timestamp': expect.any(String),
245245
'ai.settings.maxRetries': 2,
246246
'ai.streaming': false,
@@ -270,7 +270,7 @@ describe('Vercel AI integration', () => {
270270
'ai.pipeline.name': 'generateText',
271271
'ai.prompt': '{"prompt":"Where is the second span?"}',
272272
'ai.response.finishReason': 'stop',
273-
'ai.response.text': expect.any(String),
273+
'gen_ai.response.text': expect.any(String),
274274
'ai.settings.maxRetries': 2,
275275
'ai.settings.maxSteps': 1,
276276
'ai.streaming': false,
@@ -305,10 +305,10 @@ describe('Vercel AI integration', () => {
305305
'ai.response.finishReason': 'stop',
306306
'ai.response.model': 'mock-model-id',
307307
'ai.response.id': expect.any(String),
308-
'ai.response.text': expect.any(String),
308+
'gen_ai.response.text': expect.any(String),
309309
'ai.response.timestamp': expect.any(String),
310310
'ai.prompt.format': expect.any(String),
311-
'ai.prompt.messages': expect.any(String),
311+
'gen_ai.request.messages': expect.any(String),
312312
'gen_ai.response.finish_reasons': ['stop'],
313313
'gen_ai.usage.input_tokens': 10,
314314
'gen_ai.usage.output_tokens': 20,
@@ -330,8 +330,8 @@ describe('Vercel AI integration', () => {
330330
'ai.pipeline.name': 'generateText',
331331
'ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
332332
'ai.response.finishReason': 'tool-calls',
333-
'ai.response.text': 'Tool call completed!',
334-
'ai.response.toolCalls': expect.any(String),
333+
'gen_ai.response.text': 'Tool call completed!',
334+
'gen_ai.response.tool_calls': expect.any(String),
335335
'ai.settings.maxRetries': 2,
336336
'ai.settings.maxSteps': 1,
337337
'ai.streaming': false,
@@ -357,15 +357,15 @@ describe('Vercel AI integration', () => {
357357
'ai.operationId': 'ai.generateText.doGenerate',
358358
'ai.pipeline.name': 'generateText.doGenerate',
359359
'ai.prompt.format': expect.any(String),
360-
'ai.prompt.messages': expect.any(String),
360+
'gen_ai.request.messages': expect.any(String),
361361
'ai.prompt.toolChoice': expect.any(String),
362-
'ai.prompt.tools': expect.any(Array),
362+
'gen_ai.request.available_tools': expect.any(Array),
363363
'ai.response.finishReason': 'tool-calls',
364364
'ai.response.id': expect.any(String),
365365
'ai.response.model': 'mock-model-id',
366-
'ai.response.text': 'Tool call completed!',
366+
'gen_ai.response.text': 'Tool call completed!',
367367
'ai.response.timestamp': expect.any(String),
368-
'ai.response.toolCalls': expect.any(String),
368+
'gen_ai.response.tool_calls': expect.any(String),
369369
'ai.settings.maxRetries': 2,
370370
'ai.streaming': false,
371371
'gen_ai.request.model': 'mock-model-id',

packages/node/src/integrations/tracing/vercelai/index.ts

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,10 @@ import {
99
AI_MODEL_ID_ATTRIBUTE,
1010
AI_MODEL_PROVIDER_ATTRIBUTE,
1111
AI_PROMPT_ATTRIBUTE,
12+
AI_PROMPT_MESSAGES_ATTRIBUTE,
13+
AI_PROMPT_TOOLS_ATTRIBUTE,
14+
AI_RESPONSE_TEXT_ATTRIBUTE,
15+
AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
1216
AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
1317
AI_TOOL_CALL_ID_ATTRIBUTE,
1418
AI_TOOL_CALL_NAME_ATTRIBUTE,
@@ -193,6 +197,24 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
193197
attributes['gen_ai.usage.total_tokens'] =
194198
attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
195199
}
200+
201+
// Rename AI SDK attributes to standardized gen_ai attributes
202+
if (attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] != undefined) {
203+
attributes['gen_ai.request.messages'] = attributes[AI_PROMPT_MESSAGES_ATTRIBUTE];
204+
delete attributes[AI_PROMPT_MESSAGES_ATTRIBUTE];
205+
}
206+
if (attributes[AI_RESPONSE_TEXT_ATTRIBUTE] != undefined) {
207+
attributes['gen_ai.response.text'] = attributes[AI_RESPONSE_TEXT_ATTRIBUTE];
208+
delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE];
209+
}
210+
if (attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] != undefined) {
211+
attributes['gen_ai.response.tool_calls'] = attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE];
212+
delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE];
213+
}
214+
if (attributes[AI_PROMPT_TOOLS_ATTRIBUTE] != undefined) {
215+
attributes['gen_ai.request.available_tools'] = attributes[AI_PROMPT_TOOLS_ATTRIBUTE];
216+
delete attributes[AI_PROMPT_TOOLS_ATTRIBUTE];
217+
}
196218
}
197219
}
198220

0 commit comments

Comments (0)