@@ -2,14 +2,15 @@ import { afterAll, describe, expect } from 'vitest';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
 
 // `ai` SDK only support Node 18+
-describe('ai', () => {
+describe('Vercel AI integration', () => {
   afterAll(() => {
     cleanupChildProcesses();
   });
 
-  const EXPECTED_TRANSACTION = {
+  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
     transaction: 'main',
     spans: expect.arrayContaining([
+      // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
       expect.objectContaining({
         data: expect.objectContaining({
           'ai.completion_tokens.used': 20,
@@ -35,48 +36,51 @@ describe('ai', () => {
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
+      // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
       expect.objectContaining({
         data: expect.objectContaining({
-          'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'ai.run.doGenerate',
-          'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
+          'ai.completion_tokens.used': 20,
           'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
           'ai.model_id': 'mock-model-id',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.usage.promptTokens': 10,
-          'ai.usage.completionTokens': 20,
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'ai.completion_tokens.used': 20,
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
           'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
           'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
         }),
-        description: 'generateText.doGenerate',
-        op: 'ai.run.doGenerate',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
+    ]),
+  };
+
+  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
       expect.objectContaining({
         data: expect.objectContaining({
           'ai.completion_tokens.used': 20,
           'ai.model.id': 'mock-model-id',
           'ai.model.provider': 'mock-provider',
           'ai.model_id': 'mock-model-id',
-          'ai.prompt': '{"prompt":"Where is the second span?"}',
+          'ai.prompt': '{"prompt":"Where is the first span?"}',
           'ai.operationId': 'ai.generateText',
           'ai.pipeline.name': 'generateText',
           'ai.prompt_tokens.used': 10,
           'ai.response.finishReason': 'stop',
-          'ai.input_messages': '{"prompt":"Where is the second span?"}',
+          'ai.input_messages': '{"prompt":"Where is the first span?"}',
           'ai.settings.maxRetries': 2,
           'ai.settings.maxSteps': 1,
           'ai.streaming': false,
@@ -92,42 +96,46 @@ describe('ai', () => {
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
+      // Second span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: expect.objectContaining({
-          'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'ai.run.doGenerate',
-          'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
+          'ai.completion_tokens.used': 20,
           'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
           'ai.model_id': 'mock-model-id',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.usage.promptTokens': 10,
-          'ai.usage.completionTokens': 20,
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'ai.completion_tokens.used': 20,
+          'ai.prompt': '{"prompt":"Where is the second span?"}',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
           'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.input_messages': '{"prompt":"Where is the second span?"}',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
           'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
         }),
-        description: 'generateText.doGenerate',
-        op: 'ai.run.doGenerate',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
     ]),
   };
 
   createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
-    test('creates ai related spans', async () => {
-      await createRunner().expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
+    test('creates ai related spans with sendDefaultPii: false', async () => {
+      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
+    });
+  });
+
+  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
+    test('creates ai related spans with sendDefaultPii: true', async () => {
+      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed();
     });
   });
 });
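Note: the second `createEsmAndCjsTests` call points at `instrument-with-pii.mjs`, which is referenced by the test but not included in this diff. As a minimal sketch only, such an instrumentation file is assumed to mirror `instrument.mjs` with `sendDefaultPii` flipped to `true`; the DSN below is a placeholder, not taken from this repository:

```js
// Hypothetical sketch of instrument-with-pii.mjs - not part of this diff.
// Assumed to be identical to instrument.mjs except for sendDefaultPii.
import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337', // placeholder DSN
  tracesSampleRate: 1.0,
  sendDefaultPii: true, // the option under test: allows recording of AI prompts/responses
});
```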