feat: Add opt-in vercelAiIntegration to cloudflare & vercel-edge #16732

Merged
merged 13 commits on Jun 26, 2025
1 change: 1 addition & 0 deletions packages/cloudflare/src/index.ts
@@ -107,6 +107,7 @@ export { CloudflareClient } from './client';
export { getDefaultIntegrations } from './sdk';

export { fetchIntegration } from './integrations/fetch';
export { vercelAIIntegration } from './integrations/tracing/vercelai';

export { instrumentD1WithSentry } from './d1';

51 changes: 51 additions & 0 deletions packages/cloudflare/src/integrations/tracing/vercelai.ts
@@ -0,0 +1,51 @@
/**
* This is a copy of the Vercel AI integration from the node SDK.
*
* The only difference is that it does not use `@opentelemetry/instrumentation`
* because Cloudflare Workers do not support it.
*
 * Therefore, we cannot automatically enable `experimental_telemetry: { isEnabled: true }`,
 * and users have to set it manually to get spans.
*/

import type { IntegrationFn } from '@sentry/core';
import { addVercelAiProcessors, defineIntegration } from '@sentry/core';

const INTEGRATION_NAME = 'VercelAI';

const _vercelAIIntegration = (() => {
return {
name: INTEGRATION_NAME,
setup(client) {
addVercelAiProcessors(client);
},
};
}) satisfies IntegrationFn;

/**
* Adds Sentry tracing instrumentation for the [ai](https://www.npmjs.com/package/ai) library.
 * This integration is not enabled by default; you need to add it manually.
*
* For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry).
*
* You need to enable collecting spans for a specific call by setting
* `experimental_telemetry.isEnabled` to `true` in the first argument of the function call.
*
* ```javascript
* const result = await generateText({
* model: openai('gpt-4-turbo'),
* experimental_telemetry: { isEnabled: true },
* });
* ```
*
 * If you want to collect inputs and outputs for a specific call, you must explicitly opt in to each
* function call by setting `experimental_telemetry.recordInputs` and `experimental_telemetry.recordOutputs`
* to `true`.
*
* ```javascript
* const result = await generateText({
* model: openai('gpt-4-turbo'),
* experimental_telemetry: { isEnabled: true, recordInputs: true, recordOutputs: true },
* });
 * ```
 */
export const vercelAIIntegration = defineIntegration(_vercelAIIntegration);
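For context, a minimal usage sketch of how a Worker could opt in to this integration. This is not part of the diff; it assumes the `withSentry` handler wrapper from `@sentry/cloudflare`, the `ai` and `@ai-sdk/openai` packages, a hypothetical `SENTRY_DSN` binding, and the Workers `ExportedHandler` type. As the doc comment above notes, `experimental_telemetry.isEnabled` must be set manually on each call:

```typescript
import * as Sentry from '@sentry/cloudflare';
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

interface Env {
  SENTRY_DSN: string; // hypothetical binding name
}

export default Sentry.withSentry(
  (env: Env) => ({
    dsn: env.SENTRY_DSN,
    tracesSampleRate: 1.0,
    // Opt in explicitly: the integration is not part of the defaults.
    integrations: [Sentry.vercelAIIntegration()],
  }),
  {
    async fetch(_request, _env, _ctx): Promise<Response> {
      const result = await generateText({
        model: openai('gpt-4-turbo'),
        prompt: 'Say hello',
        // Must be set per call on Cloudflare; the SDK cannot patch this automatically here.
        experimental_telemetry: { isEnabled: true },
      });
      return new Response(result.text);
    },
  } satisfies ExportedHandler<Env>,
);
```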
8 changes: 8 additions & 0 deletions packages/cloudflare/src/utils/addOriginToSpan.ts
@@ -0,0 +1,8 @@
import type { Span } from '@opentelemetry/api';
import type { SpanOrigin } from '@sentry/core';
import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';

/** Adds an origin to an OTEL Span. */
export function addOriginToSpan(span: Span, origin: SpanOrigin): void {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin);
}
8 changes: 8 additions & 0 deletions packages/cloudflare/src/utils/commonjs.ts
@@ -0,0 +1,8 @@
/** Detect CommonJS. */
export function isCjs(): boolean {
try {
return typeof module !== 'undefined' && typeof module.exports !== 'undefined';
} catch {
return false;
}
}
3 changes: 2 additions & 1 deletion packages/core/src/client.ts
@@ -498,7 +498,8 @@ export abstract class Client<O extends ClientOptions = ClientOptions> {
): void;

/**
* Register a callback for whenever a span is ended.
* Register a callback for after a span is ended.
* NOTE: The span cannot be mutated anymore in this callback.
* Receives the span as argument.
* @returns {() => void} A function that, when executed, removes the registered callback.
*/
1 change: 1 addition & 0 deletions packages/core/src/index.ts
@@ -123,6 +123,7 @@ export { captureFeedback } from './feedback';
export type { ReportDialogOptions } from './report-dialog';
export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/exports';
export { consoleLoggingIntegration } from './logs/console-integration';
export { addVercelAiProcessors } from './utils/vercel-ai';

export type { FeatureFlag } from './utils/featureFlags';
export {
221 changes: 221 additions & 0 deletions packages/core/src/utils/vercel-ai.ts
@@ -0,0 +1,221 @@
import type { Client } from '../client';
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../semanticAttributes';
import type { Event } from '../types-hoist/event';
import type { Span, SpanAttributes, SpanJSON, SpanOrigin } from '../types-hoist/span';
import { spanToJSON } from './spanUtils';
import {
AI_MODEL_ID_ATTRIBUTE,
AI_MODEL_PROVIDER_ATTRIBUTE,
AI_PROMPT_ATTRIBUTE,
AI_PROMPT_MESSAGES_ATTRIBUTE,
AI_PROMPT_TOOLS_ATTRIBUTE,
AI_RESPONSE_TEXT_ATTRIBUTE,
AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
AI_TOOL_CALL_ID_ATTRIBUTE,
AI_TOOL_CALL_NAME_ATTRIBUTE,
AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
AI_USAGE_PROMPT_TOKENS_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
} from './vercel-ai-attributes';

function addOriginToSpan(span: Span, origin: SpanOrigin): void {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin);
}

/**
* Post-process spans emitted by the Vercel AI SDK.
 * This is meant to be used as a handler for `client.on('spanStart', ...)`.
*/
function onVercelAiSpanStart(span: Span): void {
const { data: attributes, description: name } = spanToJSON(span);

if (!name) {
return;
}

// Tool call spans
// https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans
if (attributes[AI_TOOL_CALL_NAME_ATTRIBUTE] && attributes[AI_TOOL_CALL_ID_ATTRIBUTE] && name === 'ai.toolCall') {
processToolCallSpan(span, attributes);
return;
}

// The model ID and provider must be defined for generate, stream, and embed spans.
// The id of the model
const aiModelId = attributes[AI_MODEL_ID_ATTRIBUTE];
// The provider of the model
const aiModelProvider = attributes[AI_MODEL_PROVIDER_ATTRIBUTE];
if (typeof aiModelId !== 'string' || typeof aiModelProvider !== 'string' || !aiModelId || !aiModelProvider) {
return;
}

processGenerateSpan(span, name, attributes);
}

const vercelAiEventProcessor = Object.assign(
(event: Event): Event => {
if (event.type === 'transaction' && event.spans) {
for (const span of event.spans) {
// this mutates spans in-place
processEndedVercelAiSpan(span);
}
}
return event;
},
{ id: 'VercelAiEventProcessor' },
);

/**
 * Post-process an ended span emitted by the Vercel AI SDK.
*/
function processEndedVercelAiSpan(span: SpanJSON): void {
const { data: attributes, origin } = span;

if (origin !== 'auto.vercelai.otel') {
return;
}

renameAttributeKey(attributes, AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE);
renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE);

if (
typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' &&
typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number'
) {
attributes['gen_ai.usage.total_tokens'] =
attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
}

// Rename AI SDK attributes to standardized gen_ai attributes
renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages');
renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');
renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls');
renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools');
}

/**
* Renames an attribute key in the provided attributes object if the old key exists.
* This function safely handles null and undefined values.
*/
function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, newKey: string): void {
if (attributes[oldKey] != null) {
attributes[newKey] = attributes[oldKey];
// eslint-disable-next-line @typescript-eslint/no-dynamic-delete
delete attributes[oldKey];
}
}

function processToolCallSpan(span: Span, attributes: SpanAttributes): void {
addOriginToSpan(span, 'auto.vercelai.otel');
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool');
span.setAttribute('gen_ai.tool.call.id', attributes[AI_TOOL_CALL_ID_ATTRIBUTE]);
span.setAttribute('gen_ai.tool.name', attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]);
span.updateName(`execute_tool ${attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]}`);
}

function processGenerateSpan(span: Span, name: string, attributes: SpanAttributes): void {
addOriginToSpan(span, 'auto.vercelai.otel');

  const nameWithoutAi = name.replace('ai.', '');
  span.setAttribute('ai.pipeline.name', nameWithoutAi);
  span.updateName(nameWithoutAi);

  // If a telemetry functionId is set and this is a pipeline span, use it in the span and pipeline name
  const functionId = attributes[AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE];
  if (functionId && typeof functionId === 'string' && name.split('.').length - 1 === 1) {
    span.updateName(`${nameWithoutAi} ${functionId}`);
span.setAttribute('ai.pipeline.name', functionId);
}

if (attributes[AI_PROMPT_ATTRIBUTE]) {
span.setAttribute('gen_ai.prompt', attributes[AI_PROMPT_ATTRIBUTE]);
}
if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) {
span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]);
}
span.setAttribute('ai.streaming', name.includes('stream'));

// Generate Spans
if (name === 'ai.generateText') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
return;
}

if (name === 'ai.generateText.doGenerate') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_text');
span.updateName(`generate_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
return;
}

if (name === 'ai.streamText') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
return;
}

if (name === 'ai.streamText.doStream') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_text');
span.updateName(`stream_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
return;
}

if (name === 'ai.generateObject') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
return;
}

if (name === 'ai.generateObject.doGenerate') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_object');
span.updateName(`generate_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
return;
}

if (name === 'ai.streamObject') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
return;
}

if (name === 'ai.streamObject.doStream') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_object');
span.updateName(`stream_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
return;
}

if (name === 'ai.embed') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
return;
}

if (name === 'ai.embed.doEmbed') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed');
span.updateName(`embed ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
return;
}

if (name === 'ai.embedMany') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
return;
}

if (name === 'ai.embedMany.doEmbed') {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed_many');
span.updateName(`embed_many ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
return;
}

if (name.startsWith('ai.stream')) {
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run');
return;
}
}

/**
* Add event processors to the given client to process Vercel AI spans.
*/
export function addVercelAiProcessors(client: Client): void {
client.on('spanStart', onVercelAiSpanStart);
// Note: We cannot do this on `spanEnd`, because the span cannot be mutated anymore at this point
client.addEventProcessor(vercelAiEventProcessor);
}
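To make the mapping concrete, here is a hypothetical before/after of the attribute rewriting that `processEndedVercelAiSpan` performs on a span with origin `auto.vercelai.otel`. It is only a sketch: the literal `ai.usage.promptTokens`, `ai.usage.completionTokens`, and `ai.prompt.messages` keys are assumed to be the values behind the corresponding `AI_*_ATTRIBUTE` constants:

```typescript
// Assumed raw attributes as the Vercel AI SDK emits them (key names are assumptions,
// standing in for AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE, etc.).
const before = {
  'ai.usage.promptTokens': 12,
  'ai.usage.completionTokens': 34,
  'ai.prompt.messages': '[{"role":"user","content":"Hi"}]',
};

// What the event processor leaves on the ended span: keys renamed to gen_ai.*
// and a total token count computed from input + output.
const after = {
  'gen_ai.usage.input_tokens': 12,
  'gen_ai.usage.output_tokens': 34,
  'gen_ai.usage.total_tokens': 46,
  'gen_ai.request.messages': '[{"role":"user","content":"Hi"}]',
};
```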
3 changes: 3 additions & 0 deletions packages/nextjs/src/index.types.ts
@@ -22,6 +22,9 @@ export declare function init(
export declare const linkedErrorsIntegration: typeof clientSdk.linkedErrorsIntegration;
export declare const contextLinesIntegration: typeof clientSdk.contextLinesIntegration;

// Different implementation in server and worker
export declare const vercelAIIntegration: typeof serverSdk.vercelAIIntegration;

export declare const getDefaultIntegrations: (options: Options) => Integration[];
export declare const defaultStackParser: StackParser;
