Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
c68750f
Update LLM SDK instrumentations for better otel semantic convention c…
dmontagu Dec 16, 2025
43ba96c
Update to set gen_ai.input.messages and gen_ai.output.messages instea…
brightsparc Dec 21, 2025
e6e305a
Re-add inputs_to_events and responses_output_events for openai_agents…
brightsparc Jan 23, 2026
9ad3829
Update test snapshots for new semantic convention attributes
brightsparc Jan 23, 2026
9728871
Update test_anthropic_bedrock snapshot for new semantic convention at…
brightsparc Jan 23, 2026
a47faf8
Use IsJson() wrapper for gen_ai message attributes in tests
brightsparc Jan 23, 2026
45e8669
Convert remaining gen_ai message attributes to use IsJson() in tests
brightsparc Jan 23, 2026
622f804
Fix formatting
brightsparc Jan 23, 2026
2e4ad43
Fix pyright
brightsparc Jan 23, 2026
55bc09d
Use types with convert_openai_response_to_semconv, increase coverage
brightsparc Jan 23, 2026
1f7304b
Fix coverage
brightsparc Jan 23, 2026
9e846e9
Addressed comment for parse_json_attributes=True, added TypedDict to…
brightsparc Jan 23, 2026
ceec303
Fix format
brightsparc Jan 23, 2026
8df81ab
Fix pyright
brightsparc Jan 23, 2026
27b0dbf
Fix typing_extensions import for python 3.9
brightsparc Jan 23, 2026
9988939
Use union since python 3.9 doesn't support pipe
brightsparc Jan 23, 2026
b657c6f
Fix linting
brightsparc Jan 23, 2026
2fa8fed
Update operation.name to be unique for different openai methods
brightsparc Jan 23, 2026
3aef5b9
Fix coverage
brightsparc Jan 23, 2026
69cd76f
Update coverage
brightsparc Jan 24, 2026
709fc9d
Fix connection issues, will next split off PRs
brightsparc Jan 24, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
169 changes: 168 additions & 1 deletion logfire/_internal/integrations/llm_providers/anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,18 +4,27 @@
from typing import TYPE_CHECKING, Any, cast

import anthropic
from anthropic.types import Message, TextBlock, TextDelta
from anthropic.types import Message, TextBlock, TextDelta, ToolUseBlock

from logfire._internal.utils import handle_internal_errors

from .semconv import (
INPUT_MESSAGES,
INPUT_TOKENS,
OPERATION_NAME,
OUTPUT_MESSAGES,
OUTPUT_TOKENS,
PROVIDER_NAME,
REQUEST_MAX_TOKENS,
REQUEST_MODEL,
REQUEST_STOP_SEQUENCES,
REQUEST_TEMPERATURE,
REQUEST_TOP_K,
REQUEST_TOP_P,
RESPONSE_FINISH_REASONS,
RESPONSE_ID,
RESPONSE_MODEL,
SYSTEM_INSTRUCTIONS,
TOOL_DEFINITIONS,
)
from .types import EndpointConfig, StreamState
Expand Down Expand Up @@ -68,9 +77,19 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig:
'request_data': json_data,
PROVIDER_NAME: 'anthropic',
OPERATION_NAME: 'chat',
REQUEST_MODEL: json_data.get('model'),
}
_extract_request_parameters(json_data, span_data)

# Convert messages to semantic convention format
messages: list[dict[str, Any]] = json_data.get('messages', [])
system: str | list[dict[str, Any]] | None = json_data.get('system')
if messages or system:
input_messages, system_instructions = convert_anthropic_messages_to_semconv(messages, system)
span_data[INPUT_MESSAGES] = input_messages
if system_instructions:
span_data[SYSTEM_INSTRUCTIONS] = system_instructions

return EndpointConfig(
message_template='Message with {request_data[model]!r}',
span_data=span_data,
Expand All @@ -82,12 +101,141 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig:
'url': url,
PROVIDER_NAME: 'anthropic',
}
if 'model' in json_data:
span_data[REQUEST_MODEL] = json_data['model']
return EndpointConfig(
message_template='Anthropic API call to {url!r}',
span_data=span_data,
)


def convert_anthropic_messages_to_semconv(
messages: list[dict[str, Any]],
system: str | list[dict[str, Any]] | None = None,
) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
"""Convert Anthropic messages format to OTel Gen AI Semantic Convention format.

Returns a tuple of (input_messages, system_instructions).
"""
input_messages: list[dict[str, Any]] = []
system_instructions: list[dict[str, Any]] = []

# Handle system parameter (Anthropic uses a separate 'system' parameter)
if system:
if isinstance(system, str):
system_instructions.append({'type': 'text', 'content': system})
else: # pragma: no cover
for part in system:
if part.get('type') == 'text':
system_instructions.append({'type': 'text', 'content': part.get('text', '')})
else:
system_instructions.append(part)

for msg in messages:
role = msg.get('role', 'unknown')
content = msg.get('content')

parts: list[dict[str, Any]] = []

if content is not None:
if isinstance(content, str):
parts.append({'type': 'text', 'content': content})
elif isinstance(content, list):
for part in cast('list[dict[str, Any] | str]', content):
parts.append(_convert_anthropic_content_part(part))

input_messages.append(
{
'role': role,
'parts': parts,
}
)

return input_messages, system_instructions


def _convert_anthropic_content_part(part: dict[str, Any] | str) -> dict[str, Any]:
"""Convert a single Anthropic content part to semconv format."""
if isinstance(part, str): # pragma: no cover
return {'type': 'text', 'content': part}

part_type = part.get('type', 'text')
if part_type == 'text':
return {'type': 'text', 'content': part.get('text', '')}
elif part_type == 'image': # pragma: no cover
source = part.get('source', {})
if source.get('type') == 'base64':
return {
'type': 'blob',
'modality': 'image',
'content': source.get('data', ''),
'media_type': source.get('media_type'),
}
elif source.get('type') == 'url':
return {'type': 'uri', 'modality': 'image', 'uri': source.get('url', '')}
else:
return {'type': 'image', **part}
elif part_type == 'tool_use':
return {
'type': 'tool_call',
'id': part.get('id'),
'name': part.get('name'),
'arguments': part.get('input'),
}
elif part_type == 'tool_result': # pragma: no cover
result_content = part.get('content')
if isinstance(result_content, list):
# Extract text from tool result content
text_parts: list[str] = []
for p in cast('list[dict[str, Any] | str]', result_content):
if isinstance(p, dict) and p.get('type') == 'text':
text_parts.append(str(p.get('text', '')))
elif isinstance(p, str):
text_parts.append(p)
result_text = ' '.join(text_parts)
else:
result_text = str(result_content) if result_content else ''
return {
'type': 'tool_call_response',
'id': part.get('tool_use_id'),
'response': result_text,
}
else: # pragma: no cover
# Return as generic part
return {'type': part_type, **{k: v for k, v in part.items() if k != 'type'}}


def convert_anthropic_response_to_semconv(message: Message) -> dict[str, Any]:
    """Build the OTel Gen AI Semantic Convention output message for an Anthropic response."""
    converted_parts: list[dict[str, Any]] = []

    for block in message.content:
        if isinstance(block, TextBlock):
            converted_parts.append({'type': 'text', 'content': block.text})
        elif isinstance(block, ToolUseBlock):
            converted_parts.append(
                {
                    'type': 'tool_call',
                    'id': block.id,
                    'name': block.name,
                    'arguments': block.input,
                }
            )
        elif hasattr(block, 'type'):  # pragma: no cover
            # Any other block type is dumped to a plain dict and converted generically.
            raw = block.model_dump() if hasattr(block, 'model_dump') else dict(block)
            converted_parts.append(_convert_anthropic_content_part(raw))

    semconv_message: dict[str, Any] = {
        'role': message.role,
        'parts': converted_parts,
    }
    if message.stop_reason:
        semconv_message['finish_reason'] = message.stop_reason

    return semconv_message


def content_from_messages(chunk: anthropic.types.MessageStreamEvent) -> str | None:
if hasattr(chunk, 'content_block'):
return chunk.content_block.text if isinstance(chunk.content_block, TextBlock) else None # type: ignore
Expand All @@ -113,6 +261,7 @@ def get_response_data(self) -> Any:
def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT:
"""Updates the span based on the type of response."""
if isinstance(response, Message): # pragma: no branch
# Keep response_data for backward compatibility
message: dict[str, Any] = {'role': 'assistant'}
for block in response.content:
if block.type == 'text':
Expand All @@ -128,6 +277,24 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT:
}
)
span.set_attribute('response_data', {'message': message, 'usage': response.usage})

# Add semantic convention attributes
span.set_attribute(RESPONSE_MODEL, response.model)
span.set_attribute(RESPONSE_ID, response.id)

# Add token usage
if response.usage:
span.set_attribute(INPUT_TOKENS, response.usage.input_tokens)
span.set_attribute(OUTPUT_TOKENS, response.usage.output_tokens)

# Add finish reason
if response.stop_reason:
span.set_attribute(RESPONSE_FINISH_REASONS, [response.stop_reason])
Comment on lines +282 to +292
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the kind of thing I'm talking about when I say:

Similar to #1593 (comment), please move changes which only add small attributes to a separate PR, these can be easily merged into main without breaking anything or significantly increasing span size. The messages format is a whole thing on its own.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right, I will work on breaking this up into 3 PRs.

  1. Scalar values
  2. Input Messages
  3. Output Messages

Once I have these I will link to this PR and it can be closed.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

2 and 3 can stay together

Copy link
Contributor Author

@brightsparc brightsparc Jan 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right, I missed this message, will combine 3 into 2

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Closing in favour of #1657 and #1666


# Add semantic convention output messages
output_message = convert_anthropic_response_to_semconv(response)
span.set_attribute(OUTPUT_MESSAGES, [output_message])

return response


Expand Down
Loading
Loading