3535import json
3636from contextlib import contextmanager
3737from dataclasses import asdict , dataclass , field
38+ from enum import Enum
3839from typing import Any , Dict , List , Optional , Tuple
3940from uuid import UUID
4041
6768from opentelemetry .util .types import AttributeValue
6869
6970from .instruments import Instruments
70- from .types import Error , InputMessage , LLMInvocation , OutputMessage , Text
71+ from .types import Error , InputMessage , LLMInvocation , OutputMessage
7172
7273
7374@dataclass
@@ -144,21 +145,41 @@ def _collect_finish_reasons(generations: List[OutputMessage]) -> List[str]:
144145 return finish_reasons
145146
146147
147- def _maybe_set_span_input_messages (
148- span : Span , messages : List [InputMessage ]
149- ) -> None :
148+ class _CaptureTarget (Enum ):
149+ SPAN = "span"
150+ EVENT = "event"
151+
152+
def _should_capture_content(target: _CaptureTarget) -> bool:
    """Return ``True`` when message content may be recorded on *target*.

    Content is never captured while the GEN_AI semconv stability opt-in is
    still ``DEFAULT``; otherwise the decision follows the configured
    ``ContentCapturingMode`` (``SPAN_AND_EVENT`` enables both targets).
    """
    opt_in = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
        _OpenTelemetryStabilitySignalType.GEN_AI,
    )
    # DEFAULT stability mode means the experimental GenAI conventions are
    # not opted in, so no content is captured regardless of capture mode.
    if opt_in == _StabilityMode.DEFAULT:
        return False

    # Map each capture target onto the ContentCapturingMode values that
    # enable it; anything unrecognized captures nothing.
    permitted = {
        _CaptureTarget.SPAN: (
            ContentCapturingMode.SPAN_ONLY,
            ContentCapturingMode.SPAN_AND_EVENT,
        ),
        _CaptureTarget.EVENT: (
            ContentCapturingMode.EVENT_ONLY,
            ContentCapturingMode.SPAN_AND_EVENT,
        ),
    }
    return get_content_capturing_mode() in permitted.get(target, ())
177+
178+
179+ def _maybe_set_span_input_messages (
180+ span : Span , messages : List [InputMessage ]
181+ ) -> None :
182+ if not _should_capture_content (_CaptureTarget .SPAN ):
162183 return
163184 message_parts : List [Dict [str , Any ]] = [
164185 asdict (message ) for message in messages
@@ -167,20 +188,17 @@ def _maybe_set_span_input_messages(
167188 span .set_attribute ("gen_ai.input.messages" , json .dumps (message_parts ))
168189
169190
def _maybe_set_span_output_messages(
    span: Span, generations: List[OutputMessage]
) -> None:
    """Serialize *generations* onto *span* as ``gen_ai.output.messages``.

    A no-op when the configured capture mode does not allow span content,
    or when there are no generations to record.
    """
    if not _should_capture_content(_CaptureTarget.SPAN):
        return
    serialized: List[Dict[str, Any]] = [
        asdict(generation) for generation in generations
    ]
    if not serialized:
        return
    span.set_attribute("gen_ai.output.messages", json.dumps(serialized))
186204
@@ -369,7 +387,7 @@ def finish(self, invocation: LLMInvocation):
369387 self ._apply_common_span_attributes (span , invocation )
370388 )
371389 _maybe_set_span_input_messages (span , invocation .messages )
372- _set_chat_generation_attrs (span , invocation .chat_generations )
390+ _maybe_set_span_output_messages (span , invocation .chat_generations )
373391 _record_token_metrics (
374392 self ._token_histogram ,
375393 prompt_tokens ,
0 commit comments