Skip to content

Commit 2b4d96c

Browse files
keith-decker and zhirafovod
authored and committed
Add provider.name, rename client to handler
1 parent 5c009b3 commit 2b4d96c

File tree

3 files changed

+41
-31
lines changed

3 files changed

+41
-31
lines changed

util/opentelemetry-util-genai/pyproject.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,9 @@ classifiers = [
2525
"Programming Language :: Python :: 3.13",
2626
]
2727
dependencies = [
28-
"opentelemetry-instrumentation ~= 0.51b0",
29-
"opentelemetry-semantic-conventions ~= 0.51b0",
30-
"opentelemetry-api>=1.31.0",
28+
"opentelemetry-instrumentation ~= 0.57b0",
29+
"opentelemetry-semantic-conventions ~= 0.57b0",
30+
"opentelemetry-api>=1.36.0",
3131
]
3232

3333
[project.optional-dependencies]

util/opentelemetry-util-genai/src/opentelemetry/util/genai/emitters.py

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -70,8 +70,9 @@ def _message_to_event(message, system, framework) -> Optional[Event]:
7070
body = {"content": content}
7171
attributes = {
7272
# TODO: add below to opentelemetry.semconv._incubating.attributes.gen_ai_attributes
73+
"gen_ai.provider.name": system, # Added in 1.37 - https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/gen-ai.md#gen-ai-provider-name
7374
"gen_ai.framework": framework,
74-
GenAI.GEN_AI_SYSTEM: system,
75+
GenAI.GEN_AI_SYSTEM: system, # Deprecated: Removed in 1.37
7576
}
7677

7778
return Event(
@@ -88,8 +89,9 @@ def _chat_generation_to_event(
8889
if chat_generation.content:
8990
attributes = {
9091
# TODO: add below to opentelemetry.semconv._incubating.attributes.gen_ai_attributes
92+
"gen_ai.provider.name": system, # added in 1.37 - https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/gen-ai.md#gen-ai-provider-name
9193
"gen_ai.framework": framework,
92-
GenAI.GEN_AI_SYSTEM: system,
94+
GenAI.GEN_AI_SYSTEM: system, # Deprecated: removed in 1.37
9395
}
9496

9597
message = {
@@ -121,7 +123,7 @@ def _get_metric_attributes(
121123
"gen_ai.framework": framework,
122124
}
123125
if system:
124-
attributes[GenAI.GEN_AI_SYSTEM] = system
126+
attributes["gen_ai.provider.name"] = system
125127
if operation_name:
126128
attributes[GenAI.GEN_AI_OPERATION_NAME] = operation_name
127129
if request_model:
@@ -243,7 +245,11 @@ def emit(self, invocation: LLMInvocation):
243245
span.set_attribute("gen_ai.framework", framework)
244246

245247
if system is not None:
246-
span.set_attribute(GenAI.GEN_AI_SYSTEM, system)
248+
span.set_attribute(
249+
GenAI.GEN_AI_SYSTEM, system
250+
) # Deprecated: use "gen_ai.provider.name"
251+
# TODO: add below to opentelemetry.semconv._incubating.attributes.gen_ai_attributes
252+
span.set_attribute("gen_ai.provider.name", system)
247253

248254
finish_reasons = []
249255
for index, chat_generation in enumerate(
@@ -450,7 +456,11 @@ def emit(self, invocation: LLMInvocation):
450456
span.set_attribute(
451457
"gen_ai.framework", invocation.attributes.get("framework")
452458
)
453-
span.set_attribute(GenAI.GEN_AI_SYSTEM, system)
459+
span.set_attribute(
460+
GenAI.GEN_AI_SYSTEM, system
461+
) # Deprecated: use "gen_ai.provider.name"
462+
# TODO: add below to opentelemetry.semconv._incubating.attributes.gen_ai_attributes
463+
span.set_attribute("gen_ai.provider.name", system)
454464

455465
finish_reasons = []
456466
for index, chat_generation in enumerate(

util/opentelemetry-util-genai/src/opentelemetry/util/genai/client.py renamed to util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
import time
1616
from threading import Lock
17-
from typing import List, Optional
17+
from typing import Any, List, Optional
1818
from uuid import UUID
1919

2020
from opentelemetry._events import get_event_logger
@@ -30,13 +30,13 @@
3030
from .version import __version__
3131

3232

33-
class TelemetryClient:
33+
class TelemetryHandler:
3434
"""
35-
High-level client managing GenAI invocation lifecycles and emitting
35+
High-level handler managing GenAI invocation lifecycles and emitting
3636
them as spans, metrics, and events.
3737
"""
3838

39-
def __init__(self, emitter_type_full: bool = True, **kwargs):
39+
def __init__(self, emitter_type_full: bool = True, **kwargs: Any):
4040
tracer_provider = kwargs.get("tracer_provider")
4141
self._tracer = get_tracer(
4242
__name__,
@@ -79,8 +79,8 @@ def start_llm(
7979
prompts: List[Message],
8080
run_id: UUID,
8181
parent_run_id: Optional[UUID] = None,
82-
**attributes,
83-
):
82+
**attributes: Any,
83+
) -> None:
8484
invocation = LLMInvocation(
8585
messages=prompts,
8686
run_id=run_id,
@@ -95,7 +95,7 @@ def stop_llm(
9595
self,
9696
run_id: UUID,
9797
chat_generations: List[ChatGeneration],
98-
**attributes,
98+
**attributes: Any,
9999
) -> LLMInvocation:
100100
with self._lock:
101101
invocation = self._llm_registry.pop(run_id)
@@ -106,7 +106,7 @@ def stop_llm(
106106
return invocation
107107

108108
def fail_llm(
109-
self, run_id: UUID, error: Error, **attributes
109+
self, run_id: UUID, error: Error, **attributes: Any
110110
) -> LLMInvocation:
111111
with self._lock:
112112
invocation = self._llm_registry.pop(run_id)
@@ -117,28 +117,28 @@ def fail_llm(
117117

118118

119119
# Singleton accessor
120-
_default_client: TelemetryClient | None = None
120+
_default_handler: Optional[TelemetryHandler] = None
121121

122122

123-
def get_telemetry_client(
124-
emitter_type_full: bool = True, **kwargs
125-
) -> TelemetryClient:
126-
global _default_client
127-
if _default_client is None:
128-
_default_client = TelemetryClient(
123+
def get_telemetry_handler(
124+
emitter_type_full: bool = True, **kwargs: Any
125+
) -> TelemetryHandler:
126+
global _default_handler
127+
if _default_handler is None:
128+
_default_handler = TelemetryHandler(
129129
emitter_type_full=emitter_type_full, **kwargs
130130
)
131-
return _default_client
131+
return _default_handler
132132

133133

134134
# Module‐level convenience functions
135135
def llm_start(
136136
prompts: List[Message],
137137
run_id: UUID,
138138
parent_run_id: Optional[UUID] = None,
139-
**attributes,
140-
):
141-
return get_telemetry_client().start_llm(
139+
**attributes: Any,
140+
) -> None:
141+
return get_telemetry_handler().start_llm(
142142
prompts=prompts,
143143
run_id=run_id,
144144
parent_run_id=parent_run_id,
@@ -147,14 +147,14 @@ def llm_start(
147147

148148

149149
def llm_stop(
150-
run_id: UUID, chat_generations: List[ChatGeneration], **attributes
150+
run_id: UUID, chat_generations: List[ChatGeneration], **attributes: Any
151151
) -> LLMInvocation:
152-
return get_telemetry_client().stop_llm(
152+
return get_telemetry_handler().stop_llm(
153153
run_id=run_id, chat_generations=chat_generations, **attributes
154154
)
155155

156156

157-
def llm_fail(run_id: UUID, error: Error, **attributes) -> LLMInvocation:
158-
return get_telemetry_client().fail_llm(
157+
def llm_fail(run_id: UUID, error: Error, **attributes: Any) -> LLMInvocation:
158+
return get_telemetry_handler().fail_llm(
159159
run_id=run_id, error=error, **attributes
160160
)

0 commit comments

Comments
 (0)