Commit 1a11630

wrisazhirafovod authored and committed
removed env
1 parent b61437f commit 1a11630

File tree

16 files changed: +937, -353 lines changed
Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
Installation
============

Option 1: pip + requirements.txt
--------------------------------

::

    python3 -m venv .venv
    source .venv/bin/activate
    pip install -r requirements.txt

Option 2: Poetry
----------------

::

    poetry install

Running Tests
=============

After installing dependencies, run:

::

    pytest

This will discover and run ``tests/test_sdk.py``.
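To run that file directly with verbose output (standard pytest usage):

::

    pytest tests/test_sdk.py -v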
Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "opentelemetry-genai-sdk"
dynamic = ["version"]
description = "OpenTelemetry GenAI SDK"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.8"
authors = [
    { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
]
dependencies = [
    "opentelemetry-api ~= 1.31",
    "opentelemetry-sdk>=1.31.0",
    "opentelemetry-instrumentation ~= 0.51b0",
    "opentelemetry-semantic-conventions ~= 0.51b0",
]

[project.optional-dependencies]
test = [
    "pytest>=7.0.0",
]
# evaluation = ["deepevals>=0.1.0", "openlit-sdk>=0.1.0"]

[project.urls]
Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-genai-sdk"
Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib"

[tool.hatch.version]
path = "src/opentelemetry/genai/sdk/version.py"

[tool.hatch.build.targets.sdist]
include = [
    "/src",
    "/tests",
]

[tool.hatch.build.targets.wheel]
packages = ["src/opentelemetry"]
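Given the ``test`` extra declared above, a development install plus test run could look like this (a minimal sketch, run from the project root; the quotes guard against shell glob expansion):

::

    pip install -e ".[test]"
    pytest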
Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
# OpenTelemetry SDK
opentelemetry-api>=1.34.0
opentelemetry-sdk>=1.34.0

# Testing
pytest>=7.0.0

# (Optional) evaluation libraries
# deepevals>=0.1.0
# openlit-sdk>=0.1.0
Lines changed: 101 additions & 0 deletions
@@ -0,0 +1,101 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from threading import Lock
from typing import Dict, List, Optional
from uuid import UUID

from opentelemetry._events import get_event_logger
from opentelemetry.instrumentation.langchain.version import __version__
from opentelemetry.metrics import get_meter
from opentelemetry.semconv.schemas import Schemas
from opentelemetry.trace import get_tracer

from .data import ChatGeneration, Error, Message
from .exporters import SpanMetricEventExporter, SpanMetricExporter
from .types import LLMInvocation


class TelemetryClient:
    """
    High-level client managing GenAI invocation lifecycles and exporting
    them as spans, metrics, and events.
    """

    def __init__(self, exporter_type_full: bool = True, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        self._tracer = get_tracer(
            __name__, __version__, tracer_provider, schema_url=Schemas.V1_28_0.value
        )

        meter_provider = kwargs.get("meter_provider")
        self._meter = get_meter(
            __name__, __version__, meter_provider, schema_url=Schemas.V1_28_0.value
        )

        event_logger_provider = kwargs.get("event_logger_provider")
        self._event_logger = get_event_logger(
            __name__,
            __version__,
            event_logger_provider=event_logger_provider,
            schema_url=Schemas.V1_28_0.value,
        )

        # The "full" exporter also emits events; the basic one emits only
        # spans and metrics.
        self._exporter = (
            SpanMetricEventExporter(
                tracer=self._tracer,
                meter=self._meter,
                event_logger=self._event_logger,
            )
            if exporter_type_full
            else SpanMetricExporter(tracer=self._tracer, meter=self._meter)
        )

        self._llm_registry: Dict[UUID, LLMInvocation] = {}
        self._lock = Lock()

    def start_llm(
        self,
        prompts: List[Message],
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **attributes,
    ):
        invocation = LLMInvocation(
            messages=prompts,
            run_id=run_id,
            parent_run_id=parent_run_id,
            attributes=attributes,
        )
        # Only the registry needs synchronization.
        with self._lock:
            self._llm_registry[invocation.run_id] = invocation
        self._exporter.init(invocation)

    def stop_llm(
        self, run_id: UUID, chat_generations: List[ChatGeneration], **attributes
    ) -> LLMInvocation:
        with self._lock:
            invocation = self._llm_registry.pop(run_id)
        invocation.end_time = time.time()
        invocation.chat_generations = chat_generations
        invocation.attributes.update(attributes)
        self._exporter.export(invocation)
        return invocation

    def fail_llm(self, run_id: UUID, error: Error, **attributes) -> LLMInvocation:
        with self._lock:
            invocation = self._llm_registry.pop(run_id)
        invocation.end_time = time.time()
        invocation.attributes.update(attributes)
        self._exporter.error(error, invocation)
        return invocation


# Singleton accessor
_default_client: Optional[TelemetryClient] = None


def get_telemetry_client(exporter_type_full: bool = True, **kwargs) -> TelemetryClient:
    global _default_client
    if _default_client is None:
        _default_client = TelemetryClient(exporter_type_full=exporter_type_full, **kwargs)
    return _default_client


# Module-level convenience functions
def llm_start(
    prompts: List[Message],
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    **attributes,
):
    return get_telemetry_client().start_llm(
        prompts=prompts, run_id=run_id, parent_run_id=parent_run_id, **attributes
    )


def llm_stop(
    run_id: UUID, chat_generations: List[ChatGeneration], **attributes
) -> LLMInvocation:
    return get_telemetry_client().stop_llm(
        run_id=run_id, chat_generations=chat_generations, **attributes
    )


def llm_fail(run_id: UUID, error: Error, **attributes) -> LLMInvocation:
    return get_telemetry_client().fail_llm(run_id=run_id, error=error, **attributes)
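For context, a minimal usage sketch of the invocation lifecycle follows. The import paths are assumptions inferred from the hatch version path (``src/opentelemetry/genai/sdk/version.py``), and the message ``type`` values and extra attribute are illustrative:

::

    from uuid import uuid4

    # Hypothetical import paths; the actual module layout may differ.
    from opentelemetry.genai.sdk.client import llm_fail, llm_start, llm_stop
    from opentelemetry.genai.sdk.data import ChatGeneration, Error, Message

    run_id = uuid4()
    llm_start(
        prompts=[Message(content="What is OpenTelemetry?", type="human")],
        run_id=run_id,
        request_model="example-model",  # extra attributes ride along on the invocation
    )
    try:
        # ... call the model here ...
        llm_stop(
            run_id=run_id,
            chat_generations=[
                ChatGeneration(
                    content="An observability framework.",
                    type="ai",
                    finish_reason="stop",
                )
            ],
        )
    except Exception as exc:
        llm_fail(run_id=run_id, error=Error(message=str(exc), type=type(exc)))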
Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
from dataclasses import dataclass
from typing import Optional, Type


@dataclass
class Message:
    content: str
    type: str


@dataclass
class ChatGeneration:
    content: str
    type: str
    finish_reason: Optional[str] = None


@dataclass
class Error:
    message: str
    type: Type[BaseException]
Lines changed: 69 additions & 0 deletions
@@ -0,0 +1,69 @@
from abc import ABC, abstractmethod
from typing import Optional

from .types import LLMInvocation


class EvaluationResult:
    """
    Standardized result for any GenAI evaluation.
    """

    def __init__(self, score: float, details: Optional[dict] = None):
        self.score = score
        self.details = details or {}


class Evaluator(ABC):
    """
    Abstract base class that every evaluation backend must implement.
    """

    @abstractmethod
    def evaluate(self, invocation: LLMInvocation) -> EvaluationResult:
        """
        Evaluate a completed LLMInvocation and return a result.
        """


class DeepEvalsEvaluator(Evaluator):
    """
    Uses the DeepEvals library for LLM-as-judge evaluations.
    """

    def __init__(self, config: Optional[dict] = None):
        # e.g. load models, set up API keys
        self.config = config or {}

    def evaluate(self, invocation: LLMInvocation) -> EvaluationResult:
        # stub: integrate with the deepevals SDK
        # result = deepevals.judge(invocation.prompt, invocation.response, **self.config)
        score = 0.0  # placeholder
        details = {"method": "deepevals"}
        return EvaluationResult(score=score, details=details)


class OpenLitEvaluator(Evaluator):
    """
    Uses OpenLit or a similar OSS evaluation library.
    """

    def __init__(self, config: Optional[dict] = None):
        self.config = config or {}

    def evaluate(self, invocation: LLMInvocation) -> EvaluationResult:
        # stub: integrate with the openlit SDK
        score = 0.0  # placeholder
        details = {"method": "openlit"}
        return EvaluationResult(score=score, details=details)


# Registry for easy lookup
EVALUATORS = {
    "deepevals": DeepEvalsEvaluator,
    "openlit": OpenLitEvaluator,
}


def get_evaluator(name: str, config: Optional[dict] = None) -> Evaluator:
    """
    Factory: return an evaluator by name.
    """
    cls = EVALUATORS.get(name.lower())
    if not cls:
        raise ValueError(f"Unknown evaluator: {name}")
    return cls(config)
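And a short sketch of the factory in use, assuming ``invocation`` is a completed ``LLMInvocation``:

::

    evaluator = get_evaluator("deepevals")
    result = evaluator.evaluate(invocation)
    print(result.score, result.details)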
