
Commit

Updated semantic conventions based on the OTel community
gyliu513 committed Apr 23, 2024
1 parent 0f7a465 commit 568375b
Showing 11 changed files with 31 additions and 22 deletions.

@@ -420,7 +420,7 @@ def _wrap(
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "Anthropic",
+            SpanAttributes.LLM_SYSTEM: "Anthropic",
             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
         },
     )
@@ -516,7 +516,7 @@ async def _awrap(
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "Anthropic",
+            SpanAttributes.LLM_SYSTEM: "Anthropic",
             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
         },
     )

@@ -102,7 +102,7 @@ def _handle_call(span, kwargs, response):

     (vendor, model) = kwargs.get("modelId").split(".")

-    _set_span_attribute(span, SpanAttributes.LLM_VENDOR, vendor)
+    _set_span_attribute(span, SpanAttributes.LLM_SYSTEM, vendor)
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, model)

     if vendor == "cohere":

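Bedrock model IDs encode the provider as a prefix before the first dot, which is what the split above records under gen_ai.system. A minimal sketch of that split, using a hypothetical model ID rather than one taken from this commit:

    # Hypothetical Bedrock model ID of the form "<vendor>.<model>".
    model_id = "anthropic.claude-v2"
    (vendor, model) = model_id.split(".")
    # vendor == "anthropic", model == "claude-v2"
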
@@ -195,7 +195,7 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "Cohere",
+            SpanAttributes.LLM_SYSTEM: "Cohere",
             SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
         },
     ) as span:

@@ -102,7 +102,7 @@ def wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
         ),
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "OpenAI",
+            SpanAttributes.LLM_SYSTEM: "OpenAI",
             SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
         },
     ) as span:

@@ -142,7 +142,7 @@ async def acomplete_wrapper(tracer, wrapped, instance: CustomLLM, args, kwargs):

 @dont_throw
 def _handle_request(span, llm_request_type, args, kwargs, instance: CustomLLM):
-    _set_span_attribute(span, SpanAttributes.LLM_VENDOR, instance.__class__.__name__)
+    _set_span_attribute(span, SpanAttributes.LLM_SYSTEM, instance.__class__.__name__)
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, llm_request_type.value)
     _set_span_attribute(
         span, SpanAttributes.LLM_REQUEST_MODEL, instance.metadata.model_name

@@ -107,7 +107,7 @@ def _set_request_attributes(span, kwargs):
         return

     _set_api_attributes(span)
-    _set_span_attribute(span, SpanAttributes.LLM_VENDOR, "OpenAI")
+    _set_span_attribute(span, SpanAttributes.LLM_SYSTEM, "OpenAI")
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
     _set_span_attribute(
         span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")

@@ -156,7 +156,7 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "Replicate",
+            SpanAttributes.LLM_SYSTEM: "Replicate",
             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
         },
     )

@@ -45,7 +45,7 @@ def _set_input_attributes(span, instance, args, kwargs):
         span, SpanAttributes.LLM_REQUEST_MODEL, instance.model.config.name_or_path
     )
     _set_span_attribute(
-        span, SpanAttributes.LLM_VENDOR, instance.model.config.model_type
+        span, SpanAttributes.LLM_SYSTEM, instance.model.config.model_type
     )
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, "completion")
     _set_span_attribute(

@@ -268,7 +268,7 @@ async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs):
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "VertexAI",
+            SpanAttributes.LLM_SYSTEM: "VertexAI",
             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
         },
     )
@@ -313,7 +313,7 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "VertexAI",
+            SpanAttributes.LLM_SYSTEM: "VertexAI",
             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
         },
     )

@@ -430,7 +430,7 @@ def _wrap(
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            SpanAttributes.LLM_VENDOR: "Watsonx",
+            SpanAttributes.LLM_SYSTEM: "Watsonx",
             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
         },
     )

@@ -2,25 +2,34 @@


 class SpanAttributes:
+    # Semantic Conventions for LLM requests; this needs to be removed once the
+    # OpenTelemetry Semantic Conventions support Gen AI.
+    # Issue at https://github.com/open-telemetry/opentelemetry-python/issues/3868
+    # Refer to https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/llm-spans.md
+    # for more detail on LLM spans from the OpenTelemetry community.
+    LLM_SYSTEM = "gen_ai.system"
+    LLM_REQUEST_MODEL = "gen_ai.request.model"
+    LLM_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
+    LLM_TEMPERATURE = "gen_ai.request.temperature"
+    LLM_TOP_P = "gen_ai.request.top_p"
+    LLM_PROMPTS = "gen_ai.prompt"
+    LLM_COMPLETIONS = "gen_ai.completion"
+    LLM_RESPONSE_MODEL = "gen_ai.response.model"
+    LLM_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens"
+    LLM_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens"
+    # To be added
+    # LLM_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reasons"
+    # LLM_RESPONSE_ID = "gen_ai.response.id"
+
     # LLM
-    LLM_VENDOR = "llm.vendor"
     LLM_REQUEST_TYPE = "llm.request.type"
-    LLM_REQUEST_MODEL = "llm.request.model"
-    LLM_RESPONSE_MODEL = "llm.response.model"
-    LLM_REQUEST_MAX_TOKENS = "llm.request.max_tokens"
     LLM_USAGE_TOTAL_TOKENS = "llm.usage.total_tokens"
-    LLM_USAGE_COMPLETION_TOKENS = "llm.usage.completion_tokens"
-    LLM_USAGE_PROMPT_TOKENS = "llm.usage.prompt_tokens"
-    LLM_TEMPERATURE = "llm.temperature"
     LLM_USER = "llm.user"
     LLM_HEADERS = "llm.headers"
-    LLM_TOP_P = "llm.top_p"
     LLM_TOP_K = "llm.top_k"
     LLM_IS_STREAMING = "llm.is_streaming"
     LLM_FREQUENCY_PENALTY = "llm.frequency_penalty"
     LLM_PRESENCE_PENALTY = "llm.presence_penalty"
-    LLM_PROMPTS = "llm.prompts"
-    LLM_COMPLETIONS = "llm.completions"
     LLM_CHAT_STOP_SEQUENCES = "llm.chat.stop_sequences"
     LLM_REQUEST_FUNCTIONS = "llm.request.functions"

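To show how an instrumentation consumes the renamed attribute, here is a minimal, self-contained sketch using the OpenTelemetry Python SDK. The span name, model name, and console exporter are illustrative choices, not part of this commit, and the constants simply mirror the SpanAttributes values above:

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
    from opentelemetry.trace import SpanKind

    # Mirrors the class above; real code would import SpanAttributes instead.
    LLM_SYSTEM = "gen_ai.system"
    LLM_REQUEST_MODEL = "gen_ai.request.model"

    # Console exporter so the span (and its gen_ai.* attributes) prints locally.
    trace.set_tracer_provider(TracerProvider())
    trace.get_tracer_provider().add_span_processor(
        SimpleSpanProcessor(ConsoleSpanExporter())
    )
    tracer = trace.get_tracer(__name__)

    # A CLIENT span annotated the same way the wrappers in this commit do.
    with tracer.start_as_current_span(
        "anthropic.completion",
        kind=SpanKind.CLIENT,
        attributes={
            LLM_SYSTEM: "Anthropic",
            LLM_REQUEST_MODEL: "claude-2",
        },
    ):
        pass  # the actual LLM call would go here

Backends that understand the gen_ai.* conventions can then group spans by gen_ai.system regardless of which instrumentation produced them.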
