Skip to content

Commit 1ceb034

Browse files
committed
adds telemetry
1 parent 75ad966 commit 1ceb034

File tree

11 files changed

+689
-160
lines changed

11 files changed

+689
-160
lines changed

lib/chains/llm_chain.ex

Lines changed: 70 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -387,17 +387,27 @@ defmodule LangChain.Chains.LLMChain do
387387
&run_until_success/1
388388
end
389389

390-
# Run the chain and return the success or error results. NOTE: We do not add
391-
# the current LLM to the list and process everything through a single
392-
# codepath because failing after attempted fallbacks returns a different
393-
# error.
394-
if Keyword.has_key?(opts, :with_fallbacks) do
395-
# run the function, using fallbacks as needed.
396-
with_fallbacks(chain, opts, function_to_run)
397-
else
398-
# run it directly right now and return the success or error
399-
function_to_run.(chain)
400-
end
390+
# Add telemetry for chain execution
391+
metadata = %{
392+
chain_type: "llm_chain",
393+
mode: Keyword.get(opts, :mode, "default"),
394+
message_count: length(chain.messages),
395+
tool_count: length(chain.tools)
396+
}
397+
398+
LangChain.Telemetry.span([:langchain, :chain, :execute], metadata, fn ->
399+
# Run the chain and return the success or error results. NOTE: We do not add
400+
# the current LLM to the list and process everything through a single
401+
# codepath because failing after attempted fallbacks returns a different
402+
# error.
403+
if Keyword.has_key?(opts, :with_fallbacks) do
404+
# run the function, using fallbacks as needed.
405+
with_fallbacks(chain, opts, function_to_run)
406+
else
407+
# run it directly right now and return the success or error
408+
function_to_run.(chain)
409+
end
410+
end)
401411
rescue
402412
err in LangChainError ->
403413
{:error, chain, err}
@@ -943,54 +953,62 @@ defmodule LangChain.Chains.LLMChain do
943953
verbose = Keyword.get(opts, :verbose, false)
944954
context = Keyword.get(opts, :context, nil)
945955

946-
try do
947-
if verbose, do: IO.inspect(function.name, label: "EXECUTING FUNCTION")
948-
949-
case Function.execute(function, call.arguments, context) do
950-
{:ok, llm_result, processed_result} ->
951-
if verbose, do: IO.inspect(processed_result, label: "FUNCTION PROCESSED RESULT")
952-
# successful execution and storage of processed_content.
953-
ToolResult.new!(%{
954-
tool_call_id: call.call_id,
955-
content: llm_result,
956-
processed_content: processed_result,
957-
name: function.name,
958-
display_text: function.display_text
959-
})
960-
961-
{:ok, result} ->
962-
if verbose, do: IO.inspect(result, label: "FUNCTION RESULT")
963-
# successful execution.
964-
ToolResult.new!(%{
965-
tool_call_id: call.call_id,
966-
content: result,
967-
name: function.name,
968-
display_text: function.display_text
969-
})
956+
metadata = %{
957+
tool_name: function.name,
958+
tool_call_id: call.call_id,
959+
async: function.async
960+
}
970961

971-
{:error, reason} when is_binary(reason) ->
972-
if verbose, do: IO.inspect(reason, label: "FUNCTION ERROR")
962+
LangChain.Telemetry.span([:langchain, :tool, :call], metadata, fn ->
963+
try do
964+
if verbose, do: IO.inspect(function.name, label: "EXECUTING FUNCTION")
965+
966+
case Function.execute(function, call.arguments, context) do
967+
{:ok, llm_result, processed_result} ->
968+
if verbose, do: IO.inspect(processed_result, label: "FUNCTION PROCESSED RESULT")
969+
# successful execution and storage of processed_content.
970+
ToolResult.new!(%{
971+
tool_call_id: call.call_id,
972+
content: llm_result,
973+
processed_content: processed_result,
974+
name: function.name,
975+
display_text: function.display_text
976+
})
977+
978+
{:ok, result} ->
979+
if verbose, do: IO.inspect(result, label: "FUNCTION RESULT")
980+
# successful execution.
981+
ToolResult.new!(%{
982+
tool_call_id: call.call_id,
983+
content: result,
984+
name: function.name,
985+
display_text: function.display_text
986+
})
987+
988+
{:error, reason} when is_binary(reason) ->
989+
if verbose, do: IO.inspect(reason, label: "FUNCTION ERROR")
990+
991+
ToolResult.new!(%{
992+
tool_call_id: call.call_id,
993+
content: reason,
994+
name: function.name,
995+
display_text: function.display_text,
996+
is_error: true
997+
})
998+
end
999+
rescue
1000+
err ->
1001+
Logger.error(
1002+
"Function #{function.name} failed in execution. Exception: #{LangChainError.format_exception(err, __STACKTRACE__)}"
1003+
)
9731004

9741005
ToolResult.new!(%{
9751006
tool_call_id: call.call_id,
976-
content: reason,
977-
name: function.name,
978-
display_text: function.display_text,
1007+
content: "ERROR executing tool: #{inspect(err)}",
9791008
is_error: true
9801009
})
9811010
end
982-
rescue
983-
err ->
984-
Logger.error(
985-
"Function #{function.name} failed in execution. Exception: #{LangChainError.format_exception(err, __STACKTRACE__)}"
986-
)
987-
988-
ToolResult.new!(%{
989-
tool_call_id: call.call_id,
990-
content: "ERROR executing tool: #{inspect(err)}",
991-
is_error: true
992-
})
993-
end
1011+
end)
9941012
end
9951013

9961014
@doc """

lib/chat_models/chat_anthropic.ex

Lines changed: 45 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -324,19 +324,39 @@ defmodule LangChain.ChatModels.ChatAnthropic do
324324
end
325325

326326
def call(%ChatAnthropic{} = anthropic, messages, functions) when is_list(messages) do
327-
try do
328-
# make base api request and perform high-level success/failure checks
329-
case do_api_request(anthropic, messages, functions) do
330-
{:error, %LangChainError{} = error} ->
331-
{:error, error}
332-
333-
parsed_data ->
334-
{:ok, parsed_data}
327+
metadata = %{
328+
model: anthropic.model,
329+
message_count: length(messages),
330+
tools_count: length(functions)
331+
}
332+
333+
LangChain.Telemetry.span([:langchain, :llm, :call], metadata, fn ->
334+
try do
335+
# Track the prompt being sent
336+
LangChain.Telemetry.llm_prompt(
337+
%{system_time: System.system_time()},
338+
%{model: anthropic.model, messages: messages}
339+
)
340+
341+
# make base api request and perform high-level success/failure checks
342+
case do_api_request(anthropic, messages, functions) do
343+
{:error, %LangChainError{} = error} ->
344+
{:error, error}
345+
346+
parsed_data ->
347+
# Track the response being received
348+
LangChain.Telemetry.llm_response(
349+
%{system_time: System.system_time()},
350+
%{model: anthropic.model, response: parsed_data}
351+
)
352+
353+
{:ok, parsed_data}
354+
end
355+
rescue
356+
err in LangChainError ->
357+
{:error, err}
335358
end
336-
rescue
337-
err in LangChainError ->
338-
{:error, err}
339-
end
359+
end)
340360
end
341361

342362
# Call Anthropic's API.
@@ -429,6 +449,12 @@ defmodule LangChain.ChatModels.ChatAnthropic do
429449
tools,
430450
retry_count
431451
) do
452+
# Track the prompt being sent for streaming
453+
LangChain.Telemetry.llm_prompt(
454+
%{system_time: System.system_time(), streaming: true},
455+
%{model: anthropic.model, messages: messages}
456+
)
457+
432458
Req.new(
433459
url: url(anthropic),
434460
json: for_api(anthropic, messages, tools),
@@ -450,6 +476,13 @@ defmodule LangChain.ChatModels.ChatAnthropic do
450476
get_ratelimit_info(response.headers)
451477
])
452478

479+
# Track the stream completion
480+
LangChain.Telemetry.emit_event(
481+
[:langchain, :llm, :response, streaming: true],
482+
%{system_time: System.system_time()},
483+
%{model: anthropic.model}
484+
)
485+
453486
data
454487

455488
# The error tuple was successfully received from the API. Unwrap it and

lib/chat_models/chat_bumblebee.ex

Lines changed: 50 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -231,19 +231,40 @@ defmodule LangChain.ChatModels.ChatBumblebee do
231231
end
232232

233233
def call(%ChatBumblebee{} = model, messages, functions) when is_list(messages) do
234-
try do
235-
# make base api request and perform high-level success/failure checks
236-
case do_serving_request(model, messages, functions) do
237-
{:error, reason} ->
238-
{:error, reason}
239-
240-
parsed_data ->
241-
{:ok, parsed_data}
234+
metadata = %{
235+
model: inspect(model.serving),
236+
template_format: model.template_format,
237+
message_count: length(messages),
238+
tools_count: length(functions)
239+
}
240+
241+
LangChain.Telemetry.span([:langchain, :llm, :call], metadata, fn ->
242+
try do
243+
# Track the prompt being sent
244+
LangChain.Telemetry.llm_prompt(
245+
%{system_time: System.system_time()},
246+
%{model: inspect(model.serving), messages: messages}
247+
)
248+
249+
# make base api request and perform high-level success/failure checks
250+
case do_serving_request(model, messages, functions) do
251+
{:error, reason} ->
252+
{:error, reason}
253+
254+
parsed_data ->
255+
# Track the response being received
256+
LangChain.Telemetry.llm_response(
257+
%{system_time: System.system_time()},
258+
%{model: inspect(model.serving), response: parsed_data}
259+
)
260+
261+
{:ok, parsed_data}
262+
end
263+
rescue
264+
err in LangChainError ->
265+
{:error, err}
242266
end
243-
rescue
244-
err in LangChainError ->
245-
{:error, err}
246-
end
267+
end)
247268
end
248269

249270
@doc false
@@ -461,6 +482,16 @@ defmodule LangChain.ChatModels.ChatBumblebee do
461482
when is_binary(content) do
462483
fire_token_usage_callback(model, token_summary)
463484

485+
# Track non-streaming response completion
486+
LangChain.Telemetry.emit_event(
487+
[:langchain, :llm, :response, streaming: false],
488+
%{system_time: System.system_time()},
489+
%{
490+
model: inspect(model.serving),
491+
response_size: byte_size(content)
492+
}
493+
)
494+
464495
case Message.new(%{role: :assistant, status: :complete, content: content}) do
465496
{:ok, message} ->
466497
# execute the callback with the final message
@@ -495,6 +526,13 @@ defmodule LangChain.ChatModels.ChatBumblebee do
495526
{:done, %{token_summary: token_summary}} ->
496527
fire_token_usage_callback(model, token_summary)
497528

529+
# Track stream completion
530+
LangChain.Telemetry.emit_event(
531+
[:langchain, :llm, :response, streaming: true],
532+
%{system_time: System.system_time()},
533+
%{model: inspect(model.serving)}
534+
)
535+
498536
final_delta = MessageDelta.new!(%{role: :assistant, status: :complete})
499537
Callbacks.fire(model.callbacks, :on_llm_new_delta, [final_delta])
500538
final_delta

lib/chat_models/chat_google_ai.ex

Lines changed: 40 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -405,18 +405,38 @@ defmodule LangChain.ChatModels.ChatGoogleAI do
405405

406406
def call(%ChatGoogleAI{} = google_ai, messages, tools)
407407
when is_list(messages) do
408-
try do
409-
case do_api_request(google_ai, messages, tools) do
410-
{:error, reason} ->
411-
{:error, reason}
408+
metadata = %{
409+
model: google_ai.model,
410+
message_count: length(messages),
411+
tools_count: length(tools)
412+
}
413+
414+
LangChain.Telemetry.span([:langchain, :llm, :call], metadata, fn ->
415+
try do
416+
# Track the prompt being sent
417+
LangChain.Telemetry.llm_prompt(
418+
%{system_time: System.system_time()},
419+
%{model: google_ai.model, messages: messages}
420+
)
421+
422+
case do_api_request(google_ai, messages, tools) do
423+
{:error, reason} ->
424+
{:error, reason}
425+
426+
parsed_data ->
427+
# Track the response being received
428+
LangChain.Telemetry.llm_response(
429+
%{system_time: System.system_time()},
430+
%{model: google_ai.model, response: parsed_data}
431+
)
412432

413-
parsed_data ->
414-
{:ok, parsed_data}
433+
{:ok, parsed_data}
434+
end
435+
rescue
436+
err in LangChainError ->
437+
{:error, err.message}
415438
end
416-
rescue
417-
err in LangChainError ->
418-
{:error, err.message}
419-
end
439+
end)
420440
end
421441

422442
@doc false
@@ -442,6 +462,16 @@ defmodule LangChain.ChatModels.ChatGoogleAI do
442462
{:error, reason}
443463

444464
result ->
465+
# Track non-streaming response completion
466+
LangChain.Telemetry.emit_event(
467+
[:langchain, :llm, :response, streaming: false],
468+
%{system_time: System.system_time()},
469+
%{
470+
model: google_ai.model,
471+
response_size: byte_size(inspect(result))
472+
}
473+
)
474+
445475
Callbacks.fire(google_ai.callbacks, :on_llm_new_message, [result])
446476
result
447477
end

0 commit comments

Comments (0)