Skip to content

Commit 486de37

Browse files
committed
Merge branch 'main' of github.com:brainlid/langchain
* 'main' of github.com:brainlid/langchain: adds telemetry (#284); check that the requested tool_name exists and return an error if it does not exist in the chain; added LLMChain.run_until_tool_used/3 (#292)
2 parents 3644c55 + 3f8d32e commit 486de37

File tree

15 files changed

+1475
-185
lines changed

15 files changed

+1475
-185
lines changed

README.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,16 @@ See the [`LangChain.ChatModels.ChatBumblebee` documentation](https://hexdocs.pm/
240240

241241
## Testing
242242

243+
Before you can run the tests, make sure you have the environment variables set.
244+
245+
You can do this by running:
246+
247+
```
248+
source .envrc_template
249+
```
250+
251+
Or you can copy it to `.envrc` and populate it with your private API values.
252+
243253
To run all the tests including the ones that perform live calls against the OpenAI API, use the following command:
244254

245255
```

lib/chains/llm_chain.ex

Lines changed: 246 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,53 @@ defmodule LangChain.Chains.LLMChain do
118118
119119
See `LangChain.Chains.LLMChain.run/2` for more details.
120120
121+
## Run Until Tool Used
122+
123+
The `run_until_tool_used/3` function makes it easy to instruct an LLM to use a
124+
set of tools and then call a specific tool to present the results. This is
125+
particularly useful for complex workflows where you want the LLM to perform
126+
multiple operations and then finalize with a specific action.
127+
128+
This works well for receiving a final structured output after multiple tools
129+
are used.
130+
131+
When the specified tool is successfully called, the chain stops processing and
132+
returns the result. This prevents unnecessary additional LLM calls and
133+
provides a clear termination point for your workflow.
134+
135+
{:ok, %LLMChain{} = updated_chain, %ToolResult{} = tool_result} =
136+
%{llm: ChatOpenAI.new!(%{stream: false})}
137+
|> LLMChain.new!()
138+
|> LLMChain.add_tools([special_search, report_results])
139+
|> LLMChain.add_message(Message.new_system!())
140+
|> LLMChain.add_message(Message.new_user!("..."))
141+
|> LLMChain.run_until_tool_used("report_results")
142+
143+
The function returns a tuple with three elements:
144+
- `:ok` - Indicating success
145+
- The updated chain with all messages and tool calls
146+
- The specific tool result that matched the requested tool name
147+
148+
To prevent runaway function calls, a default `max_runs` value of 25 is set.
149+
You can adjust this as needed:
150+
151+
# Allow up to 50 runs before returning an "exceeded_max_runs" error
152+
LLMChain.run_until_tool_used(chain, "final_summary", max_runs: 50)
153+
154+
The function also supports fallbacks, allowing you to gracefully handle LLM
155+
failures:
156+
157+
LLMChain.run_until_tool_used(chain, "final_summary",
158+
max_runs: 10,
159+
with_fallbacks: [fallback_llm],
160+
before_fallback: fn chain ->
161+
# Modify chain before using fallback LLM
162+
chain
163+
end
164+
)
165+
166+
See `LangChain.Chains.LLMChain.run_until_tool_used/3` for more details.
167+
121168
"""
122169
use Ecto.Schema
123170
import Ecto.Changeset
@@ -172,7 +219,7 @@ defmodule LangChain.Chains.LLMChain do
172219
# Internally managed. The list of exchanged messages during a `run` function
173220
# execution. A single run can result in a number of newly created messages.
174221
# It generates an Assistant message with one or more ToolCalls, the message
175-
# with tool results where some of them may have failed requiring the LLM to
222+
# with tool results where some of them may have failed, requiring the LLM to
176223
# try again. This list tracks the full set of exchanged messages during a
177224
# single run.
178225
field :exchanged_messages, {:array, :any}, default: [], virtual: true
@@ -362,14 +409,7 @@ defmodule LangChain.Chains.LLMChain do
362409
try do
363410
raise_on_obsolete_run_opts(opts)
364411
raise_when_no_messages(chain)
365-
366-
# set the callback function on the chain
367-
if chain.verbose, do: IO.inspect(chain.llm, label: "LLM")
368-
369-
if chain.verbose, do: IO.inspect(chain.messages, label: "MESSAGES")
370-
371-
tools = chain.tools
372-
if chain.verbose, do: IO.inspect(tools, label: "TOOLS")
412+
initial_run_logging(chain)
373413

374414
# clear the set of exchanged messages.
375415
chain = clear_exchanged_messages(chain)
@@ -387,23 +427,46 @@ defmodule LangChain.Chains.LLMChain do
387427
&run_until_success/1
388428
end
389429

390-
# Run the chain and return the success or error results. NOTE: We do not add
391-
# the current LLM to the list and process everything through a single
392-
# codepath because failing after attempted fallbacks returns a different
393-
# error.
394-
if Keyword.has_key?(opts, :with_fallbacks) do
395-
# run function and using fallbacks as needed.
396-
with_fallbacks(chain, opts, function_to_run)
397-
else
398-
# run it directly right now and return the success or error
399-
function_to_run.(chain)
400-
end
430+
# Add telemetry for chain execution
431+
metadata = %{
432+
chain_type: "llm_chain",
433+
mode: Keyword.get(opts, :mode, "default"),
434+
message_count: length(chain.messages),
435+
tool_count: length(chain.tools)
436+
}
437+
438+
LangChain.Telemetry.span([:langchain, :chain, :execute], metadata, fn ->
439+
# Run the chain and return the success or error results. NOTE: We do not add
440+
# the current LLM to the list and process everything through a single
441+
# codepath because failing after attempted fallbacks returns a different
442+
# error.
443+
if Keyword.has_key?(opts, :with_fallbacks) do
444+
# run function and using fallbacks as needed.
445+
with_fallbacks(chain, opts, function_to_run)
446+
else
447+
# run it directly right now and return the success or error
448+
function_to_run.(chain)
449+
end
450+
end)
401451
rescue
402452
err in LangChainError ->
403453
{:error, chain, err}
404454
end
405455
end
406456

457+
# Emit verbose startup output for a run: inspect the configured LLM, the
# current message list, and the registered tools. A no-op when the chain is
# not in verbose mode.
defp initial_run_logging(%LLMChain{verbose: false} = _chain), do: :ok

defp initial_run_logging(%LLMChain{verbose: true} = chain) do
  # This clause only matches when `verbose: true`, so no per-line re-check
  # of `chain.verbose` is needed.
  IO.inspect(chain.llm, label: "LLM")
  IO.inspect(chain.messages, label: "MESSAGES")
  IO.inspect(chain.tools, label: "TOOLS")

  :ok
end
469+
407470
defp with_fallbacks(%LLMChain{} = chain, opts, run_fn) do
408471
# Sources of inspiration:
409472
# - https://python.langchain.com/v0.1/docs/guides/productionization/fallbacks/
@@ -461,12 +524,18 @@ defmodule LangChain.Chains.LLMChain do
461524
end
462525

463526
# Repeatedly run the chain until we get a successful ToolResponse or processed
464-
# assistant message. Once we've reached success, it is not submitted back to the LLM,
465-
# the process ends there.
527+
# assistant message. Once we've reached a successful response, it is not
528+
# submitted back to the LLM, the process ends there.
466529
@spec run_until_success(t()) :: {:ok, t()} | {:error, t(), LangChainError.t()}
467-
defp run_until_success(%LLMChain{last_message: %Message{} = last_message} = chain) do
530+
defp run_until_success(
531+
%LLMChain{last_message: %Message{} = last_message} = chain,
532+
force_recurse \\ false
533+
) do
468534
stop_or_recurse =
469535
cond do
536+
force_recurse ->
537+
:recurse
538+
470539
chain.current_failure_count >= chain.max_retry_count ->
471540
{:error, chain,
472541
LangChainError.exception(
@@ -528,6 +597,111 @@ defmodule LangChain.Chains.LLMChain do
528597
end
529598
end
530599

600+
@doc """
601+
Run the chain until a specific tool call is made. This makes it easy for an
602+
LLM to make multiple tool calls and call a specific tool to return a result,
603+
signaling the end of the operation.
604+
605+
## Options
606+
607+
- `max_runs`: The maximum number of times to run the chain. To prevent runaway
608+
calls, it defaults to 25. When exceeded, a `%LangChainError{type: "exceeded_max_runs"}`
609+
is returned in the error response.
610+
611+
- `with_fallbacks: [...]` - Provide a list of chat models to use as a fallback
612+
when one fails. This helps a production system remain operational when an
613+
API limit is reached, an LLM service is overloaded or down, or something
614+
else new an exciting goes wrong.
615+
616+
When all fallbacks fail, a `%LangChainError{type: "all_fallbacks_failed"}`
617+
is returned in the error response.
618+
619+
- `before_fallback: fn chain -> modified_chain end` - A `before_fallback`
620+
function is called before the LLM call is made. **NOTE: When provided, it
621+
also fires for the first attempt.** This allows a chain to be modified or
622+
replaced before running against the configured LLM. This is helpful, for
623+
example, when a different system prompt is needed for Anthropic vs OpenAI.
624+
"""
625+
@spec run_until_tool_used(t(), String.t()) ::
626+
{:ok, t(), Message.t()} | {:error, t(), LangChainError.t()}
627+
def run_until_tool_used(%LLMChain{} = chain, tool_name, opts \\ []) do
628+
# clear the set of exchanged messages.
629+
chain = clear_exchanged_messages(chain)
630+
631+
# Check if the tool_name exists in the registered tools
632+
if Map.has_key?(chain._tool_map, tool_name) do
633+
# Preserve fallback options and max_runs count if set explicitly.
634+
do_run_until_tool_used(chain, tool_name, Keyword.put_new(opts, :max_runs, 25))
635+
else
636+
{:error, chain,
637+
LangChainError.exception(
638+
type: "invalid_tool_name",
639+
message: "Tool name '#{tool_name}' not found in available tools"
640+
)}
641+
end
642+
end
643+
644+
# Recursive worker for `run_until_tool_used/3`. Runs the chain repeatedly,
# decrementing `:max_runs` in `opts` on each pass, until the named tool
# produces a ToolResult, an error occurs, or the run budget is exhausted.
defp do_run_until_tool_used(%LLMChain{} = chain, tool_name, opts) do
  max_runs = Keyword.get(opts, :max_runs)

  if max_runs <= 0 do
    # Run budget exhausted without the requested tool being called.
    {:error, chain,
     LangChainError.exception(
       type: "exceeded_max_runs",
       message: "Exceeded maximum number of runs"
     )}
  else
    # Decrement max_runs for next recursion
    next_opts = Keyword.put(opts, :max_runs, max_runs - 1)

    run_result =
      try do
        # Run the chain and return the success or error results. NOTE: We do
        # not add the current LLM to the list and process everything through a
        # single codepath because failing after attempted fallbacks returns a
        # different error.
        #
        # The run_until_success call passes in a `true` to force it to recurse
        # and call even if a ToolResult was successfully run. We check _which_
        # tool result was returned here and make a separate decision.
        if Keyword.has_key?(opts, :with_fallbacks) do
          # run function and using fallbacks as needed.
          with_fallbacks(chain, opts, &run_until_success(&1, true))
        else
          # run it directly right now and return the success or error
          run_until_success(chain, true)
        end
      rescue
        err in LangChainError ->
          {:error, chain, err}
      end

    case run_result do
      {:ok, updated_chain} ->
        # Check if the last message contains a tool call matching the
        # specified name
        case updated_chain.last_message do
          %Message{role: :tool, tool_results: tool_results} when is_list(tool_results) ->
            matching_call = Enum.find(tool_results, &(&1.name == tool_name))

            if matching_call do
              # Found the requested tool's result: stop and return it.
              {:ok, updated_chain, matching_call}
            else
              # If no matching tool result found, continue running.
              do_run_until_tool_used(updated_chain, tool_name, next_opts)
            end

          _ ->
            # If no tool results in last message, continue running
            do_run_until_tool_used(updated_chain, tool_name, next_opts)
        end

      {:error, updated_chain, reason} ->
        {:error, updated_chain, reason}
    end
  end
end
704+
531705
# internal reusable function for running the chain
532706
@spec do_run(t()) :: {:ok, t()} | {:error, t(), LangChainError.t()}
533707
defp do_run(%LLMChain{current_failure_count: current_count, max_retry_count: max} = chain)
@@ -943,54 +1117,62 @@ defmodule LangChain.Chains.LLMChain do
9431117
verbose = Keyword.get(opts, :verbose, false)
9441118
context = Keyword.get(opts, :context, nil)
9451119

946-
try do
947-
if verbose, do: IO.inspect(function.name, label: "EXECUTING FUNCTION")
948-
949-
case Function.execute(function, call.arguments, context) do
950-
{:ok, llm_result, processed_result} ->
951-
if verbose, do: IO.inspect(processed_result, label: "FUNCTION PROCESSED RESULT")
952-
# successful execution and storage of processed_content.
953-
ToolResult.new!(%{
954-
tool_call_id: call.call_id,
955-
content: llm_result,
956-
processed_content: processed_result,
957-
name: function.name,
958-
display_text: function.display_text
959-
})
960-
961-
{:ok, result} ->
962-
if verbose, do: IO.inspect(result, label: "FUNCTION RESULT")
963-
# successful execution.
964-
ToolResult.new!(%{
965-
tool_call_id: call.call_id,
966-
content: result,
967-
name: function.name,
968-
display_text: function.display_text
969-
})
1120+
metadata = %{
1121+
tool_name: function.name,
1122+
tool_call_id: call.call_id,
1123+
async: function.async
1124+
}
9701125

971-
{:error, reason} when is_binary(reason) ->
972-
if verbose, do: IO.inspect(reason, label: "FUNCTION ERROR")
1126+
LangChain.Telemetry.span([:langchain, :tool, :call], metadata, fn ->
1127+
try do
1128+
if verbose, do: IO.inspect(function.name, label: "EXECUTING FUNCTION")
1129+
1130+
case Function.execute(function, call.arguments, context) do
1131+
{:ok, llm_result, processed_result} ->
1132+
if verbose, do: IO.inspect(processed_result, label: "FUNCTION PROCESSED RESULT")
1133+
# successful execution and storage of processed_content.
1134+
ToolResult.new!(%{
1135+
tool_call_id: call.call_id,
1136+
content: llm_result,
1137+
processed_content: processed_result,
1138+
name: function.name,
1139+
display_text: function.display_text
1140+
})
1141+
1142+
{:ok, result} ->
1143+
if verbose, do: IO.inspect(result, label: "FUNCTION RESULT")
1144+
# successful execution.
1145+
ToolResult.new!(%{
1146+
tool_call_id: call.call_id,
1147+
content: result,
1148+
name: function.name,
1149+
display_text: function.display_text
1150+
})
1151+
1152+
{:error, reason} when is_binary(reason) ->
1153+
if verbose, do: IO.inspect(reason, label: "FUNCTION ERROR")
1154+
1155+
ToolResult.new!(%{
1156+
tool_call_id: call.call_id,
1157+
content: reason,
1158+
name: function.name,
1159+
display_text: function.display_text,
1160+
is_error: true
1161+
})
1162+
end
1163+
rescue
1164+
err ->
1165+
Logger.error(
1166+
"Function #{function.name} failed in execution. Exception: #{LangChainError.format_exception(err, __STACKTRACE__)}"
1167+
)
9731168

9741169
ToolResult.new!(%{
9751170
tool_call_id: call.call_id,
976-
content: reason,
977-
name: function.name,
978-
display_text: function.display_text,
1171+
content: "ERROR executing tool: #{inspect(err)}",
9791172
is_error: true
9801173
})
9811174
end
982-
rescue
983-
err ->
984-
Logger.error(
985-
"Function #{function.name} failed in execution. Exception: #{LangChainError.format_exception(err, __STACKTRACE__)}"
986-
)
987-
988-
ToolResult.new!(%{
989-
tool_call_id: call.call_id,
990-
content: "ERROR executing tool: #{inspect(err)}",
991-
is_error: true
992-
})
993-
end
1175+
end)
9941176
end
9951177

9961178
@doc """

0 commit comments

Comments
 (0)