-
Notifications
You must be signed in to change notification settings - Fork 3.8k
Description
Checked other resources
- This is a bug, not a usage question. For questions, please use GitHub Discussions.
- I added a clear and detailed title that summarizes the issue.
- I read what a minimal reproducible example is (https://stackoverflow.com/help/minimal-reproducible-example).
- I included a self-contained, minimal example that demonstrates the issue INCLUDING all the relevant imports. The code runs AS IS to reproduce the issue.
Example Code
from typing_extensions import TypedDict, Literal
from langgraph.graph import StateGraph, START, END, MessagesState
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import Command, interrupt
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool
from langchain_core.messages import AIMessage
from IPython.display import Image, display
@tool
def weather_search(city: str):
    """Search for the weather"""
    # Stubbed lookup: log the query (so the reviewer sees the tool fire)
    # and always report sunshine.
    print("----", f"Searching for: {city}", "----", sep="\n")
    return "Sunny!"
# Chat model with the weather tool bound, so it can emit tool calls.
model = ChatOpenAI(model="glm-4-flash").bind_tools([weather_search])
class State(MessagesState):
    """Graph state: just the accumulated message history from MessagesState."""
def call_llm(state):
    """Invoke the tool-bound model on the conversation and append its reply."""
    response = model.invoke(state["messages"])
    return {"messages": [response]}
def human_review_node(state) -> Command[Literal["call_llm", "run_tool"]]:
    """Pause the graph for human review of the model's latest tool call.

    Calls interrupt() with the pending tool call; the resume payload is
    expected to be a dict: {"action": "continue" | "update" | "feedback",
    "data": ...}. Routing:
      - "continue": run the tool call unchanged.
      - "update":   replace the tool call's args with the human-supplied
                    data, then run the tool.
      - "feedback": append the feedback as a tool message and return
                    control to the LLM.

    Raises:
        ValueError: if the resume payload carries an unrecognized action
            (previously this fell through and implicitly returned None).
    """
    last_message = state["messages"][-1]
    tool_call = last_message.tool_calls[-1]
    # this is the value we'll be providing via Command(resume=<human_review>)
    human_review = interrupt(
        {
            "question": "Is this correct?",
            # Surface tool calls for review
            "tool_call": tool_call,
        }
    )
    review_action = human_review["action"]
    review_data = human_review.get("data")
    # if approved, call the tool
    if review_action == "continue":
        return Command(goto="run_tool")
    # update the AI message AND call tools
    elif review_action == "update":
        updated_message = {
            "role": "ai",
            "content": last_message.content,
            "tool_calls": [
                {
                    "id": tool_call["id"],
                    "name": tool_call["name"],
                    # This is the update provided by the human
                    "args": review_data,
                }
            ],
            # Important: this id must match the message being replaced,
            # otherwise the update shows up as a separate message.
            "id": last_message.id,
        }
        return Command(goto="run_tool", update={"messages": [updated_message]})
    # provide feedback to LLM
    elif review_action == "feedback":
        # NOTE: we're adding feedback message as a ToolMessage
        # to preserve the correct order in the message history
        # (AI messages with tool calls need to be followed by tool call messages)
        tool_message = {
            "role": "tool",
            # This is our natural language feedback
            "content": review_data,
            "name": tool_call["name"],
            "tool_call_id": tool_call["id"],
        }
        return Command(goto="call_llm", update={"messages": [tool_message]})
    # An unknown action previously returned None silently; fail loudly instead.
    raise ValueError(f"Unknown review action: {review_action!r}")
def run_tool(state):
    """Execute every tool call on the last AI message and wrap each result
    as a tool-role message."""
    registry = {"weather_search": weather_search}
    return {
        "messages": [
            {
                "role": "tool",
                "name": call["name"],
                "content": registry[call["name"]].invoke(call["args"]),
                "tool_call_id": call["id"],
            }
            for call in state["messages"][-1].tool_calls
        ]
    }
def route_after_llm(state) -> Literal[END, "human_review_node"]:
    """Route to human review when the last message carries tool calls,
    otherwise end the run."""
    if state["messages"][-1].tool_calls:
        return "human_review_node"
    return END
# Wire the graph: START -> call_llm -> (route) -> human_review_node -> run_tool -> call_llm
builder = StateGraph(State)
builder.add_node(call_llm)
builder.add_node(run_tool)
builder.add_node(human_review_node)
builder.add_edge(START, "call_llm")
builder.add_conditional_edges("call_llm", route_after_llm)
builder.add_edge("run_tool", "call_llm")
# Set up memory — a checkpointer is required for interrupt()/resume to persist state
memory = MemorySaver()
# Compile the graph with the checkpointer attached
graph = builder.compile(checkpointer=memory)
# View the topology (requires a notebook display environment)
display(Image(graph.get_graph().draw_mermaid_png()))
# Input
initial_input = {"messages": [{"role": "user", "content": "what's the weather in sf?"}]}
# Thread — thread_id selects the checkpoint thread to run/resume on
thread = {"configurable": {"thread_id": "2"}}
# Run the graph until the first interruption.
# NOTE(review): top-level `async for` only runs in an async REPL / notebook cell.
# NOTE(review): the attached traceback shows interrupt() raising RuntimeError when
# the graph is consumed via astream_events (the sync node runs through
# run_in_executor) — presumably the interrupt isn't surfaced on this streaming
# path; confirm against langgraph's astream_events / interrupt support.
async for event in graph.astream_events(initial_input, thread, stream_mode="updates",version="v2"):
    print(event)
    print("\n")

Error Message and Stack Trace (if applicable)
{'event': 'on_chain_start', 'data': {'input': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'name': 'LangGraph', 'tags': [], 'run_id': 'c6bf9294-29b9-4331-9bc9-5349f79228a7', 'metadata': {'thread_id': '2'}, 'parent_ids': []}
{'event': 'on_chain_start', 'data': {'input': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'name': '__start__', 'tags': ['graph:step:3', 'langsmith:hidden'], 'run_id': '5c86e9e9-b113-4776-bd7f-4f3e46da91ff', 'metadata': {'thread_id': '2', 'langgraph_step': 3, 'langgraph_node': '__start__', 'langgraph_triggers': ['__start__'], 'langgraph_path': ('__pregel_pull', '__start__'), 'langgraph_checkpoint_ns': '__start__:107cb6c8-1895-c324-e839-bc82fb1e2737'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7']}
{'event': 'on_chain_start', 'data': {'input': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'name': '_write', 'tags': ['seq:step:1', 'langsmith:hidden', 'langsmith:hidden'], 'run_id': '0c25e246-5cf7-44b3-b193-9b870a61bdfd', 'metadata': {'thread_id': '2', 'langgraph_step': 3, 'langgraph_node': '__start__', 'langgraph_triggers': ['__start__'], 'langgraph_path': ('__pregel_pull', '__start__'), 'langgraph_checkpoint_ns': '__start__:107cb6c8-1895-c324-e839-bc82fb1e2737'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '5c86e9e9-b113-4776-bd7f-4f3e46da91ff']}
{'event': 'on_chain_end', 'data': {'output': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}, 'input': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'run_id': '0c25e246-5cf7-44b3-b193-9b870a61bdfd', 'name': '_write', 'tags': ['seq:step:1', 'langsmith:hidden', 'langsmith:hidden'], 'metadata': {'thread_id': '2', 'langgraph_step': 3, 'langgraph_node': '__start__', 'langgraph_triggers': ['__start__'], 'langgraph_path': ('__pregel_pull', '__start__'), 'langgraph_checkpoint_ns': '__start__:107cb6c8-1895-c324-e839-bc82fb1e2737'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '5c86e9e9-b113-4776-bd7f-4f3e46da91ff']}
{'event': 'on_chain_start', 'data': {'input': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'name': '_write', 'tags': ['seq:step:3', 'langsmith:hidden', 'langsmith:hidden'], 'run_id': '75449080-1927-4bb1-8cba-4d4a70e5a2d5', 'metadata': {'thread_id': '2', 'langgraph_step': 3, 'langgraph_node': '__start__', 'langgraph_triggers': ['__start__'], 'langgraph_path': ('__pregel_pull', '__start__'), 'langgraph_checkpoint_ns': '__start__:107cb6c8-1895-c324-e839-bc82fb1e2737'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '5c86e9e9-b113-4776-bd7f-4f3e46da91ff']}
{'event': 'on_chain_end', 'data': {'output': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}, 'input': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'run_id': '75449080-1927-4bb1-8cba-4d4a70e5a2d5', 'name': '_write', 'tags': ['seq:step:3', 'langsmith:hidden', 'langsmith:hidden'], 'metadata': {'thread_id': '2', 'langgraph_step': 3, 'langgraph_node': '__start__', 'langgraph_triggers': ['__start__'], 'langgraph_path': ('__pregel_pull', '__start__'), 'langgraph_checkpoint_ns': '__start__:107cb6c8-1895-c324-e839-bc82fb1e2737'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '5c86e9e9-b113-4776-bd7f-4f3e46da91ff']}
{'event': 'on_chain_stream', 'run_id': '5c86e9e9-b113-4776-bd7f-4f3e46da91ff', 'name': '__start__', 'tags': ['graph:step:3', 'langsmith:hidden'], 'metadata': {'thread_id': '2', 'langgraph_step': 3, 'langgraph_node': '__start__', 'langgraph_triggers': ['__start__'], 'langgraph_path': ('__pregel_pull', '__start__'), 'langgraph_checkpoint_ns': '__start__:107cb6c8-1895-c324-e839-bc82fb1e2737'}, 'data': {'chunk': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7']}
{'event': 'on_chain_end', 'data': {'output': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}, 'input': {'messages': [{'role': 'user', 'content': "what's the weather in sf?"}]}}, 'run_id': '5c86e9e9-b113-4776-bd7f-4f3e46da91ff', 'name': '__start__', 'tags': ['graph:step:3', 'langsmith:hidden'], 'metadata': {'thread_id': '2', 'langgraph_step': 3, 'langgraph_node': '__start__', 'langgraph_triggers': ['__start__'], 'langgraph_path': ('__pregel_pull', '__start__'), 'langgraph_checkpoint_ns': '__start__:107cb6c8-1895-c324-e839-bc82fb1e2737'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7']}
{'event': 'on_chain_start', 'data': {'input': {'messages': [HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='61a1414b-5555-4ac4-ad63-5db470350b20'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084362498981044777', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 150, 'total_tokens': 161, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-8cd3f724-6685-4724-b53c-2f0fcf88d19b-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084362498981044777', 'type': 'tool_call'}], usage_metadata={'input_tokens': 150, 'output_tokens': 11, 'total_tokens': 161, 'input_token_details': {}, 'output_token_details': {}}), HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='da1d59a8-2a5e-497e-b866-d42e8df1170f')]}}, 'name': 'call_llm', 'tags': ['graph:step:4'], 'run_id': '21e36659-9021-49ec-8d38-f17bb96cd575', 'metadata': {'thread_id': '2', 'langgraph_step': 4, 'langgraph_node': 'call_llm', 'langgraph_triggers': ['start:call_llm'], 'langgraph_path': ('__pregel_pull', 'call_llm'), 'langgraph_checkpoint_ns': 'call_llm:8f3c1837-53e6-d2ef-80da-1d0763a4b983'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7']}
{'event': 'on_chain_start', 'data': {'input': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}}, 'name': '_write', 'tags': ['seq:step:2', 'langsmith:hidden'], 'run_id': '696b2080-a06c-4c38-a92b-151df8602d99', 'metadata': {'thread_id': '2', 'langgraph_step': 4, 'langgraph_node': 'call_llm', 'langgraph_triggers': ['start:call_llm'], 'langgraph_path': ('__pregel_pull', 'call_llm'), 'langgraph_checkpoint_ns': 'call_llm:8f3c1837-53e6-d2ef-80da-1d0763a4b983'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '21e36659-9021-49ec-8d38-f17bb96cd575']}
{'event': 'on_chain_end', 'data': {'output': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}, 'input': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}}, 'run_id': '696b2080-a06c-4c38-a92b-151df8602d99', 'name': '_write', 'tags': ['seq:step:2', 'langsmith:hidden'], 'metadata': {'thread_id': '2', 'langgraph_step': 4, 'langgraph_node': 'call_llm', 'langgraph_triggers': ['start:call_llm'], 'langgraph_path': ('__pregel_pull', 
'call_llm'), 'langgraph_checkpoint_ns': 'call_llm:8f3c1837-53e6-d2ef-80da-1d0763a4b983'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '21e36659-9021-49ec-8d38-f17bb96cd575']}
{'event': 'on_chain_start', 'data': {'input': {'messages': [HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='61a1414b-5555-4ac4-ad63-5db470350b20'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084362498981044777', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 150, 'total_tokens': 161, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-8cd3f724-6685-4724-b53c-2f0fcf88d19b-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084362498981044777', 'type': 'tool_call'}], usage_metadata={'input_tokens': 150, 'output_tokens': 11, 'total_tokens': 161, 'input_token_details': {}, 'output_token_details': {}}), HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='da1d59a8-2a5e-497e-b866-d42e8df1170f'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}}, 'name': 
'route_after_llm', 'tags': ['seq:step:4'], 'run_id': 'fbd2315c-a59e-475c-8b66-143ab89bb381', 'metadata': {'thread_id': '2', 'langgraph_step': 4, 'langgraph_node': 'call_llm', 'langgraph_triggers': ['start:call_llm'], 'langgraph_path': ('__pregel_pull', 'call_llm'), 'langgraph_checkpoint_ns': 'call_llm:8f3c1837-53e6-d2ef-80da-1d0763a4b983'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '21e36659-9021-49ec-8d38-f17bb96cd575']}
{'event': 'on_chain_end', 'data': {'output': 'human_review_node', 'input': {'messages': [HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='61a1414b-5555-4ac4-ad63-5db470350b20'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084362498981044777', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 150, 'total_tokens': 161, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-8cd3f724-6685-4724-b53c-2f0fcf88d19b-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084362498981044777', 'type': 'tool_call'}], usage_metadata={'input_tokens': 150, 'output_tokens': 11, 'total_tokens': 161, 'input_token_details': {}, 'output_token_details': {}}), HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='da1d59a8-2a5e-497e-b866-d42e8df1170f'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': 
{}})]}}, 'run_id': 'fbd2315c-a59e-475c-8b66-143ab89bb381', 'name': 'route_after_llm', 'tags': ['seq:step:4'], 'metadata': {'thread_id': '2', 'langgraph_step': 4, 'langgraph_node': 'call_llm', 'langgraph_triggers': ['start:call_llm'], 'langgraph_path': ('__pregel_pull', 'call_llm'), 'langgraph_checkpoint_ns': 'call_llm:8f3c1837-53e6-d2ef-80da-1d0763a4b983'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7', '21e36659-9021-49ec-8d38-f17bb96cd575']}
{'event': 'on_chain_stream', 'run_id': '21e36659-9021-49ec-8d38-f17bb96cd575', 'name': 'call_llm', 'tags': ['graph:step:4'], 'metadata': {'thread_id': '2', 'langgraph_step': 4, 'langgraph_node': 'call_llm', 'langgraph_triggers': ['start:call_llm'], 'langgraph_path': ('__pregel_pull', 'call_llm'), 'langgraph_checkpoint_ns': 'call_llm:8f3c1837-53e6-d2ef-80da-1d0763a4b983'}, 'data': {'chunk': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7']}
{'event': 'on_chain_end', 'data': {'output': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}, 'input': {'messages': [HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='61a1414b-5555-4ac4-ad63-5db470350b20'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084362498981044777', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 150, 'total_tokens': 161, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-8cd3f724-6685-4724-b53c-2f0fcf88d19b-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084362498981044777', 'type': 'tool_call'}], usage_metadata={'input_tokens': 150, 'output_tokens': 11, 'total_tokens': 161, 'input_token_details': {}, 'output_token_details': {}}), HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, 
id='da1d59a8-2a5e-497e-b866-d42e8df1170f')]}}, 'run_id': '21e36659-9021-49ec-8d38-f17bb96cd575', 'name': 'call_llm', 'tags': ['graph:step:4'], 'metadata': {'thread_id': '2', 'langgraph_step': 4, 'langgraph_node': 'call_llm', 'langgraph_triggers': ['start:call_llm'], 'langgraph_path': ('__pregel_pull', 'call_llm'), 'langgraph_checkpoint_ns': 'call_llm:8f3c1837-53e6-d2ef-80da-1d0763a4b983'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7']}
{'event': 'on_chain_stream', 'run_id': 'c6bf9294-29b9-4331-9bc9-5349f79228a7', 'name': 'LangGraph', 'tags': [], 'metadata': {'thread_id': '2'}, 'data': {'chunk': {'call_llm': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}}}, 'parent_ids': []}
{'event': 'on_chain_start', 'data': {'input': {'messages': [HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='61a1414b-5555-4ac4-ad63-5db470350b20'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084362498981044777', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 150, 'total_tokens': 161, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-8cd3f724-6685-4724-b53c-2f0fcf88d19b-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084362498981044777', 'type': 'tool_call'}], usage_metadata={'input_tokens': 150, 'output_tokens': 11, 'total_tokens': 161, 'input_token_details': {}, 'output_token_details': {}}), HumanMessage(content="what's the weather in sf?", additional_kwargs={}, response_metadata={}, id='da1d59a8-2a5e-497e-b866-d42e8df1170f'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_-9084357448097776084', 'function': {'arguments': '{"city": "San Francisco"}', 'name': 'weather_search'}, 'type': 'function', 'index': 0}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 170, 'total_tokens': 181, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'glm-4-flash', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-e4820d82-02dd-4625-b2cc-8348092846dc-0', tool_calls=[{'name': 'weather_search', 'args': {'city': 'San Francisco'}, 'id': 'call_-9084357448097776084', 'type': 'tool_call'}], usage_metadata={'input_tokens': 170, 'output_tokens': 11, 'total_tokens': 181, 'input_token_details': {}, 'output_token_details': {}})]}}, 'name': 
'human_review_node', 'tags': ['graph:step:5'], 'run_id': 'b7ea95ed-312e-41c9-a983-33c75e64ca13', 'metadata': {'thread_id': '2', 'langgraph_step': 5, 'langgraph_node': 'human_review_node', 'langgraph_triggers': ['branch:call_llm:route_after_llm:human_review_node'], 'langgraph_path': ('__pregel_pull', 'human_review_node'), 'langgraph_checkpoint_ns': 'human_review_node:6b060d62-e073-6c79-a745-0789cdc4ffbe'}, 'parent_ids': ['c6bf9294-29b9-4331-9bc9-5349f79228a7']}
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[37], line 8
5 thread = {"configurable": {"thread_id": "2"}}
7 # Run the graph until the first interruption
----> 8 async for event in graph.astream_events(initial_input, thread, stream_mode="updates",version="v2"):
9 print(event)
10 print("\n")
File ~\.conda\envs\study\lib\site-packages\langchain_core\runnables\base.py:1388, in Runnable.astream_events(self, input, config, version, include_names, include_types, include_tags, exclude_names, exclude_types, exclude_tags, **kwargs)
1385 raise NotImplementedError(msg)
1387 async with aclosing(event_stream):
-> 1388 async for event in event_stream:
1389 yield event
File ~\.conda\envs\study\lib\site-packages\langchain_core\tracers\event_stream.py:1012, in _astream_events_implementation_v2(runnable, input, config, include_names, include_types, include_tags, exclude_names, exclude_types, exclude_tags, **kwargs)
1010 # Await it anyway, to run any cleanup code, and propagate any exceptions
1011 with contextlib.suppress(asyncio.CancelledError):
-> 1012 await task
File ~\.conda\envs\study\lib\site-packages\langchain_core\tracers\event_stream.py:967, in _astream_events_implementation_v2.<locals>.consume_astream()
964 try:
965 # if astream also calls tap_output_aiter this will be a no-op
966 async with aclosing(runnable.astream(input, config, **kwargs)) as stream:
--> 967 async for _ in event_streamer.tap_output_aiter(run_id, stream):
968 # All the content will be picked up
969 pass
970 finally:
File ~\.conda\envs\study\lib\site-packages\langchain_core\tracers\event_stream.py:203, in _AstreamEventsCallbackHandler.tap_output_aiter(self, run_id, output)
201 yield cast(T, first)
202 # consume the rest of the output
--> 203 async for chunk in output:
204 self._send(
205 {**event, "data": {"chunk": chunk}},
206 run_info["run_type"],
207 )
208 yield chunk
File ~\.conda\envs\study\lib\site-packages\langgraph\pregel\__init__.py:1874, in Pregel.astream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs)
1868 # Similarly to Bulk Synchronous Parallel / Pregel model
1869 # computation proceeds in steps, while there are channel updates
1870 # channel updates from step N are only visible in step N+1
1871 # channels are guaranteed to be immutable for the duration of the step,
1872 # with channel updates applied only at the transition between steps
1873 while loop.tick(input_keys=self.input_channels):
-> 1874 async for _ in runner.atick(
1875 loop.tasks.values(),
1876 timeout=self.step_timeout,
1877 retry_policy=self.retry_policy,
1878 get_waiter=get_waiter,
1879 ):
1880 # emit output
1881 for o in output():
1882 yield o
File ~\.conda\envs\study\lib\site-packages\langgraph\pregel\runner.py:362, in PregelRunner.atick(self, tasks, reraise, timeout, retry_policy, get_waiter)
360 t = tasks[0]
361 try:
--> 362 await arun_with_retry(
363 t,
364 retry_policy,
365 stream=self.use_astream,
366 configurable={
367 CONFIG_KEY_SEND: partial(writer, t),
368 CONFIG_KEY_CALL: partial(call, t),
369 },
370 )
371 self.commit(t, None)
372 except Exception as exc:
File ~\.conda\envs\study\lib\site-packages\langgraph\pregel\retry.py:127, in arun_with_retry(task, retry_policy, stream, configurable)
125 # run the task
126 if stream:
--> 127 async for _ in task.proc.astream(task.input, config):
128 pass
129 # if successful, end
File ~\.conda\envs\study\lib\site-packages\langgraph\utils\runnable.py:568, in RunnableSeq.astream(self, input, config, **kwargs)
566 output: Any = None
567 add_supported = False
--> 568 async for chunk in aiterator:
569 yield chunk
570 # collect final output
File ~\.conda\envs\study\lib\site-packages\langchain_core\tracers\event_stream.py:180, in _AstreamEventsCallbackHandler.tap_output_aiter(self, run_id, output)
178 tap = self.is_tapped.setdefault(run_id, sentinel)
179 # wait for first chunk
--> 180 first = await py_anext(output, default=sentinel)
181 if first is sentinel:
182 return
File ~\.conda\envs\study\lib\site-packages\langchain_core\utils\aiter.py:76, in py_anext.<locals>.anext_impl()
69 async def anext_impl() -> Union[T, Any]:
70 try:
71 # The C code is way more low-level than this, as it implements
72 # all methods of the iterator protocol. In this implementation
73 # we're relying on higher-level coroutine concepts, but that's
74 # exactly what we want -- crosstest pure-Python high-level
75 # implementation and low-level C anext() iterators.
---> 76 return await __anext__(iterator)
77 except StopAsyncIteration:
78 return default
File ~\.conda\envs\study\lib\site-packages\langchain_core\runnables\base.py:1455, in Runnable.atransform(self, input, config, **kwargs)
1452 final: Input
1453 got_first_val = False
-> 1455 async for ichunk in input:
1456 # The default implementation of transform is to buffer input and
1457 # then call stream.
1458 # It'll attempt to gather all input into a single chunk using
1459 # the `+` operator.
1460 # If the input is not addable, then we'll assume that we can
1461 # only operate on the last chunk,
1462 # and we'll iterate until we get to the last chunk.
1463 if not got_first_val:
1464 final = ichunk
File ~\.conda\envs\study\lib\site-packages\langchain_core\runnables\base.py:1455, in Runnable.atransform(self, input, config, **kwargs)
1452 final: Input
1453 got_first_val = False
-> 1455 async for ichunk in input:
1456 # The default implementation of transform is to buffer input and
1457 # then call stream.
1458 # It'll attempt to gather all input into a single chunk using
1459 # the `+` operator.
1460 # If the input is not addable, then we'll assume that we can
1461 # only operate on the last chunk,
1462 # and we'll iterate until we get to the last chunk.
1463 if not got_first_val:
1464 final = ichunk
File ~\.conda\envs\study\lib\site-packages\langchain_core\runnables\base.py:1018, in Runnable.astream(self, input, config, **kwargs)
1000 async def astream(
1001 self,
1002 input: Input,
1003 config: Optional[RunnableConfig] = None,
1004 **kwargs: Optional[Any],
1005 ) -> AsyncIterator[Output]:
1006 """
1007 Default implementation of astream, which calls ainvoke.
1008 Subclasses should override this method if they support streaming output.
(...)
1016 The output of the Runnable.
1017 """
-> 1018 yield await self.ainvoke(input, config, **kwargs)
File ~\.conda\envs\study\lib\site-packages\langgraph\utils\runnable.py:238, in RunnableCallable.ainvoke(self, input, config, **kwargs)
236 ret = await asyncio.create_task(coro, context=context)
237 else:
--> 238 ret = await self.afunc(input, **kwargs)
239 if isinstance(ret, Runnable) and self.recurse:
240 return await ret.ainvoke(input, config)
File ~\.conda\envs\study\lib\site-packages\langchain_core\runnables\config.py:588, in run_in_executor(executor_or_config, func, *args, **kwargs)
584 raise RuntimeError from exc
586 if executor_or_config is None or isinstance(executor_or_config, dict):
587 # Use default executor with context copied from current context
--> 588 return await asyncio.get_running_loop().run_in_executor(
589 None,
590 cast(Callable[..., T], partial(copy_context().run, wrapper)),
591 )
593 return await asyncio.get_running_loop().run_in_executor(executor_or_config, wrapper)
File ~\.conda\envs\study\lib\concurrent\futures\thread.py:58, in _WorkItem.run(self)
55 return
57 try:
---> 58 result = self.fn(*self.args, **self.kwargs)
59 except BaseException as exc:
60 self.future.set_exception(exc)
File ~\.conda\envs\study\lib\site-packages\langchain_core\runnables\config.py:579, in run_in_executor.<locals>.wrapper()
577 def wrapper() -> T:
578 try:
--> 579 return func(*args, **kwargs)
580 except StopIteration as exc:
581 # StopIteration can't be set on an asyncio.Future
582 # it raises a TypeError and leaves the Future pending forever
583 # so we need to convert it to a RuntimeError
584 raise RuntimeError from exc
Cell In[35], line 46, in human_review_node(state)
43 tool_call = last_message.tool_calls[-1]
45 # this is the value we'll be providing via Command(resume=<human_review>)
---> 46 human_review = interrupt(
47 {
48 "question": "Is this correct?",
49 # Surface tool calls for review
50 "tool_call": tool_call,
51 }
52 )
54 review_action = human_review["action"]
55 review_data = human_review.get("data")
File ~\.conda\envs\study\lib\site-packages\langgraph\types.py:458, in interrupt(value)
455 from langgraph.errors import GraphInterrupt
456 from langgraph.utils.config import get_configurable
--> 458 conf = get_configurable()
459 # track interrupt index
460 scratchpad: PregelScratchpad = conf[CONFIG_KEY_SCRATCHPAD]
File ~\.conda\envs\study\lib\site-packages\langgraph\utils\config.py:312, in get_configurable()
310 return var_config[CONF]
311 else:
--> 312 raise RuntimeError("Called get_configurable outside of a runnable context")
RuntimeError: Called get_configurable outside of a runnable context
Description
My code is based on https://langchain-ai.github.io/langgraph/how-tos/human_in_the_loop/review-tool-calls/#simple-usage.
- The error does not occur when I make no modifications.
- The error occurs only when I change stream to astream_events.
- The error does not occur if interrupt is not triggered.
System Info
System Information
OS: Linux
OS Version: #1 SMP Tue Nov 5 00:21:55 UTC 2024
Python Version: 3.10.14 (main, May 6 2024, 19:42:50) [GCC 11.2.0]
Package Information
langchain_core: 0.3.28
langchain: 0.3.13
langchain_community: 0.3.13
langsmith: 0.1.147
langchain_anthropic: 0.1.20
langchain_cohere: 0.3.4
langchain_experimental: 0.3.4
langchain_google_genai: 2.0.7
langchain_ollama: 0.2.2
langchain_openai: 0.2.14
langchain_qdrant: 0.1.4
langchain_text_splitters: 0.3.4
langgraph_sdk: 0.1.48
Optional packages not installed
langserve
Other Dependencies
aiohttp: 3.11.11
anthropic: 0.42.0
async-timeout: 4.0.3
cohere: 5.13.4
dataclasses-json: 0.6.7
defusedxml: 0.7.1
fastembed: 0.3.6
filetype: 1.2.0
google-generativeai: 0.8.3
httpx: 0.27.2
httpx-sse: 0.4.0
jsonpatch: 1.33
langsmith-pyo3: Installed. No version info available.
numpy: 1.26.4
ollama: 0.4.4
openai: 1.58.1
orjson: 3.10.12
packaging: 24.2
pandas: 2.2.3
pydantic: 2.10.4
pydantic-settings: 2.7.0
PyYAML: 6.0.2
qdrant-client: 1.11.3
requests: 2.32.3
requests-toolbelt: 1.0.0
SQLAlchemy: 2.0.36
tabulate: 0.9.0
tenacity: 8.5.0
tiktoken: 0.7.0
typing-extensions: 4.12.2