Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

promote Dev #105

Merged
merged 29 commits into from
Sep 19, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
130d579
Capability to load skills from external location
Aug 26, 2024
b501540
readme instructions for ADDITIONAL_SKILL_DIRS env var
Aug 28, 2024
5c8c527
avoid warning on additional skills
Sep 6, 2024
01ddb7d
bug fix
deepak-akkil Sep 6, 2024
14f55a8
Merge pull request #99 from EmergenceAI/fix_test_processor_config
deepak-akkil Sep 6, 2024
102d8ca
Windows-specific commands for running server locally
danielkornev Sep 7, 2024
d21f667
first version of loop detection
deepak-akkil Sep 10, 2024
b0e99ad
minor change in return statement
deepak-akkil Sep 10, 2024
ec1d0b9
Make LLM config available in API route
deepak-akkil Sep 10, 2024
096a4c8
Merge pull request #100 from danielkornev/patch-1
deepak-akkil Sep 10, 2024
d1e04ef
do not open the url, if browser already in it
deepak-akkil Sep 10, 2024
9035f88
config for allowing planner to have user input
Sep 10, 2024
0c15f6e
Updates to make LLM config dict[str,Any]
deepak-akkil Sep 11, 2024
51ac2d8
clean up print statements
deepak-akkil Sep 11, 2024
54c6256
Update README.md
deepak-akkil Sep 11, 2024
0a57d03
linting
Sep 11, 2024
9c69db6
remove unneeded import
Sep 11, 2024
5c45327
Merge pull request #102 from EmergenceAI/llm-config-in-api-route
teaxio Sep 11, 2024
df8a745
linting
Sep 12, 2024
f0365df
Merge pull request #101 from EmergenceAI/loop_detection
teaxio Sep 12, 2024
c24c6e9
max chat rounds for planner and browser nav agents
Sep 12, 2024
5228dc4
detect if ran out of turns and send a message about it
Sep 15, 2024
6e79ac9
Merge pull request #103 from EmergenceAI/max_chat_rounds
teaxio Sep 16, 2024
14653e1
bug fix regarding saving planner chatter
deepak-akkil Sep 16, 2024
54b6081
Merge branch 'dev' of https://github.com/EmergenceAI/Agent-E into dev
deepak-akkil Sep 16, 2024
80ef975
bug fix regarding saving planner chatter in api route
deepak-akkil Sep 16, 2024
c888423
incorrect instructions in readme
Sep 16, 2024
fed2962
typo
Sep 16, 2024
7f48ba6
Ability to enable more verbose logging for openai and autogen
Sep 16, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
first version of loop detection
Useful for smaller models
  • Loading branch information
deepak-akkil committed Sep 10, 2024
commit d21f6675fa7415c6f5b109730a8559aeeb408320
14 changes: 12 additions & 2 deletions ae/core/autogen_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@
import nest_asyncio # type: ignore
import openai

from collections import defaultdict

#from autogen import Cache
from ae.config import SOURCE_LOG_FOLDER_PATH
from ae.core.agents.browser_nav_agent import BrowserNavAgent
Expand All @@ -19,6 +21,7 @@
from ae.core.prompts import LLM_PROMPTS
from ae.core.skills.get_url import geturl
from ae.utils.autogen_sequential_function_call import UserProxyAgent_SequentialFunctionExecution
from ae.utils.detect_llm_loops import is_agent_stuck_in_loop
from ae.utils.logger import logger
from ae.utils.response_parser import parse_response
from ae.utils.ui_messagetype import MessageType
Expand Down Expand Up @@ -119,7 +122,7 @@ def my_custom_summary_method(sender: autogen.ConversableAgent,recipient: autogen
self.__save_chat_log(list(messages_str_keys.values())[0]) # type: ignore
last_message=recipient.last_message(sender)["content"] # type: ignore
if not last_message or last_message.strip() == "": # type: ignore
return "I received an empty message. Try a different approach."
return "I received an empty message. This is not an empty and is recoverable. Try to reformulate the task..."
elif "##TERMINATE TASK##" in last_message:
last_message=last_message.replace("##TERMINATE TASK##", "") # type: ignore
last_message=last_message+" "+ get_url() # type: ignore
Expand Down Expand Up @@ -280,10 +283,17 @@ def __create_browser_nav_executor_agent(self):

"""
def is_browser_executor_termination_message(x: dict[str, str])->bool: # type: ignore

tools_call:Any = x.get("tool_calls", "")
if tools_call :
return False
chat_messages=self.agents_map["browser_nav_executor"].chat_messages #type: ignore
# Get the only key from the dictionary
agent_key = next(iter(chat_messages)) # type: ignore
# Get the chat messages corresponding to the only key
messages = chat_messages[agent_key] # type: ignore
return is_agent_stuck_in_loop(messages) # type: ignore
else:
print("Terminating browser executor")
return True

browser_nav_executor_agent = UserProxyAgent_SequentialFunctionExecution(
Expand Down
3 changes: 2 additions & 1 deletion ae/utils/autogen_sequential_function_call.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,8 @@
class UserProxyAgent_SequentialFunctionExecution(UserProxyAgent):
def __init__(self, *args, **kwargs): # type: ignore
super().__init__(*args, **kwargs) # type: ignore
self.register_reply(Agent, UserProxyAgent_SequentialFunctionExecution.sequential_generate_tool_calls_reply) # type: ignore
#position = 2 allows termination check to be called earlier, this helps detect loops.
self.register_reply(Agent, UserProxyAgent_SequentialFunctionExecution.sequential_generate_tool_calls_reply, position=2) # type: ignore


def sequential_generate_tool_calls_reply( # type: ignore
Expand Down
44 changes: 44 additions & 0 deletions ae/utils/detect_llm_loops.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
from typing import Any, Dict, List
from ae.utils.logger import logger


def is_agent_stuck_in_loop(messages: list[dict[str, Any]], number_of_turns_to_check_for_loops: int = 6) -> bool:
    """
    Detect whether the agent is stuck in a loop by inspecting its most recent turns.

    A loop is reported when, within the last ``number_of_turns_to_check_for_loops``
    messages, every assistant message invokes the same tool function AND every
    tool response carries identical content — i.e. the agent keeps issuing the
    same call and keeps getting the same answer back.

    Parameters
    ----------
    messages : list[dict[str, Any]]
        A list of dictionaries representing the agent's messages. Each dict is
        expected to carry a "role" key; assistant messages may carry a
        "tool_calls" list and tool messages a "content" value.
    number_of_turns_to_check_for_loops : int, optional
        How many of the most recent messages to inspect (default 6,
        i.e. roughly three tool-call/tool-response pairs).

    Returns
    -------
    bool
        True if a loop is detected, False otherwise.
    """
    # Too little history to judge — cannot be looping yet.
    if len(messages) <= number_of_turns_to_check_for_loops:
        return False

    recent_items = messages[-number_of_turns_to_check_for_loops:]
    logger.debug(f"More than {number_of_turns_to_check_for_loops} messages in the conversation. Checking for loops..")

    # Assistant messages are the ones that carry the tool calls.
    tool_calls = [item for item in recent_items if item.get("role") == "assistant"]
    if not tool_calls:
        return False

    # Guard against a missing, None, or empty "tool_calls" list: treat it as
    # "no function" instead of raising IndexError/TypeError on [0].
    tool_functions = [(item.get("tool_calls") or [{}])[0].get("function") for item in tool_calls]
    logger.debug(f"Recent tool calls: {tool_functions}")
    if not all(func == tool_functions[0] for func in tool_functions):
        return False

    logger.debug("Recent tool calls are identical. Checking tool responses..")
    tool_responses = [item for item in recent_items if item.get("role") == "tool"]
    if not tool_responses:
        return False

    response_contents = [item.get("content") for item in tool_responses]
    logger.debug(f"Recent tool responses: {response_contents}")
    if all(content == response_contents[0] for content in response_contents):
        logger.debug("Recent tool responses are identical. Terminating")
        logger.info("Terminating browser executor since a loop was detected...")
        return True

    return False