Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

promote Dev #105

Merged
merged 29 commits into from
Sep 19, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
130d579
Capability to load skills from external location
Aug 26, 2024
b501540
readme instructions for ADDITIONAL_SKILL_DIRS env var
Aug 28, 2024
5c8c527
avoid warning on additional skills
Sep 6, 2024
01ddb7d
bug fix
deepak-akkil Sep 6, 2024
14f55a8
Merge pull request #99 from EmergenceAI/fix_test_processor_config
deepak-akkil Sep 6, 2024
102d8ca
Windows-specific commands for running server locally
danielkornev Sep 7, 2024
d21f667
first version of loop detection
deepak-akkil Sep 10, 2024
b0e99ad
minor change in return statement
deepak-akkil Sep 10, 2024
ec1d0b9
Make LLM config available in API route
deepak-akkil Sep 10, 2024
096a4c8
Merge pull request #100 from danielkornev/patch-1
deepak-akkil Sep 10, 2024
d1e04ef
do not open the url, if browser already in it
deepak-akkil Sep 10, 2024
9035f88
config for allowing planner to have user input
Sep 10, 2024
0c15f6e
Updates to make LLM config dict[str,Any]
deepak-akkil Sep 11, 2024
51ac2d8
clean up print statements
deepak-akkil Sep 11, 2024
54c6256
Update README.md
deepak-akkil Sep 11, 2024
0a57d03
linting
Sep 11, 2024
9c69db6
remove unneeded import
Sep 11, 2024
5c45327
Merge pull request #102 from EmergenceAI/llm-config-in-api-route
teaxio Sep 11, 2024
df8a745
linting
Sep 12, 2024
f0365df
Merge pull request #101 from EmergenceAI/loop_detection
teaxio Sep 12, 2024
c24c6e9
max chat rounds for planner and browser nav agents
Sep 12, 2024
5228dc4
detect if ran out of turns and send a message about it
Sep 15, 2024
6e79ac9
Merge pull request #103 from EmergenceAI/max_chat_rounds
teaxio Sep 16, 2024
14653e1
bug fix regarding saving planner chatter
deepak-akkil Sep 16, 2024
54b6081
Merge branch 'dev' of https://github.com/EmergenceAI/Agent-E into dev
deepak-akkil Sep 16, 2024
80ef975
bug fix regarding saving planner chatter in api route
deepak-akkil Sep 16, 2024
c888423
incorrect instructions in readme
Sep 16, 2024
fed2962
typo
Sep 16, 2024
7f48ba6
Ability to enable more verbose logging for openai and autogen
Sep 16, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Updates to make LLM config dict[str,Any]
  • Loading branch information
deepak-akkil committed Sep 11, 2024
commit 0c15f6e1dad25c1ea172e291b919cbd85c5deb53
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -161,4 +161,5 @@ requirements.txt
Pipfile

# file containing LLM config for the agents
agents_llm_config.json
agents_llm_config.json
ae/testing.py
29 changes: 15 additions & 14 deletions ae/core/agents_llm_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,11 @@ class AgentsLLMConfig:
"model_base_url": "base_url",
}

def __init__(self, env_file_path: str = ".env", config_string: Optional[str] = None) -> None:
def __init__(self, env_file_path: str = ".env", llm_config: Optional[dict[str,Any] | None] = None) -> None:
load_dotenv(env_file_path, verbose=True, override=True)
if config_string:
self.config: dict[str, Any] = self.load_config_from_string(config_string)
if llm_config:

self.config: dict[str, Any] = self.load_config_from_api(llm_config)
else:
self.config: dict[str, Any] = self._load_config()

Expand All @@ -54,8 +55,8 @@ def _load_config(self) -> dict[str, Any]:
raw_config = file_config[config_file_ref_key]

# Process configurations for both planner_agent and browser_nav_agent
planner_config = self._normalize_config_from_file(raw_config.get("planner_agent", {}))
browser_nav_config = self._normalize_config_from_file(raw_config.get("browser_nav_agent", {}))
planner_config = self._normalize_config(raw_config.get("planner_agent", {}))
browser_nav_config = self._normalize_config(raw_config.get("browser_nav_agent", {}))

config = {
"planner_agent": planner_config,
Expand Down Expand Up @@ -85,32 +86,32 @@ def _load_config(self) -> dict[str, Any]:

return config

def load_config_from_string(self, config_string: str) -> dict[str, Any]:
def load_config_from_api(self, llm_config: dict[str, Any]) -> dict[str, Any]:
"""
Load configuration from a JSON string.
Load configuration from a JSON-compatible dictionary provided at runtime (e.g. via the API).

Parameters
----------
config_string : str
A JSON string representing the configuration.
llm_config : dict[str, Any]
A dictionary representing the configuration.

Returns
-------
dict[str, Any]
The loaded and normalized configuration.
"""
try:
raw_config = json.loads(config_string)

logger.info("Loading configuration from provided string")

# Process configurations for both planner_agent and browser_nav_agent
planner_config = self._normalize_config_from_file(raw_config.get("planner_agent", {}))
browser_nav_config = self._normalize_config_from_file(raw_config.get("browser_nav_agent", {}))
planner_config = self._normalize_config(llm_config.get("planner_agent", {}))
browser_nav_config = self._normalize_config(llm_config.get("browser_nav_agent", {}))

config = {
"planner_agent": planner_config,
"browser_nav_agent": browser_nav_config,
"other_settings": {k: v for k, v in raw_config.items() if k not in ["planner_agent", "browser_nav_agent"]},
"other_settings": {k: v for k, v in llm_config.items() if k not in ["planner_agent", "browser_nav_agent"]},
}

return config
Expand All @@ -119,7 +120,7 @@ def load_config_from_string(self, config_string: str) -> dict[str, Any]:
logger.error(f"Error decoding JSON string: {e}")
raise e

def _normalize_config_from_file(self, agent_config: dict[str, Any]) -> dict[str, Any]:
def _normalize_config(self, agent_config: dict[str, Any]) -> dict[str, Any]:
"""Normalize agent-specific config (whether loaded from a file or supplied via the API), grouping keys into model_config_params, llm_config_params, and other_settings."""
model_config = {}
llm_config_params = {}
Expand Down
22 changes: 14 additions & 8 deletions ae/server/api_routes.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from ast import Dict
import asyncio
import json
import logging
import os
from typing import Any
import uuid
from queue import Empty
from queue import Queue
Expand Down Expand Up @@ -38,7 +40,7 @@

class CommandQueryModel(BaseModel):
command: str = Field(..., description="The command related to web navigation to execute.") # Required field with description
llm_config: str | None = Field(None, description="The LLM configuration string to use for the agents.")
llm_config: dict[str,Any] | None = Field(None, description="The LLM configuration string to use for the agents.")
clientid: str | None = Field(None, description="Client identifier, optional")
request_originator: str | None = Field(None, description="Optional id of the request originator")

Expand Down Expand Up @@ -73,10 +75,11 @@ async def execute_task(request: Request, query_model: CommandQueryModel):
notification_queue = Queue() # type: ignore
transaction_id = str(uuid.uuid4()) if query_model.clientid is None else query_model.clientid
register_notification_listener(notification_queue)
print(f"Config: {transaction_id}")
return StreamingResponse(run_task(request, transaction_id, query_model.command, browser_manager, notification_queue, query_model.request_originator, query_model.llm_config), media_type="text/event-stream")


def run_task(request: Request, transaction_id: str, command: str, playwright_manager: browserManager.PlaywrightManager, notification_queue: Queue, request_originator: str|None = None, llm_config: str|None = None): # type: ignore
def run_task(request: Request, transaction_id: str, command: str, playwright_manager: browserManager.PlaywrightManager, notification_queue: Queue, request_originator: str|None = None, llm_config: dict[str,Any]|None = None): # type: ignore
"""
Run the task to process the command and generate events.

Expand Down Expand Up @@ -123,27 +126,30 @@ async def event_generator():



async def process_command(command: str, playwright_manager: browserManager.PlaywrightManager, config_string:str|None = None):
async def process_command(command: str, playwright_manager: browserManager.PlaywrightManager, llm_config:dict[str,Any]|None = None):
"""
Process the command and send notifications.

Args:
command (str): The command to process.
playwright_manager (PlaywrightManager): The manager handling browser interactions and notifications.
"""
print(f"LLM Config via API : {llm_config}")
await playwright_manager.go_to_homepage() # Go to the homepage before processing the command
current_url = await playwright_manager.get_current_url()
await playwright_manager.notify_user("Processing command", MessageType.INFO)

# Load the configuration using AgentsLLMConfig
if config_string is None:
llm_config = AgentsLLMConfig()
normalized_llm_config = None
if llm_config is None:
normalized_llm_config = AgentsLLMConfig()
else:
llm_config = AgentsLLMConfig(config_string=config_string)
print("Applying LLM Config")
normalized_llm_config = AgentsLLMConfig(llm_config=llm_config)

# Retrieve planner agent and browser nav agent configurations
planner_agent_config = llm_config.get_planner_agent_config()
browser_nav_agent_config = llm_config.get_browser_nav_agent_config()
planner_agent_config = normalized_llm_config.get_planner_agent_config()
browser_nav_agent_config = normalized_llm_config.get_browser_nav_agent_config()

ag = await AutogenWrapper.create(planner_agent_config, browser_nav_agent_config)
command_exec_result = await ag.process_command(command, current_url) # type: ignore
Expand Down