Skip to content

Commit 8e6e82c

Browse files
Merge pull request #765 from MervinPraison/claude/issue-752-20250708_204518
refactor: Sync agent.py with llm.py patterns - Phase 1
2 parents ea24336 + debbd9c commit 8e6e82c

File tree

2 files changed: +458 −100 lines changed

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 130 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import os
22
import time
33
import json
4+
import copy
45
import logging
56
import asyncio
67
from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
@@ -831,6 +832,127 @@ def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, to
831832

832833
return current_response
833834

835+
def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None):
836+
"""Build messages list for chat completion.
837+
838+
Args:
839+
prompt: The user prompt (str or list)
840+
temperature: Temperature for the chat
841+
output_json: Optional Pydantic model for JSON output
842+
output_pydantic: Optional Pydantic model for JSON output (alias)
843+
844+
Returns:
845+
tuple: (messages list, original prompt)
846+
"""
847+
messages = []
848+
849+
# Build system prompt if enabled
850+
system_prompt = None
851+
if self.use_system_prompt:
852+
system_prompt = f"""{self.backstory}\n
853+
Your Role: {self.role}\n
854+
Your Goal: {self.goal}
855+
"""
856+
if output_json:
857+
system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
858+
elif output_pydantic:
859+
system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
860+
861+
messages.append({"role": "system", "content": system_prompt})
862+
863+
# Add chat history
864+
messages.extend(self.chat_history)
865+
866+
# Handle prompt modifications for JSON output
867+
original_prompt = prompt
868+
if output_json or output_pydantic:
869+
if isinstance(prompt, str):
870+
prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
871+
elif isinstance(prompt, list):
872+
# Create a deep copy to avoid modifying the original
873+
prompt = copy.deepcopy(prompt)
874+
for item in prompt:
875+
if item.get("type") == "text":
876+
item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
877+
break
878+
879+
# Add prompt to messages
880+
if isinstance(prompt, list):
881+
# If we receive a multimodal prompt list, place it directly in the user message
882+
messages.append({"role": "user", "content": prompt})
883+
else:
884+
messages.append({"role": "user", "content": prompt})
885+
886+
return messages, original_prompt
887+
888+
def _format_tools_for_completion(self, tools=None):
889+
"""Format tools for OpenAI completion API.
890+
891+
Supports:
892+
- Pre-formatted OpenAI tools (dicts with type='function')
893+
- Lists of pre-formatted tools
894+
- Callable functions
895+
- String function names
896+
- Objects with to_openai_tool() method
897+
898+
Args:
899+
tools: List of tools in various formats or None to use self.tools
900+
901+
Returns:
902+
List of formatted tools or empty list
903+
"""
904+
if tools is None:
905+
tools = self.tools
906+
907+
if not tools:
908+
return []
909+
910+
formatted_tools = []
911+
for tool in tools:
912+
# Handle pre-formatted OpenAI tools
913+
if isinstance(tool, dict) and tool.get('type') == 'function':
914+
# Validate nested dictionary structure before accessing
915+
if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
916+
formatted_tools.append(tool)
917+
else:
918+
logging.warning(f"Skipping malformed OpenAI tool: missing function or name")
919+
# Handle lists of tools
920+
elif isinstance(tool, list):
921+
for subtool in tool:
922+
if isinstance(subtool, dict) and subtool.get('type') == 'function':
923+
# Validate nested dictionary structure before accessing
924+
if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
925+
formatted_tools.append(subtool)
926+
else:
927+
logging.warning(f"Skipping malformed OpenAI tool in list: missing function or name")
928+
# Handle string tool names
929+
elif isinstance(tool, str):
930+
tool_def = self._generate_tool_definition(tool)
931+
if tool_def:
932+
formatted_tools.append(tool_def)
933+
else:
934+
logging.warning(f"Could not generate definition for tool: {tool}")
935+
# Handle objects with to_openai_tool method (MCP tools)
936+
elif hasattr(tool, "to_openai_tool"):
937+
formatted_tools.append(tool.to_openai_tool())
938+
# Handle callable functions
939+
elif callable(tool):
940+
tool_def = self._generate_tool_definition(tool.__name__)
941+
if tool_def:
942+
formatted_tools.append(tool_def)
943+
else:
944+
logging.warning(f"Tool {tool} not recognized")
945+
946+
# Validate JSON serialization before returning
947+
if formatted_tools:
948+
try:
949+
json.dumps(formatted_tools) # Validate serialization
950+
except (TypeError, ValueError) as e:
951+
logging.error(f"Tools are not JSON serializable: {e}")
952+
return []
953+
954+
return formatted_tools
955+
834956
def generate_task(self) -> 'Task':
835957
"""Generate a Task object from the agent's instructions"""
836958
from ..task.task import Task
@@ -1045,26 +1167,8 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
10451167
start_time = time.time()
10461168
logging.debug(f"{self.name} sending messages to LLM: {messages}")
10471169

1048-
formatted_tools = []
1049-
if tools is None:
1050-
tools = self.tools
1051-
if tools:
1052-
for tool in tools:
1053-
if isinstance(tool, str):
1054-
# Generate tool definition for string tool names
1055-
tool_def = self._generate_tool_definition(tool)
1056-
if tool_def:
1057-
formatted_tools.append(tool_def)
1058-
else:
1059-
logging.warning(f"Could not generate definition for tool: {tool}")
1060-
elif isinstance(tool, dict):
1061-
formatted_tools.append(tool)
1062-
elif hasattr(tool, "to_openai_tool"):
1063-
formatted_tools.append(tool.to_openai_tool())
1064-
elif callable(tool):
1065-
formatted_tools.append(self._generate_tool_definition(tool.__name__))
1066-
else:
1067-
logging.warning(f"Tool {tool} not recognized")
1170+
# Use the new _format_tools_for_completion helper method
1171+
formatted_tools = self._format_tools_for_completion(tools)
10681172

10691173
try:
10701174
# Use the custom LLM instance if available
@@ -1297,40 +1401,8 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
12971401
display_error(f"Error in LLM chat: {e}")
12981402
return None
12991403
else:
1300-
if self.use_system_prompt:
1301-
system_prompt = f"""{self.backstory}\n
1302-
Your Role: {self.role}\n
1303-
Your Goal: {self.goal}
1304-
"""
1305-
if output_json:
1306-
system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
1307-
elif output_pydantic:
1308-
system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
1309-
else:
1310-
system_prompt = None
1311-
1312-
messages = []
1313-
if system_prompt:
1314-
messages.append({"role": "system", "content": system_prompt})
1315-
messages.extend(self.chat_history)
1316-
1317-
# Modify prompt if output_json or output_pydantic is specified
1318-
original_prompt = prompt
1319-
if output_json or output_pydantic:
1320-
if isinstance(prompt, str):
1321-
prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
1322-
elif isinstance(prompt, list):
1323-
# For multimodal prompts, append to the text content
1324-
for item in prompt:
1325-
if item["type"] == "text":
1326-
item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
1327-
break
1328-
1329-
if isinstance(prompt, list):
1330-
# If we receive a multimodal prompt list, place it directly in the user message
1331-
messages.append({"role": "user", "content": prompt})
1332-
else:
1333-
messages.append({"role": "user", "content": prompt})
1404+
# Use the new _build_messages helper method
1405+
messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)
13341406

13351407
final_response_text = None
13361408
reflection_count = 0
@@ -1566,38 +1638,8 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
15661638
return None
15671639

15681640
# For OpenAI client
1569-
if self.use_system_prompt:
1570-
system_prompt = f"""{self.backstory}\n
1571-
Your Role: {self.role}\n
1572-
Your Goal: {self.goal}
1573-
"""
1574-
if output_json:
1575-
system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
1576-
elif output_pydantic:
1577-
system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
1578-
else:
1579-
system_prompt = None
1580-
1581-
messages = []
1582-
if system_prompt:
1583-
messages.append({"role": "system", "content": system_prompt})
1584-
messages.extend(self.chat_history)
1585-
1586-
# Modify prompt if output_json or output_pydantic is specified
1587-
original_prompt = prompt
1588-
if output_json or output_pydantic:
1589-
if isinstance(prompt, str):
1590-
prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
1591-
elif isinstance(prompt, list):
1592-
for item in prompt:
1593-
if item["type"] == "text":
1594-
item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
1595-
break
1596-
1597-
if isinstance(prompt, list):
1598-
messages.append({"role": "user", "content": prompt})
1599-
else:
1600-
messages.append({"role": "user", "content": prompt})
1641+
# Use the new _build_messages helper method
1642+
messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)
16011643

16021644
reflection_count = 0
16031645
start_time = time.time()
@@ -1619,20 +1661,8 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
16191661
agent_tools=agent_tools
16201662
)
16211663

1622-
# Format tools if provided
1623-
formatted_tools = []
1624-
if tools:
1625-
for tool in tools:
1626-
if isinstance(tool, str):
1627-
tool_def = self._generate_tool_definition(tool)
1628-
if tool_def:
1629-
formatted_tools.append(tool_def)
1630-
elif isinstance(tool, dict):
1631-
formatted_tools.append(tool)
1632-
elif hasattr(tool, "to_openai_tool"):
1633-
formatted_tools.append(tool.to_openai_tool())
1634-
elif callable(tool):
1635-
formatted_tools.append(self._generate_tool_definition(tool.__name__))
1664+
# Use the new _format_tools_for_completion helper method
1665+
formatted_tools = self._format_tools_for_completion(tools)
16361666

16371667
# Create async OpenAI client
16381668
async_client = AsyncOpenAI()

0 commit comments

Comments
 (0)