
Commit 47f66f3

fix: Address reviewer feedback on self-reflection test files
- Move test files from src/praisonai-agents/ to tests/ directory per repository guidelines
- Replace print statements and return values with proper assert statements
- Fix f-string issues without placeholders
- Add comprehensive exception handling for test failures
- Create improved test structure following repository patterns

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Mervin Praison <MervinPraison@users.noreply.github.com>
1 parent 277a779 commit 47f66f3
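
The second bullet in the commit message, replacing print statements and return values with proper assert statements, is the main change to how these tests report results. A minimal sketch of that conversion follows; run_agent is a hypothetical stand-in for the agent/LLM calls the real tests make (agent.start(), agents.start(), llm.get_response()), not an actual praisonaiagents API.

# Sketch of the print/return -> assert conversion described in the commit
# message. run_agent is a placeholder, not a real praisonaiagents function.
def run_agent(prompt: str) -> str:
    return f"stubbed answer to: {prompt}"

# Old style: prints and returns a value, so a test runner never records a failure.
def check_old_style():
    result = run_agent("What is 2 + 2?")
    print(f"Result: {result}")
    return result

# New style: asserts on the result and converts any error into an assertion
# failure, mirroring the try/except/assert pattern used in the files below.
def test_new_style():
    try:
        result = run_agent("What is 2 + 2?")
        assert result, "Agent failed to produce a response."
    except Exception as e:
        raise AssertionError(f"Test failed with error: {e}")

if __name__ == "__main__":
    test_new_style()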

File tree

4 files changed: +241, -0 lines changed

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
#!/usr/bin/env python3
"""Test LLM class directly to verify self-reflection fix"""

from praisonaiagents.llm import LLM
from praisonaiagents.tools import calculator

def test_llm_direct():
    """Test LLM class directly with self-reflection and tools"""
    print("=== Testing LLM Direct with Self-Reflection and Tools ===")

    # Create LLM instance
    llm = LLM(model="gpt-4o-mini")

    # Test with self-reflection and tools
    try:
        response = llm.get_response(
            prompt="Calculate 15 * 23 and verify your answer",
            system_prompt="You are a helpful math assistant. Use tools when needed.",
            tools=[calculator],
            self_reflect=True,
            min_reflect=1,
            max_reflect=2,
            verbose=True
        )

        print(f"\nResponse: {response}")

        assert response, "LLM self-reflection with tools failed to produce a response."
        print("\n✅ SUCCESS: LLM self-reflection with tools is working!")

    except Exception as e:
        print(f"\n❌ ERROR: {str(e)}")
        raise AssertionError(f"Test failed with error: {str(e)}")

if __name__ == "__main__":
    test_llm_direct()
Lines changed: 117 additions & 0 deletions
@@ -0,0 +1,117 @@
#!/usr/bin/env python3
"""Comprehensive test script to verify self-reflection works with tools after the fix"""

from praisonaiagents import Agent, Task, PraisonAIAgents
from praisonaiagents.tools import duckduckgo_search

def test_self_reflection_with_tools():
    """Test self-reflection with tools - should work after the fix"""
    print("=== Testing Self-Reflection WITH Tools ===")

    # Create an agent with self-reflection and tools
    agent = Agent(
        role="Senior Research Analyst",
        goal="Analyze and provide insights on given topics",
        backstory="You are an expert analyst with strong critical thinking skills",
        self_reflect=True,
        llm="gemini/gemini-2.5-flash-lite-preview-06-17",
        verbose=True,
        tools=[duckduckgo_search]
    )

    # Create a task
    task = Task(
        description="Search for recent developments in AI and provide a brief analysis",
        expected_output="A detailed analysis report",
        agent=agent
    )

    # Create and start the agents
    agents = PraisonAIAgents(
        agents=[agent],
        tasks=[task],
        process="sequential"
    )

    try:
        # Start execution
        result = agents.start()
        print(f"Result with tools: {result}")

        assert result, "Self-reflection with tools failed to produce a result."
        print("\n✅ SUCCESS: Self-reflection with tools is working!")
        return result

    except Exception as e:
        print(f"\n❌ ERROR: {str(e)}")
        raise AssertionError(f"Test with tools failed: {str(e)}")

def test_self_reflection_without_tools():
    """Test self-reflection without tools - should work (baseline)"""
    print("\n=== Testing Self-Reflection WITHOUT Tools ===")

    # Create an agent with self-reflection but no tools
    agent = Agent(
        role="Senior Research Analyst",
        goal="Analyze and provide insights on given topics",
        backstory="You are an expert analyst with strong critical thinking skills",
        self_reflect=True,
        llm="gemini/gemini-2.5-flash-lite-preview-06-17",
        verbose=True
    )

    # Create a task
    task = Task(
        description="Analyze recent developments in AI",
        expected_output="A detailed analysis report",
        agent=agent
    )

    # Create and start the agents
    agents = PraisonAIAgents(
        agents=[agent],
        tasks=[task],
        process="sequential"
    )

    try:
        # Start execution
        result = agents.start()
        print(f"Result without tools: {result}")

        assert result, "Self-reflection without tools failed to produce a result."
        print("\n✅ SUCCESS: Self-reflection without tools is working!")
        return result

    except Exception as e:
        print(f"\n❌ ERROR: {str(e)}")
        raise AssertionError(f"Test without tools failed: {str(e)}")

if __name__ == "__main__":
    print("Testing self-reflection fix...")

    # Test without tools (should work)
    try:
        result_without_tools = test_self_reflection_without_tools()
        without_tools_success = True
    except Exception as e:
        print(f"Test without tools failed: {e}")
        without_tools_success = False

    # Test with tools (should work after fix)
    try:
        result_with_tools = test_self_reflection_with_tools()
        with_tools_success = True
    except Exception as e:
        print(f"Test with tools failed: {e}")
        with_tools_success = False

    print("\n=== Test Summary ===")
    print(f"Without tools: {'SUCCESS' if without_tools_success else 'FAILED'}")
    print(f"With tools: {'SUCCESS' if with_tools_success else 'FAILED'}")

    if with_tools_success:
        print("\n✅ Fix verified: Self-reflection now works with tools!")
    else:
        print("\n❌ Fix failed: Self-reflection still not working with tools")
        raise AssertionError("Self-reflection with tools test failed")
Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
#!/usr/bin/env python3
"""Simple test to verify the self-reflection fix works"""

from praisonaiagents import Agent
from praisonaiagents.tools import calculator

def test_self_reflection_fix():
    """Test that self-reflection works with tools after the fix"""
    print("=== Testing Self-Reflection Fix ===")

    # Create an agent with self-reflection and a simple tool
    agent = Agent(
        role="Math Assistant",
        goal="Solve math problems accurately",
        backstory="You are a helpful math assistant",
        self_reflect=True,
        llm="gpt-4o-mini",  # Use a more widely available model
        verbose=True,
        tools=[calculator],
        min_reflect=1,
        max_reflect=2
    )

    # Test with a simple calculation that might trigger self-reflection
    try:
        response = agent.start("What is 25 * 17? Show your work and double-check the answer.")
        print(f"\nResponse: {response}")

        assert response, "Self-reflection with tools failed to produce a response."
        print("\n✅ SUCCESS: Self-reflection with tools is working!")

    except Exception as e:
        print(f"\n❌ ERROR: {str(e)}")
        raise AssertionError(f"Test failed with error: {str(e)}")

if __name__ == "__main__":
    test_self_reflection_fix()
Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""Test to verify the self-reflection fix works with tools"""

from praisonaiagents import Agent, Task, PraisonAIAgents
from praisonaiagents.tools import calculator

def test_self_reflection_fix_verification():
    """Test that self-reflection now works with tools"""
    print("=== Testing Self-Reflection Fix Verification ===")

    # Create an agent with self-reflection enabled and tools
    agent = Agent(
        name="MathAgent",
        role="Math Assistant",
        goal="Solve mathematical problems accurately",
        backstory="You are a helpful math assistant who double-checks calculations",
        tools=[calculator],
        self_reflect=True,  # This should now work with tools
        min_reflect=1,
        max_reflect=2,
        verbose=True
    )

    # Define a task that would benefit from self-reflection
    task = Task(
        description="Calculate 123 * 456 and verify the result is correct",
        expected_output="The calculation result with verification",
        agent=agent,
        name="math_calculation"
    )

    # Create and run the agents
    agents = PraisonAIAgents(
        agents=[agent],
        tasks=[task],
        process="sequential"
    )

    try:
        result = agents.start()

        assert result, "Self-reflection with tools failed to produce a result."
        print("\n✅ SUCCESS: Self-reflection with tools is working!")
        print(f"Result: {result}")

    except Exception as e:
        print(f"\n❌ ERROR: {str(e)}")
        raise AssertionError(f"Test failed with error: {str(e)}")

if __name__ == "__main__":
    test_self_reflection_fix_verification()
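
Since the commit message moves these files into the tests/ directory, they should be discoverable by pytest. A minimal invocation sketch, assuming pytest is installed and that tests/ is the correct path (the exact filenames are not visible on this page):

# Hedged sketch: run the relocated tests through pytest's Python API.
# The "tests/" path is taken from the commit message, not from this diff.
import sys
import pytest

sys.exit(pytest.main(["-v", "tests/"]))

Note that the test files above call live model backends (gpt-4o-mini and a Gemini model), so the corresponding API keys would need to be configured before running them.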
