Skip to content

Commit 99db438

Browse files
Merge pull request #996 from MervinPraison/claude/pr-990-20250718-1429
Fix: Comprehensive telemetry cleanup to prevent agent termination issues
2 parents 7ff43ce + ed7a4c0 commit 99db438

File tree

2 files changed

+85
-23
lines changed

2 files changed

+85
-23
lines changed

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 32 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1189,8 +1189,9 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
11891189
self._final_display_shown = True
11901190

11911191
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
1192-
# Reset the final display flag for each new conversation
1193-
self._final_display_shown = False
1192+
try:
1193+
# Reset the final display flag for each new conversation
1194+
self._final_display_shown = False
11941195

11951196
# Log all parameter values when in debug mode
11961197
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
@@ -1525,6 +1526,9 @@ def __init__(self, data):
15251526
# Rollback chat history
15261527
self.chat_history = self.chat_history[:chat_history_length]
15271528
return None
1529+
finally:
1530+
# Ensure proper cleanup of telemetry system to prevent hanging
1531+
self._cleanup_telemetry()
15281532

15291533
def clean_json_output(self, output: str) -> str:
15301534
"""Clean and extract JSON from response text."""
@@ -1540,8 +1544,9 @@ def clean_json_output(self, output: str) -> str:
15401544

15411545
async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
15421546
"""Async version of chat method with self-reflection support."""
1543-
# Reset the final display flag for each new conversation
1544-
self._final_display_shown = False
1547+
try:
1548+
# Reset the final display flag for each new conversation
1549+
self._final_display_shown = False
15451550

15461551
# Log all parameter values when in debug mode
15471552
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
@@ -1836,6 +1841,9 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
18361841
total_time = time.time() - start_time
18371842
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
18381843
return None
1844+
finally:
1845+
# Ensure proper cleanup of telemetry system to prevent hanging
1846+
self._cleanup_telemetry()
18391847

18401848
async def _achat_completion(self, response, tools, reasoning_steps=False):
18411849
"""Async version of _chat_completion method"""
@@ -1929,12 +1937,9 @@ async def _achat_completion(self, response, tools, reasoning_steps=False):
19291937

19301938
async def astart(self, prompt: str, **kwargs):
19311939
"""Async version of start method"""
1932-
try:
1933-
result = await self.achat(prompt, **kwargs)
1934-
return result
1935-
finally:
1936-
# Ensure proper cleanup of telemetry system to prevent hanging
1937-
self._cleanup_telemetry()
1940+
# achat() method handles its own cleanup
1941+
result = await self.achat(prompt, **kwargs)
1942+
return result
19381943

19391944
def run(self):
19401945
"""Alias for start() method"""
@@ -1956,17 +1961,15 @@ def _cleanup_telemetry(self):
19561961

19571962
def start(self, prompt: str, **kwargs):
19581963
"""Start the agent with a prompt. This is a convenience method that wraps chat()."""
1959-
try:
1960-
# Check if streaming is enabled and user wants streaming chunks
1961-
if self.stream and kwargs.get('stream', True):
1962-
result = self._start_stream(prompt, **kwargs)
1963-
return result
1964-
else:
1965-
result = self.chat(prompt, **kwargs)
1966-
return result
1967-
finally:
1968-
# Ensure proper cleanup of telemetry system to prevent hanging
1969-
self._cleanup_telemetry()
1964+
# Check if streaming is enabled and user wants streaming chunks
1965+
if self.stream and kwargs.get('stream', True):
1966+
# For streaming, cleanup is handled by _start_stream method
1967+
result = self._start_stream(prompt, **kwargs)
1968+
return result
1969+
else:
1970+
# For non-streaming, chat() method handles its own cleanup
1971+
result = self.chat(prompt, **kwargs)
1972+
return result
19701973

19711974
def _start_stream(self, prompt: str, **kwargs):
19721975
"""Generator method that yields streaming chunks from the agent."""
@@ -1989,8 +1992,12 @@ def _start_stream(self, prompt: str, **kwargs):
19891992
prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"
19901993

19911994
# Get streaming response using the internal streaming method
1992-
for chunk in self._chat_stream(prompt, **kwargs):
1993-
yield chunk
1995+
try:
1996+
for chunk in self._chat_stream(prompt, **kwargs):
1997+
yield chunk
1998+
finally:
1999+
# Ensure proper cleanup of telemetry system to prevent hanging
2000+
self._cleanup_telemetry()
19942001

19952002
def _chat_stream(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, **kwargs):
19962003
"""Internal streaming method that yields chunks from the LLM response."""
@@ -2153,6 +2160,7 @@ def execute(self, task, context=None):
21532160
prompt = task
21542161
else:
21552162
prompt = str(task)
2163+
# chat() method handles its own cleanup
21562164
return self.chat(prompt)
21572165

21582166
async def aexecute(self, task, context=None):
@@ -2167,6 +2175,7 @@ async def aexecute(self, task, context=None):
21672175
task_name = getattr(task, 'name', None)
21682176
task_description = getattr(task, 'description', None)
21692177
task_id = getattr(task, 'id', None)
2178+
# achat() method handles its own cleanup
21702179
return await self.achat(prompt, task_name=task_name, task_description=task_description, task_id=task_id)
21712180

21722181
async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:

test_telemetry_fix.py

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Test the telemetry cleanup fix
4+
"""
5+
import sys
6+
import os
7+
import signal
8+
from datetime import datetime
9+
10+
# Add signal handler for timeout
11+
def timeout_handler(signum, frame):
12+
print("SUCCESS: Program terminated within timeout - fix is working!")
13+
sys.exit(0)
14+
15+
signal.signal(signal.SIGALRM, timeout_handler)
16+
signal.alarm(15) # 15 second timeout
17+
18+
try:
19+
# Set environment variable to disable telemetry (for testing)
20+
os.environ['PRAISONAI_TELEMETRY_DISABLED'] = 'true'
21+
22+
from praisonaiagents import Agent
23+
24+
print(f"[{datetime.now()}] Starting agent termination test...")
25+
26+
# Create agent with minimal setup
27+
agent = Agent(
28+
instructions="You are a helpful assistant",
29+
llm="gpt-4o-mini"
30+
)
31+
32+
print(f"[{datetime.now()}] Agent created successfully")
33+
34+
# Test the start method (which was hanging)
35+
print(f"[{datetime.now()}] Running agent.start()...")
36+
response = agent.start("Hello, just say hi back!")
37+
38+
print(f"[{datetime.now()}] Agent completed successfully!")
39+
print(f"Response: {response}")
40+
41+
# If we get here, the fix worked
42+
print(f"[{datetime.now()}] SUCCESS: Program should terminate properly now!")
43+
44+
except Exception as e:
45+
print(f"ERROR: Exception occurred: {e}")
46+
import traceback
47+
traceback.print_exc()
48+
sys.exit(1)
49+
finally:
50+
# Cancel the alarm
51+
signal.alarm(0)
52+
53+
print("Test completed - program should exit now.")

0 commit comments

Comments
 (0)