Commit d0d4d1f

Fix: Agent.start() now auto-consumes generator for better UX
- Modified Agent.start() method to automatically consume the generator by default
- Added return_generator=True parameter for backwards compatibility
- Made MCP import optional to prevent import errors
- Updated examples to demonstrate new behavior
- Fixes issue where basic-agents.py would stop without producing output

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Mervin Praison <MervinPraison@users.noreply.github.com>
1 parent 2c93361 commit d0d4d1f

File tree: 4 files changed, +46 −6 lines

src/praisonai-agents/basic-agents.py

Lines changed: 1 addition & 0 deletions

```diff
@@ -5,4 +5,5 @@
     llm="gpt-4o-mini"
 )
 
+# The start() method now automatically consumes the generator and displays the output
 agent.start("Why sky is Blue?")
```

src/praisonai-agents/praisonaiagents/__init__.py

Lines changed: 12 additions & 3 deletions

```diff
@@ -34,7 +34,13 @@
 from .agents.autoagents import AutoAgents
 from .knowledge.knowledge import Knowledge
 from .knowledge.chunking import Chunking
-from .mcp.mcp import MCP
+# MCP support (optional)
+try:
+    from .mcp.mcp import MCP
+    _mcp_available = True
+except ImportError:
+    _mcp_available = False
+    MCP = None
 from .session import Session
 from .memory.memory import Memory
 from .guardrails import GuardrailResult, LLMGuardrail
@@ -124,7 +130,6 @@ def disable_telemetry():
     'async_display_callbacks',
     'Knowledge',
     'Chunking',
-    'MCP',
     'GuardrailResult',
     'LLMGuardrail',
     'Handoff',
@@ -137,4 +142,8 @@ def disable_telemetry():
     'disable_telemetry',
     'MinimalTelemetry',
     'TelemetryCollector'
-]
+]
+
+# Add MCP to __all__ if available
+if _mcp_available:
+    __all__.append('MCP')
```
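Because the fallback binds `MCP = None` and omits `'MCP'` from `__all__`, downstream code can feature-detect the optional dependency instead of catching ImportError itself. A minimal sketch of that pattern (the constructor argument shown is illustrative, not a confirmed signature):

```python
from praisonaiagents import MCP

if MCP is not None:
    # Optional MCP dependency is installed: build MCP-backed tools.
    mcp_tools = MCP("http://localhost:8080/sse")  # illustrative argument, not a confirmed signature
else:
    # MCP extras are missing: continue without MCP tooling.
    mcp_tools = None
```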

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 31 additions & 2 deletions

```diff
@@ -1981,12 +1981,41 @@ def _cleanup_telemetry(self):
             logging.debug(f"Error cleaning up telemetry: {e}")
 
     def start(self, prompt: str, **kwargs):
-        """Start the agent with a prompt. This is a convenience method that wraps chat()."""
+        """Start the agent with a prompt. This is a convenience method that wraps chat().
+
+        This method is designed to be convenient for simple use cases. By default, it will
+        automatically consume the generator when streaming is enabled and return the final
+        response, while still displaying the output to the user.
+
+        For advanced use cases that need the raw generator (e.g., for custom streaming
+        handling), use return_generator=True.
+
+        Args:
+            prompt: The prompt to send to the agent
+            **kwargs: Additional arguments to pass to the chat method
+                - stream: If explicitly set to False, disables streaming
+                - return_generator: If True, returns the raw generator for custom handling
+
+        Returns:
+            The final response from the agent (default), or a generator if return_generator=True
+        """
         try:
+            # Check if user explicitly wants the raw generator for custom handling
+            return_generator = kwargs.pop('return_generator', False)
+
             # Check if streaming is enabled and user wants streaming chunks
             if self.stream and kwargs.get('stream', True):
                 result = self._start_stream(prompt, **kwargs)
-                return result
+
+                if return_generator:
+                    # Return the raw generator for advanced users
+                    return result
+                else:
+                    # Auto-consume the generator for convenience while preserving display
+                    final_response = None
+                    for chunk in result:
+                        final_response = chunk  # Last chunk is typically the final response
+                    return final_response
             else:
                 result = self.chat(prompt, **kwargs)
                 return result
```
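Taken together, the diff gives start() two call patterns. A short sketch of both, assuming an Agent constructed as in basic-agents.py (the `instructions` argument follows the repo's usual examples and is not shown in this diff):

```python
from praisonaiagents import Agent

agent = Agent(instructions="You are a helpful assistant", llm="gpt-4o-mini")

# Default: start() consumes the stream internally and returns the final response.
response = agent.start("Why is the sky blue?")

# Advanced: opt back into the raw generator for custom chunk handling.
for chunk in agent.start("Why is the sky blue?", return_generator=True):
    print(chunk, end="", flush=True)
```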

src/praisonai-agents/realtime-streaming.py

Lines changed: 2 additions & 1 deletion

```diff
@@ -8,5 +8,6 @@
     stream=True
 )
 
-for chunk in agent.start("Write a report on about the history of the world"):
+# Use return_generator=True to get the raw generator for custom streaming handling
+for chunk in agent.start("Write a report on about the history of the world", return_generator=True):
     print(chunk, end="", flush=True)
```
