Commit a9641ad

Merge branch 'main' into claude/issue-958-20250716-2304
2 parents 58f0d2c + 8fa6ffa commit a9641ad

24 files changed, +549 -302 lines changed

README.md

Lines changed: 4 additions & 4 deletions

@@ -120,6 +120,10 @@ agent.start('Write a movie script about a robot in Mars');
 
 ![PraisonAI CLI Demo](docs/demo/praisonai-cli-demo.gif)
 
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=MervinPraison/PraisonAI&type=Date)](https://docs.praison.ai)
+
 ## AI Agents Flow
 
 ```mermaid

@@ -557,10 +561,6 @@ uv pip install -r pyproject.toml --extra "crewai,autogen"
 - 🖼️ Vision Language Model (VLM) Support
 - 🎙️ Real-time Voice Interaction
 
-## Star History
-
-[![Star History Chart](https://api.star-history.com/svg?repos=MervinPraison/PraisonAI&type=Date)](https://docs.praison.ai)
-
 ## Video Tutorials
 
 | Topic | Video |

docker/Dockerfile

Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     flask \
-    "praisonai>=2.2.67" \
+    "praisonai>=2.2.69" \
     "praisonai[api]" \
     gunicorn \
     markdown

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.67" \
+    "praisonai>=2.2.69" \
    "praisonai[chat]" \
    "embedchain[github,youtube]"

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.67" \
+    "praisonai>=2.2.69" \
     "praisonai[ui]" \
     "praisonai[chat]" \
     "praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.67" \
+    "praisonai>=2.2.69" \
     "praisonai[ui]" \
     "praisonai[crewai]"

docker/README.md

Lines changed: 2 additions & 2 deletions

@@ -121,7 +121,7 @@ healthcheck:
 ## 📦 Package Versions
 
 All Docker images use consistent, up-to-date versions:
-- PraisonAI: `>=2.2.67`
+- PraisonAI: `>=2.2.69`
 - PraisonAI Agents: `>=0.0.92`
 - Python: `3.11-slim`
 

@@ -218,7 +218,7 @@ docker-compose up -d
 ### Version Pinning
 To use specific versions, update the Dockerfile:
 ```dockerfile
-RUN pip install "praisonai==2.2.67" "praisonaiagents==0.0.92"
+RUN pip install "praisonai==2.2.69" "praisonaiagents==0.0.92"
 ```
 
 ## 🌐 Production Deployment
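Since every image is expected to carry the same version floors, a quick sanity check inside a running container can confirm them. Below is a minimal sketch (not part of this commit) assuming the distributions are named `praisonai` and `praisonaiagents`, as in the pip commands above:

```python
# Minimal sketch (not part of this commit): verify at runtime that the
# installed distributions satisfy the version floors listed above. Assumes
# plain dotted numeric versions (a real check would use packaging.version).
from importlib.metadata import PackageNotFoundError, version


def _to_tuple(v: str) -> tuple:
    return tuple(int(part) for part in v.split(".")[:3])


def check_floor(dist: str, floor: str) -> bool:
    try:
        return _to_tuple(version(dist)) >= _to_tuple(floor)
    except PackageNotFoundError:
        return False


if __name__ == "__main__":
    print("praisonai ok:", check_floor("praisonai", "2.2.69"))
    print("praisonaiagents ok:", check_floor("praisonaiagents", "0.0.92"))
```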

src/praisonai-agents/agentic_parallelisation.py

Lines changed: 9 additions & 4 deletions

@@ -94,12 +94,17 @@ async def main():
         process="workflow",
         verbose=True
     )
-    results = await workflow.astart()
+    results = await workflow.astart(dict_output=True)
 
     print("\nParallel Processing Results:")
-    for task_id, result in results["task_results"].items():
-        if result:
-            print(f"Task {task_id}: {result.raw}")
+
+    # Handle both string and dictionary return types
+    if isinstance(results, dict) and "task_results" in results:
+        for task_id, result in results["task_results"].items():
+            if result:
+                print(f"Task {task_id}: {result.raw}")
+    else:
+        print("Final result:", results)
 
 if __name__ == "__main__":
     asyncio.run(main())
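The change above makes the example tolerant of `astart()` returning either a plain string or a dictionary containing `task_results`. A standalone sketch of the same normalisation pattern, with the `raw` attribute mirrored from the example and everything else illustrative:

```python
# Hedged sketch of the defensive pattern above, independent of the library:
# normalize whatever astart() returns into {task_id: text}.
from typing import Any, Dict


def normalize_results(results: Any) -> Dict[str, str]:
    if isinstance(results, dict) and "task_results" in results:
        return {
            str(task_id): getattr(result, "raw", str(result))
            for task_id, result in results["task_results"].items()
            if result
        }
    # Fallback: a plain string (or any other object) becomes a single entry
    return {"final": str(results)}


# Example usage with both shapes:
print(normalize_results("all tasks finished"))
print(normalize_results({"task_results": {}}))
```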

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 43 additions & 36 deletions
Large diffs are not rendered by default.

src/praisonai-agents/praisonaiagents/agents/agents.py

Lines changed: 10 additions & 4 deletions

@@ -362,14 +362,20 @@ def _get_multimodal_message(text_prompt, images):
                     _get_multimodal_message(task_prompt, task.images),
                     tools=tools,
                     output_json=task.output_json,
-                    output_pydantic=task.output_pydantic
+                    output_pydantic=task.output_pydantic,
+                    task_name=task.name,
+                    task_description=task.description,
+                    task_id=task.id
                 )
             else:
                 agent_output = await executor_agent.achat(
                     task_prompt,
                     tools=tools,
                     output_json=task.output_json,
-                    output_pydantic=task.output_pydantic
+                    output_pydantic=task.output_pydantic,
+                    task_name=task.name,
+                    task_description=task.description,
+                    task_id=task.id
                 )
 
             if agent_output:

@@ -1138,7 +1144,7 @@ async def handle_query(request: Request, query_data: Optional[AgentQuery] = None
         try:
             # Use async version if available, otherwise use sync version
             if asyncio.iscoroutinefunction(agent_instance.chat):
-                response = await agent_instance.achat(current_input)
+                response = await agent_instance.achat(current_input, task_name=None, task_description=None, task_id=None)
             else:
                 # Run sync function in a thread to avoid blocking
                 loop = asyncio.get_running_loop()

@@ -1294,7 +1300,7 @@ async def execute_workflow_tool(query: str) -> str:  # Renamed for clarity
         try:
             logging.debug(f"Processing with agent: {agent_instance.name}")
             if hasattr(agent_instance, 'achat') and asyncio.iscoroutinefunction(agent_instance.achat):
-                response = await agent_instance.achat(current_input, tools=agent_instance.tools)
+                response = await agent_instance.achat(current_input, tools=agent_instance.tools, task_name=None, task_description=None, task_id=None)
             elif hasattr(agent_instance, 'chat'):  # Fallback to sync chat if achat not suitable
                 loop = asyncio.get_running_loop()
                 response = await loop.run_in_executor(None, lambda ci=current_input: agent_instance.chat(ci, tools=agent_instance.tools))
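The calls above thread task metadata (`task_name`, `task_description`, `task_id`) into `achat`, passing explicit `None` values where no task context exists. A hedged sketch of that calling convention using a stub coroutine (the signature is illustrative, not the library's actual `achat` definition):

```python
# Illustrative stub: task metadata is always passed explicitly, with None
# when there is no task context (as in the ad-hoc API query path above).
import asyncio
from typing import Any, Optional


async def achat_stub(prompt: str, tools: Optional[list] = None,
                     task_name: Optional[str] = None,
                     task_description: Optional[str] = None,
                     task_id: Optional[Any] = None) -> str:
    context = f" [task={task_name or 'ad-hoc'}]"
    return f"response to {prompt!r}{context}"


async def main():
    # Task-driven call: metadata comes from the task object
    print(await achat_stub("summarise", task_name="summary",
                           task_description="Summarise the input", task_id=1))
    # Ad-hoc call: metadata explicitly None
    print(await achat_stub("hello", task_name=None,
                           task_description=None, task_id=None))


asyncio.run(main())
```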

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 69 additions & 26 deletions

@@ -477,6 +477,49 @@ def _validate_and_filter_ollama_arguments(self, function_name: str, arguments: D
             logging.debug(f"[OLLAMA_FIX] Error validating arguments for {function_name}: {e}")
             return arguments
 
+    def _handle_ollama_sequential_logic(self, iteration_count: int, accumulated_tool_results: List[Any],
+                                        response_text: str, messages: List[Dict]) -> tuple:
+        """
+        Handle Ollama sequential tool execution logic to prevent premature tool summary generation.
+
+        This method implements the two-step process:
+        1. After reaching threshold with tool results, add explicit final answer prompt
+        2. Only generate tool summary if LLM still doesn't respond after explicit prompt
+
+        Args:
+            iteration_count: Current iteration count
+            accumulated_tool_results: List of tool results from all iterations
+            response_text: Current LLM response text
+            messages: Message history list to potentially modify
+
+        Returns:
+            tuple: (should_break, final_response_text, iteration_count)
+                - should_break: Whether to break the iteration loop
+                - final_response_text: Text to use as final response (None if continuing)
+                - iteration_count: Updated iteration count
+        """
+        if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
+            return False, None, iteration_count
+
+        # For Ollama: if we have meaningful tool results but empty responses,
+        # give LLM one final chance with explicit prompt for final answer
+        if accumulated_tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+            # Add explicit prompt asking for final answer
+            messages.append({
+                "role": "user",
+                "content": self.OLLAMA_FINAL_ANSWER_PROMPT
+            })
+            # Continue to next iteration to get the final response
+            iteration_count += 1
+            return False, None, iteration_count
+        else:
+            # If still no response after final answer prompt, generate summary
+            tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+            if tool_summary:
+                return True, tool_summary, iteration_count
+
+        return False, None, iteration_count
+
     def _needs_system_message_skip(self) -> bool:
         """Check if this model requires skipping system messages"""
         if not self.model:

@@ -1132,11 +1175,15 @@ def get_response(
 
                 # Special handling for Ollama to prevent infinite loops
                 # Only generate summary after multiple iterations to allow sequential execution
-                if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
-                    tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
-                    if tool_summary:
-                        final_response_text = tool_summary
-                        break
+                should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                    iteration_count, accumulated_tool_results, response_text, messages
+                )
+                if should_break:
+                    final_response_text = tool_summary_text
+                    break
+                elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                    # Continue iteration after adding final answer prompt
+                    continue
 
                 # Safety check: prevent infinite loops for any provider
                 if iteration_count >= 5:

@@ -1911,11 +1958,15 @@ async def get_response_async(
 
                 # Special handling for Ollama to prevent infinite loops
                 # Only generate summary after multiple iterations to allow sequential execution
-                if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
-                    tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
-                    if tool_summary:
-                        final_response_text = tool_summary
-                        break
+                should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                    iteration_count, accumulated_tool_results, response_text, messages
+                )
+                if should_break:
+                    final_response_text = tool_summary_text
+                    break
+                elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                    # Continue iteration after adding final answer prompt
+                    continue
 
                 # Safety check: prevent infinite loops for any provider
                 if iteration_count >= 5:

@@ -2417,18 +2468,14 @@ def response(
             )
 
             if stream:
-                if verbose:
-                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                        for chunk in litellm.completion(**completion_params):
-                            content = self._process_streaming_chunk(chunk)
-                            if content:
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
-                else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     for chunk in litellm.completion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                        if content:
+                            response_text += content
             else:
                 response = litellm.completion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""

@@ -2517,18 +2564,14 @@ async def aresponse(
             )
 
             if stream:
-                if verbose:
-                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                        async for chunk in await litellm.acompletion(**completion_params):
-                            content = self._process_streaming_chunk(chunk)
-                            if content:
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
-                else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     async for chunk in await litellm.acompletion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                        if content:
+                            response_text += content
             else:
                 response = await litellm.acompletion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
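For readers following the new helper, here is a self-contained sketch of the same two-step pattern outside the class; the threshold, prompt text, and summary function are stand-ins for the class attributes (`OLLAMA_SUMMARY_ITERATION_THRESHOLD`, `OLLAMA_FINAL_ANSWER_PROMPT`, `_generate_ollama_tool_summary`) referenced in the diff:

```python
# Self-contained sketch of the two-step pattern used by
# _handle_ollama_sequential_logic above. Threshold, prompt text and summary
# function are illustrative stand-ins, not the class's actual attributes.
from typing import Any, Dict, List, Optional, Tuple

SUMMARY_ITERATION_THRESHOLD = 3
FINAL_ANSWER_PROMPT = "Please provide your final answer using the tool results above."


def summarize_tools(tool_results: List[Any]) -> Optional[str]:
    return f"Tool results: {tool_results}" if tool_results else None


def sequential_step(iteration: int, tool_results: List[Any],
                    response_text: str, messages: List[Dict]) -> Tuple[bool, Optional[str], int]:
    """Return (should_break, final_text, iteration), mirroring the helper's contract."""
    if iteration < SUMMARY_ITERATION_THRESHOLD:
        return False, None, iteration                       # keep iterating normally
    if tool_results and iteration == SUMMARY_ITERATION_THRESHOLD:
        messages.append({"role": "user", "content": FINAL_ANSWER_PROMPT})
        return False, None, iteration + 1                   # step 1: one last explicit prompt
    summary = summarize_tools(tool_results)                 # step 2: fall back to a summary
    return (True, summary, iteration) if summary else (False, None, iteration)


# Example: at the threshold the prompt is appended; one iteration later a summary breaks the loop.
msgs: List[Dict] = []
print(sequential_step(3, ["42"], "", msgs))   # (False, None, 4); msgs now holds the prompt
print(sequential_step(4, ["42"], "", msgs))   # (True, "Tool results: ['42']", 4)
```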
