Skip to content

Commit 9fb062f

Browse files
Merge pull request #961 from MervinPraison/claude/issue-958-20250716-2304
fix: ensure consistent Task/Response formatting across all LLM providers
2 parents 8fa6ffa + a9641ad commit 9fb062f

File tree

1 file changed

+15
-5
lines changed
  • src/praisonai-agents/praisonaiagents/agent/agent.py

1 file changed

+15
-5
lines changed

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 15 additions and 5 deletions
Original file line numberDiff line numberDiff line change
@@ -354,6 +354,8 @@ def __init__(
354354
self.instructions = instructions
355355
# Check for model name in environment variable if not provided
356356
self._using_custom_llm = False
357+
# Flag to track if final result has been displayed to prevent duplicates
358+
self._final_display_shown = False
357359

358360
# Store OpenAI client parameters for lazy initialization
359361
self._openai_api_key = api_key
@@ -1173,18 +1175,23 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
11731175
task_description=task_description,
11741176
task_id=task_id
11751177
)
1176-
# Only display interaction if not using custom LLM (to avoid double output) and verbose is True
1177-
if self.verbose and not self._using_custom_llm:
1178+
# Always display final interaction when verbose is True to ensure consistent formatting
1179+
# This ensures both OpenAI and custom LLM providers (like Gemini) show formatted output
1180+
if self.verbose and not self._final_display_shown:
11781181
display_interaction(prompt, response, markdown=self.markdown,
11791182
generation_time=generation_time, console=self.console,
11801183
agent_name=self.name,
11811184
agent_role=self.role,
11821185
agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
1183-
task_name=task_name,
1184-
task_description=task_description,
1185-
task_id=task_id)
1186+
task_name=None, # Not available in this context
1187+
task_description=None, # Not available in this context
1188+
task_id=None) # Not available in this context
1189+
self._final_display_shown = True
11861190

11871191
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
1192+
# Reset the final display flag for each new conversation
1193+
self._final_display_shown = False
1194+
11881195
# Log all parameter values when in debug mode
11891196
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
11901197
param_info = {
@@ -1533,6 +1540,9 @@ def clean_json_output(self, output: str) -> str:
15331540

15341541
async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
15351542
"""Async version of chat method with self-reflection support."""
1543+
# Reset the final display flag for each new conversation
1544+
self._final_display_shown = False
1545+
15361546
# Log all parameter values when in debug mode
15371547
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
15381548
param_info = {

0 commit comments

Comments (0)