@@ -1371,6 +1371,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
             if not response:
                 # Rollback chat history on response failure
                 self.chat_history = self.chat_history[:chat_history_length]
+                self._cleanup_telemetry()
                 return None
 
             response_text = response.choices[0].message.content.strip()
@@ -1385,11 +1386,13 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                 # Execute callback after validation
                 self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
+                self._cleanup_telemetry()
                 return validated_response
             except Exception as e:
                 logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
                 # Rollback chat history on guardrail failure
                 self.chat_history = self.chat_history[:chat_history_length]
+                self._cleanup_telemetry()
                 return None
 
         if not self.self_reflect:
@@ -1491,11 +1494,13 @@ def __init__(self, data):
                     validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
                     self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
+                    self._cleanup_telemetry()
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
                     # Rollback chat history on guardrail failure
                     self.chat_history = self.chat_history[:chat_history_length]
+                    self._cleanup_telemetry()
                     return None
 
                 # Check if we've hit max reflections
@@ -1509,11 +1514,13 @@ def __init__(self, data):
                     validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
                     self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
+                    self._cleanup_telemetry()
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
                     # Rollback chat history on guardrail failure
                     self.chat_history = self.chat_history[:chat_history_length]
+                    self._cleanup_telemetry()
                     return None
 
                 # If not satisfactory and not at max reflections, continue with regeneration
@@ -1646,11 +1653,13 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                     validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
                     self._execute_callback_and_display(normalized_content, validated_response, time.time() - start_time, task_name, task_description, task_id)
+                    self._cleanup_telemetry()
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
                     # Rollback chat history on guardrail failure
                     self.chat_history = self.chat_history[:chat_history_length]
+                    self._cleanup_telemetry()
                     return None
             except Exception as e:
                 # Rollback chat history if LLM call fails
@@ -1710,6 +1719,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
         if self._openai_client is None:
             error_msg = "OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider."
             display_error(error_msg)
+            self._cleanup_telemetry()
             return None
 
         # Make the API call based on the type of request
@@ -1726,6 +1736,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                     logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 # Execute callback after tool completion
                 self._execute_callback_and_display(original_prompt, result, time.time() - start_time, task_name, task_description, task_id)
+                self._cleanup_telemetry()
                 return result
             elif output_json or output_pydantic:
                 response = await self._openai_client.async_client.chat.completions.create(
@@ -1740,6 +1751,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                     logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 # Execute callback after JSON/Pydantic completion
                 self._execute_callback_and_display(original_prompt, response_text, time.time() - start_time, task_name, task_description, task_id)
+                self._cleanup_telemetry()
                 return response_text
             else:
                 response = await self._openai_client.async_client.chat.completions.create(
@@ -1780,6 +1792,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                 if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                     total_time = time.time() - start_time
                     logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                self._cleanup_telemetry()
                 return response_text
 
             reflection_response = await self._openai_client.async_client.beta.chat.completions.parse(
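
Note: every addition above is the same two-line pattern: call self._cleanup_telemetry() immediately before an early return, so telemetry state is released on each exit path of chat()/achat(). Below is a minimal standalone sketch of that invariant using try/finally instead of repeating the call at every return. AgentSketch, _call_llm(), and the body of _cleanup_telemetry() are illustrative assumptions, not code from this PR; the helper's implementation is not shown in these hunks.

    import logging
    import time


    class AgentSketch:
        def __init__(self, name="agent"):
            self.name = name
            self.chat_history = []

        def _cleanup_telemetry(self):
            # Assumed behavior: flush/detach per-call telemetry state so that
            # early returns do not leak spans or counters.
            logging.debug(f"Agent {self.name}: telemetry cleaned up")

        def _call_llm(self, prompt):
            # Stand-in for the OpenAI/custom-LLM call made by the real method.
            return f"echo: {prompt}"

        def chat(self, prompt):
            chat_history_length = len(self.chat_history)
            start_time = time.time()
            # try/finally guarantees cleanup on every exit, including
            # exceptions; the diff instead calls the helper explicitly
            # before each return it touches.
            try:
                self.chat_history.append({"role": "user", "content": prompt})
                response = self._call_llm(prompt)
                if not response:
                    # Rollback chat history on response failure, as in the diff
                    self.chat_history = self.chat_history[:chat_history_length]
                    return None
                return response
            finally:
                self._cleanup_telemetry()
                logging.debug(f"Agent.chat completed in {time.time() - start_time:.2f} seconds")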