17 changes: 15 additions & 2 deletions src/agents/extensions/models/litellm_model.py
@@ -339,10 +339,23 @@ async def _fetch_response(
                 f"Response format: {response_format}\n"
             )

-        reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
+        # Build reasoning_effort - use dict only when summary is present (OpenAI feature)
+        # Otherwise pass string for backward compatibility with all providers
+        reasoning_effort: dict[str, Any] | str | None = None
+        if model_settings.reasoning:
+            if model_settings.reasoning.summary is not None:
+                # Dict format when summary is needed (OpenAI only)
+                reasoning_effort = {
+                    "effort": model_settings.reasoning.effort,
+                    "summary": model_settings.reasoning.summary,
+                }
+            elif model_settings.reasoning.effort is not None:
+                # String format for compatibility with all providers
+                reasoning_effort = model_settings.reasoning.effort
+
         # Enable developers to pass non-OpenAI compatible reasoning_effort data like "none"
         # Priority order:
-        # 1. model_settings.reasoning.effort
+        # 1. model_settings.reasoning (effort + summary)
         # 2. model_settings.extra_body["reasoning_effort"]
         # 3. model_settings.extra_args["reasoning_effort"]
         if (
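Not part of the diff, but as a quick illustration of the two shapes reasoning_effort can now take, the sketch below builds both variants from the caller's side. It assumes the ModelSettings, LitellmModel, and Reasoning types exercised by the tests further down; the import paths follow common openai-agents usage and may differ between releases, and "some-model" is only a placeholder model name.

from openai.types.shared import Reasoning

from agents import ModelSettings
from agents.extensions.models.litellm_model import LitellmModel

# Effort only: reasoning_effort stays a plain string ("low"), which every
# LiteLLM provider already understands (backward-compatible path).
plain_settings = ModelSettings(reasoning=Reasoning(effort="low"))

# Effort + summary: reasoning_effort becomes a dict, so the summary request
# is no longer dropped ({"effort": "medium", "summary": "auto"}, OpenAI only).
summary_settings = ModelSettings(reasoning=Reasoning(effort="medium", summary="auto"))

# Placeholder model name; any LiteLLM-routable model string would do here.
model = LitellmModel(model="some-model")
# Passing summary_settings to model.get_response(...) would then forward the
# dict to litellm.acompletion as kwargs["reasoning_effort"].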
44 changes: 44 additions & 0 deletions tests/models/test_litellm_extra_body.py
@@ -115,6 +115,7 @@ async def fake_acompletion(model, messages=None, **kwargs):
         previous_response_id=None,
     )

+    # reasoning_effort is string when no summary is provided (backward compatible)
     assert captured["reasoning_effort"] == "low"
     assert settings.extra_body == {"reasoning_effort": "high"}

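The assertion above also pins down the priority order named in litellm_model.py: an explicit model_settings.reasoning value ("low") wins over extra_body["reasoning_effort"] ("high"). The actual resolution lives in the part of _fetch_response that is collapsed in the diff above, so the helper below is only a hypothetical sketch of that order, not the code in this PR.

from typing import Any


def resolve_reasoning_effort(
    reasoning_effort: dict[str, Any] | str | None,
    extra_body: dict[str, Any] | None,
    extra_args: dict[str, Any] | None,
) -> dict[str, Any] | str | None:
    # Hypothetical illustration of the documented priority order.
    # 1. model_settings.reasoning (effort + summary), already normalized above
    if reasoning_effort is not None:
        return reasoning_effort
    # 2. model_settings.extra_body["reasoning_effort"]
    if extra_body and "reasoning_effort" in extra_body:
        return extra_body["reasoning_effort"]
    # 3. model_settings.extra_args["reasoning_effort"]
    if extra_args and "reasoning_effort" in extra_args:
        return extra_args["reasoning_effort"]
    return None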
@@ -155,3 +156,46 @@ async def fake_acompletion(model, messages=None, **kwargs):
     assert captured["reasoning_effort"] == "none"
     assert captured["custom_param"] == "custom"
     assert settings.extra_args == {"reasoning_effort": "low", "custom_param": "custom"}
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_reasoning_summary_is_preserved(monkeypatch):
+    """
+    Ensure reasoning.summary is preserved when passing ModelSettings.reasoning.
+
+    This test verifies the fix for GitHub issue:
+    https://github.com/BerriAI/litellm/issues/17428
+
+    Previously, only reasoning.effort was extracted, losing the summary field.
+    Now we pass a dict with both effort and summary to LiteLLM.
+    """
+    from openai.types.shared import Reasoning
+
+    captured: dict[str, object] = {}
+
+    async def fake_acompletion(model, messages=None, **kwargs):
+        captured.update(kwargs)
+        msg = Message(role="assistant", content="ok")
+        choice = Choices(index=0, message=msg)
+        return ModelResponse(choices=[choice], usage=Usage(0, 0, 0))
+
+    monkeypatch.setattr(litellm, "acompletion", fake_acompletion)
+    settings = ModelSettings(
+        reasoning=Reasoning(effort="medium", summary="auto"),
+    )
+    model = LitellmModel(model="test-model")
+
+    await model.get_response(
+        system_instructions=None,
+        input=[],
+        model_settings=settings,
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        previous_response_id=None,
+    )
+
+    # Both effort and summary should be preserved in the dict
+    assert captured["reasoning_effort"] == {"effort": "medium", "summary": "auto"}