Skip to content

Commit d258d8d

Browse files
authored
fix(litellm): preserve reasoning.summary when passing to LiteLLM (#2144)
1 parent a9d95b4 commit d258d8d

File tree

2 files changed

+59
-2
lines changed

2 files changed

+59
-2
lines changed

src/agents/extensions/models/litellm_model.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -339,10 +339,23 @@ async def _fetch_response(
339339
f"Response format: {response_format}\n"
340340
)
341341

342-
reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
342+
# Build reasoning_effort - use dict only when summary is present (OpenAI feature)
343+
# Otherwise pass string for backward compatibility with all providers
344+
reasoning_effort: dict[str, Any] | str | None = None
345+
if model_settings.reasoning:
346+
if model_settings.reasoning.summary is not None:
347+
# Dict format when summary is needed (OpenAI only)
348+
reasoning_effort = {
349+
"effort": model_settings.reasoning.effort,
350+
"summary": model_settings.reasoning.summary,
351+
}
352+
elif model_settings.reasoning.effort is not None:
353+
# String format for compatibility with all providers
354+
reasoning_effort = model_settings.reasoning.effort
355+
343356
# Enable developers to pass non-OpenAI compatible reasoning_effort data like "none"
344357
# Priority order:
345-
# 1. model_settings.reasoning.effort
358+
# 1. model_settings.reasoning (effort + summary)
346359
# 2. model_settings.extra_body["reasoning_effort"]
347360
# 3. model_settings.extra_args["reasoning_effort"]
348361
if (

tests/models/test_litellm_extra_body.py

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ async def fake_acompletion(model, messages=None, **kwargs):
115115
previous_response_id=None,
116116
)
117117

118+
# reasoning_effort is string when no summary is provided (backward compatible)
118119
assert captured["reasoning_effort"] == "low"
119120
assert settings.extra_body == {"reasoning_effort": "high"}
120121

@@ -155,3 +156,46 @@ async def fake_acompletion(model, messages=None, **kwargs):
155156
assert captured["reasoning_effort"] == "none"
156157
assert captured["custom_param"] == "custom"
157158
assert settings.extra_args == {"reasoning_effort": "low", "custom_param": "custom"}
159+
160+
161+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_reasoning_summary_is_preserved(monkeypatch):
    """Verify both reasoning.effort and reasoning.summary reach litellm.acompletion.

    Regression test for https://github.com/BerriAI/litellm/issues/17428:
    previously only ``reasoning.effort`` was extracted from
    ``ModelSettings.reasoning``, silently dropping the ``summary`` field.
    The fix forwards a dict containing both values whenever a summary is set.
    """
    from openai.types.shared import Reasoning

    seen_kwargs: dict[str, object] = {}

    async def fake_acompletion(model, messages=None, **kwargs):
        # Record every keyword the model implementation forwarded to LiteLLM.
        seen_kwargs.update(kwargs)
        reply = Choices(index=0, message=Message(role="assistant", content="ok"))
        return ModelResponse(choices=[reply], usage=Usage(0, 0, 0))

    monkeypatch.setattr(litellm, "acompletion", fake_acompletion)

    model = LitellmModel(model="test-model")
    await model.get_response(
        system_instructions=None,
        input=[],
        model_settings=ModelSettings(
            reasoning=Reasoning(effort="medium", summary="auto"),
        ),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )

    # When a summary is configured, the dict form must carry both fields.
    assert seen_kwargs["reasoning_effort"] == {"effort": "medium", "summary": "auto"}

0 commit comments

Comments
 (0)