Skip to content

Commit ce0b086

Browse files
committed
add test
1 parent a26be13 commit ce0b086

File tree

1 file changed

+81
-0
lines changed

1 file changed

+81
-0
lines changed

tests/test_litellm/litellm_core_utils/test_streaming_chunk_builder_utils.py

Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
Function,
1717
ModelResponseStream,
1818
PromptTokensDetails,
19+
ServerToolUse,
1920
StreamingChoices,
2021
Usage,
2122
)
@@ -325,3 +326,83 @@ def test_stream_chunk_builder_litellm_usage_chunks():
325326
assert usage.prompt_tokens == 50
326327
assert usage.completion_tokens == 27
327328
assert usage.total_tokens == 77
329+
330+
331+
def test_stream_chunk_builder_anthropic_web_search():
    """Web-search request counts from an Anthropic stream survive usage aggregation.

    Two mocked streaming chunks split the usage between them: the first carries
    the prompt tokens plus a ``server_tool_use`` block, the second carries the
    completion tokens. After ``ChunkProcessor.calculate_usage`` combines them,
    the token counts must sum correctly and ``web_search_requests`` must be
    preserved on the merged usage object.
    """

    def _make_chunk(created, finish_reason, content, usage):
        # Build one minimal streaming chunk that differs only in the fields
        # relevant to this test (timestamp, finish reason, delta content, usage).
        return ModelResponseStream(
            id="chatcmpl-mocked-usage-1",
            created=created,
            model="claude-sonnet-4-5-20250929",
            object="chat.completion.chunk",
            system_fingerprint=None,
            choices=[
                StreamingChoices(
                    finish_reason=finish_reason,
                    index=0,
                    delta=Delta(
                        provider_specific_fields=None,
                        content=content,
                        role=None,
                        function_call=None,
                        tool_calls=None,
                        audio=None,
                    ),
                    logprobs=None,
                )
            ],
            provider_specific_fields=None,
            stream_options={"include_usage": True},
            usage=usage,
        )

    # First chunk: prompt-side usage, including the server-side web-search count.
    first_chunk = _make_chunk(
        created=1745513206,
        finish_reason=None,
        content="",
        usage=Usage(
            completion_tokens=0,
            prompt_tokens=50,
            total_tokens=50,
            completion_tokens_details=None,
            server_tool_use=ServerToolUse(web_search_requests=2),
            prompt_tokens_details=None,
        ),
    )

    # Second chunk: completion-side usage, terminating the stream.
    second_chunk = _make_chunk(
        created=1745513207,
        finish_reason="stop",
        content=None,
        usage=Usage(
            completion_tokens=27,
            prompt_tokens=0,
            total_tokens=27,
            completion_tokens_details=None,
            prompt_tokens_details=None,
        ),
    )

    chunks = [first_chunk, second_chunk]
    processor = ChunkProcessor(chunks=chunks)

    usage = processor.calculate_usage(
        chunks=chunks, model="claude-sonnet-4-5-20250929", completion_output=""
    )

    assert usage.prompt_tokens == 50
    assert usage.completion_tokens == 27
    assert usage.total_tokens == 77
    # NOTE(review): subscript access assumes litellm's Usage/ServerToolUse types
    # support dict-style lookup — confirmed only by this test's original form.
    assert usage.server_tool_use["web_search_requests"] == 2

0 commit comments

Comments
 (0)