Skip to content

Commit c51eadd

Browse files
authored
openai[patch]: propagate service_tier to response metadata (#31089)
1 parent 6110c3f commit c51eadd

File tree

3 files changed

+10
-2
lines changed

3 files changed

+10
-2
lines changed

libs/partners/openai/langchain_openai/chat_models/base.py

+5
Original file line numberDiff line numberDiff line change
@@ -736,6 +736,8 @@ def _convert_chunk_to_generation_chunk(
736736
generation_info["model_name"] = model_name
737737
if system_fingerprint := chunk.get("system_fingerprint"):
738738
generation_info["system_fingerprint"] = system_fingerprint
739+
if service_tier := chunk.get("service_tier"):
740+
generation_info["service_tier"] = service_tier
739741

740742
logprobs = choice.get("logprobs")
741743
if logprobs:
@@ -1020,6 +1022,8 @@ def _create_chat_result(
10201022
}
10211023
if "id" in response_dict:
10221024
llm_output["id"] = response_dict["id"]
1025+
if "service_tier" in response_dict:
1026+
llm_output["service_tier"] = response_dict["service_tier"]
10231027

10241028
if isinstance(response, openai.BaseModel) and getattr(
10251029
response, "choices", None
@@ -3243,6 +3247,7 @@ def _construct_lc_result_from_responses_api(
32433247
"status",
32443248
"user",
32453249
"model",
3250+
"service_tier",
32463251
)
32473252
}
32483253
if metadata:

libs/partners/openai/tests/integration_tests/chat_models/test_base.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -350,6 +350,7 @@ def test_response_metadata() -> None:
350350
"logprobs",
351351
"system_fingerprint",
352352
"finish_reason",
353+
"service_tier",
353354
)
354355
)
355356
assert "content" in result.response_metadata["logprobs"]
@@ -367,6 +368,7 @@ async def test_async_response_metadata() -> None:
367368
"logprobs",
368369
"system_fingerprint",
369370
"finish_reason",
371+
"service_tier",
370372
)
371373
)
372374
assert "content" in result.response_metadata["logprobs"]
@@ -380,7 +382,7 @@ def test_response_metadata_streaming() -> None:
380382
full = chunk if full is None else full + chunk
381383
assert all(
382384
k in cast(BaseMessageChunk, full).response_metadata
383-
for k in ("logprobs", "finish_reason")
385+
for k in ("logprobs", "finish_reason", "service_tier")
384386
)
385387
assert "content" in cast(BaseMessageChunk, full).response_metadata["logprobs"]
386388

@@ -393,7 +395,7 @@ async def test_async_response_metadata_streaming() -> None:
393395
full = chunk if full is None else full + chunk
394396
assert all(
395397
k in cast(BaseMessageChunk, full).response_metadata
396-
for k in ("logprobs", "finish_reason")
398+
for k in ("logprobs", "finish_reason", "service_tier")
397399
)
398400
assert "content" in cast(BaseMessageChunk, full).response_metadata["logprobs"]
399401

libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py

+1
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ def _check_response(response: Optional[BaseMessage]) -> None:
4747
assert response.usage_metadata["output_tokens"] > 0
4848
assert response.usage_metadata["total_tokens"] > 0
4949
assert response.response_metadata["model_name"]
50+
assert response.response_metadata["service_tier"]
5051
for tool_output in response.additional_kwargs["tool_outputs"]:
5152
assert tool_output["id"]
5253
assert tool_output["status"]

0 commit comments

Comments (0)