File tree (3 files changed: +10 -2 lines changed):
  langchain_openai/chat_models
  tests/integration_tests/chat_models

langchain_openai/chat_models

@@ -736,6 +736,8 @@ def _convert_chunk_to_generation_chunk(
                 generation_info["model_name"] = model_name
             if system_fingerprint := chunk.get("system_fingerprint"):
                 generation_info["system_fingerprint"] = system_fingerprint
+            if service_tier := chunk.get("service_tier"):
+                generation_info["service_tier"] = service_tier
 
         logprobs = choice.get("logprobs")
         if logprobs:
@@ -1020,6 +1022,8 @@ def _create_chat_result(
         }
         if "id" in response_dict:
             llm_output["id"] = response_dict["id"]
+        if "service_tier" in response_dict:
+            llm_output["service_tier"] = response_dict["service_tier"]
 
         if isinstance(response, openai.BaseModel) and getattr(
             response, "choices", None
@@ -3243,6 +3247,7 @@ def _construct_lc_result_from_responses_api(
             "status",
             "user",
             "model",
+            "service_tier",
         )
     }
     if metadata:
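Taken together, these three hunks surface OpenAI's `service_tier` field in `response_metadata` on the chat completions path (streaming and non-streaming) and on the Responses API path. A minimal sketch of what a caller sees after this change (the model name is an illustrative assumption, and a valid `OPENAI_API_KEY` is assumed):

```python
# Minimal sketch: reading the service_tier surfaced by this change.
# Assumes a valid OPENAI_API_KEY; the model name is illustrative.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")
result = llm.invoke("Say hello.")

# The tier the request was actually served on (e.g. "default")
# now appears alongside model_name and system_fingerprint.
print(result.response_metadata.get("service_tier"))
```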
tests/integration_tests/chat_models

@@ -350,6 +350,7 @@ def test_response_metadata() -> None:
             "logprobs",
             "system_fingerprint",
             "finish_reason",
+            "service_tier",
         )
     )
     assert "content" in result.response_metadata["logprobs"]
@@ -367,6 +368,7 @@ async def test_async_response_metadata() -> None:
             "logprobs",
             "system_fingerprint",
             "finish_reason",
+            "service_tier",
         )
     )
     assert "content" in result.response_metadata["logprobs"]
@@ -380,7 +382,7 @@ def test_response_metadata_streaming() -> None:
         full = chunk if full is None else full + chunk
     assert all(
         k in cast(BaseMessageChunk, full).response_metadata
-        for k in ("logprobs", "finish_reason")
+        for k in ("logprobs", "finish_reason", "service_tier")
     )
     assert "content" in cast(BaseMessageChunk, full).response_metadata["logprobs"]
@@ -393,7 +395,7 @@ async def test_async_response_metadata_streaming() -> None:
         full = chunk if full is None else full + chunk
     assert all(
         k in cast(BaseMessageChunk, full).response_metadata
-        for k in ("logprobs", "finish_reason")
+        for k in ("logprobs", "finish_reason", "service_tier")
     )
     assert "content" in cast(BaseMessageChunk, full).response_metadata["logprobs"]
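The streaming tests merge chunks with `+` before asserting, because `service_tier` is attached to the generation info of the final chunk together with `finish_reason`. The same pattern outside the test suite, as a sketch (model name again illustrative):

```python
# Sketch of the aggregation pattern the streaming tests rely on:
# response_metadata is only complete on the merged message, since
# service_tier arrives with the final chunk alongside finish_reason.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")
full = None
for chunk in llm.stream("Say hello."):
    full = chunk if full is None else full + chunk

assert full is not None
print(full.response_metadata.get("service_tier"))
```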
tests/integration_tests/chat_models

@@ -47,6 +47,7 @@ def _check_response(response: Optional[BaseMessage]) -> None:
     assert response.usage_metadata["output_tokens"] > 0
     assert response.usage_metadata["total_tokens"] > 0
     assert response.response_metadata["model_name"]
+    assert response.response_metadata["service_tier"]
     for tool_output in response.additional_kwargs["tool_outputs"]:
         assert tool_output["id"]
         assert tool_output["status"]
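`_check_response` is shared by the Responses API integration tests, so this new assertion exercises the `_construct_lc_result_from_responses_api` hunk above. A sketch of the equivalent check outside the suite, assuming the `use_responses_api` flag that `ChatOpenAI` exposes for routing requests to the Responses API:

```python
# Sketch: service_tier via the Responses API path.
# use_responses_api is assumed to be the routing flag on ChatOpenAI;
# the model name is illustrative.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)
result = llm.invoke("Say hello.")

# "service_tier" is now in the allowlist of response fields copied
# into response_metadata by _construct_lc_result_from_responses_api.
assert result.response_metadata["service_tier"]
```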