
Commit a12936b

feat(api): make model required for the responses/compact endpoint
1 parent f20a9a1 commit a12936b

File tree

4 files changed: +141 -129 lines changed


.stats.yml

Lines changed: 3 additions & 3 deletions

@@ -1,4 +1,4 @@
 configured_endpoints: 137
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fe8a79e6fd407e6c9afec60971f03076b65f711ccd6ea16457933b0e24fb1f6d.yml
-openapi_spec_hash: 38c0a73f4e08843732c5f8002a809104
-config_hash: 2c350086d87a4b4532077363087840e7
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-88d85ff87ad8983262af2b729762a6e05fd509468bb691529bc2f81e4ce27c69.yml
+openapi_spec_hash: 46a55acbccd0147534017b92c1f4dd99
+config_hash: 141b101c9f13b90e21af74e1686f1f41

src/openai/resources/responses/responses.py

Lines changed: 20 additions & 22 deletions

@@ -1526,8 +1526,6 @@ def cancel(
     def compact(
         self,
         *,
-        input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
-        instructions: Optional[str] | Omit = omit,
         model: Union[
             Literal[
                 "gpt-5.1",
@@ -1614,8 +1612,9 @@ def compact(
             ],
             str,
             None,
-        ]
-        | Omit = omit,
+        ],
+        input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
+        instructions: Optional[str] | Omit = omit,
         previous_response_id: Optional[str] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1628,19 +1627,19 @@ def compact(
         Compact conversation

         Args:
+          model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           input: Text, image, or file inputs to the model, used to generate a response

           instructions: A system (or developer) message inserted into the model's context. When used
               along with `previous_response_id`, the instructions from a previous response
               will not be carried over to the next response. This makes it simple to swap out
               system (or developer) messages in new responses.

-          model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -1658,9 +1657,9 @@ def compact(
             "/responses/compact",
             body=maybe_transform(
                 {
+                    "model": model,
                     "input": input,
                     "instructions": instructions,
-                    "model": model,
                     "previous_response_id": previous_response_id,
                 },
                 response_compact_params.ResponseCompactParams,
@@ -3140,8 +3139,6 @@ async def cancel(
     async def compact(
         self,
         *,
-        input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
-        instructions: Optional[str] | Omit = omit,
         model: Union[
             Literal[
                 "gpt-5.1",
@@ -3228,8 +3225,9 @@ async def compact(
             ],
             str,
             None,
-        ]
-        | Omit = omit,
+        ],
+        input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
+        instructions: Optional[str] | Omit = omit,
         previous_response_id: Optional[str] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -3242,19 +3240,19 @@ async def compact(
         Compact conversation

         Args:
+          model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           input: Text, image, or file inputs to the model, used to generate a response

           instructions: A system (or developer) message inserted into the model's context. When used
               along with `previous_response_id`, the instructions from a previous response
               will not be carried over to the next response. This makes it simple to swap out
               system (or developer) messages in new responses.

-          model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -3272,9 +3270,9 @@ async def compact(
             "/responses/compact",
             body=await async_maybe_transform(
                 {
+                    "model": model,
                     "input": input,
                     "instructions": instructions,
-                    "model": model,
                     "previous_response_id": previous_response_id,
                 },
                 response_compact_params.ResponseCompactParams,
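
With this change `model` becomes a required keyword-only argument of `compact()` on both the sync and async Responses resources, while `input`, `instructions`, and `previous_response_id` keep their `omit` defaults. A minimal usage sketch against the updated sync signature follows; the client setup and the argument values are illustrative assumptions, not taken from this diff:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# `model` now has no default, so leaving it out raises a TypeError in the SDK
# instead of reaching the API as an omitted field.
compacted = client.responses.compact(
    model="gpt-5.1",                     # required as of this commit
    previous_response_id="resp_abc123",  # hypothetical ID; still optional
    instructions="Keep only decisions and open questions.",  # optional
)
print(compacted)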

src/openai/types/responses/response_compact_params.py

Lines changed: 98 additions & 96 deletions

@@ -3,14 +3,111 @@
 from __future__ import annotations

 from typing import Union, Iterable, Optional
-from typing_extensions import Literal, TypedDict
+from typing_extensions import Literal, Required, TypedDict

 from .response_input_item_param import ResponseInputItemParam

 __all__ = ["ResponseCompactParams"]


 class ResponseCompactParams(TypedDict, total=False):
+    model: Required[
+        Union[
+            Literal[
+                "gpt-5.1",
+                "gpt-5.1-2025-11-13",
+                "gpt-5.1-codex",
+                "gpt-5.1-mini",
+                "gpt-5.1-chat-latest",
+                "gpt-5",
+                "gpt-5-mini",
+                "gpt-5-nano",
+                "gpt-5-2025-08-07",
+                "gpt-5-mini-2025-08-07",
+                "gpt-5-nano-2025-08-07",
+                "gpt-5-chat-latest",
+                "gpt-4.1",
+                "gpt-4.1-mini",
+                "gpt-4.1-nano",
+                "gpt-4.1-2025-04-14",
+                "gpt-4.1-mini-2025-04-14",
+                "gpt-4.1-nano-2025-04-14",
+                "o4-mini",
+                "o4-mini-2025-04-16",
+                "o3",
+                "o3-2025-04-16",
+                "o3-mini",
+                "o3-mini-2025-01-31",
+                "o1",
+                "o1-2024-12-17",
+                "o1-preview",
+                "o1-preview-2024-09-12",
+                "o1-mini",
+                "o1-mini-2024-09-12",
+                "gpt-4o",
+                "gpt-4o-2024-11-20",
+                "gpt-4o-2024-08-06",
+                "gpt-4o-2024-05-13",
+                "gpt-4o-audio-preview",
+                "gpt-4o-audio-preview-2024-10-01",
+                "gpt-4o-audio-preview-2024-12-17",
+                "gpt-4o-audio-preview-2025-06-03",
+                "gpt-4o-mini-audio-preview",
+                "gpt-4o-mini-audio-preview-2024-12-17",
+                "gpt-4o-search-preview",
+                "gpt-4o-mini-search-preview",
+                "gpt-4o-search-preview-2025-03-11",
+                "gpt-4o-mini-search-preview-2025-03-11",
+                "chatgpt-4o-latest",
+                "codex-mini-latest",
+                "gpt-4o-mini",
+                "gpt-4o-mini-2024-07-18",
+                "gpt-4-turbo",
+                "gpt-4-turbo-2024-04-09",
+                "gpt-4-0125-preview",
+                "gpt-4-turbo-preview",
+                "gpt-4-1106-preview",
+                "gpt-4-vision-preview",
+                "gpt-4",
+                "gpt-4-0314",
+                "gpt-4-0613",
+                "gpt-4-32k",
+                "gpt-4-32k-0314",
+                "gpt-4-32k-0613",
+                "gpt-3.5-turbo",
+                "gpt-3.5-turbo-16k",
+                "gpt-3.5-turbo-0301",
+                "gpt-3.5-turbo-0613",
+                "gpt-3.5-turbo-1106",
+                "gpt-3.5-turbo-0125",
+                "gpt-3.5-turbo-16k-0613",
+                "o1-pro",
+                "o1-pro-2025-03-19",
+                "o3-pro",
+                "o3-pro-2025-06-10",
+                "o3-deep-research",
+                "o3-deep-research-2025-06-26",
+                "o4-mini-deep-research",
+                "o4-mini-deep-research-2025-06-26",
+                "computer-use-preview",
+                "computer-use-preview-2025-03-11",
+                "gpt-5-codex",
+                "gpt-5-pro",
+                "gpt-5-pro-2025-10-06",
+                "gpt-5.1-codex-max",
+            ],
+            str,
+            None,
+        ]
+    ]
+    """Model ID used to generate the response, like `gpt-5` or `o3`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
     input: Union[str, Iterable[ResponseInputItemParam], None]
     """Text, image, or file inputs to the model, used to generate a response"""

@@ -22,101 +119,6 @@ class ResponseCompactParams(TypedDict, total=False):
     system (or developer) messages in new responses.
     """

-    model: Union[
-        Literal[
-            "gpt-5.1",
-            "gpt-5.1-2025-11-13",
-            "gpt-5.1-codex",
-            "gpt-5.1-mini",
-            "gpt-5.1-chat-latest",
-            "gpt-5",
-            "gpt-5-mini",
-            "gpt-5-nano",
-            "gpt-5-2025-08-07",
-            "gpt-5-mini-2025-08-07",
-            "gpt-5-nano-2025-08-07",
-            "gpt-5-chat-latest",
-            "gpt-4.1",
-            "gpt-4.1-mini",
-            "gpt-4.1-nano",
-            "gpt-4.1-2025-04-14",
-            "gpt-4.1-mini-2025-04-14",
-            "gpt-4.1-nano-2025-04-14",
-            "o4-mini",
-            "o4-mini-2025-04-16",
-            "o3",
-            "o3-2025-04-16",
-            "o3-mini",
-            "o3-mini-2025-01-31",
-            "o1",
-            "o1-2024-12-17",
-            "o1-preview",
-            "o1-preview-2024-09-12",
-            "o1-mini",
-            "o1-mini-2024-09-12",
-            "gpt-4o",
-            "gpt-4o-2024-11-20",
-            "gpt-4o-2024-08-06",
-            "gpt-4o-2024-05-13",
-            "gpt-4o-audio-preview",
-            "gpt-4o-audio-preview-2024-10-01",
-            "gpt-4o-audio-preview-2024-12-17",
-            "gpt-4o-audio-preview-2025-06-03",
-            "gpt-4o-mini-audio-preview",
-            "gpt-4o-mini-audio-preview-2024-12-17",
-            "gpt-4o-search-preview",
-            "gpt-4o-mini-search-preview",
-            "gpt-4o-search-preview-2025-03-11",
-            "gpt-4o-mini-search-preview-2025-03-11",
-            "chatgpt-4o-latest",
-            "codex-mini-latest",
-            "gpt-4o-mini",
-            "gpt-4o-mini-2024-07-18",
-            "gpt-4-turbo",
-            "gpt-4-turbo-2024-04-09",
-            "gpt-4-0125-preview",
-            "gpt-4-turbo-preview",
-            "gpt-4-1106-preview",
-            "gpt-4-vision-preview",
-            "gpt-4",
-            "gpt-4-0314",
-            "gpt-4-0613",
-            "gpt-4-32k",
-            "gpt-4-32k-0314",
-            "gpt-4-32k-0613",
-            "gpt-3.5-turbo",
-            "gpt-3.5-turbo-16k",
-            "gpt-3.5-turbo-0301",
-            "gpt-3.5-turbo-0613",
-            "gpt-3.5-turbo-1106",
-            "gpt-3.5-turbo-0125",
-            "gpt-3.5-turbo-16k-0613",
-            "o1-pro",
-            "o1-pro-2025-03-19",
-            "o3-pro",
-            "o3-pro-2025-06-10",
-            "o3-deep-research",
-            "o3-deep-research-2025-06-26",
-            "o4-mini-deep-research",
-            "o4-mini-deep-research-2025-06-26",
-            "computer-use-preview",
-            "computer-use-preview-2025-03-11",
-            "gpt-5-codex",
-            "gpt-5-pro",
-            "gpt-5-pro-2025-10-06",
-            "gpt-5.1-codex-max",
-        ],
-        str,
-        None,
-    ]
-    """Model ID used to generate the response, like `gpt-5` or `o3`.
-
-    OpenAI offers a wide range of models with different capabilities, performance
-    characteristics, and price points. Refer to the
-    [model guide](https://platform.openai.com/docs/models) to browse and compare
-    available models.
-    """
-
     previous_response_id: Optional[str]
     """The unique ID of the previous response to the model.

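On the types side, `ResponseCompactParams` stays `total=False` but now wraps `model` in `Required[...]`, making it the one key static type checkers treat as mandatory. A small sketch of what that means when building the params dict by hand; the values and the direct module import are illustrative assumptions (the package may also re-export the type):

from openai.types.responses.response_compact_params import ResponseCompactParams

# With total=False every key defaults to optional; Required[...] makes `model`
# the exception. mypy/pyright flag this assignment if "model" is omitted,
# though nothing is enforced at runtime for a plain dict.
params: ResponseCompactParams = {
    "model": "gpt-5.1",
    "previous_response_id": "resp_abc123",  # hypothetical ID; optional
}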