Commit a423a7e

docs(api): updates to API spec

Parent: acf730e

8 files changed: +163 -63 lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions

@@ -1,4 +1,4 @@
 configured_endpoints: 31
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-8de5485a6892144049ab388a3dc5378c3319cbbe551c3db1e512bb620ab414ec.yml
-openapi_spec_hash: c93452a50c9ef635a5fae65830c3cc81
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-a0e83ebfd81856b538d16c7a41ccdc11687ee558e1435a40f3bb7e0d6e1ab7c8.yml
+openapi_spec_hash: 8ac62303a9158c13f344975cb0786bc6
 config_hash: 1f6d0bf7309d0007e28ab85b89a0de85

README.md

Lines changed: 6 additions & 3 deletions

@@ -254,10 +254,13 @@ client = Writer()
 
 chat_completion = client.chat.chat(
     messages=[{"role": "user"}],
-    model="palmyra-x-004",
-    stream_options={"include_usage": True},
+    model="model",
+    response_format={
+        "type": "text",
+        "json_schema": {},
+    },
 )
-print(chat_completion.stream_options)
+print(chat_completion.response_format)
 ```
 
 ## File uploads
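The updated README snippet uses codegen placeholders (model="model", an empty json_schema). For readers trying the new parameter, here is a minimal sketch with concrete values; it assumes WRITER_API_KEY is set in the environment, and the schema contents plus the choices[0].message.content access path are illustrative assumptions, not taken from this diff:

from writerai import Writer

client = Writer()  # reads WRITER_API_KEY from the environment

# Request a structured response validated against a JSON schema.
# The schema below is hypothetical and only for illustration.
chat_completion = client.chat.chat(
    messages=[{"role": "user", "content": "List three primary colors."}],
    model="palmyra-x-004",  # the new docs note response_format is available with palmyra-x-004
    response_format={
        "type": "json_schema",
        "json_schema": {
            "type": "object",
            "properties": {"colors": {"type": "array", "items": {"type": "string"}}},
        },
    },
)

print(chat_completion.choices[0].message.content)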

src/writerai/resources/chat.py

Lines changed: 72 additions & 14 deletions

@@ -55,10 +55,11 @@ def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
@@ -84,7 +85,8 @@ def chat(
               the model to respond to. The array must contain at least one message.
 
           model: The [ID of the model](https://dev.writer.com/home/models) to use for creating
-              the chat completion.
+              the chat completion. Supports `palmyra-x-004`, `palmyra-fin`, `palmyra-med`,
+              `palmyra-creative`, and `palmyra-x-003-instruct`.
 
           logprobs: Specifies whether to return log probabilities of the output tokens.
 
@@ -96,6 +98,13 @@ def chat(
               single request. This parameter allows for generating multiple responses,
               offering a variety of potential replies from which to choose.
 
+          response_format: The response format to use for the chat completion, available with
+              `palmyra-x-004`.
+
+              `text` is the default response format. [JSON Schema](https://json-schema.org/)
+              is supported for structured responses. If you specify `json_schema`, you must
+              also provide a `json_schema` object.
+
           stop: A token or sequence of tokens that, when generated, will cause the model to stop
               producing further content. This can be a single token or an array of tokens,
               acting as a signal to end the output.
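The hunks above loosen model from a Literal of five IDs to a plain str, moving the supported IDs into the docstring, and document the new response_format parameter. A hedged sketch of what the looser type permits; the message content is illustrative and WRITER_API_KEY is assumed to be set:

from writerai import Writer

client = Writer()  # assumes WRITER_API_KEY is set

# `model` is now a plain str, so any ID from the models page type-checks;
# the docstring lists the five palmyra models as supported values.
completion = client.chat.chat(
    messages=[{"role": "user", "content": "Summarize the key risks."}],
    model="palmyra-fin",
)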
@@ -142,11 +151,12 @@ def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         stream: Literal[True],
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
@@ -171,7 +181,8 @@ def chat(
               the model to respond to. The array must contain at least one message.
 
           model: The [ID of the model](https://dev.writer.com/home/models) to use for creating
-              the chat completion.
+              the chat completion. Supports `palmyra-x-004`, `palmyra-fin`, `palmyra-med`,
+              `palmyra-creative`, and `palmyra-x-003-instruct`.
 
           stream: Indicates whether the response should be streamed incrementally as it is
               generated or only returned once fully complete. Streaming can be useful for
@@ -187,6 +198,13 @@ def chat(
               single request. This parameter allows for generating multiple responses,
               offering a variety of potential replies from which to choose.
 
+          response_format: The response format to use for the chat completion, available with
+              `palmyra-x-004`.
+
+              `text` is the default response format. [JSON Schema](https://json-schema.org/)
+              is supported for structured responses. If you specify `json_schema`, you must
+              also provide a `json_schema` object.
+
           stop: A token or sequence of tokens that, when generated, will cause the model to stop
               producing further content. This can be a single token or an array of tokens,
               acting as a signal to end the output.
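This overload is selected by passing stream=True (typed Literal[True]) and returns an iterable of incremental chunks rather than one completed response. A sketch of consuming it; the chunk attributes (choices[0].delta.content) follow the SDK's OpenAI-style response shape and are an assumption here, not shown in this diff:

from writerai import Writer

client = Writer()  # assumes WRITER_API_KEY is set

# stream=True selects the streaming overload, which yields chunks
# as the completion is generated instead of one final object.
stream = client.chat.chat(
    messages=[{"role": "user", "content": "Write a haiku about diffs."}],
    model="palmyra-creative",
    stream=True,
)

for chunk in stream:
    # Chunk shape assumed from the SDK's OpenAI-style responses.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)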
@@ -229,11 +247,12 @@ def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         stream: bool,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
@@ -258,7 +277,8 @@ def chat(
               the model to respond to. The array must contain at least one message.
 
           model: The [ID of the model](https://dev.writer.com/home/models) to use for creating
-              the chat completion.
+              the chat completion. Supports `palmyra-x-004`, `palmyra-fin`, `palmyra-med`,
+              `palmyra-creative`, and `palmyra-x-003-instruct`.
 
           stream: Indicates whether the response should be streamed incrementally as it is
               generated or only returned once fully complete. Streaming can be useful for
@@ -274,6 +294,13 @@ def chat(
               single request. This parameter allows for generating multiple responses,
               offering a variety of potential replies from which to choose.
 
+          response_format: The response format to use for the chat completion, available with
+              `palmyra-x-004`.
+
+              `text` is the default response format. [JSON Schema](https://json-schema.org/)
+              is supported for structured responses. If you specify `json_schema`, you must
+              also provide a `json_schema` object.
+
           stop: A token or sequence of tokens that, when generated, will cause the model to stop
               producing further content. This can be a single token or an array of tokens,
               acting as a signal to end the output.
@@ -316,10 +343,11 @@ def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
@@ -343,6 +371,7 @@ def chat(
                     "logprobs": logprobs,
                     "max_tokens": max_tokens,
                     "n": n,
+                    "response_format": response_format,
                     "stop": stop,
                     "stream": stream,
                     "stream_options": stream_options,
@@ -457,10 +486,11 @@ async def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
@@ -486,7 +516,8 @@ async def chat(
               the model to respond to. The array must contain at least one message.
 
           model: The [ID of the model](https://dev.writer.com/home/models) to use for creating
-              the chat completion.
+              the chat completion. Supports `palmyra-x-004`, `palmyra-fin`, `palmyra-med`,
+              `palmyra-creative`, and `palmyra-x-003-instruct`.
 
           logprobs: Specifies whether to return log probabilities of the output tokens.
 
@@ -498,6 +529,13 @@ async def chat(
               single request. This parameter allows for generating multiple responses,
               offering a variety of potential replies from which to choose.
 
+          response_format: The response format to use for the chat completion, available with
+              `palmyra-x-004`.
+
+              `text` is the default response format. [JSON Schema](https://json-schema.org/)
+              is supported for structured responses. If you specify `json_schema`, you must
+              also provide a `json_schema` object.
+
           stop: A token or sequence of tokens that, when generated, will cause the model to stop
               producing further content. This can be a single token or an array of tokens,
               acting as a signal to end the output.
@@ -544,11 +582,12 @@ async def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         stream: Literal[True],
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
@@ -573,7 +612,8 @@ async def chat(
               the model to respond to. The array must contain at least one message.
 
           model: The [ID of the model](https://dev.writer.com/home/models) to use for creating
-              the chat completion.
+              the chat completion. Supports `palmyra-x-004`, `palmyra-fin`, `palmyra-med`,
+              `palmyra-creative`, and `palmyra-x-003-instruct`.
 
           stream: Indicates whether the response should be streamed incrementally as it is
               generated or only returned once fully complete. Streaming can be useful for
@@ -589,6 +629,13 @@ async def chat(
               single request. This parameter allows for generating multiple responses,
               offering a variety of potential replies from which to choose.
 
+          response_format: The response format to use for the chat completion, available with
+              `palmyra-x-004`.
+
+              `text` is the default response format. [JSON Schema](https://json-schema.org/)
+              is supported for structured responses. If you specify `json_schema`, you must
+              also provide a `json_schema` object.
+
           stop: A token or sequence of tokens that, when generated, will cause the model to stop
               producing further content. This can be a single token or an array of tokens,
               acting as a signal to end the output.
@@ -631,11 +678,12 @@ async def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         stream: bool,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
@@ -660,7 +708,8 @@ async def chat(
               the model to respond to. The array must contain at least one message.
 
           model: The [ID of the model](https://dev.writer.com/home/models) to use for creating
-              the chat completion.
+              the chat completion. Supports `palmyra-x-004`, `palmyra-fin`, `palmyra-med`,
+              `palmyra-creative`, and `palmyra-x-003-instruct`.
 
           stream: Indicates whether the response should be streamed incrementally as it is
               generated or only returned once fully complete. Streaming can be useful for
@@ -676,6 +725,13 @@ async def chat(
               single request. This parameter allows for generating multiple responses,
               offering a variety of potential replies from which to choose.
 
+          response_format: The response format to use for the chat completion, available with
+              `palmyra-x-004`.
+
+              `text` is the default response format. [JSON Schema](https://json-schema.org/)
+              is supported for structured responses. If you specify `json_schema`, you must
+              also provide a `json_schema` object.
+
           stop: A token or sequence of tokens that, when generated, will cause the model to stop
               producing further content. This can be a single token or an array of tokens,
               acting as a signal to end the output.
@@ -718,10 +774,11 @@ async def chat(
         self,
         *,
         messages: Iterable[chat_chat_params.Message],
-        model: Literal["palmyra-x-004", "palmyra-fin", "palmyra-med", "palmyra-creative", "palmyra-x-003-instruct"],
+        model: str,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
+        response_format: chat_chat_params.ResponseFormat | NotGiven = NOT_GIVEN,
         stop: Union[List[str], str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: chat_chat_params.StreamOptions | NotGiven = NOT_GIVEN,
@@ -745,6 +802,7 @@ async def chat(
                     "logprobs": logprobs,
                     "max_tokens": max_tokens,
                     "n": n,
+                    "response_format": response_format,
                     "stop": stop,
                     "stream": stream,
                     "stream_options": stream_options,
