Skip to content

Commit c4fadb4

Browse files
committed
feat(api): add optional name argument + improve docs (#569)
1 parent 068bd13 commit c4fadb4

File tree

10 files changed

+91
-78
lines changed

10 files changed

+91
-78
lines changed

src/lib/AbstractChatCompletionRunner.ts

+3-5
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@ import {
66
type ChatCompletionMessage,
77
type ChatCompletionMessageParam,
88
type ChatCompletionCreateParams,
9-
type ChatCompletionAssistantMessageParam,
109
type ChatCompletionTool,
1110
} from 'openai/resources/chat/completions';
1211
import { APIUserAbortError, OpenAIError } from 'openai/error';
@@ -90,7 +89,6 @@ export abstract class AbstractChatCompletionRunner<
9089
}
9190

9291
protected _addMessage(message: ChatCompletionMessageParam, emit = true) {
93-
// @ts-expect-error this works around a bug in the Azure OpenAI API in which `content` is missing instead of null.
9492
if (!('content' in message)) message.content = null;
9593

9694
this.messages.push(message);
@@ -217,7 +215,7 @@ export abstract class AbstractChatCompletionRunner<
217215
}
218216

219217
#getFinalContent(): string | null {
220-
return this.#getFinalMessage().content;
218+
return this.#getFinalMessage().content ?? null;
221219
}
222220

223221
/**
@@ -229,12 +227,12 @@ export abstract class AbstractChatCompletionRunner<
229227
return this.#getFinalContent();
230228
}
231229

232-
#getFinalMessage(): ChatCompletionAssistantMessageParam {
230+
#getFinalMessage(): ChatCompletionMessage {
233231
let i = this.messages.length;
234232
while (i-- > 0) {
235233
const message = this.messages[i];
236234
if (isAssistantMessage(message)) {
237-
return message;
235+
return { ...message, content: message.content ?? null };
238236
}
239237
}
240238
throw new OpenAIError('stream ended without producing a ChatCompletionMessage with role=assistant');

src/resources/audio/speech.ts

+3-1
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,9 @@ export interface SpeechCreateParams {
2828

2929
/**
3030
* The voice to use when generating the audio. Supported voices are `alloy`,
31-
* `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
31+
* `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
32+
* available in the
33+
* [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
3234
*/
3335
voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
3436

src/resources/chat/completions.ts

+56-35
Original file line numberDiff line numberDiff line change
@@ -105,21 +105,28 @@ export namespace ChatCompletion {
105105

106106
export interface ChatCompletionAssistantMessageParam {
107107
/**
108-
* The contents of the assistant message.
108+
* The role of the messages author, in this case `assistant`.
109109
*/
110-
content: string | null;
110+
role: 'assistant';
111111

112112
/**
113-
* The role of the messages author, in this case `assistant`.
113+
* The contents of the assistant message. Required unless `tool_calls` or
114+
* `function_call` is specified.
114115
*/
115-
role: 'assistant';
116+
content?: string | null;
116117

117118
/**
118119
* Deprecated and replaced by `tool_calls`. The name and arguments of a function
119120
* that should be called, as generated by the model.
120121
*/
121122
function_call?: ChatCompletionAssistantMessageParam.FunctionCall;
122123

124+
/**
125+
* An optional name for the participant. Provides the model information to
126+
* differentiate between participants of the same role.
127+
*/
128+
name?: string;
129+
123130
/**
124131
* The tool calls generated by the model, such as function calls.
125132
*/
@@ -309,7 +316,8 @@ export namespace ChatCompletionContentPartImage {
309316
url: string;
310317

311318
/**
312-
* Specifies the detail level of the image.
319+
* Specifies the detail level of the image. Learn more in the
320+
* [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
313321
*/
314322
detail?: 'auto' | 'low' | 'high';
315323
}
@@ -340,9 +348,9 @@ export interface ChatCompletionFunctionCallOption {
340348

341349
export interface ChatCompletionFunctionMessageParam {
342350
/**
343-
* The return value from the function call, to return to the model.
351+
* The contents of the function message.
344352
*/
345-
content: string | null;
353+
content: string;
346354

347355
/**
348356
* The name of the function to call.
@@ -451,12 +459,12 @@ export namespace ChatCompletionMessageToolCall {
451459
* function.
452460
*/
453461
export interface ChatCompletionNamedToolChoice {
454-
function?: ChatCompletionNamedToolChoice.Function;
462+
function: ChatCompletionNamedToolChoice.Function;
455463

456464
/**
457465
* The type of the tool. Currently, only `function` is supported.
458466
*/
459-
type?: 'function';
467+
type: 'function';
460468
}
461469

462470
export namespace ChatCompletionNamedToolChoice {
@@ -477,12 +485,18 @@ export interface ChatCompletionSystemMessageParam {
477485
/**
478486
* The contents of the system message.
479487
*/
480-
content: string | null;
488+
content: string;
481489

482490
/**
483491
* The role of the messages author, in this case `system`.
484492
*/
485493
role: 'system';
494+
495+
/**
496+
* An optional name for the participant. Provides the model information to
497+
* differentiate between participants of the same role.
498+
*/
499+
name?: string;
486500
}
487501

488502
export interface ChatCompletionTool {
@@ -511,7 +525,7 @@ export interface ChatCompletionToolMessageParam {
511525
/**
512526
* The contents of the tool message.
513527
*/
514-
content: string | null;
528+
content: string;
515529

516530
/**
517531
* The role of the messages author, in this case `tool`.
@@ -528,12 +542,18 @@ export interface ChatCompletionUserMessageParam {
528542
/**
529543
* The contents of the user message.
530544
*/
531-
content: string | Array<ChatCompletionContentPart> | null;
545+
content: string | Array<ChatCompletionContentPart>;
532546

533547
/**
534548
* The role of the messages author, in this case `user`.
535549
*/
536550
role: 'user';
551+
552+
/**
553+
* An optional name for the participant. Provides the model information to
554+
* differentiate between participants of the same role.
555+
*/
556+
name?: string;
537557
}
538558

539559
/**
@@ -567,19 +587,19 @@ export interface ChatCompletionCreateParamsBase {
567587
| 'gpt-4-32k'
568588
| 'gpt-4-32k-0314'
569589
| 'gpt-4-32k-0613'
570-
| 'gpt-3.5-turbo-1106'
571590
| 'gpt-3.5-turbo'
572591
| 'gpt-3.5-turbo-16k'
573592
| 'gpt-3.5-turbo-0301'
574593
| 'gpt-3.5-turbo-0613'
594+
| 'gpt-3.5-turbo-1106'
575595
| 'gpt-3.5-turbo-16k-0613';
576596

577597
/**
578598
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their
579599
* existing frequency in the text so far, decreasing the model's likelihood to
580600
* repeat the same line verbatim.
581601
*
582-
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
602+
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
583603
*/
584604
frequency_penalty?: number | null;
585605

@@ -627,7 +647,9 @@ export interface ChatCompletionCreateParamsBase {
627647
max_tokens?: number | null;
628648

629649
/**
630-
* How many chat completion choices to generate for each input message.
650+
* How many chat completion choices to generate for each input message. Note that
651+
* you will be charged based on the number of generated tokens across all of the
652+
* choices. Keep `n` as `1` to minimize costs.
631653
*/
632654
n?: number | null;
633655

@@ -636,7 +658,7 @@ export interface ChatCompletionCreateParamsBase {
636658
* whether they appear in the text so far, increasing the model's likelihood to
637659
* talk about new topics.
638660
*
639-
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
661+
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
640662
*/
641663
presence_penalty?: number | null;
642664

@@ -649,10 +671,10 @@ export interface ChatCompletionCreateParamsBase {
649671
* **Important:** when using JSON mode, you **must** also instruct the model to
650672
* produce JSON yourself via a system or user message. Without this, the model may
651673
* generate an unending stream of whitespace until the generation reaches the token
652-
* limit, resulting in increased latency and appearance of a "stuck" request. Also
653-
* note that the message content may be partially cut off if
654-
* `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
655-
* or the conversation exceeded the max context length.
674+
* limit, resulting in a long-running and seemingly "stuck" request. Also note that
675+
* the message content may be partially cut off if `finish_reason="length"`, which
676+
* indicates the generation exceeded `max_tokens` or the conversation exceeded the
677+
* max context length.
656678
*/
657679
response_format?: ChatCompletionCreateParams.ResponseFormat;
658680

@@ -734,23 +756,22 @@ export namespace ChatCompletionCreateParams {
734756
*/
735757
name: string;
736758

759+
/**
760+
* A description of what the function does, used by the model to choose when and
761+
* how to call the function.
762+
*/
763+
description?: string;
764+
737765
/**
738766
* The parameters the functions accepts, described as a JSON Schema object. See the
739-
* [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
740-
* examples, and the
767+
* [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
768+
* for examples, and the
741769
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
742770
* documentation about the format.
743771
*
744-
* To describe a function that accepts no parameters, provide the value
745-
* `{"type": "object", "properties": {}}`.
746-
*/
747-
parameters: Shared.FunctionParameters;
748-
749-
/**
750-
* A description of what the function does, used by the model to choose when and
751-
* how to call the function.
772+
* Omitting `parameters` defines a function with an empty parameter list.
752773
*/
753-
description?: string;
774+
parameters?: Shared.FunctionParameters;
754775
}
755776

756777
/**
@@ -762,10 +783,10 @@ export namespace ChatCompletionCreateParams {
762783
* **Important:** when using JSON mode, you **must** also instruct the model to
763784
* produce JSON yourself via a system or user message. Without this, the model may
764785
* generate an unending stream of whitespace until the generation reaches the token
765-
* limit, resulting in increased latency and appearance of a "stuck" request. Also
766-
* note that the message content may be partially cut off if
767-
* `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
768-
* or the conversation exceeded the max context length.
786+
* limit, resulting in a long-running and seemingly "stuck" request. Also note that
787+
* the message content may be partially cut off if `finish_reason="length"`, which
788+
* indicates the generation exceeded `max_tokens` or the conversation exceeded the
789+
* max context length.
769790
*/
770791
export interface ResponseFormat {
771792
/**

src/resources/completions.ts

+2-2
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,7 @@ export interface CompletionCreateParamsBase {
177177
* existing frequency in the text so far, decreasing the model's likelihood to
178178
* repeat the same line verbatim.
179179
*
180-
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
180+
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
181181
*/
182182
frequency_penalty?: number | null;
183183

@@ -232,7 +232,7 @@ export interface CompletionCreateParamsBase {
232232
* whether they appear in the text so far, increasing the model's likelihood to
233233
* talk about new topics.
234234
*
235-
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
235+
* [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
236236
*/
237237
presence_penalty?: number | null;
238238

src/resources/embeddings.ts

+2-1
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,8 @@ export interface EmbeddingCreateParams {
8282
* Input text to embed, encoded as a string or array of tokens. To embed multiple
8383
* inputs in a single request, pass an array of strings or array of token arrays.
8484
* The input must not exceed the max input tokens for the model (8192 tokens for
85-
* `text-embedding-ada-002`) and cannot be an empty string.
85+
* `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
86+
* dimensions or less.
8687
* [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
8788
* for counting tokens.
8889
*/

src/resources/files.ts

+3-3
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,10 @@ import { Page } from 'openai/pagination';
1212

1313
export class Files extends APIResource {
1414
/**
15-
* Upload a file that can be used across various endpoints/features. The size of
16-
* all the files uploaded by one organization can be up to 100 GB.
15+
* Upload a file that can be used across various endpoints. The size of all the
16+
* files uploaded by one organization can be up to 100 GB.
1717
*
18-
* The size of individual files for can be a maximum of 512MB. See the
18+
* The size of individual files can be a maximum of 512 MB. See the
1919
* [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
2020
* learn more about the types of files supported. The Fine-tuning API only supports
2121
* `.jsonl` files.

src/resources/shared.ts

+13-15
Original file line numberDiff line numberDiff line change
@@ -7,33 +7,31 @@ export interface FunctionDefinition {
77
*/
88
name: string;
99

10+
/**
11+
* A description of what the function does, used by the model to choose when and
12+
* how to call the function.
13+
*/
14+
description?: string;
15+
1016
/**
1117
* The parameters the functions accepts, described as a JSON Schema object. See the
12-
* [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
13-
* examples, and the
18+
* [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
19+
* for examples, and the
1420
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
1521
* documentation about the format.
1622
*
17-
* To describe a function that accepts no parameters, provide the value
18-
* `{"type": "object", "properties": {}}`.
23+
* Omitting `parameters` defines a function with an empty parameter list.
1924
*/
20-
parameters: FunctionParameters;
21-
22-
/**
23-
* A description of what the function does, used by the model to choose when and
24-
* how to call the function.
25-
*/
26-
description?: string;
25+
parameters?: FunctionParameters;
2726
}
2827

2928
/**
3029
* The parameters the functions accepts, described as a JSON Schema object. See the
31-
* [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
32-
* examples, and the
30+
* [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
31+
* for examples, and the
3332
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
3433
* documentation about the format.
3534
*
36-
* To describe a function that accepts no parameters, provide the value
37-
* `{"type": "object", "properties": {}}`.
35+
* Omitting `parameters` defines a function with an empty parameter list.
3836
*/
3937
export type FunctionParameters = Record<string, unknown>;

tests/api-resources/beta/assistants/files.test.ts

+2-6
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,7 @@ const openai = new OpenAI({
1010

1111
describe('resource files', () => {
1212
test('create: only required params', async () => {
13-
const responsePromise = openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
14-
file_id: 'string',
15-
});
13+
const responsePromise = openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
1614
const rawResponse = await responsePromise.asResponse();
1715
expect(rawResponse).toBeInstanceOf(Response);
1816
const response = await responsePromise;
@@ -23,9 +21,7 @@ describe('resource files', () => {
2321
});
2422

2523
test('create: required and optional params', async () => {
26-
const response = await openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
27-
file_id: 'string',
28-
});
24+
const response = await openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
2925
});
3026

3127
test('retrieve', async () => {

Comments: 0 commit comments