feat: Add gpt-4o-2024-11-20 to model catalog in openai_dart (#614)
davidmigloz authored Dec 6, 2024
1 parent 315fe0f commit bf33308
Showing 9 changed files with 149 additions and 154 deletions.
6 changes: 5 additions & 1 deletion packages/langchain_openai/lib/src/chat_models/types.dart
@@ -23,9 +23,13 @@ import 'package:meta/meta.dart';
/// - `gpt-4o`
/// - `gpt-4o-2024-05-13`
/// - `gpt-4o-2024-08-06`
/// - `gpt-4o-2024-11-20`
/// - `gpt-4o-audio-preview`
/// - `gpt-4o-audio-preview-2024-10-01`
/// - `gpt-4o-mini`
/// - `gpt-4o-mini-2024-07-18`
/// - `gpt-4o-realtime-preview`
/// - `gpt-4o-realtime-preview-2024-10-01`
/// - `gpt-3.5-turbo`
/// - `gpt-3.5-turbo-16k`
/// - `gpt-3.5-turbo-16k-0613`
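
For context, the model id added to the list above can be selected through `ChatOpenAIOptions` in `langchain_openai`. A minimal, hedged sketch (it assumes the usual `ChatOpenAI` setup and the companion `langchain` package for imports; the API key is a placeholder):

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Hedged sketch: assumes the standard ChatOpenAI / ChatOpenAIOptions API.
  final chatModel = ChatOpenAI(
    apiKey: 'OPENAI_API_KEY', // placeholder, not a real key
    defaultOptions: const ChatOpenAIOptions(
      model: 'gpt-4o-2024-11-20', // model id added in this commit
    ),
  );
  final res = await chatModel.invoke(PromptValue.string('Hello!'));
  print(res.output.content);
  chatModel.close();
}
```

Any of the other ids in the list above can be passed the same way.
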
53 changes: 53 additions & 0 deletions packages/openai_dart/README.md
@@ -347,6 +347,59 @@ final res = await client.createChatCompletion(
// {"names":["John","Mary","Peter"]}
```

**Predicted Outputs:** ([docs](https://platform.openai.com/docs/guides/predicted-outputs))

> Predicted Outputs enable you to speed up API responses from Chat Completions when many of the output tokens are known ahead of time. This is most common when you are regenerating a text or code file with minor modifications.
```dart
const codeContent = '''
class User {
firstName: string = "";
lastName: string = "";
username: string = "";
}
export default User;
''';
const request = CreateChatCompletionRequest(
model: ChatCompletionModel.model(
ChatCompletionModels.gpt4o,
),
messages: [
ChatCompletionMessage.user(
content: ChatCompletionUserMessageContent.string(
'Replace the username property with an email property. '
'Respond only with code, and with no markdown formatting.',
),
),
ChatCompletionMessage.user(
content: ChatCompletionUserMessageContent.string(codeContent),
),
],
prediction: PredictionContent(
content: PredictionContentContent.text(codeContent),
),
);
final res1 = await client.createChatCompletion(request: request);
final choice1 = res1.choices.first;
print(choice1.message.content);
// class User {
// firstName: string = "";
// lastName: string = "";
// email: string = "";
// }
//
// export default User;
print(res1.usage?.completionTokensDetails?.acceptedPredictionTokens);
// 18
print(res1.usage?.completionTokensDetails?.rejectedPredictionTokens);
// 10
```

You can pass the prediction either as a single text value using `PredictionContentContent.text('...')` or as a list of text parts using `PredictionContentContent.textParts([...])`.

**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode))

> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended to use Structured Outputs if it is supported for your use case.
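
A rough JSON-mode sketch, assuming `ResponseFormat.jsonObject()` is the JSON-mode counterpart of the `ResponseFormat.jsonSchema()` constructor used in the Structured Outputs example above:

```dart
// Hedged sketch: ResponseFormat.jsonObject() is assumed to mirror the
// ResponseFormat.jsonSchema() constructor shown in the Structured Outputs example.
final res = await client.createChatCompletion(
  request: CreateChatCompletionRequest(
    model: const ChatCompletionModel.model(ChatCompletionModels.gpt4o),
    messages: [
      ChatCompletionMessage.system(
        content: 'You are a helpful assistant that extracts names from text '
            'and returns them in a JSON object.',
      ),
      ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string(
          'John, Mary, and Peter.',
        ),
      ),
    ],
    responseFormat: const ResponseFormat.jsonObject(),
  ),
);
print(res.choices.first.message.content);
// e.g. {"names":["John","Mary","Peter"]}
```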

@@ -157,8 +157,6 @@ class CreateAssistantRequest with _$CreateAssistantRequest {

/// Available assistant models. Mind that the list may not be exhaustive nor up-to-date.
enum AssistantModels {
@JsonValue('chatgpt-4o-latest')
chatgpt4oLatest,
@JsonValue('gpt-4')
gpt4,
@JsonValue('gpt-4-32k')
@@ -189,6 +187,8 @@ enum AssistantModels {
gpt4o20240513,
@JsonValue('gpt-4o-2024-08-06')
gpt4o20240806,
@JsonValue('gpt-4o-2024-11-20')
gpt4o20241120,
@JsonValue('gpt-4o-mini')
gpt4oMini,
@JsonValue('gpt-4o-mini-2024-07-18')
@@ -201,20 +201,10 @@ enum AssistantModels {
gpt35Turbo16k0613,
@JsonValue('gpt-3.5-turbo-0125')
gpt35Turbo0125,
@JsonValue('gpt-3.5-turbo-0301')
gpt35Turbo0301,
@JsonValue('gpt-3.5-turbo-0613')
gpt35Turbo0613,
@JsonValue('gpt-3.5-turbo-1106')
gpt35Turbo1106,
@JsonValue('o1-mini')
o1Mini,
@JsonValue('o1-mini-2024-09-12')
o1Mini20240912,
@JsonValue('o1-preview')
o1Preview,
@JsonValue('o1-preview-2024-09-12')
o1Preview20240912,
}

// ==========================================

@@ -384,6 +384,8 @@ enum ChatCompletionModels {
gpt4o20240513,
@JsonValue('gpt-4o-2024-08-06')
gpt4o20240806,
@JsonValue('gpt-4o-2024-11-20')
gpt4o20241120,
@JsonValue('gpt-4o-audio-preview')
gpt4oAudioPreview,
@JsonValue('gpt-4o-audio-preview-2024-10-01')
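
To illustrate the enum value added above, a hedged usage sketch (it reuses the request types from the README examples; `client` is assumed to be an already configured `OpenAIClient`):

```dart
// Hedged sketch: selects the newly added gpt-4o-2024-11-20 entry; `client`
// is assumed to be an OpenAIClient configured with a valid API key.
final res = await client.createChatCompletion(
  request: CreateChatCompletionRequest(
    model: const ChatCompletionModel.model(ChatCompletionModels.gpt4o20241120),
    messages: [
      ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string('Hello!'),
      ),
    ],
  ),
);
print(res.choices.first.message.content);
```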

@@ -191,8 +191,6 @@ class CreateRunRequest with _$CreateRunRequest {

/// Available models. Mind that the list may not be exhaustive nor up-to-date.
enum RunModels {
@JsonValue('chatgpt-4o-latest')
chatgpt4oLatest,
@JsonValue('gpt-4')
gpt4,
@JsonValue('gpt-4-32k')
@@ -223,6 +221,8 @@ enum RunModels {
gpt4o20240513,
@JsonValue('gpt-4o-2024-08-06')
gpt4o20240806,
@JsonValue('gpt-4o-2024-11-20')
gpt4o20241120,
@JsonValue('gpt-4o-mini')
gpt4oMini,
@JsonValue('gpt-4o-mini-2024-07-18')
@@ -235,20 +235,10 @@ enum RunModels {
gpt35Turbo16k0613,
@JsonValue('gpt-3.5-turbo-0125')
gpt35Turbo0125,
@JsonValue('gpt-3.5-turbo-0301')
gpt35Turbo0301,
@JsonValue('gpt-3.5-turbo-0613')
gpt35Turbo0613,
@JsonValue('gpt-3.5-turbo-1106')
gpt35Turbo1106,
@JsonValue('o1-mini')
o1Mini,
@JsonValue('o1-mini-2024-09-12')
o1Mini20240912,
@JsonValue('o1-preview')
o1Preview,
@JsonValue('o1-preview-2024-09-12')
o1Preview20240912,
}

// ==========================================

@@ -206,8 +206,6 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest {

/// Available models. Mind that the list may not be exhaustive nor up-to-date.
enum ThreadAndRunModels {
@JsonValue('chatgpt-4o-latest')
chatgpt4oLatest,
@JsonValue('gpt-4')
gpt4,
@JsonValue('gpt-4-32k')
@@ -238,6 +236,8 @@ enum ThreadAndRunModels {
gpt4o20240513,
@JsonValue('gpt-4o-2024-08-06')
gpt4o20240806,
@JsonValue('gpt-4o-2024-11-20')
gpt4o20241120,
@JsonValue('gpt-4o-mini')
gpt4oMini,
@JsonValue('gpt-4o-mini-2024-07-18')
@@ -250,20 +250,10 @@ enum ThreadAndRunModels {
gpt35Turbo16k0613,
@JsonValue('gpt-3.5-turbo-0125')
gpt35Turbo0125,
@JsonValue('gpt-3.5-turbo-0301')
gpt35Turbo0301,
@JsonValue('gpt-3.5-turbo-0613')
gpt35Turbo0613,
@JsonValue('gpt-3.5-turbo-1106')
gpt35Turbo1106,
@JsonValue('o1-mini')
o1Mini,
@JsonValue('o1-mini-2024-09-12')
o1Mini20240912,
@JsonValue('o1-preview')
o1Preview,
@JsonValue('o1-preview-2024-09-12')
o1Preview20240912,
}

// ==========================================
22 changes: 4 additions & 18 deletions packages/openai_dart/lib/src/generated/schema/schema.g.dart

Some generated files are not rendered by default.
