
Commit 727b60c

feat: Updated OpenAPI spec (#242)

HavenDV and github-actions[bot] authored
Co-authored-by: github-actions[bot] <bot@openai.com>

1 parent 9c7fa48; commit 727b60c

14 files changed: +880, -434 lines

src/libs/Cohere/Generated/Cohere..JsonSerializerContext.g.cs

Lines changed: 2 additions & 0 deletions

@@ -123,6 +123,8 @@ namespace Cohere
         typeof(global::Cohere.JsonConverters.StreamedChatResponseDiscriminatorEventTypeNullableJsonConverter),
         typeof(global::Cohere.JsonConverters.StreamedChatResponseV2DiscriminatorTypeJsonConverter),
         typeof(global::Cohere.JsonConverters.StreamedChatResponseV2DiscriminatorTypeNullableJsonConverter),
+        typeof(global::Cohere.JsonConverters.ThinkingTypeJsonConverter),
+        typeof(global::Cohere.JsonConverters.ThinkingTypeNullableJsonConverter),
         typeof(global::Cohere.JsonConverters.ToolV2TypeJsonConverter),
         typeof(global::Cohere.JsonConverters.ToolV2TypeNullableJsonConverter),
         typeof(global::Cohere.JsonConverters.UpdateClusterJobRequestStatusJsonConverter),
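With both converters registered in the generated serializer context, `ThinkingType` values serialize as strings with no extra setup when that context is used. For callers who assemble their own `JsonSerializerOptions` instead, a minimal sketch of the equivalent manual registration:

using System.Text.Json;

// Manual registration of the two new converters; the generated
// JsonSerializerContext above does this automatically.
var options = new JsonSerializerOptions();
options.Converters.Add(new Cohere.JsonConverters.ThinkingTypeJsonConverter());
options.Converters.Add(new Cohere.JsonConverters.ThinkingTypeNullableJsonConverter());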

src/libs/Cohere/Generated/Cohere.CohereClient.Chat.g.cs

Lines changed: 7 additions & 0 deletions

@@ -740,6 +740,11 @@ partial void ProcessChatResponseContent(
         /// - AUTO: Cohere Platform Only<br/>
         /// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
         /// </param>
+        /// <param name="rawPrompting">
+        /// When enabled, the user's prompt will be sent to the model without<br/>
+        /// any pre-processing.<br/>
+        /// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+        /// </param>
         /// <param name="responseFormat">
         /// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
         /// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
@@ -834,6 +839,7 @@ partial void ProcessChatResponseContent(
             string? model = default,
             string? preamble = default,
             global::Cohere.ChatRequestPromptTruncation? promptTruncation = default,
+            bool? rawPrompting = default,
             global::Cohere.ResponseFormat? responseFormat = default,
             global::Cohere.ChatRequestSafetyMode? safetyMode = default,
             bool? searchQueriesOnly = default,
@@ -862,6 +868,7 @@ partial void ProcessChatResponseContent(
                 Preamble = preamble,
                 PresencePenalty = presencePenalty,
                 PromptTruncation = promptTruncation,
+                RawPrompting = rawPrompting,
                 ResponseFormat = responseFormat,
                 SafetyMode = safetyMode,
                 SearchQueriesOnly = searchQueriesOnly,
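The v1 surface gains the flag end to end: doc comment, optional parameter, and request mapping. A hedged sketch of a call that opts in, assuming the generated `ChatAsync` method and a parameterless `CohereClient` constructor (construction and authentication details are outside this diff):

using System.Threading;
using Cohere;

// rawPrompting: true sends the prompt to the model with no pre-processing.
// The required message parameter is an assumption from the v1 Chat API;
// authentication setup is omitted.
using var client = new CohereClient();

var response = await client.ChatAsync(
    message: "Summarize this changelog entry in one sentence.",
    rawPrompting: true,
    cancellationToken: CancellationToken.None);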

src/libs/Cohere/Generated/Cohere.CohereClient.Chatv2.g.cs

Lines changed: 17 additions & 2 deletions

@@ -648,8 +648,9 @@ partial void ProcessChatv2ResponseContent(
         /// Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.
         /// </param>
         /// <param name="maxTokens">
-        /// The maximum number of tokens the model will generate as part of the response.<br/>
-        /// **Note**: Setting a low value may result in incomplete generations.
+        /// The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).<br/>
+        /// **Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.<br/>
+        /// **Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
         /// </param>
         /// <param name="messages">
         /// A list of chat messages in chronological order, representing a conversation between the user and the model.<br/>
@@ -667,6 +668,11 @@ partial void ProcessChatv2ResponseContent(
         /// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
         /// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
         /// </param>
+        /// <param name="rawPrompting">
+        /// When enabled, the user's prompt will be sent to the model without<br/>
+        /// any pre-processing.<br/>
+        /// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+        /// </param>
         /// <param name="responseFormat">
         /// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/v2/docs/command-r), [Command R+](https://docs.cohere.com/v2/docs/command-r-plus) and newer models.<br/>
         /// The model can be forced into outputting JSON objects by setting `{ "type": "json_object" }`.<br/>
@@ -705,6 +711,11 @@ partial void ProcessChatv2ResponseContent(
         /// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.<br/>
         /// Randomness can be further maximized by increasing the value of the `p` parameter.
         /// </param>
+        /// <param name="thinking">
+        /// Thinking gives the model enhanced reasoning capabilities for complex tasks, while also providing transparency into its step-by-step thought process before it delivers its final answer.<br/>
+        /// When thinking is turned on, the model creates thinking content blocks where it outputs its internal reasoning. The model will incorporate insights from this reasoning before crafting a final response.<br/>
+        /// When thinking is used without tools, the API response will include both thinking and text content blocks. Meanwhile, when thinking is used alongside tools and the model makes tool calls, the API response will include the thinking content block and `tool_calls`.
+        /// </param>
         /// <param name="toolChoice">
         /// Used to control whether or not the model will be forced to use a tool when answering. When `REQUIRED` is specified, the model will be forced to use at least one of the user-defined tools, and the `tools` parameter must be passed in the request.<br/>
         /// When `NONE` is specified, the model will be forced **not** to use one of the specified tools, and give a direct response.<br/>
@@ -730,13 +741,15 @@ partial void ProcessChatv2ResponseContent(
             int? maxTokens = default,
             float? p = default,
             float? presencePenalty = default,
+            bool? rawPrompting = default,
             global::Cohere.ResponseFormatV2? responseFormat = default,
             global::Cohere.Chatv2RequestSafetyMode? safetyMode = default,
             int? seed = default,
             global::System.Collections.Generic.IList<string>? stopSequences = default,
             bool? stream = default,
             bool? strictTools = default,
             float? temperature = default,
+            global::Cohere.Thinking? thinking = default,
             global::Cohere.Chatv2RequestToolChoice? toolChoice = default,
             global::System.Collections.Generic.IList<global::Cohere.ToolV2>? tools = default,
             global::System.Threading.CancellationToken cancellationToken = default)
@@ -753,13 +766,15 @@ partial void ProcessChatv2ResponseContent(
                 Model = model,
                 P = p,
                 PresencePenalty = presencePenalty,
+                RawPrompting = rawPrompting,
                 ResponseFormat = responseFormat,
                 SafetyMode = safetyMode,
                 Seed = seed,
                 StopSequences = stopSequences,
                 Stream = stream,
                 StrictTools = strictTools,
                 Temperature = temperature,
+                Thinking = thinking,
                 ToolChoice = toolChoice,
                 Tools = tools,
             };
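Putting the v2 additions together, a hedged sketch of a call that sets the new parameters. `Chatv2Async` and the parameter names follow the generated signature above; the model id, the `ChatMessageV2` element type, and the parameterless `Thinking` initializer are assumptions, since the `Thinking` model itself is not part of this commit excerpt:

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Cohere;

// Sketch only: messages are taken as input because the v2 message model
// is not shown in this diff.
static async Task<object?> ChatWithThinkingAsync(
    ICohereClient client,
    IList<ChatMessageV2> messages,
    CancellationToken ct)
{
    return await client.Chatv2Async(
        model: "command-a-reasoning-08-2025", // assumed id of a thinking-capable model
        messages: messages,
        maxTokens: 512,                       // capped at the model's own output limit
        rawPrompting: false,                  // leave Cohere's prompt pre-processing on
        thinking: new Thinking(),             // assumed parameterless initializer
        cancellationToken: ct);
}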

src/libs/Cohere/Generated/Cohere.ICohereClient.Chat.g.cs

Lines changed: 6 additions & 0 deletions

@@ -127,6 +127,11 @@ public partial interface ICohereClient
         /// - AUTO: Cohere Platform Only<br/>
         /// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
         /// </param>
+        /// <param name="rawPrompting">
+        /// When enabled, the user's prompt will be sent to the model without<br/>
+        /// any pre-processing.<br/>
+        /// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+        /// </param>
         /// <param name="responseFormat">
         /// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
         /// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
@@ -221,6 +226,7 @@ public partial interface ICohereClient
             string? model = default,
             string? preamble = default,
             global::Cohere.ChatRequestPromptTruncation? promptTruncation = default,
+            bool? rawPrompting = default,
             global::Cohere.ResponseFormat? responseFormat = default,
             global::Cohere.ChatRequestSafetyMode? safetyMode = default,
             bool? searchQueriesOnly = default,
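Note that the generator repeats `= default` on the interface as well as on the class. That matters in C#: optional arguments are bound at the call site from the compile-time type, so calls made through `ICohereClient` take the interface's declared defaults. A self-contained illustration of the language rule (unrelated to the Cohere API itself):

using System;

IGreeter g = new Greeter();
Console.WriteLine(g.Greet()); // prints "interface": the default comes from
                              // the compile-time type, not the class

interface IGreeter { string Greet(string name = "interface"); }
class Greeter : IGreeter { public string Greet(string name = "class") => name; }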

src/libs/Cohere/Generated/Cohere.ICohereClient.Chatv2.g.cs

Lines changed: 15 additions & 2 deletions

@@ -43,8 +43,9 @@ public partial interface ICohereClient
         /// Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.
         /// </param>
         /// <param name="maxTokens">
-        /// The maximum number of tokens the model will generate as part of the response.<br/>
-        /// **Note**: Setting a low value may result in incomplete generations.
+        /// The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).<br/>
+        /// **Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.<br/>
+        /// **Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
         /// </param>
         /// <param name="messages">
         /// A list of chat messages in chronological order, representing a conversation between the user and the model.<br/>
@@ -62,6 +63,11 @@ public partial interface ICohereClient
         /// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
         /// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
         /// </param>
+        /// <param name="rawPrompting">
+        /// When enabled, the user's prompt will be sent to the model without<br/>
+        /// any pre-processing.<br/>
+        /// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+        /// </param>
         /// <param name="responseFormat">
         /// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/v2/docs/command-r), [Command R+](https://docs.cohere.com/v2/docs/command-r-plus) and newer models.<br/>
         /// The model can be forced into outputting JSON objects by setting `{ "type": "json_object" }`.<br/>
@@ -100,6 +106,11 @@ public partial interface ICohereClient
         /// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.<br/>
         /// Randomness can be further maximized by increasing the value of the `p` parameter.
         /// </param>
+        /// <param name="thinking">
+        /// Thinking gives the model enhanced reasoning capabilities for complex tasks, while also providing transparency into its step-by-step thought process before it delivers its final answer.<br/>
+        /// When thinking is turned on, the model creates thinking content blocks where it outputs its internal reasoning. The model will incorporate insights from this reasoning before crafting a final response.<br/>
+        /// When thinking is used without tools, the API response will include both thinking and text content blocks. Meanwhile, when thinking is used alongside tools and the model makes tool calls, the API response will include the thinking content block and `tool_calls`.
+        /// </param>
         /// <param name="toolChoice">
         /// Used to control whether or not the model will be forced to use a tool when answering. When `REQUIRED` is specified, the model will be forced to use at least one of the user-defined tools, and the `tools` parameter must be passed in the request.<br/>
         /// When `NONE` is specified, the model will be forced **not** to use one of the specified tools, and give a direct response.<br/>
@@ -125,13 +136,15 @@ public partial interface ICohereClient
             int? maxTokens = default,
             float? p = default,
             float? presencePenalty = default,
+            bool? rawPrompting = default,
             global::Cohere.ResponseFormatV2? responseFormat = default,
             global::Cohere.Chatv2RequestSafetyMode? safetyMode = default,
             int? seed = default,
             global::System.Collections.Generic.IList<string>? stopSequences = default,
             bool? stream = default,
             bool? strictTools = default,
             float? temperature = default,
+            global::Cohere.Thinking? thinking = default,
             global::Cohere.Chatv2RequestToolChoice? toolChoice = default,
             global::System.Collections.Generic.IList<global::Cohere.ToolV2>? tools = default,
             global::System.Threading.CancellationToken cancellationToken = default);
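The revised `maxTokens` documentation amounts to a simple clamp: the effective output budget is the smaller of the requested value and the model's own limit, and omitting the value means the model limit applies. A tiny helper capturing that rule (the 4096 limit below is illustrative only, not any real model's figure):

using System;

Console.WriteLine(EffectiveMaxTokens(512));     // 512
Console.WriteLine(EffectiveMaxTokens(999_999)); // 4096: capped at the model limit
Console.WriteLine(EffectiveMaxTokens(null));    // 4096: defaults to the model limit

static int EffectiveMaxTokens(int? maxTokens, int modelOutputLimit = 4096) =>
    maxTokens is int requested ? Math.Min(requested, modelOutputLimit) : modelOutputLimit;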
src/libs/Cohere/Generated/… (new file: ThinkingTypeJsonConverter)

Lines changed: 53 additions & 0 deletions

@@ -0,0 +1,53 @@
+#nullable enable
+
+namespace Cohere.JsonConverters
+{
+    /// <inheritdoc />
+    public sealed class ThinkingTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter<global::Cohere.ThinkingType>
+    {
+        /// <inheritdoc />
+        public override global::Cohere.ThinkingType Read(
+            ref global::System.Text.Json.Utf8JsonReader reader,
+            global::System.Type typeToConvert,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            switch (reader.TokenType)
+            {
+                case global::System.Text.Json.JsonTokenType.String:
+                {
+                    var stringValue = reader.GetString();
+                    if (stringValue != null)
+                    {
+                        return global::Cohere.ThinkingTypeExtensions.ToEnum(stringValue) ?? default;
+                    }
+
+                    break;
+                }
+                case global::System.Text.Json.JsonTokenType.Number:
+                {
+                    var numValue = reader.GetInt32();
+                    return (global::Cohere.ThinkingType)numValue;
+                }
+                case global::System.Text.Json.JsonTokenType.Null:
+                {
+                    return default(global::Cohere.ThinkingType);
+                }
+                default:
+                    throw new global::System.ArgumentOutOfRangeException(nameof(reader));
+            }
+
+            return default;
+        }
+
+        /// <inheritdoc />
+        public override void Write(
+            global::System.Text.Json.Utf8JsonWriter writer,
+            global::Cohere.ThinkingType value,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer));
+
+            writer.WriteStringValue(global::Cohere.ThinkingTypeExtensions.ToValueString(value));
+        }
+    }
+}
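The converter accepts three token shapes: a string (mapped through `ThinkingTypeExtensions.ToEnum`, with unknown strings falling back to `default`), a raw number cast to the enum, and `null`. A small sketch of the read paths; the `"enabled"` wire value is an assumption, since the enum's members are defined elsewhere:

using System.Text.Json;

var options = new JsonSerializerOptions
{
    Converters = { new Cohere.JsonConverters.ThinkingTypeJsonConverter() },
};

// String path: ToEnum(...) ?? default, so unknown strings do not throw.
var fromString = JsonSerializer.Deserialize<Cohere.ThinkingType>("\"enabled\"", options);

// Number path: the integer is cast straight to the enum.
var fromNumber = JsonSerializer.Deserialize<Cohere.ThinkingType>("0", options);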
src/libs/Cohere/Generated/… (new file: ThinkingTypeNullableJsonConverter)

Lines changed: 60 additions & 0 deletions

@@ -0,0 +1,60 @@
+#nullable enable
+
+namespace Cohere.JsonConverters
+{
+    /// <inheritdoc />
+    public sealed class ThinkingTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter<global::Cohere.ThinkingType?>
+    {
+        /// <inheritdoc />
+        public override global::Cohere.ThinkingType? Read(
+            ref global::System.Text.Json.Utf8JsonReader reader,
+            global::System.Type typeToConvert,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            switch (reader.TokenType)
+            {
+                case global::System.Text.Json.JsonTokenType.String:
+                {
+                    var stringValue = reader.GetString();
+                    if (stringValue != null)
+                    {
+                        return global::Cohere.ThinkingTypeExtensions.ToEnum(stringValue);
+                    }
+
+                    break;
+                }
+                case global::System.Text.Json.JsonTokenType.Number:
+                {
+                    var numValue = reader.GetInt32();
+                    return (global::Cohere.ThinkingType)numValue;
+                }
+                case global::System.Text.Json.JsonTokenType.Null:
+                {
+                    return default(global::Cohere.ThinkingType?);
+                }
+                default:
+                    throw new global::System.ArgumentOutOfRangeException(nameof(reader));
+            }
+
+            return default;
+        }
+
+        /// <inheritdoc />
+        public override void Write(
+            global::System.Text.Json.Utf8JsonWriter writer,
+            global::Cohere.ThinkingType? value,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer));
+
+            if (value == null)
+            {
+                writer.WriteNullValue();
+            }
+            else
+            {
+                writer.WriteStringValue(global::Cohere.ThinkingTypeExtensions.ToValueString(value.Value));
+            }
+        }
+    }
+}
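The nullable variant differs from the non-nullable one in two ways: an unrecognized string maps to `null` rather than `default`, and a `null` value is written out as JSON `null`. A minimal round trip:

using System.Text.Json;

var options = new JsonSerializerOptions
{
    Converters = { new Cohere.JsonConverters.ThinkingTypeNullableJsonConverter() },
};

var missing = JsonSerializer.Deserialize<Cohere.ThinkingType?>("null", options);    // null
var unknown = JsonSerializer.Deserialize<Cohere.ThinkingType?>("\"???\"", options); // null: ToEnum miss
string json = JsonSerializer.Serialize<Cohere.ThinkingType?>(null, options);        // "null"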
