@@ -118,7 +118,7 @@ const (
 // a text completion, a chat completion, an embedding request, a speech request, or a transcription request.
 type RequestInput struct {
     TextCompletionInput *string             `json:"text_completion_input,omitempty"`
-    ChatCompletionInput []BifrostMessage    `json:"chat_completion_input,omitempty"`
+    ChatCompletionInput []BifrostMessage    `json:"chat_completion_input,omitempty"`
     EmbeddingInput      *EmbeddingInput     `json:"embedding_input,omitempty"`
     SpeechInput         *SpeechInput        `json:"speech_input,omitempty"`
     TranscriptionInput  *TranscriptionInput `json:"transcription_input,omitempty"`
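RequestInput acts as a union: exactly one of its input fields is expected to be set per request. A minimal sketch of constructing one, assuming only the fields visible in this diff (the ptr helper is hypothetical, not part of the change):

// ptr is a hypothetical helper for taking the address of a literal;
// it is not part of this diff.
func ptr[T any](v T) *T { return &v }

// newTextRequest sets exactly one of RequestInput's union fields.
func newTextRequest(prompt string) RequestInput {
    return RequestInput{TextCompletionInput: ptr(prompt)}
}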
@@ -295,12 +295,12 @@ type Fallback struct {
 // mapped to the provider's parameters.
 type ModelParameters struct {
     ToolChoice        *ToolChoice `json:"tool_choice,omitempty"`         // Whether to call a tool
-    Tools             []Tool      `json:"tools,omitempty"`               // Tools to use
+    Tools             []Tool      `json:"tools,omitempty"`               // Tools to use
     Temperature       *float64    `json:"temperature,omitempty"`         // Controls randomness in the output
     TopP              *float64    `json:"top_p,omitempty"`               // Controls diversity via nucleus sampling
     TopK              *int        `json:"top_k,omitempty"`               // Controls diversity via top-k sampling
     MaxTokens         *int        `json:"max_tokens,omitempty"`          // Maximum number of tokens to generate
-    StopSequences     []string    `json:"stop_sequences,omitempty"`      // Sequences that stop generation
+    StopSequences     []string    `json:"stop_sequences,omitempty"`      // Sequences that stop generation
     PresencePenalty   *float64    `json:"presence_penalty,omitempty"`    // Penalizes repeated tokens
     FrequencyPenalty  *float64    `json:"frequency_penalty,omitempty"`   // Penalizes frequent tokens
     ParallelToolCalls *bool       `json:"parallel_tool_calls,omitempty"` // Enables parallel tool calls
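The tuning knobs in ModelParameters are pointers (or slices) so that "unset" is distinguishable from a zero value and dropped from the JSON via omitempty. A sketch of filling a few of them, reusing the hypothetical ptr helper from the earlier sketch:

// conservativeParams leaves every field it does not set as nil,
// so those keys are omitted from the serialized request.
func conservativeParams() ModelParameters {
    return ModelParameters{
        Temperature:   ptr(0.2),  // low randomness
        TopP:          ptr(0.9),  // nucleus sampling cutoff
        MaxTokens:     ptr(1024), // cap on generated tokens
        StopSequences: []string{"\n\n"},
    }
}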
@@ -318,7 +318,7 @@ type FunctionParameters struct {
     Description *string                `json:"description,omitempty"` // Description of the parameters
     Required    []string               `json:"required,omitempty"`    // Required parameter names
     Properties  map[string]interface{} `json:"properties,omitempty"`  // Parameter properties
-    Enum        []string               `json:"enum,omitempty"`        // Enum values for the parameters
+    Enum        []string               `json:"enum,omitempty"`        // Enum values for the parameters
 }
 
 // Function represents a function that can be called by the model.
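Properties is a free-form map, which suggests JSON-Schema-style parameter definitions. A sketch under that assumption (the schema keys below are standard JSON Schema, not fields from this diff):

// weatherToolParams is a hypothetical tool-parameter definition.
func weatherToolParams() FunctionParameters {
    return FunctionParameters{
        Required: []string{"location"},
        Properties: map[string]interface{}{
            "location": map[string]interface{}{"type": "string"},
            "unit": map[string]interface{}{
                "type": "string",
                "enum": []string{"celsius", "fahrenheit"},
            },
        },
    }
}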
@@ -492,7 +492,7 @@ type ToolMessage struct {
 type AssistantMessage struct {
     Refusal     *string      `json:"refusal,omitempty"`
     Annotations []Annotation `json:"annotations,omitempty"`
-    ToolCalls   []ToolCall   `json:"tool_calls,omitempty"`
+    ToolCalls   []ToolCall   `json:"tool_calls,omitempty"`
     Thought     *string      `json:"thought,omitempty"`
 }
 
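Because ToolCalls carries omitempty, a non-empty slice signals that the assistant is requesting tool execution rather than returning plain text. A sketch of the check a consumer might make (ToolCall's own fields are not shown in this diff, so only the length test is used):

func wantsToolExecution(msg AssistantMessage) bool {
    return len(msg.ToolCalls) > 0
}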
@@ -795,7 +795,7 @@ type BifrostResponseExtraFields struct {
     Provider    ModelProvider    `json:"provider"`
     Params      ModelParameters  `json:"model_params"`
     Latency     *int64           `json:"latency,omitempty"`
-    ChatHistory []BifrostMessage `json:"chat_history,omitempty"`
+    ChatHistory []BifrostMessage `json:"chat_history,omitempty"`
     BilledUsage *BilledLLMUsage  `json:"billed_usage,omitempty"`
     ChunkIndex  int              `json:"chunk_index"` // used for streaming responses to identify the chunk index; will be 0 for non-streaming responses
     RawResponse interface{}      `json:"raw_response,omitempty"`
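Latency is *int64 rather than int64 so that "not measured" is distinguishable from zero, so consumers should nil-check before dereferencing. A minimal sketch using only fields from this diff (the unit of the value is not stated here):

// responseLatency reports the recorded latency, if any.
func responseLatency(extra BifrostResponseExtraFields) (int64, bool) {
    if extra.Latency == nil {
        return 0, false // latency was not recorded
    }
    return *extra.Latency, true
}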