@@ -29,9 +29,9 @@ import SwiftOpenAI
let response = try await service.startChat(parameters: parameters)
let choices = response.choices
let chatUsage = response.usage
- let logprobs = choices.compactMap(\.logprobs)
+ let logprobs = choices?.compactMap(\.logprobs)
dump(logprobs)
- self.messages = choices.compactMap(\.message.content)
+ self.messages = choices?.compactMap(\.message?.content) ?? []
dump(chatUsage)
self.usage = chatUsage
} catch APIError.responseUnsuccessful(let description, let statusCode) {
@@ -48,7 +48,7 @@ import SwiftOpenAI
do {
let stream = try await service.startStreamedChat(parameters: parameters)
for try await result in stream {
- let content = result.choices.first?.delta.content ?? ""
+ let content = result.choices?.first?.delta?.content ?? ""
self.message += content
}
} catch APIError.responseUnsuccessful(let description, let statusCode) {
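Since `choices` is now optional at every call site in this PR, the repeated `choices?.first?...` chains can be collapsed once in a convenience extension. A minimal sketch against the `ChatCompletionObject` shape shown later in this diff; the helper names are invented for illustration and are not SwiftOpenAI API.

import SwiftOpenAI

extension ChatCompletionObject {
  // Hypothetical helpers, not library API: fold the optional chain in one place.
  var firstContent: String? { choices?.first?.message?.content }
  // Mirrors the call sites above that compactMap over every choice.
  var allContents: [String] { choices?.compactMap(\.message?.content) ?? [] }
}

With these, the provider above could read `self.messages = response.allContents` instead of repeating the optional chain.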
@@ -103,7 +103,7 @@ enum FunctionCallDefinition: String, CaseIterable {
do {
let chat = try await service.startChat(parameters: parameters)

- guard let assistantMessage = chat.choices.first?.message else { return }
+ guard let assistantMessage = chat.choices?.first?.message else { return }

let content = assistantMessage.content ?? ""

@@ -157,7 +157,7 @@ enum FunctionCallDefinition: String, CaseIterable {
model: .gpt41106Preview)
do {
let chat = try await service.startChat(parameters: paramsForChat)
- guard let assistantMessage = chat.choices.first?.message else { return }
+ guard let assistantMessage = chat.choices?.first?.message else { return }
await updateLastAssistantMessage(.init(content: .content(.init(text: assistantMessage.content)), origin: .received(.gpt)))
} catch {
// If an error occurs, update the UI to display the error message.
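When the assistant message carries tool calls, this demo dispatches through an `availableFunctions` dictionary. A hedged sketch of that lookup, assuming the `FunctionCallDefinition` enum above and handlers that receive the raw JSON `arguments` string; the handler signature is an assumption for illustration, not the demo's exact code.

// Hypothetical dispatcher: resolves a returned function name to a handler
// and forwards the JSON arguments string. Unknown tools return nil.
func dispatchTool(
  named name: String?,
  arguments: String,
  handlers: [FunctionCallDefinition: (String) async throws -> String]
) async throws -> String? {
  guard let name, let definition = FunctionCallDefinition(rawValue: name) else { return nil }
  return try await handlers[definition]?(arguments)
}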
@@ -100,12 +100,12 @@ struct FunctionCallStreamedResponse {
let stream = try await service.startStreamedChat(parameters: parameters)
for try await result in stream {
// Extract the first choice from the stream results, if none exist, exit the loop.
- if let choice = result.choices.first {
+ if let choice = result.choices?.first {
/// Because we are using the stream API, we need to wait to populate
/// the values that come from the streamed API before constructing a valid tool call response.
/// This is not needed if `stream` is set to false in the API completion request.
/// # Step 2: check if the model wanted to call a function
- if let toolCalls = choice.delta.toolCalls {
+ if let toolCalls = choice.delta?.toolCalls {

/// # Step 3: Define the available functions to be called
availableFunctions = [.createImage: generateImage(arguments:)]
@@ -114,7 +114,7 @@
}

/// The streamed content to display
- if let newContent = choice.delta.content {
+ if let newContent = choice.delta?.content {
await updateLastAssistantMessage(.init(
content: .content(.init(text: newContent)),
origin: .received(.gpt)))
@@ -213,10 +213,10 @@ struct FunctionCallStreamedResponse {
let stream = try await service.startStreamedChat(parameters: paramsForChat)
for try await result in stream {
// Extract the first choice from the stream results, if none exist, exit the loop.
- guard let choice = result.choices.first else { return }
+ guard let choice = result.choices?.first else { return }

/// The streamed content to display
- if let newContent = choice.delta.content {
+ if let newContent = choice.delta?.content {
await updateLastAssistantMessage(.init(content: .content(.init(text: newContent)), origin: .received(.gpt)))
}
}
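The comments in this hunk note that a streamed tool call arrives in pieces: the function name early, then the JSON `arguments` as string fragments across chunks. A self-contained sketch of that accumulation, using a hypothetical stand-in type rather than SwiftOpenAI's tool-call model:

// Hypothetical accumulator for one streamed tool call; not a SwiftOpenAI type.
struct StreamedToolCall {
  var name = ""
  var arguments = ""

  // A delta may carry the name (usually once) and/or an arguments fragment;
  // fragments concatenate into a single JSON string.
  mutating func merge(name: String?, argumentsFragment: String?) {
    if let name { self.name = name }
    if let argumentsFragment { arguments += argumentsFragment }
  }
}

Once the stream reports a `tool_calls` finish reason, the accumulated `arguments` string is complete and can be decoded and dispatched.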
@@ -57,7 +57,7 @@ import SwiftOpenAI
let stream = try await service.startStreamedChat(parameters: localParameters)
for try await result in stream {
// Extract the first choice from the stream results, if none exist, exit the loop.
- guard let choice = result.choices.first else { return }
+ guard let choice = result.choices?.first else { return }

// Store initial `role` and `functionCall` data from the first `choice.delta` for UI display.
// This information is essential for maintaining context in the conversation and for updating
@@ -73,7 +73,7 @@
// Assign the content received in the current message to the newDelta.
newDelta.content = temporalReceivedMessageContent
// Update the UI with the latest assistant message and the corresponding delta.
- updateLastAssistantMessage(content: choice.delta.content ?? "", delta: newDelta)
+ updateLastAssistantMessage(content: choice.delta?.content ?? "", delta: newDelta)

// Evaluate the `finishReason` to determine if the conversation has reached a logical end.
// If so, package the accumulated data into a new message parameter that will be used
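A sketch of the same streaming loop written defensively now that `choices` is optional: it skips chunks without a usable choice (for example, the final usage-only chunk described in the chunk model later in this diff) instead of returning. `OpenAIService` and `ChatCompletionParameters` are assumed to be the service protocol and request type these demos use.

import SwiftOpenAI

// Sketch, not the demo's code: accumulate streamed content, tolerate
// choice-less chunks, and capture usage when the last chunk includes it.
func collectStreamedChat(
  service: OpenAIService,
  parameters: ChatCompletionParameters
) async throws -> (text: String, usage: ChatUsage?) {
  var text = ""
  var usage: ChatUsage?
  let stream = try await service.startStreamedChat(parameters: parameters)
  for try await chunk in stream {
    if let chunkUsage = chunk.usage { usage = chunkUsage } // last chunk only, when requested
    guard let choice = chunk.choices?.first else { continue } // e.g. usage-only chunk
    text += choice.delta?.content ?? ""
  }
  return (text, usage)
}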
@@ -107,7 +107,7 @@ final class ChatStructuredOutputToolProvider {
do {

let chat = try await service.startChat(parameters: parameters)
- guard let assistantMessage = chat.choices.first?.message else { return }
+ guard let assistantMessage = chat.choices?.first?.message else { return }
let content = assistantMessage.content ?? ""
await updateLastAssistantMessage(.init(content: .content(.init(text: content)), origin: .received(.gpt)))
if let toolCalls = assistantMessage.toolCalls {
@@ -241,7 +241,7 @@ extension ChatStructuredOutputToolProvider {
model: .gpt4o)
do {
let chat = try await service.startChat(parameters: paramsForChat)
- guard let assistantMessage = chat.choices.first?.message else { return }
+ guard let assistantMessage = chat.choices?.first?.message else { return }
await updateLastAssistantMessage(.init(content: .content(.init(text: assistantMessage.content)), origin: .received(.gpt)))
} catch {
// If an error occurs, update the UI to display the error message.
@@ -30,9 +30,9 @@ final class ChatStructuredOutputProvider {
{
do {
let choices = try await service.startChat(parameters: parameters).choices
- self.messages = choices.compactMap(\.message.content).map { $0.asJsonFormatted() }
+ self.messages = choices?.compactMap(\.message?.content).map { $0.asJsonFormatted() } ?? []
assert(messages.count == 1)
- self.errorMessage = choices.first?.message.refusal ?? ""
+ self.errorMessage = choices?.first?.message?.refusal ?? ""
} catch APIError.responseUnsuccessful(let description, let statusCode) {
self.errorMessage = "Network error with status code: \(statusCode) and description: \(description)"
} catch {
@@ -47,10 +47,10 @@
do {
let stream = try await service.startStreamedChat(parameters: parameters)
for try await result in stream {
- let firstChoiceDelta = result.choices.first?.delta
+ let firstChoiceDelta = result.choices?.first?.delta
let content = firstChoiceDelta?.refusal ?? firstChoiceDelta?.content ?? ""
self.message += content
- if result.choices.first?.finishReason != nil {
+ if result.choices?.first?.finishReason != nil {
self.message = self.message.asJsonFormatted()
}
}
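With structured outputs, `message.content` is a JSON string matching the requested schema, so the step after the formatting above is decoding it into a typed value. A minimal sketch; `WeatherReport` is a hypothetical schema type, and the real shape depends on the schema sent in the request.

import Foundation

// Hypothetical schema type; property names must mirror the requested JSON schema.
struct WeatherReport: Decodable {
  let city: String
  let temperature: Double
}

func decodeStructuredOutput(_ content: String) throws -> WeatherReport {
  try JSONDecoder().decode(WeatherReport.self, from: Data(content.utf8))
}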
@@ -48,7 +48,7 @@ import SwiftOpenAI
let stream = try await service.startStreamedChat(parameters: parameters)
for try await result in stream {
// Extract the first choice from the stream results, if none exist, exit the loop.
- guard let choice = result.choices.first else { return }
+ guard let choice = result.choices?.first else { return }

// Store initial `role` and `functionCall` data from the first `choice.delta` for UI display.
// This information is essential for maintaining context in the conversation and for updating
@@ -64,7 +64,7 @@
// Assign the content received in the current message to the newDelta.
newDelta.content = temporalReceivedMessageContent
// Update the UI with the latest assistant message and the corresponding delta.
- await updateLastAssistantMessage(content: choice.delta.content ?? "", delta: newDelta)
+ await updateLastAssistantMessage(content: choice.delta?.content ?? "", delta: newDelta)
}
} catch {
// If an error occurs, update the UI to display the error message.
@@ -13,29 +13,29 @@ public struct ChatCompletionChunkObject: Decodable {
/// A unique identifier for the chat completion chunk.
public let id: String?
/// A list of chat completion choices. Can be more than one if n is greater than 1.
- public let choices: [ChatChoice]
+ public let choices: [ChatChoice]?
/// The Unix timestamp (in seconds) of when the chat completion chunk was created.
- public let created: Int
+ public let created: Int?
/// The model used to generate the completion.
- public let model: String
+ public let model: String?
/// The service tier used for processing the request. This field is only included if the service_tier parameter is specified in the request.
public let serviceTier: String?
/// This fingerprint represents the backend configuration that the model runs with.
/// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
public let systemFingerprint: String?
/// The object type, which is always chat.completion.chunk.
- public let object: String
+ public let object: String?
/// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request. When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.
public let usage: ChatUsage?

public struct ChatChoice: Decodable {

/// A chat completion delta generated by streamed model responses.
- public let delta: Delta
+ public let delta: Delta?
/// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
public let finishReason: IntOrStringValue?
/// The index of the choice in the list of choices.
- public let index: Int
+ public let index: Int?
/// Provided by the Vision API.
public let finishDetails: FinishDetails?
/// Log probability information for the choice.
@@ -69,18 +69,18 @@ public struct ChatCompletionChunkObject: Decodable {

public struct LogProb: Decodable {
/// A list of message content tokens with log probability information.
- let content: [TokenDetail]
+ let content: [TokenDetail]?
}

public struct TokenDetail: Decodable {
/// The token.
- let token: String
+ let token: String?
/// The log probability of this token.
- let logprob: Double
+ let logprob: Double?
/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
let bytes: [Int]?
/// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
- let topLogprobs: [TopLogProb]
+ let topLogprobs: [TopLogProb]?

enum CodingKeys: String, CodingKey {
case token, logprob, bytes
@@ -89,17 +89,17 @@

struct TopLogProb: Decodable {
/// The token.
- let token: String
+ let token: String?
/// The log probability of this token.
- let logprob: Double
+ let logprob: Double?
/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
let bytes: [Int]?
}
}

/// Provided by the Vision API.
public struct FinishDetails: Decodable {
- let type: String
+ let type: String?
}

enum CodingKeys: String, CodingKey {
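The point of loosening these fields is decoding resilience: an OpenAI-compatible provider may omit `id`, `model`, `created`, or even `choices` in a chunk. A self-contained sketch with simplified stand-in types (not SwiftOpenAI's) showing a sparse chunk decoding cleanly once every field is optional:

import Foundation

// Simplified stand-ins for the structs above; all fields optional.
struct Chunk: Decodable {
  struct Choice: Decodable {
    struct Delta: Decodable { let content: String? }
    let delta: Delta?
  }
  let id: String?
  let model: String?
  let choices: [Choice]?
}

let sparse = Data(#"{"choices":[{"delta":{"content":"Hi"}}]}"#.utf8)
do {
  let chunk = try JSONDecoder().decode(Chunk.self, from: sparse)
  print(chunk.choices?.first?.delta?.content ?? "") // prints "Hi"
} catch {
  print("decoding failed: \(error)")
}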
@@ -11,20 +11,20 @@ import Foundation
public struct ChatCompletionObject: Decodable {

/// A unique identifier for the chat completion.
- public let id: String
+ public let id: String?
/// A list of chat completion choices. Can be more than one if n is greater than 1.
- public let choices: [ChatChoice]
+ public let choices: [ChatChoice]?
/// The Unix timestamp (in seconds) of when the chat completion was created.
- public let created: Int
+ public let created: Int?
/// The model used for the chat completion.
- public let model: String
+ public let model: String?
/// The service tier used for processing the request. This field is only included if the service_tier parameter is specified in the request.
public let serviceTier: String?
/// This fingerprint represents the backend configuration that the model runs with.
/// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
public let systemFingerprint: String?
/// The object type, which is always chat.completion.
- public let object: String
+ public let object: String?
/// Usage statistics for the completion request.
public let usage: ChatUsage?

@@ -33,9 +33,9 @@ public struct ChatCompletionObject: Decodable {
/// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
public let finishReason: IntOrStringValue?
/// The index of the choice in the list of choices.
- public let index: Int
+ public let index: Int?
/// A chat completion message generated by the model.
- public let message: ChatMessage
+ public let message: ChatMessage?
/// Log probability information for the choice.
public let logprobs: LogProb?

@@ -49,7 +49,7 @@
@available(*, deprecated, message: "Deprecated and replaced by `tool_calls`")
public let functionCall: FunctionCall?
/// The role of the author of this message.
- public let role: String
+ public let role: String?
/// The reasoning content generated by the model, if available.
public let reasoningContent: String?
/// Provided by the Vision API.
@@ -61,12 +61,12 @@

/// Provided by the Vision API.
public struct FinishDetails: Decodable {
- let type: String
+ let type: String?
}

public struct Audio: Decodable {
/// Unique identifier for this audio response.
- public let id: String
+ public let id: String?
/// The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.
public let expiresAt: Int?
/// Base64 encoded audio bytes generated by the model, in the format specified in the request.
@@ -96,18 +96,18 @@ public struct ChatCompletionObject: Decodable {

public struct LogProb: Decodable {
/// A list of message content tokens with log probability information.
- let content: [TokenDetail]
+ let content: [TokenDetail]?
}

public struct TokenDetail: Decodable {
/// The token.
- let token: String
+ let token: String?
/// The log probability of this token.
- let logprob: Double
+ let logprob: Double?
/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
let bytes: [Int]?
/// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
- let topLogprobs: [TopLogProb]
+ let topLogprobs: [TopLogProb]?

enum CodingKeys: String, CodingKey {
case token, logprob, bytes
@@ -116,9 +116,9 @@ public struct ChatCompletionObject: Decodable {

struct TopLogProb: Decodable {
/// The token.
- let token: String
+ let token: String?
/// The log probability of this token.
- let logprob: Double
+ let logprob: Double?
/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
let bytes: [Int]?
}
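Call sites that genuinely require a field can turn the new optionals back into hard requirements with a throwing accessor. A sketch, not library API:

import SwiftOpenAI

// Hypothetical helper: raises a descriptive error instead of a silent nil
// when a provider omits fields the app depends on.
enum ChatResponseError: Error { case missingChoice, missingContent }

func requireFirstContent(of chat: ChatCompletionObject) throws -> String {
  guard let choice = chat.choices?.first else { throw ChatResponseError.missingChoice }
  guard let content = choice.message?.content else { throw ChatResponseError.missingContent }
  return content
}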