Releases: jamesrochabrun/SwiftOpenAI

SwiftOpenAI v4.3.4

06 Oct 18:19
311b492

What's Changed

Full Changelog: v4.3.3...v4.3.4

SwiftOpenAI v4.3.3

30 Sep 07:47
5240490

What's Changed

Full Changelog: v4.3.2...v4.3.3

SwiftOpenAI v4.3.2

10 Aug 05:52
e303587

What's Changed

New Contributors

Full Changelog: v4.3.1...v4.3.2

SwiftOpenAI v4.3.1

07 Jul 18:41
dc8bde5

What's Changed

New Contributors

Full Changelog: v4.3.0...v4.3.1

SwiftOpenAI v4.3.0

17 Jun 22:01
d2b17e5

What's Changed

New Contributors

Full Changelog: v4.2.0...v4.3.0

SwiftOpenAI v4.2.0

07 Jun 22:31

What's Changed

Response API Streaming Support - Summary of Changes

Streaming Responses

The Response API supports streaming responses using Server-Sent Events (SSE). This allows you to receive partial responses as they are generated, enabling real-time UI updates and better user experience.

Stream Events

// The ResponseStreamEvent enum represents all possible streaming events
public enum ResponseStreamEvent: Decodable {
  case responseCreated(ResponseCreatedEvent)
  case responseInProgress(ResponseInProgressEvent)
  case responseCompleted(ResponseCompletedEvent)
  case responseFailed(ResponseFailedEvent)
  case outputItemAdded(OutputItemAddedEvent)
  case outputTextDelta(OutputTextDeltaEvent)
  case outputTextDone(OutputTextDoneEvent)
  case functionCallArgumentsDelta(FunctionCallArgumentsDeltaEvent)
  case reasoningSummaryTextDelta(ReasoningSummaryTextDeltaEvent)
  case error(ErrorEvent)
  // ... and many more event types
}

Basic Streaming Example

// Enable streaming by setting stream: true
let parameters = ModelResponseParameter(
    input: .string("Tell me a story"),
    model: .gpt4o,
    stream: true
)

// Create a stream
let stream = try await service.responseCreateStream(parameters)

// Process events as they arrive
for try await event in stream {
    switch event {
    case .outputTextDelta(let delta):
        // Append text chunk to your UI
        print(delta.delta, terminator: "")
        
    case .responseCompleted(let completed):
        // Response is complete
        print("\nResponse ID: \(completed.response.id)")
        
    case .error(let error):
        // Handle errors
        print("Error: \(error.message)")
        
    default:
        // Handle other events as needed
        break
    }
}
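
For UI work, the usual pattern is to accumulate the deltas into observable state so the view re-renders as each chunk arrives. A minimal sketch (this view model is illustrative, not part of the library):

import Observation
import SwiftOpenAI

// Illustrative view model: appends each text delta to observable state,
// so any UI observing `text` re-renders as chunks arrive.
@MainActor
@Observable
final class StreamedTextModel {
    var text = ""

    private let service: OpenAIService

    init(service: OpenAIService) {
        self.service = service
    }

    func start(prompt: String) async {
        let parameters = ModelResponseParameter(
            input: .string(prompt),
            model: .gpt4o,
            stream: true
        )
        do {
            let stream = try await service.responseCreateStream(parameters)
            for try await event in stream {
                if case .outputTextDelta(let delta) = event {
                    text += delta.delta
                }
            }
        } catch {
            text += "\n[stream failed: \(error.localizedDescription)]"
        }
    }
}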

Streaming with Conversation State

// Maintain conversation continuity with previousResponseId
var previousResponseId: String? = nil
var messages: [(role: String, content: String)] = []

// First message
let firstParams = ModelResponseParameter(
    input: .string("Hello!"),
    model: .gpt4o,
    stream: true
)

let firstStream = try await service.responseCreateStream(firstParams)
var firstResponse = ""

for try await event in firstStream {
    switch event {
    case .outputTextDelta(let delta):
        firstResponse += delta.delta
        
    case .responseCompleted(let completed):
        previousResponseId = completed.response.id
        messages.append((role: "user", content: "Hello!"))
        messages.append((role: "assistant", content: firstResponse))
        
    default:
        break
    }
}

// Follow-up message with conversation context
var inputArray: [InputItem] = []

// Add conversation history
for message in messages {
    inputArray.append(.message(InputMessage(
        role: message.role,
        content: .text(message.content)
    )))
}

// Add new user message
inputArray.append(.message(InputMessage(
    role: "user",
    content: .text("How are you?")
)))

let followUpParams = ModelResponseParameter(
    input: .array(inputArray),
    model: .gpt4o,
    previousResponseId: previousResponseId,
    stream: true
)

let followUpStream = try await service.responseCreateStream(followUpParams)
// Process the follow-up stream...

Streaming with Tools and Function Calling

let parameters = ModelResponseParameter(
    input: .string("What's the weather in San Francisco?"),
    model: .gpt4o,
    tools: [
        Tool(
            type: "function",
            function: ChatCompletionParameters.ChatFunction(
                name: "get_weather",
                description: "Get current weather",
                parameters: JSONSchema(
                    type: .object,
                    properties: [
                        "location": JSONSchema(type: .string)
                    ],
                    required: ["location"]
                )
            )
        )
    ],
    stream: true
)

let stream = try await service.responseCreateStream(parameters)
var functionCallArguments = ""

for try await event in stream {
    switch event {
    case .functionCallArgumentsDelta(let delta):
        // Accumulate function call arguments
        functionCallArguments += delta.delta
        
    case .functionCallArgumentsDone(let done):
        // Function call is complete
        print("Function: \(done.name)")
        print("Arguments: \(functionCallArguments)")
        
    case .outputTextDelta(let delta):
        // Regular text output
        print(delta.delta, terminator: "")
        
    default:
        break
    }
}
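
Once the arguments are complete, the accumulated string is plain JSON, so it can be decoded with Foundation. A sketch, assuming the get_weather schema defined above (the GetWeatherArgs type is illustrative):

import Foundation

// Illustrative type matching the `get_weather` JSON Schema defined above.
struct GetWeatherArgs: Decodable {
    let location: String
}

// `functionCallArguments` is the string accumulated from the
// `.functionCallArgumentsDelta` events in the loop above.
if let data = functionCallArguments.data(using: .utf8) {
    do {
        let args = try JSONDecoder().decode(GetWeatherArgs.self, from: data)
        print("Look up weather for: \(args.location)")
    } catch {
        print("Could not decode function arguments: \(error)")
    }
}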

Canceling a Stream

// Streams can be canceled using Swift's task cancellation
let streamTask = Task {
    let stream = try await service.responseCreateStream(parameters)
    
    for try await event in stream {
        // Check if task is cancelled
        if Task.isCancelled {
            break
        }
        
        // Process events...
    }
}

// Cancel the stream when needed
streamTask.cancel()

Complete Streaming Implementation Example

@MainActor
@Observable
class ResponseStreamProvider {
    var messages: [Message] = []
    var isStreaming = false
    var error: String?
    
    private let service: OpenAIService
    private var previousResponseId: String?
    private var streamTask: Task<Void, Never>?
    
    init(service: OpenAIService) {
        self.service = service
    }
    
    func sendMessage(_ text: String) {
        streamTask?.cancel()
        
        // Add user message
        messages.append(Message(role: .user, content: text))
        
        // Start streaming
        streamTask = Task {
            await streamResponse(for: text)
        }
    }
    
    private func streamResponse(for userInput: String) async {
        isStreaming = true
        error = nil
        
        // Create streaming message placeholder
        let streamingMessage = Message(role: .assistant, content: "", isStreaming: true)
        messages.append(streamingMessage)
        
        do {
            // Build conversation history
            var inputArray: [InputItem] = []
            for message in messages.dropLast(2) {
                inputArray.append(.message(InputMessage(
                    role: message.role.rawValue,
                    content: .text(message.content)
                )))
            }
            inputArray.append(.message(InputMessage(
                role: "user",
                content: .text(userInput)
            )))
            
            let parameters = ModelResponseParameter(
                input: .array(inputArray),
                model: .gpt4o,
                previousResponseId: previousResponseId,
                stream: true
            )
            
            let stream = try await service.responseCreateStream(parameters)
            var accumulatedText = ""
            
            for try await event in stream {
                guard !Task.isCancelled else { break }
                
                switch event {
                case .outputTextDelta(let delta):
                    accumulatedText += delta.delta
                    updateStreamingMessage(with: accumulatedText)
                    
                case .responseCompleted(let completed):
                    previousResponseId = completed.response.id
                    finalizeStreamingMessage(with: accumulatedText, responseId: completed.response.id)
                    
                case .error(let errorEvent):
                    throw APIError.requestFailed(description: errorEvent.message)
                    
                default:
                    break
                }
            }
        } catch {
            self.error = error.localizedDescription
            messages.removeLast() // Remove streaming message on error
        }
        
        isStreaming = false
    }
    
    private func updateStreamingMessage(with content: String) {
        if let index = messages.lastIndex(where: { $0.isStreaming }) {
            messages[index].content = content
        }
    }
    
    private func finalizeStreamingMessage(with content: String, responseId: String) {
        if let index = messages.lastIndex(where: { $0.isStreaming }) {
            messages[index].content = content
            messages[index].isStreaming = false
            messages[index].responseId = responseId
        }
    }
}
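
The provider above assumes a small mutable Message model. A possible shape (this type is app-defined, not part of the library):

import Foundation

// An app-defined Message model backing ResponseStreamProvider above;
// not part of SwiftOpenAI. `id` is excluded from the memberwise
// initializer because it has a default value.
struct Message: Identifiable {
    enum Role: String {
        case user
        case assistant
    }

    let id = UUID()
    let role: Role
    var content: String
    var isStreaming = false
    var responseId: String?
}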

New Contributors

Full Changelog: v4.1.1...v4.2.0

SwiftOpenAI v4.1.1

13 May 06:47

Update README for Anthropic OpenAI compatibility

Anthropic

Anthropic provides OpenAI compatibility; for more, visit the Anthropic documentation.

To use Claude models with SwiftOpenAI, configure the service like this:

let anthropicApiKey = ""
let openAIService = OpenAIServiceFactory.service(apiKey: anthropicApiKey, 
                     overrideBaseURL: "https://api.anthropic.com", 
                     overrideVersion: "v1")

Now you can create the completion parameters like this:

let parameters = ChatCompletionParameters(
   messages: [.init(
      role: .user,
      content: "Are you Claude?")],
   model: .custom("claude-3-7-sonnet-20250219"))
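
From there, requests go through the same chat API as any other provider. A minimal sketch using startChat (the response accessors below follow the usual ChatCompletionObject shape; verify them against your version):

do {
   let chat = try await openAIService.startChat(parameters: parameters)
   // Print the assistant reply; accessor names may vary by version.
   if let reply = chat.choices.first?.message.content {
      print(reply)
   }
} catch {
   print("Chat request failed:", error)
}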

What else

New Contributors

Full Changelog: v4.1.0...v4.1.1

SwiftOpenAI v4.1.0

25 Apr 21:53

This library supports the latest OpenAI image generation API.

  • Parameters Create
/// 'Create Image':
/// https://platform.openai.com/docs/api-reference/images/create
public struct CreateImageParameters: Encodable {
   
   /// A text description of the desired image(s).
   /// The maximum length is 32000 characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
   public let prompt: String
   
   // MARK: - Optional properties
   
   /// Allows to set transparency for the background of the generated image(s).
   /// This parameter is only supported for `gpt-image-1`.
   /// Must be one of `transparent`, `opaque` or `auto` (default value).
   /// When `auto` is used, the model will automatically determine the best background for the image.
   /// If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`.
   public let background: Background?
   
   /// The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`.
   /// Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
   public let model: Model?
   
   /// Control the content-moderation level for images generated by `gpt-image-1`.
   /// Must be either low for less restrictive filtering or auto (default value).
   public let moderation: Moderation?
   
   /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
   /// Defaults to `1`
   public let n: Int?
   
   /// The compression level (0-100%) for the generated images.
   /// This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.
   public let outputCompression: Int?
   
   /// The format in which the generated images are returned.
   /// This parameter is only supported for `gpt-image-1`.
   /// Must be one of `png`, `jpeg`, or `webp`.
   public let outputFormat: OutputFormat?
   
   /// The quality of the image that will be generated.
   /// - `auto` (default value) will automatically select the best quality for the given model.
   /// - `high`, `medium` and `low` are supported for gpt-image-1.
   /// - `hd` and `standard` are supported for dall-e-3.
   /// - `standard` is the only option for dall-e-2.
   public let quality: Quality?
   
   /// The format in which generated images with dall-e-2 and dall-e-3 are returned.
   /// Must be one of `url` or `b64_json`.
   /// URLs are only valid for 60 minutes after the image has been generated.
   /// This parameter isn't supported for `gpt-image-1` which will always return base64-encoded images.
   public let responseFormat: ResponseFormat?
   
   /// The size of the generated images.
   /// - For gpt-image-1, one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default value)
   /// - For dall-e-3, one of `1024x1024`, `1792x1024`, or `1024x1792`
   /// - For dall-e-2, one of `256x256`, `512x512`, or `1024x1024`
   public let size: String?
   
   /// The style of the generated images.
   /// This parameter is only supported for `dall-e-3`.
   /// Must be one of `vivid` or `natural`.
   /// Vivid causes the model to lean towards generating hyper-real and dramatic images.
   /// Natural causes the model to produce more natural, less hyper-real looking images.
   public let style: Style?
   
   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
   public let user: String?
}
  • Parameters Edit
/// Creates an edited or extended image given one or more source images and a prompt.
/// This endpoint only supports `gpt-image-1` and `dall-e-2`.
public struct CreateImageEditParameters: Encodable {
   
   /// The image(s) to edit.
   /// For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 25MB.
   /// For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB.
   let image: [Data]
   
   /// A text description of the desired image(s).
   /// The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
   let prompt: String
   
   /// An additional image whose fully transparent areas indicate where `image` should be edited.
   /// If there are multiple images provided, the mask will be applied on the first image.
   /// Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
   let mask: Data?
   
   /// The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported.
   /// Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
   let model: String?
   
   /// The number of images to generate. Must be between 1 and 10.
   /// Defaults to 1.
   let n: Int?
   
   /// The quality of the image that will be generated.
   /// `high`, `medium` and `low` are only supported for `gpt-image-1`.
   /// `dall-e-2` only supports `standard` quality.
   /// Defaults to `auto`.
   let quality: String?
   
   /// The format in which the generated images are returned.
   /// Must be one of `url` or `b64_json`.
   /// URLs are only valid for 60 minutes after the image has been generated.
   /// This parameter is only supported for `dall-e-2`, as `gpt-image-1` will always return base64-encoded images.
   let responseFormat: String?
   
   /// The size of the generated images.
   /// Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`,
   /// and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
   let size: String?
   
   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
   let user: String?
}
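
A usage sketch for the edit endpoint follows. Both the createImageEdits method name and the initializer defaults are assumptions here; check the image service protocol in your version for the exact signatures:

import SwiftOpenAI

// A sketch; `createImageEdits` is a hypothetical method name, verify it
// against the service protocol before use.
let service = OpenAIServiceFactory.service(apiKey: "<YOUR_KEY>")
let sourceImage = Data() // PNG data for the image to edit

let editParameters = CreateImageEditParameters(
   image: [sourceImage],
   prompt: "Add a red scarf to the dragon")

let edited = try await service.createImageEdits(parameters: editParameters)
print(edited.data?.first?.url ?? "no url")
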
  • Parameters Variations
/// Creates a variation of a given image.
/// This endpoint only supports `dall-e-2`.
public struct CreateImageVariationParameters: Encodable {
   
   /// The image to use as the basis for the variation(s).
   /// Must be a valid PNG file, less than 4MB, and square.
   let image: Data
   
   /// The model to use for image generation. Only `dall-e-2` is supported at this time.
   /// Defaults to `dall-e-2`.
   let model: String?
   
   /// The number of images to generate. Must be between 1 and 10.
   /// Defaults to 1.
   let n: Int?
   
   /// The format in which the generated images are returned.
   /// Must be one of `url` or `b64_json`.
   /// URLs are only valid for 60 minutes after the image has been generated.
   /// Defaults to `url`.
   let responseFormat: String?
   
   /// The size of the generated images.
   /// Must be one of `256x256`, `512x512`, or `1024x1024`.
   /// Defaults to `1024x1024`.
   let size: String?
   
   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
   let user: String?
}
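
Similarly for variations, reusing the service from the sketch above (again, the createImageVariations method name is an assumption):

// A sketch; `createImageVariations` is a hypothetical method name.
let squarePng = Data() // square PNG under 4MB
let variationParameters = CreateImageVariationParameters(image: squarePng)

let variations = try await service.createImageVariations(parameters: variationParameters)
print(variations.data?.first?.url ?? "no url")
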
  • Request example
import SwiftOpenAI

let service = OpenAIServiceFactory.service(apiKey: "<YOUR_KEY>")

// ❶ Describe the image you want
let prompt = "A watercolor dragon-unicorn hybrid flying above snowy mountains"

// ❷ Build parameters with the brand-new types (commit 880a15c)
let params = CreateImageParameters(
    prompt: prompt,
    model:  .gptImage1,      // .dallE3 / .dallE2 also valid
    n:      1,               // 1-10  (only 1 for DALL-E 3)
    quality: .high,          // .hd / .standard for DALL-E 3
    size:   "1024x1024"      // use "1792x1024" or "1024x1792" for wide / tall
)

do {
    // ❸ Fire the request – returns a `CreateImageResponse`
    let result = try await service.createImages(parameters: params)
    // Note: gpt-image-1 always returns base64 (`b64Json`); `url` is only
    // populated for the DALL-E models (see `responseFormat` above).
    if let b64 = result.data?.first?.b64Json {
        print("Image base64 length:", b64.count)
    } else if let url = result.data?.first?.url {
        print("Image URL:", url)
    }
} catch {
    print("Generation failed:", error)
}

For a sample app, see the Examples/SwiftOpenAIExample project in this repo.

SwiftOpenAI v4.0.7

14 Apr 06:36
3e81448

What's Changed

Full Changelog: v4.0.6...v4.0.7

SwiftOpenAI v4.0.6

17 Mar 07:08
3f5e195

Adds a convenience property to ResponseModel, addressing #129

   /// Convenience property that aggregates all text output from output_text items in the output array.
   /// Similar to the outputText property in Python and JavaScript SDKs.
   public var outputText: String? {
      let outputTextItems = output.compactMap { outputItem -> String? in
         switch outputItem {
         case .message(let message):
            return message.content.compactMap { contentItem -> String? in
               switch contentItem {
               case .outputText(let outputText):
                  return outputText.text
               }
            }.joined()
         default:
            return nil
         }
      }
      
      return outputTextItems.isEmpty ? nil : outputTextItems.joined()
   }
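
In practice, this lets a non-streaming call read the aggregated text in one expression. A sketch, assuming the non-streaming counterpart of responseCreateStream is named responseCreate (verify the exact method name in your version):

import SwiftOpenAI

// A sketch using the `outputText` convenience property above.
// `responseCreate` is an assumed method name for the non-streaming
// counterpart of `responseCreateStream`.
let service = OpenAIServiceFactory.service(apiKey: "<YOUR_KEY>")

let parameters = ModelResponseParameter(
   input: .string("Summarize this release in one sentence."),
   model: .gpt4o)

let response = try await service.responseCreate(parameters)
print(response.outputText ?? "no text output")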