[
  {
    "path": ".github/workflows/ci.yml",
    "content": "# This workflow will build a Swift project\n# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-swift\n\nname: CI tests\n\non:\n  push:\n    branches: [ \"main\" ]\n  pull_request:\n    branches: [ \"main\" ]\n\njobs:\n  build_and_test_macos:\n    runs-on: macos-latest\n    steps:\n    - uses: actions/checkout@v4\n    - name: Get swift version\n      run: swift --version\n    - name: Build\n      run: swift build -q\n    - name: Run tests\n      run: swift test -q\n\n  build_and_test_linux:\n    runs-on: ubuntu-latest\n    container:\n      image: swift:6.0.1-jammy\n    steps:\n    - name: Install dependencies\n      run: |\n        apt-get update\n        apt-get install -y curl git\n    - uses: actions/checkout@v4\n    - name: Get swift version\n      run: swift --version\n    - name: Build\n      run: swift build -q\n    - name: Run tests\n      run: swift test -q\n\n  lint:\n    runs-on: macos-latest\n    steps:\n    - uses: actions/checkout@v4\n    - name: Set up Homebrew\n      id: set-up-homebrew\n      uses: Homebrew/actions/setup-homebrew@master\n    - name: Install swiftformat\n      run: brew install swiftformat\n    - name: Check formatting\n      run: swiftformat --config rules.swiftformat --lint ."
  },
  {
    "path": ".gitignore",
    "content": ".DS_Store\n/.build\n/Packages\nxcuserdata/\nDerivedData/\n.swiftpm/configuration/registries.json\n.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata\n.netrc\nPackage.resolved\n\n# Xcode Swift Package Manager\n**/xcshareddata/swiftpm/\n**/project.xcworkspace/xcshareddata/swiftpm/\n**/xcshareddata/IDEWorkspaceChecks.plist\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "\n### Linting:\n```bash\nbrew install swiftformat\nswiftformat --config rules.swiftformat .\n```"
  },
  {
    "path": "Examples/RealtimeExample/README.md",
    "content": "# OpenAI Realtime API Example\n\nThis example demonstrates how to use SwiftOpenAI's Realtime API for bidirectional voice conversations with OpenAI's GPT-4o models.\n\n## Features\n\n- Real-time bidirectional audio streaming\n- Voice Activity Detection (VAD) for automatic turn-taking\n- Audio transcription of both user and AI speech\n- Function calling support\n- Interrupt handling when user starts speaking\n\n## Requirements\n\n- iOS 15+, macOS 12+, watchOS 9+\n- Microphone permissions\n- OpenAI API key\n\n## Setup\n\n### 1. Add Microphone Permission\n\nAdd the following to your `Info.plist`:\n\n```xml\n<key>NSMicrophoneUsageDescription</key>\n<string>We need access to your microphone for voice conversations with AI</string>\n```\n\n### 2. macOS Sandbox Configuration\n\nIf targeting macOS, enable the following in your target's Signing & Capabilities:\n\n- **App Sandbox**:\n  - Outgoing Connections (Client) ✓\n  - Audio Input ✓\n- **Hardened Runtime**:\n  - Audio Input ✓\n\n## Usage\n\n### Basic Example\n\n```swift\nimport SwiftUI\nimport OpenAI\n\nstruct ContentView: View {\n    let realtimeManager = RealtimeManager()\n    @State private var isActive = false\n\n    var body: some View {\n        Button(isActive ? \"Stop\" : \"Start\") {\n            isActive.toggle()\n            if isActive {\n                Task {\n                    try? 
await realtimeManager.startConversation()\n                }\n            } else {\n                Task {\n                    await realtimeManager.stopConversation()\n                }\n            }\n        }\n    }\n}\n\n@RealtimeActor\nfinal class RealtimeManager {\n    private var session: OpenAIRealtimeSession?\n    private var audioController: AudioController?\n\n    func startConversation() async throws {\n        // Initialize service\n        let service = OpenAIServiceFactory.service(apiKey: \"your-api-key\")\n\n        // Configure session\n        let config = OpenAIRealtimeSessionConfiguration(\n            inputAudioFormat: .pcm16,\n            inputAudioTranscription: .init(model: \"whisper-1\"),\n            instructions: \"You are a helpful assistant\",\n            modalities: [.audio, .text],\n            outputAudioFormat: .pcm16,\n            voice: \"shimmer\"\n        )\n\n        // Create session\n        session = try await service.realtimeSession(\n            model: \"gpt-4o-mini-realtime-preview-2024-12-17\",\n            configuration: config\n        )\n\n        // Setup audio\n        audioController = try await AudioController(modes: [.playback, .record])\n\n        // Handle microphone input\n        Task {\n            let micStream = try audioController!.micStream()\n            for await buffer in micStream {\n                if let base64Audio = AudioUtils.base64EncodeAudioPCMBuffer(from: buffer) {\n                    await session?.sendMessage(\n                        OpenAIRealtimeInputAudioBufferAppend(audio: base64Audio)\n                    )\n                }\n            }\n        }\n\n        // Handle AI responses\n        Task {\n            for await message in session!.receiver {\n                switch message {\n                case .responseAudioDelta(let audio):\n                    audioController?.playPCM16Audio(base64String: audio)\n                case .inputAudioBufferSpeechStarted:\n               
     audioController?.interruptPlayback()\n                default:\n                    break\n                }\n            }\n        }\n    }\n\n    func stopConversation() {\n        audioController?.stop()\n        session?.disconnect()\n    }\n}\n```\n\n## Configuration Options\n\n### Voice Options\n\n- `alloy` - Neutral and balanced\n- `echo` - Friendly and warm\n- `shimmer` - Gentle and calming\n\n### Turn Detection\n\n#### Server VAD (Voice Activity Detection)\n\n```swift\nturnDetection: .init(type: .serverVAD(\n    prefixPaddingMs: 300,  // Audio to include before speech\n    silenceDurationMs: 500, // Silence duration to detect end\n    threshold: 0.5         // Activation threshold (0.0-1.0)\n))\n```\n\n#### Semantic VAD\n\n```swift\nturnDetection: .init(type: .semanticVAD(\n    eagerness: .medium  // .low, .medium, or .high\n))\n```\n\n### Modalities\n\n```swift\nmodalities: [.audio, .text]  // Both audio and text\nmodalities: [.text]          // Text only (disables audio)\n```\n\n## Handling Different Events\n\n```swift\nfor await message in session.receiver {\n    switch message {\n    case .error(let error):\n        print(\"Error: \\(error ?? 
\"Unknown\")\")\n\n    case .sessionCreated:\n        print(\"Session started\")\n\n    case .sessionUpdated:\n        // Trigger first response if AI speaks first\n        await session.sendMessage(OpenAIRealtimeResponseCreate())\n\n    case .responseAudioDelta(let base64Audio):\n        audioController.playPCM16Audio(base64String: base64Audio)\n\n    case .inputAudioBufferSpeechStarted:\n        // User started speaking, interrupt AI\n        audioController.interruptPlayback()\n\n    case .responseTranscriptDone(let transcript):\n        print(\"AI said: \\(transcript)\")\n\n    case .inputAudioTranscriptionCompleted(let transcript):\n        print(\"User said: \\(transcript)\")\n\n    case .responseFunctionCallArgumentsDone(let name, let args, let callId):\n        print(\"Function \\(name) called with: \\(args)\")\n        // Handle function call and return result\n\n    default:\n        break\n    }\n}\n```\n\n## Function Calling\n\nAdd tools to your configuration:\n\n```swift\nlet config = OpenAIRealtimeSessionConfiguration(\n    tools: [\n        .init(\n            name: \"get_weather\",\n            description: \"Get the current weather for a location\",\n            parameters: [\n                \"type\": \"object\",\n                \"properties\": [\n                    \"location\": [\n                        \"type\": \"string\",\n                        \"description\": \"City name\"\n                    ]\n                ],\n                \"required\": [\"location\"]\n            ]\n        )\n    ],\n    toolChoice: .auto\n)\n```\n\nHandle function calls in the message loop:\n\n```swift\ncase .responseFunctionCallArgumentsDone(let name, let args, let callId):\n    // Parse arguments and execute function\n    let result = handleFunction(name: name, args: args)\n\n    // Return result to OpenAI\n    await session.sendMessage(\n        OpenAIRealtimeConversationItemCreate(\n            item: .init(role: \"function\", text: result)\n        )\n  
  )\n```\n\n## Troubleshooting\n\n### No Audio Output\n\n- Check that `.playback` mode is included in AudioController initialization\n- Verify audio permissions are granted\n- Ensure `outputAudioFormat` is set to `.pcm16`\n\n### No Microphone Input\n\n- Check that `.record` mode is included in AudioController initialization\n- Verify microphone permissions in Info.plist\n- Check System Settings > Privacy & Security > Microphone\n\n### WebSocket Connection Fails\n\n- Verify API key is correct\n- Check that `openai-beta: realtime=v1` header is included (SwiftOpenAI handles this automatically)\n- Ensure you're using a compatible model (gpt-4o-mini-realtime-preview or newer)\n\n## Resources\n\n- [OpenAI Realtime API Documentation](https://platform.openai.com/docs/api-reference/realtime)\n- [SwiftOpenAI GitHub](https://github.com/jamesrochabrun/SwiftOpenAI)\n"
  },
  {
    "path": "Examples/RealtimeExample/RealtimeExample.swift",
    "content": "//\n//  RealtimeExample.swift\n//  SwiftOpenAI\n//\n//  Example implementation of OpenAI Realtime API for bidirectional voice conversation\n//\n\nimport AVFoundation\nimport OpenAI\nimport SwiftUI\n\n// MARK: - RealtimeExampleView\n\nstruct RealtimeExampleView: View {\n  let realtimeManager = RealtimeManager()\n\n  var body: some View {\n    VStack {\n      Button(isRealtimeActive ? \"Stop OpenAI Realtime\" : \"Start OpenAI Realtime\") {\n        isRealtimeActive.toggle()\n      }\n    }\n  }\n\n  @State private var isRealtimeActive = false {\n    willSet {\n      if newValue {\n        startRealtime()\n      } else {\n        stopRealtime()\n      }\n    }\n  }\n\n  private func startRealtime() {\n    Task {\n      do {\n        try await realtimeManager.startConversation()\n      } catch {\n        print(\"Could not start OpenAI realtime: \\(error.localizedDescription)\")\n      }\n    }\n  }\n\n  private func stopRealtime() {\n    Task {\n      await realtimeManager.stopConversation()\n    }\n  }\n\n}\n\n// MARK: - RealtimeManager\n\n@RealtimeActor\nfinal class RealtimeManager {\n  nonisolated init() { }\n\n  func startConversation() async throws {\n    // Initialize the OpenAI service with your API key\n    let service = OpenAIServiceFactory.service(apiKey: \"your-api-key-here\")\n\n    // Set to false if you want your user to speak first\n    let aiSpeaksFirst = true\n\n    let audioController = try await AudioController(modes: [.playback, .record])\n    let micStream = try audioController.micStream()\n\n    // Configure the realtime session\n    let configuration = OpenAIRealtimeSessionConfiguration(\n      inputAudioFormat: .pcm16,\n      inputAudioTranscription: .init(model: \"whisper-1\"),\n      instructions: \"You are a helpful, witty, and friendly AI assistant. \" +\n        \"Your voice and personality should be warm and engaging, \" +\n        \"with a lively and playful tone. 
Talk quickly.\",\n      maxResponseOutputTokens: .int(4096),\n      modalities: [.audio, .text],\n      outputAudioFormat: .pcm16,\n      temperature: 0.7,\n      turnDetection: .init(\n        type: .semanticVAD(eagerness: .medium)),\n      voice: \"shimmer\")\n\n    // Create the realtime session\n    let realtimeSession = try await service.realtimeSession(\n      model: \"gpt-4o-mini-realtime-preview-2024-12-17\",\n      configuration: configuration)\n\n    // Send audio from the microphone to OpenAI once OpenAI is ready for it\n    var isOpenAIReadyForAudio = false\n    Task {\n      for await buffer in micStream {\n        if\n          isOpenAIReadyForAudio,\n          let base64Audio = AudioUtils.base64EncodeAudioPCMBuffer(from: buffer)\n        {\n          await realtimeSession.sendMessage(\n            OpenAIRealtimeInputAudioBufferAppend(audio: base64Audio))\n        }\n      }\n    }\n\n    // Listen for messages from OpenAI\n    Task {\n      for await message in realtimeSession.receiver {\n        switch message {\n        case .error(let errorMessage):\n          print(\"Received error from OpenAI: \\(errorMessage ?? 
\"Unknown error\")\")\n          realtimeSession.disconnect()\n\n        case .sessionUpdated:\n          if aiSpeaksFirst {\n            await realtimeSession.sendMessage(OpenAIRealtimeResponseCreate())\n          } else {\n            isOpenAIReadyForAudio = true\n          }\n\n        case .responseAudioDelta(let base64String):\n          audioController.playPCM16Audio(base64String: base64String)\n\n        case .inputAudioBufferSpeechStarted:\n          // User started speaking, interrupt AI playback\n          audioController.interruptPlayback()\n\n        case .responseCreated:\n          isOpenAIReadyForAudio = true\n\n        case .responseTranscriptDone(let transcript):\n          print(\"AI said: \\(transcript)\")\n\n        case .inputAudioTranscriptionCompleted(let transcript):\n          print(\"User said: \\(transcript)\")\n\n        case .responseFunctionCallArgumentsDone(let name, let arguments, let callId):\n          print(\"Function call: \\(name) with args: \\(arguments)\")\n                    // Handle function calls here\n\n        default:\n          break\n        }\n      }\n    }\n\n    self.realtimeSession = realtimeSession\n    self.audioController = audioController\n  }\n\n  func stopConversation() {\n    audioController?.stop()\n    realtimeSession?.disconnect()\n    audioController = nil\n    realtimeSession = nil\n  }\n\n  private var realtimeSession: OpenAIRealtimeSession?\n  private var audioController: AudioController?\n\n}\n\n// MARK: - Basic Usage Example\n\n// To use the Realtime API:\n//\n// 1. Add NSMicrophoneUsageDescription to your Info.plist:\n//   <key>NSMicrophoneUsageDescription</key>\n//   <string>We need access to your microphone for voice conversations with AI</string>\n//\n// 2. On macOS, enable the following in your target's Signing & Capabilities:\n//   - App Sandbox > Outgoing Connections (client)\n//   - App Sandbox > Audio Input\n//   - Hardened Runtime > Audio Input\n//\n// 3. 
Initialize the service with your API key:\n//   let service = OpenAIServiceFactory.service(apiKey: \"your-api-key\")\n//\n// 4. Create a session configuration:\n//   let config = OpenAIRealtimeSessionConfiguration(\n//       inputAudioFormat: .pcm16,\n//       instructions: \"You are a helpful assistant\",\n//       modalities: [.audio, .text],\n//       outputAudioFormat: .pcm16,\n//       voice: \"shimmer\"\n//   )\n//\n// 5. Create the realtime session:\n//   let session = try await service.realtimeSession(\n//       model: \"gpt-4o-mini-realtime-preview-2024-12-17\",\n//       configuration: config\n//   )\n//\n// 6. Set up audio controller:\n//   let audioController = try await AudioController(modes: [.playback, .record])\n//\n// 7. Stream microphone audio to OpenAI:\n//   for await buffer in try audioController.micStream() {\n//       if let base64Audio = AudioUtils.base64EncodeAudioPCMBuffer(from: buffer) {\n//           await session.sendMessage(\n//               OpenAIRealtimeInputAudioBufferAppend(audio: base64Audio)\n//           )\n//       }\n//   }\n//\n// 8. Listen for and play responses:\n//   for await message in session.receiver {\n//       switch message {\n//       case .responseAudioDelta(let base64Audio):\n//           audioController.playPCM16Audio(base64String: base64Audio)\n//       default:\n//           break\n//       }\n//   }\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/AIProxyIntroView.swift",
    "content": "//\n//  AIProxyIntroView.swift\n//  SwiftOpenAIExample\n//\n//  Created by Lou Zell on 3/27/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct AIProxyIntroView: View {\n  var body: some View {\n    NavigationStack {\n      VStack {\n        Spacer()\n        VStack(spacing: 24) {\n          TextField(\"Enter partial key\", text: $partialKey)\n          TextField(\"Enter your service's URL\", text: $serviceURL)\n        }\n        .padding()\n        .textFieldStyle(.roundedBorder)\n\n        Text(\"You receive a partial key and service URL when you configure an app in the AIProxy dashboard\")\n          .font(.caption)\n\n        NavigationLink(destination: OptionsListView(\n          openAIService: aiproxyService,\n          options: OptionsListView.APIOption.allCases.filter { $0 != .localChat }))\n        {\n          Text(\"Continue\")\n            .padding()\n            .padding(.horizontal, 48)\n            .foregroundColor(.white)\n            .background(\n              Capsule()\n                .foregroundColor(canProceed ? Color(red: 64 / 255, green: 195 / 255, blue: 125 / 255) : .gray.opacity(0.2)))\n        }\n        .disabled(!canProceed)\n        Spacer()\n        Group {\n          Text(\n            \"AIProxy keeps your OpenAI API key secure. To configure AIProxy for your project, or to learn more about how it works, please see the docs at \") +\n            Text(\"[this link](https://www.aiproxy.pro/docs).\")\n        }\n        .font(.caption)\n      }\n      .padding()\n      .navigationTitle(\"AIProxy Configuration\")\n    }\n  }\n\n  @State private var partialKey = \"\"\n  @State private var serviceURL = \"\"\n\n  private var canProceed: Bool {\n    !(partialKey.isEmpty || serviceURL.isEmpty)\n  }\n\n  private var aiproxyService: OpenAIService {\n    OpenAIServiceFactory.service(\n      aiproxyPartialKey: partialKey,\n      aiproxyServiceURL: serviceURL != \"\" ? 
serviceURL : nil)\n  }\n}\n\n#Preview {\n  AIProxyIntroView()\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ApiKeyIntroView.swift",
    "content": "//\n//  ApiKeyIntroView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ApiKeyIntroView: View {\n  var body: some View {\n    NavigationStack {\n      VStack {\n        Spacer()\n        VStack(spacing: 24) {\n          TextField(\"Enter API Key\", text: $apiKey)\n          TextField(\"Enter Organization ID (Optional)\", text: $organizationIdentifier)\n            .onChange(of: organizationIdentifier) { _, newValue in\n              if !newValue.isEmpty {\n                localOrganizationID = newValue\n              }\n            }\n        }\n        .padding()\n        .textFieldStyle(.roundedBorder)\n        NavigationLink(destination: OptionsListView(\n          openAIService: OpenAIServiceFactory.service(apiKey: apiKey, organizationID: localOrganizationID, debugEnabled: true),\n          options: OptionsListView.APIOption.allCases.filter { $0 != .localChat }))\n        {\n          Text(\"Continue\")\n            .padding()\n            .padding(.horizontal, 48)\n            .foregroundColor(.white)\n            .background(\n              Capsule()\n                .foregroundColor(apiKey.isEmpty ? .gray.opacity(0.2) : Color(red: 64 / 255, green: 195 / 255, blue: 125 / 255)))\n        }\n        .disabled(apiKey.isEmpty)\n        Spacer()\n        Group {\n          Text(\"If you don't have a valid API KEY yet, you can visit \") +\n            Text(\"[this link](https://platform.openai.com/account/api-keys)\") + Text(\" to get started.\")\n        }\n        .font(.caption)\n      }\n      .padding()\n      .navigationTitle(\"Enter OpenAI API KEY\")\n    }\n  }\n\n  @State private var apiKey = \"\"\n  @State private var organizationIdentifier = \"\"\n  @State private var localOrganizationID: String? = nil\n}\n\n#Preview {\n  ApiKeyIntroView()\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assets.xcassets/AccentColor.colorset/Contents.json",
    "content": "{\n  \"colors\" : [\n    {\n      \"idiom\" : \"universal\"\n    }\n  ],\n  \"info\" : {\n    \"author\" : \"xcode\",\n    \"version\" : 1\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assets.xcassets/AppIcon.appiconset/Contents.json",
    "content": "{\n  \"images\" : [\n    {\n      \"idiom\" : \"universal\",\n      \"platform\" : \"ios\",\n      \"size\" : \"1024x1024\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"1x\",\n      \"size\" : \"16x16\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"2x\",\n      \"size\" : \"16x16\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"1x\",\n      \"size\" : \"32x32\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"2x\",\n      \"size\" : \"32x32\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"1x\",\n      \"size\" : \"128x128\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"2x\",\n      \"size\" : \"128x128\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"1x\",\n      \"size\" : \"256x256\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"2x\",\n      \"size\" : \"256x256\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"1x\",\n      \"size\" : \"512x512\"\n    },\n    {\n      \"idiom\" : \"mac\",\n      \"scale\" : \"2x\",\n      \"size\" : \"512x512\"\n    }\n  ],\n  \"info\" : {\n    \"author\" : \"xcode\",\n    \"version\" : 1\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assets.xcassets/Contents.json",
    "content": "{\n  \"info\" : {\n    \"author\" : \"xcode\",\n    \"version\" : 1\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assistants/AssistantConfigurationDemoView.swift",
    "content": "//\n//  AssistantConfigurationDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/17/23.\n//\n\nimport Foundation\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - AssistantFunctionCallDefinition\n\nenum AssistantFunctionCallDefinition: String, CaseIterable {\n  case createImage = \"create_image\"\n\n  var functionTool: AssistantObject.Tool {\n    switch self {\n    case .createImage:\n      .init(type: .function, function: .init(\n        name: rawValue,\n        strict: nil,\n        description: \"call this function if the request asks to generate an image\",\n        parameters: .init(\n          type: .object,\n          properties: [\n            \"prompt\": .init(type: .string, description: \"The exact prompt passed in.\"),\n            \"count\": .init(type: .integer, description: \"The number of images requested\"),\n          ],\n          required: [\"prompt\", \"count\"])))\n    }\n  }\n}\n\n// MARK: - AssistantConfigurationDemoView\n\nstruct AssistantConfigurationDemoView: View {\n  init(service: OpenAIService) {\n    self.service = service\n    _provider = State(initialValue: AssistantConfigurationProvider(service: service))\n  }\n\n  var isCodeInterpreterOn: Binding<Bool> {\n    Binding(\n      get: {\n        parameters.tools.contains { $0.displayToolType == .codeInterpreter } == true\n      },\n      set: { newValue in\n        if newValue {\n          parameters.tools.append(AssistantObject.Tool(type: .codeInterpreter))\n        } else {\n          parameters.tools.removeAll { $0.displayToolType == .codeInterpreter }\n        }\n      })\n  }\n\n  var isDalleToolOn: Binding<Bool> {\n    Binding(\n      get: {\n        parameters.tools.contains { $0.displayToolType == .function } == true\n      },\n      set: { newValue in\n        if newValue {\n          parameters.tools.append(AssistantFunctionCallDefinition.createImage.functionTool)\n        } else {\n          parameters.tools.removeAll { 
$0.displayToolType == .function }\n        }\n      })\n  }\n\n  var isFileSearchOn: Binding<Bool> {\n    Binding(\n      get: {\n        parameters.tools.contains { $0.displayToolType == .fileSearch } == true\n      },\n      set: { newValue in\n        if newValue {\n          parameters.tools.append(AssistantObject.Tool(type: .fileSearch))\n        } else {\n          parameters.tools.removeAll { $0.displayToolType == .fileSearch }\n        }\n      })\n  }\n\n  var body: some View {\n    ScrollView {\n      VStack(spacing: 16) {\n        avatarView\n        inputViews\n        capabilities\n        footerActions\n        knowledge\n      }\n      .padding()\n    }.sheet(isPresented: $showAvatarFlow) {\n      AssistantsListDemoView(assistants: provider.assistants, service: service)\n    }\n  }\n\n  var footerActions: some View {\n    HStack {\n      Button(\"Save\") {\n        Task {\n          try await provider.createAssistant(parameters: parameters)\n        }\n      }\n      Button(\"Delete\") {\n        Task {\n          for assistant in provider.assistants {\n            try await provider.deleteAssistant(id: assistant.id)\n          }\n        }\n      }\n      Button(\"Show Assistants\") {\n        Task {\n          try await provider.listAssistants()\n          showAvatarFlow = true\n        }\n      }\n    }\n    .buttonStyle(.borderedProminent)\n  }\n\n  @ViewBuilder\n  var avatarView: some View {\n    if isAvatarLoading {\n      Circle()\n        .stroke(.gray, style: StrokeStyle(lineWidth: 1, dash: [5, 5]))\n        .frame(width: 100, height: 100)\n        .overlay(\n          Image(systemName: \"rays\")\n            .resizable()\n            .frame(width: 20, height: 20)\n            .tint(.gray)\n            .symbolEffect(.variableColor.iterative.dimInactiveLayers))\n    } else if let avatarURL = provider.avatarURL {\n      URLImageView(url: avatarURL)\n        .clipShape(Circle())\n        .overlay(Circle().stroke(Color.white, lineWidth: 1))\n    
    .shadow(radius: 10)\n    } else {\n      Circle()\n        .stroke(.gray, style: StrokeStyle(lineWidth: 1, dash: [5, 5]))\n        .frame(width: 100, height: 100)\n        .overlay(\n          Menu(content: {\n            Button {\n              Task {\n                isAvatarLoading = true\n                defer { isAvatarLoading = false } // ensure isAvatarLoading is reset to false when the task finishes\n                let prompt = parameters.description ?? \"Some random image for an avatar\"\n                try await provider.createAvatar(prompt: prompt)\n              }\n            } label: {\n              Text(\"Use DALL·E\")\n            }\n          }, label: {\n            Image(systemName: \"plus\")\n              .resizable()\n              .frame(width: 20, height: 20)\n              .tint(.gray)\n          }))\n    }\n  }\n\n  var inputViews: some View {\n    VStack(spacing: 16) {\n      InputView(title: \"Name\") {\n        TextField(\"\", text: $parameters.name.orEmpty, axis: .vertical)\n      }\n      InputView(title: \"Description\") {\n        TextField(\"\", text: $parameters.description.orEmpty, axis: .vertical)\n      }\n      InputView(title: \"Instructions\") {\n        ZStack {\n          RoundedRectangle(cornerRadius: 4)\n            .stroke(.gray.opacity(0.3))\n          TextEditor(text: $parameters.instructions.orEmpty)\n            .foregroundStyle(.primary)\n            .clipShape(RoundedRectangle(cornerRadius: 4))\n            .frame(minHeight: 100)\n        }\n      }\n    }\n    .textFieldStyle(.roundedBorder)\n  }\n\n  var capabilities: some View {\n    InputView(title: \"Capabilities\") {\n      VStack(spacing: 16) {\n        CheckboxRow(title: \"Code interpreter\", isChecked: isCodeInterpreterOn)\n        CheckboxRow(title: \"File Search\", isChecked: isFileSearchOn)\n        CheckboxRow(title: \"DALL·E Image Generation\", isChecked: isDalleToolOn)\n      }\n    }\n    .inputViewStyle(.init(verticalPadding: 16.0))\n  }\n\n  // TODO: Add a demo 
to create a vector store and add files in to it.\n  var knowledge: some View {\n    FilesPicker(\n      service: service,\n      sectionTitle: \"Knowledge\",\n      actionTitle: \"Upload files\",\n      fileIDS: $fileIDS,\n      actions: $filePickerInitialActions)\n  }\n\n  @State private var provider: AssistantConfigurationProvider\n  @State private var parameters = AssistantParameters(action: .create(model: Model.gpt41106Preview.value))\n  @State private var isAvatarLoading = false\n  @State private var showAvatarFlow = false\n  @State private var fileIDS = [String]()\n  /// Used mostly to display already uploaded files if any.\n  @State private var filePickerInitialActions = [FilePickerAction]()\n\n  private let service: OpenAIService\n}\n\nextension Binding where Value == String? {\n  var orEmpty: Binding<String> {\n    Binding<String>(\n      get: { self.wrappedValue ?? \"\" },\n      set: { self.wrappedValue = $0 })\n  }\n}\n\n#Preview {\n  AssistantConfigurationDemoView(service: OpenAIServiceFactory.service(apiKey: \"\"))\n}\n\n// MARK: - InputView\n\nstruct InputView<Content: View>: View {\n  let content: Content\n  let title: String\n\n  init(title: String, @ViewBuilder content: () -> Content) {\n    self.title = title\n    self.content = content()\n  }\n\n  var body: some View {\n    VStack(alignment: .leading, spacing: style.verticalPadding) {\n      Text(title)\n        .font(.headline)\n      content\n    }\n  }\n\n  @Environment(\\.inputViewStyle) private var style: InputViewStyle\n}\n\n// MARK: - InputViewStyle\n\nstruct InputViewStyle {\n  let verticalPadding: CGFloat\n\n  init(verticalPadding: CGFloat = 8.0) {\n    self.verticalPadding = verticalPadding\n  }\n}\n\n// MARK: - InputViewStyleKey\n\nstruct InputViewStyleKey: EnvironmentKey {\n  static let defaultValue = InputViewStyle()\n}\n\nextension EnvironmentValues {\n  var inputViewStyle: InputViewStyle {\n    get { self[InputViewStyleKey.self] }\n    set { self[InputViewStyleKey.self] = newValue 
}\n  }\n}\n\nextension View {\n  func inputViewStyle(_ style: InputViewStyle) -> some View {\n    environment(\\.inputViewStyle, style)\n  }\n}\n\n// MARK: - CheckboxView\n\nstruct CheckboxView: View {\n  @Binding var isChecked: Bool\n\n  var body: some View {\n    Button(action: {\n      withAnimation {\n        isChecked.toggle()\n      }\n    }) {\n      Image(systemName: isChecked ? \"checkmark.square\" : \"square\")\n    }\n    .buttonStyle(PlainButtonStyle())\n  }\n}\n\n// MARK: - CheckboxRow\n\nstruct CheckboxRow: View {\n  let title: String\n  @Binding var isChecked: Bool\n\n  var body: some View {\n    HStack {\n      CheckboxView(isChecked: $isChecked)\n      Text(title)\n      Spacer()\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assistants/AssistantConfigurationProvider.swift",
    "content": "//\n//  AssistantConfigurationProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/17/23.\n//\n\nimport Foundation\nimport SwiftOpenAI\n\n@Observable\nclass AssistantConfigurationProvider {\n  // MARK: - Initializer\n\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var assistant: AssistantObject?\n  var assistants = [AssistantObject]()\n  var avatarURL: URL?\n  var assistantDeletionStatus: DeletionStatus?\n\n  func listAssistants()\n    async throws\n  {\n    do {\n      let assistants = try await service.listAssistants(limit: nil, order: nil, after: nil, before: nil)\n      self.assistants = assistants.data\n    } catch {\n      debugPrint(\"\\(error)\")\n    }\n  }\n\n  func deleteAssistant(\n    id: String)\n    async throws\n  {\n    do {\n      assistantDeletionStatus = try await service.deleteAssistant(id: id)\n    } catch {\n      debugPrint(\"\\(error)\")\n    }\n  }\n\n  func createAssistant(\n    parameters: AssistantParameters)\n    async throws\n  {\n    do {\n      assistant = try await service.createAssistant(parameters: parameters)\n    } catch {\n      debugPrint(\"\\(error)\")\n    }\n  }\n\n  func createAvatar(\n    prompt: String)\n    async throws\n  {\n    do {\n      let avatarURLs = try await service.createImages(parameters: .init(prompt: prompt, model: .dallE3)).data?.compactMap(\\.url)\n      avatarURL = URL(string: avatarURLs?.first ?? \"\")\n    } catch {\n      debugPrint(\"\\(error)\")\n    }\n  }\n\n  // TODO: Create demo for this.\n  func createVStore() async throws {\n    _ = try await service.createVectorStore(parameters: .init(name: \"Personal Data\"))\n  }\n\n  // MARK: - Private Properties\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assistants/AssistantStreamDemoScreen.swift",
    "content": "//\n//  AssistantStreamDemoScreen.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 3/19/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - AssistantStartThreadScreen\n\npublic struct AssistantStartThreadScreen: View {\n  init(assistant: AssistantObject, service: OpenAIService) {\n    self.assistant = assistant\n    self.service = service\n    _threadProvider = State(initialValue: AssistantThreadConfigurationProvider(service: service))\n  }\n\n  public var body: some View {\n    ScrollView {\n      VStack {\n        Text(assistant.name ?? \"No name\")\n          .font(.largeTitle).bold()\n        Text(\"For function call demo type: Create an image of a cow.\")\n          .font(.caption)\n        switch tutorialStage {\n        case .crateThread:\n          createThreadView\n\n        case .createMessage(let threadID):\n          createMessageView(threadID: threadID)\n\n        case .createRunAndStream(let message):\n          createRunAndStreamView(threadID: message.threadID)\n\n        case .showStream(let threadID):\n          showStreamView(threadID: threadID)\n        }\n      }\n      .padding()\n    }\n  }\n\n  enum TutorialState {\n    case crateThread\n    case createMessage(threadID: String)\n    case createRunAndStream(message: MessageObject)\n    case showStream(threadID: String)\n  }\n\n  let assistant: AssistantObject\n  let service: OpenAIService\n\n  var createThreadView: some View {\n    Button {\n      Task {\n        try await threadProvider.createThread()\n        if let threadID = threadProvider.thread?.id {\n          tutorialStage = .createMessage(threadID: threadID)\n        }\n      }\n    } label: {\n      Text(\"Step 1: Create a thread\")\n    }\n  }\n\n  func createMessageView(threadID: String) -> some View {\n    VStack(alignment: .leading, spacing: 20) {\n      Text(\"Nice! 
Thread created id:\")\n        .font(.title).bold()\n      Text(\"\\(threadID)\")\n      Text(\"Step 2: Create a message in the text field and press the button ✈️\").font(.title)\n      Text(\"eg: Briefly explain SwiftUI state.\")\n      HStack(spacing: 4) {\n        TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n          .textFieldStyle(.roundedBorder)\n          .padding()\n        Button {\n          Task {\n            try await threadProvider.createMessage(\n              threadID: threadID,\n              parameters: .init(role: .user, content: .stringContent(prompt)))\n            if let message = threadProvider.message {\n              tutorialStage = .createRunAndStream(message: message)\n            }\n          }\n        } label: {\n          Image(systemName: \"paperplane\")\n        }\n      }\n    }\n    .padding()\n  }\n\n  func createRunAndStreamView(threadID: String) -> some View {\n    VStack(spacing: 20) {\n      Text(\"Nice! Message created with id:\")\n        .font(.title2).bold()\n      Text(\"\\(threadID)\")\n        .font(.body)\n      Text(\"Step 3: Run and Stream the message\")\n        .font(.title2)\n\n      Button {\n        Task {\n          tutorialStage = .showStream(threadID: threadID)\n          try await threadProvider.createRunAndStreamMessage(\n            threadID: threadID,\n            parameters: .init(assistantID: assistant.id))\n        }\n      } label: {\n        Text(\"Run and Stream the message\")\n      }\n      .buttonStyle(.borderedProminent)\n      ChatStreamView(provider: threadProvider, prompt: prompt, assistantName: assistant.name)\n    }\n  }\n\n  func showStreamView(threadID: String) -> some View {\n    VStack {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          try await threadProvider.createMessage(\n            threadID: threadID,\n            parameters: .init(role: 
.user, content: .stringContent(prompt)))\n          threadProvider.messageText = \"\"\n          threadProvider.toolOuptutMessage = \"\"\n          try await threadProvider.createRunAndStreamMessage(\n            threadID: threadID,\n            parameters: .init(assistantID: assistant.id))\n        }\n      } label: {\n        Text(\"Run and Stream the message\")\n      }\n      .buttonStyle(.borderedProminent)\n      ChatStreamView(provider: threadProvider, prompt: prompt, assistantName: assistant.name)\n    }\n  }\n\n  @State private var threadProvider: AssistantThreadConfigurationProvider\n  @State private var prompt = \"\"\n\n  @State private var tutorialStage = TutorialState.crateThread\n}\n\n// MARK: - ChatStreamView\n\nstruct ChatStreamView: View {\n  let provider: AssistantThreadConfigurationProvider\n  let prompt: String\n  let assistantName: String?\n\n  var body: some View {\n    VStack(spacing: 24) {\n      VStack(alignment: .leading, spacing: 16) {\n        Text(\"User:\")\n          .font(.title2)\n          .bold()\n        Text(prompt)\n      }\n      .frame(maxWidth: .infinity, alignment: .leading)\n\n      VStack(alignment: .leading, spacing: 16) {\n        Text(\"\\(assistantName ?? 
\"Assistant\"):\")\n          .font(.title2)\n          .bold()\n        if !provider.toolOuptutMessage.isEmpty {\n          Text(\"Code Interpreter\")\n            .foregroundColor(.mint)\n            .fontDesign(.monospaced)\n            .bold()\n            .font(.title3)\n          Text(LocalizedStringKey(provider.toolOuptutMessage))\n            .fontDesign(.monospaced)\n        }\n        if !provider.messageText.isEmpty {\n          Text(\"Message\")\n            .font(.title3)\n            .foregroundColor(.mint)\n            .fontDesign(.monospaced)\n            .bold()\n          Text(provider.messageText)\n            .font(.body)\n        }\n        if !provider.functionCallOutput.isEmpty {\n          Text(\"Function Call\")\n            .font(.title3)\n            .foregroundColor(.pink)\n            .fontDesign(.monospaced)\n            .bold()\n          Text(provider.functionCallOutput)\n            .font(.body)\n        }\n      }\n      .frame(maxWidth: .infinity, alignment: .leading)\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assistants/AssistantThreadConfigurationProvider.swift",
    "content": "//\n//  AssistantThreadConfigurationProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 3/19/24.\n//\n\nimport Foundation\nimport SwiftOpenAI\n\n@Observable\nclass AssistantThreadConfigurationProvider {\n  // MARK: - Initializer\n\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var thread: ThreadObject?\n  var message: MessageObject?\n  var runObject: RunObject?\n  var messageText = \"\"\n  var toolOuptutMessage = \"\"\n  var functionCallOutput = \"\"\n\n  func createThread()\n    async throws\n  {\n    do {\n      thread = try await service.createThread(parameters: .init())\n    } catch {\n      print(\"THREAD ERROR: \\(error)\")\n    }\n  }\n\n  func createMessage(\n    threadID: String,\n    parameters: MessageParameter)\n    async throws\n  {\n    do {\n      message = try await service.createMessage(threadID: threadID, parameters: parameters)\n    } catch {\n      print(\"THREAD ERROR: \\(error)\")\n    }\n  }\n\n  func createRunAndStreamMessage(\n    threadID: String,\n    parameters: RunParameter)\n    async throws\n  {\n    do {\n      let stream = try await service.createRunStream(threadID: threadID, parameters: parameters)\n      for try await result in stream {\n        switch result {\n        case .threadMessageDelta(let messageDelta):\n          let content = messageDelta.delta.content.first\n          switch content {\n          case .imageFile, .imageUrl, nil:\n            break\n          case .text(let textContent):\n            messageText += textContent.text.value\n          }\n\n        case .threadRunStepDelta(let runStepDelta):\n          let toolCall = runStepDelta.delta.stepDetails.toolCalls?.first?.toolCall\n          switch toolCall {\n          case .codeInterpreterToolCall(let toolCall):\n            toolOuptutMessage += toolCall.input ?? 
\"\"\n          case .fileSearchToolCall(let toolCall):\n            print(\"PROVIDER: File search tool call \\(toolCall)\")\n          case .functionToolCall(let toolCall):\n            functionCallOutput += toolCall.arguments\n          case nil:\n            print(\"PROVIDER: tool call nil\")\n          }\n\n        case .threadRunCompleted(let runObject):\n          print(\"PROVIDER: the run is completed - \\(runObject)\")\n\n        default: break\n        }\n      }\n    } catch {\n      print(\"THREAD ERROR: \\(error)\")\n    }\n  }\n\n  // MARK: - Private Properties\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Assistants/AssistantsListDemoView.swift",
    "content": "//\n//  AssistantsListDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 3/19/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - AssistantObject + Identifiable\n\nextension AssistantObject: Identifiable { }\n\n// MARK: - AssistantsListDemoView\n\npublic struct AssistantsListDemoView: View {\n  public var body: some View {\n    NavigationView {\n      ForEach(assistants) { assistant in\n        NavigationLink(destination: AssistantStartThreadScreen(assistant: assistant, service: service)) {\n          VStack(alignment: .leading) {\n            Text(assistant.name ?? \"No name\")\n              .font(.title).bold()\n            Text(assistant.description ?? \"No Description\")\n              .font(.subheadline).fontWeight(.medium)\n            Text(assistant.id)\n              .font(.caption).fontWeight(.bold)\n          }\n          .padding()\n          .frame(maxWidth: .infinity, alignment: .leading)\n          .background {\n            RoundedRectangle(cornerRadius: 25.0)\n              .fill(.mint)\n          }\n          .padding()\n        }\n      }\n    }\n  }\n\n  let assistants: [AssistantObject]\n  let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/AudioDemo/AudioDemoView.swift",
    "content": "//\n//  AudioDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct AudioDemoView: View {\n  init(service: OpenAIService) {\n    _audioProvider = State(initialValue: AudioProvider(service: service))\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter message to convert to speech\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the\n          try await audioProvider.speech(parameters: .init(model: .tts1, input: prompt, voice: .shimmer))\n        }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  var transcriptionView: some View {\n    VStack {\n      Text(\"Tap this button to use the transcript API, a `m4a` file has been added to the app's bundle.\")\n        .font(.callout)\n        .padding()\n      Button(\"Transcript\") {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the function exits\n          /// ['flac', 'm4a', 'mp3', 'mp4', 'mpeg', 'mpga', 'oga', 'ogg', 'wav', 'webm'] (supported formats)\n          let data = try contentLoader.loadBundledContent(fromFileNamed: \"narcos\", ext: \"m4a\")\n          try await audioProvider.transcript(parameters: .init(fileName: \"narcos.m4a\", file: data))\n        }\n      }\n      .buttonStyle(.borderedProminent)\n      Text(audioProvider.transcription)\n        .padding()\n    }\n  }\n\n  var translationView: some View {\n    VStack {\n      Text(\"Tap this button to use the translationView API, a `m4a` file in German has been added to the app's bundle.\")\n        .font(.callout)\n        .padding()\n      Button(\"Translate\") {\n        Task 
{\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the function exits\n          /// ['flac', 'm4a', 'mp3', 'mp4', 'mpeg', 'mpga', 'oga', 'ogg', 'wav', 'webm'] (supported formats)\n          let data = try contentLoader.loadBundledContent(fromFileNamed: \"german\", ext: \"m4a\")\n          try await audioProvider.translate(parameters: .init(fileName: \"german.m4a\", file: data))\n        }\n      }\n      .buttonStyle(.borderedProminent)\n      Text(audioProvider.translation)\n        .padding()\n    }\n  }\n\n  var body: some View {\n    ScrollView {\n      VStack {\n        VStack {\n          Text(\"Add a text to convert to speech\")\n          textArea\n        }\n        transcriptionView\n          .padding()\n        Divider()\n        translationView\n          .padding()\n      }\n    }.overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n      .safeAreaPadding()\n  }\n\n  @State private var audioProvider: AudioProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n\n  private let contentLoader = ContentLoader()\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/AudioDemo/AudioProvider.swift",
    "content": "//\n//  AudioProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport AVFoundation\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass AudioProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var transcription = \"\"\n  var translation = \"\"\n  var speechErrorMessage = \"\"\n  var audioPlayer: AVAudioPlayer?\n\n  func transcript(\n    parameters: AudioTranscriptionParameters)\n    async throws\n  {\n    do {\n      transcription = try await service.createTranscription(parameters: parameters).text\n    } catch {\n      transcription = \"\\(error)\"\n    }\n  }\n\n  func translate(\n    parameters: AudioTranslationParameters)\n    async throws\n  {\n    do {\n      translation = try await service.createTranslation(parameters: parameters).text\n    } catch {\n      translation = \"\\(error)\"\n    }\n  }\n\n  func speech(\n    parameters: AudioSpeechParameters)\n    async throws\n  {\n    do {\n      let speech = try await service.createSpeech(parameters: parameters).output\n      playAudio(from: speech)\n    } catch let error as APIError {\n      speechErrorMessage = error.displayDescription\n    } catch {\n      speechErrorMessage = \"\\(error)\"\n    }\n  }\n\n  private let service: OpenAIService\n\n  private func playAudio(from data: Data) {\n    do {\n      // Initialize the audio player with the data\n      audioPlayer = try AVAudioPlayer(data: data)\n      audioPlayer?.prepareToPlay()\n      audioPlayer?.play()\n    } catch {\n      // Handle errors\n      print(\"Error playing audio: \\(error.localizedDescription)\")\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatDemo/ChatDemoView.swift",
    "content": "//\n//  ChatDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ChatDemoView: View {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.customModel = customModel\n    _chatProvider = State(initialValue: ChatProvider(service: service))\n  }\n\n  enum ChatConfig {\n    case chatCompletion\n    case chatCompeltionStream\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollView {\n      VStack {\n        picker\n        textArea\n        Text(chatProvider.errorMessage)\n          .foregroundColor(.red)\n        switch selectedSegment {\n        case .chatCompeltionStream:\n          streamedChatResultView\n        case .chatCompletion:\n          chatCompletionResultView\n        }\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  var picker: some View {\n    Picker(\"Options\", selection: $selectedSegment) {\n      Text(\"Chat Completion\").tag(ChatConfig.chatCompletion)\n      Text(\"Chat Completion stream\").tag(ChatConfig.chatCompeltionStream)\n    }\n    .pickerStyle(SegmentedPickerStyle())\n    .padding()\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the\n\n          let content = ChatCompletionParameters.Message.ContentType.text(prompt)\n          prompt = \"\"\n          let model: Model =\n            if let customModel, !customModel.isEmpty {\n              .custom(customModel)\n            } else {\n              .gpt4o\n            }\n\n          let parameters = ChatCompletionParameters(\n            messages: [.init(\n 
             role: .user,\n              content: content)],\n            model: model)\n          switch selectedSegment {\n          case .chatCompletion:\n            try await chatProvider.startChat(parameters: parameters)\n          case .chatCompeltionStream:\n            try await chatProvider.startStreamedChat(parameters: parameters)\n          }\n        }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  /// stream = `false`\n  var chatCompletionResultView: some View {\n    ForEach(Array(chatProvider.messages.enumerated()), id: \\.offset) { _, val in\n      VStack(spacing: 0) {\n        Text(\"\\(val)\")\n      }\n    }\n  }\n\n  /// stream = `true`\n  var streamedChatResultView: some View {\n    VStack {\n      Button(\"Cancel stream\") {\n        chatProvider.cancelStream()\n      }\n      Text(chatProvider.message)\n    }\n  }\n\n  @State private var chatProvider: ChatProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var selectedSegment = ChatConfig.chatCompeltionStream\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatDemo/ChatProvider.swift",
    "content": "//\n//  ChatProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass ChatProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var messages = [String]()\n  var errorMessage = \"\"\n  var message = \"\"\n  var usage: ChatUsage?\n\n  func startChat(\n    parameters: ChatCompletionParameters)\n    async throws\n  {\n    do {\n      let response = try await service.startChat(parameters: parameters)\n      let choices = response.choices\n      let chatUsage = response.usage\n      let logprobs = choices?.compactMap(\\.logprobs)\n      dump(logprobs)\n      messages = choices?.compactMap(\\.message?.content) ?? []\n      dump(chatUsage)\n      usage = chatUsage\n    } catch APIError.responseUnsuccessful(let description, let statusCode) {\n      self.errorMessage = \"Network error with status code: \\(statusCode) and description: \\(description)\"\n    } catch {\n      errorMessage = error.localizedDescription\n    }\n  }\n\n  func startStreamedChat(\n    parameters: ChatCompletionParameters)\n    async throws\n  {\n    streamTask = Task {\n      do {\n        let stream = try await service.startStreamedChat(parameters: parameters)\n        for try await result in stream {\n          let content = result.choices?.first?.delta?.content ?? \"\"\n          self.message += content\n        }\n      } catch APIError.responseUnsuccessful(let description, let statusCode) {\n        self.errorMessage = \"Network error with status code: \\(statusCode) and description: \\(description)\"\n      } catch {\n        self.errorMessage = error.localizedDescription\n      }\n    }\n  }\n\n  func cancelStream() {\n    streamTask?.cancel()\n  }\n\n  private let service: OpenAIService\n  private var streamTask: Task<Void, Never>?\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall/ChatMessageDisplayModel.swift",
    "content": "//\n//  ChatMessageDisplayModel.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/13/23.\n//\n\nimport Foundation\nimport SwiftOpenAI\n\nstruct ChatMessageDisplayModel: Identifiable {\n  init(\n    id: UUID = UUID(),\n    content: DisplayContent,\n    origin: MessageOrigin)\n  {\n    self.id = id\n    self.content = content\n    self.origin = origin\n  }\n\n  enum DisplayContent: Equatable {\n    case content(DisplayMessageType)\n    case error(String)\n\n    static func ==(lhs: DisplayContent, rhs: DisplayContent) -> Bool {\n      switch (lhs, rhs) {\n      case (.content(let a), .content(let b)):\n        a == b\n      case (.error(let a), .error(let b)):\n        a == b\n      default:\n        false\n      }\n    }\n\n    struct DisplayMessageType: Equatable {\n      var text: String?\n      var urls: [URL]? = nil\n    }\n  }\n\n  enum MessageOrigin {\n    case received(ReceivedSource)\n    case sent\n\n    enum ReceivedSource {\n      case gpt\n      case dalle\n    }\n  }\n\n  let id: UUID\n  var content: DisplayContent\n  let origin: MessageOrigin\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall/ChatMessageView.swift",
    "content": "//\n//  ChatMessageView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/13/23.\n//\n\nimport Foundation\nimport SwiftUI\n\nstruct ChatMessageView: View {\n  let message: ChatMessageDisplayModel\n\n  @ViewBuilder\n  var header: some View {\n    switch message.origin {\n    case .received(let source):\n      switch source {\n      case .gpt:\n        headerWith(\"wand.and.stars\", title: \"CHATGPT\")\n      case .dalle:\n        EmptyView()\n      }\n\n    case .sent:\n      headerWith(\"person.circle\", title: \"USER\")\n    }\n  }\n\n  var body: some View {\n    VStack(alignment: .leading, spacing: 8) {\n      header\n      Group {\n        switch message.content {\n        case .content(let mediaType):\n          VStack(alignment: .leading, spacing: Sizes.spacingMedium) {\n            imagesFrom(urls: mediaType.urls ?? [])\n            chatMessageViewWith(mediaType.text)\n          }\n          .transition(.opacity)\n\n        case .error(let error):\n          Text(error)\n            .padding()\n            .font(.callout)\n            .background(\n              RoundedRectangle(cornerRadius: 20)\n                .foregroundColor(.red.opacity(0.7)))\n        }\n      }\n      .padding(.leading, 23)\n    }\n  }\n\n  @ViewBuilder\n  func chatMessageViewWith(\n    _ text: String?)\n    -> some View\n  {\n    if let text {\n      if text.isEmpty {\n        LoadingView()\n      } else {\n        Text(text)\n          .font(.body)\n      }\n    } else {\n      EmptyView()\n    }\n  }\n\n  func headerWith(\n    _ systemImageName: String,\n    title: String)\n    -> some View\n  {\n    HStack {\n      Image(systemName: systemImageName)\n        .resizable()\n        .frame(width: 16, height: 16)\n      Text(title)\n        .font(.caption2)\n    }\n    .foregroundColor(.gray.opacity(0.9))\n  }\n\n  func imagesFrom(\n    urls: [URL])\n    -> some View\n  {\n    ScrollView(.horizontal, showsIndicators: false) {\n      
HStack(spacing: 8) {\n        ForEach(urls, id: \\.self) { url in\n          URLImageView(url: url)\n        }\n      }\n    }\n  }\n}\n\n#Preview {\n  VStack {\n    ChatMessageView(message: .init(\n      content: .content(.init(text: \"What is the capital of Peru? and what is the population\")),\n      origin: .sent))\n    ChatMessageView(message: .init(content: .content(.init(text: \"Lima, and its 28 million inhabitants.\")), origin: .received(.gpt)))\n    ChatMessageView(message: .init(\n      content: .content(.init(\n        text: \"The image you requested is ready 🐱\",\n        urls: [\n          URL(\n            string: \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\")!,\n        ])),\n      origin: .received(.dalle)))\n    ChatMessageView(message: .init(content: .content(.init(text: \"\")), origin: .received(.gpt)))\n  }\n  .padding()\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall/Completion/ChatFunctionCallDemoView.swift",
    "content": "//\n//  ChatFunctionCallDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/14/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ChatFunctionCallDemoView: View {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.customModel = customModel\n    _chatProvider = State(initialValue: ChatFunctionCallProvider(service: service, customModel: customModel))\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollViewReader { proxy in\n      VStack {\n        List(chatProvider.chatDisplayMessages) { message in\n          ChatMessageView(message: message)\n            .listRowSeparator(.hidden)\n        }\n        .listStyle(.plain)\n        .onChange(of: chatProvider.chatDisplayMessages.last?.content) {\n          let lastMessage = chatProvider.chatDisplayMessages.last\n          if let id = lastMessage?.id {\n            proxy.scrollTo(id, anchor: .bottom)\n          }\n        }\n        textArea\n      }\n    }\n  }\n\n  var textArea: some View {\n    HStack(spacing: 0) {\n      VStack(alignment: .leading, spacing: 0) {\n        textField\n          .padding(.vertical, Sizes.spacingExtraSmall)\n          .padding(.horizontal, Sizes.spacingSmall)\n      }\n      .padding(.vertical, Sizes.spacingExtraSmall)\n      .padding(.horizontal, Sizes.spacingExtraSmall)\n      .background(\n        RoundedRectangle(cornerRadius: 20)\n          .stroke(.gray, lineWidth: 1))\n      .padding(.horizontal, Sizes.spacingMedium)\n      textAreSendButton\n    }\n    .padding(.horizontal)\n    .disabled(isLoading)\n  }\n\n  var textField: some View {\n    TextField(\n      \"How Can I help you today?\",\n      text: $prompt,\n      axis: .vertical)\n  }\n\n  var textAreSendButton: some View {\n    Button {\n      Task {\n        /// Loading UI\n        isLoading = true\n        defer { isLoading = false }\n        // Clears text field.\n        let userPrompt = prompt\n        prompt = \"\"\n        try 
await chatProvider.startChat(prompt: userPrompt)\n      }\n    } label: {\n      Image(systemName: \"paperplane\")\n    }\n    .buttonStyle(.bordered)\n    .tint(ThemeColor.tintColor)\n    .disabled(prompt.isEmpty)\n  }\n\n  @State private var chatProvider: ChatFunctionCallProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall/Completion/ChatFunctionCallProvider.swift",
    "content": "//\n//  ChatFunctionCallProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/14/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - FunctionCallDefinition\n\nenum FunctionCallDefinition: String, CaseIterable {\n  case createImage = \"create_image\"\n\n  // Add more functions if needed, parallel function calling is supported.\n\n  var functionTool: ChatCompletionParameters.Tool {\n    switch self {\n    case .createImage:\n      .init(function: .init(\n        name: rawValue,\n        strict: nil,\n        description: \"call this function if the request asks to generate an image\",\n        parameters: .init(\n          type: .object,\n          properties: [\n            \"prompt\": .init(type: .string, description: \"The exact prompt passed in.\"),\n            \"count\": .init(type: .integer, description: \"The number of images requested\"),\n          ],\n          required: [\"prompt\", \"count\"])))\n    }\n  }\n}\n\n// MARK: - ChatFunctionCallProvider\n\n@Observable\nclass ChatFunctionCallProvider {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.service = service\n    self.customModel = customModel\n  }\n\n  // MARK: - Initializer\n\n  let customModel: String?\n\n  // MARK: - Public Properties\n\n  /// To be used for UI purposes.\n  var chatDisplayMessages = [ChatMessageDisplayModel]()\n\n  @MainActor\n  func generateImage(arguments: String) async throws -> String {\n    let dictionary = arguments.toDictionary()!\n    let prompt = dictionary[\"prompt\"] as! String\n\n    let assistantMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: \"Generating images...\")),\n      origin: .received(.gpt))\n    updateLastAssistantMessage(assistantMessage)\n\n    let urls = try await service.createImages(parameters: .init(prompt: prompt, model: .dallE3)).data?.compactMap(\\.url)\n      .compactMap { URL(string: $0) } ?? 
[]\n\n    let dalleAssistantMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: nil, urls: urls)),\n      origin: .received(.dalle))\n    updateLastAssistantMessage(dalleAssistantMessage)\n\n    return prompt\n  }\n\n  // MARK: - Public Methods\n\n  func startChat(\n    prompt: String)\n    async throws\n  {\n    defer {\n      chatMessageParameters = []\n    }\n\n    await startNewUserDisplayMessage(prompt)\n\n    await startNewAssistantEmptyDisplayMessage()\n\n    /// # Step 1: send the conversation and available functions to the model\n    let userMessage = createUserMessage(prompt)\n    chatMessageParameters.append(userMessage)\n\n    let tools = FunctionCallDefinition.allCases.map(\\.functionTool)\n\n    let model: Model =\n      if let customModel, !customModel.isEmpty {\n        .custom(customModel)\n      } else {\n        .gpt41106Preview\n      }\n\n    let parameters = ChatCompletionParameters(\n      messages: chatMessageParameters,\n      model: model,\n      toolChoice: ToolChoice.auto,\n      tools: tools)\n\n    do {\n      let chat = try await service.startChat(parameters: parameters)\n\n      guard let assistantMessage = chat.choices?.first?.message else { return }\n\n      let content = assistantMessage.content ?? 
\"\"\n\n      await updateLastAssistantMessage(.init(content: .content(.init(text: content)), origin: .received(.gpt)))\n\n      /// # Step 2: check if the model wanted to call a function\n      if let toolCalls = assistantMessage.toolCalls {\n        /// # Step 3: call the function\n        availableFunctions = [.createImage: generateImage(arguments:)]\n        // Append the `assistantMessage` in to the `chatMessageParameters` to extend the conversation\n        let parameterAssistantMessage = ChatCompletionParameters.Message(\n          role: .assistant,\n          content: .text(content), toolCalls: assistantMessage.toolCalls)\n\n        chatMessageParameters.append(parameterAssistantMessage)\n\n        /// # Step 4: send the info for each function call and function response to the model\n        for toolCall in toolCalls {\n          let name = toolCall.function.name\n          let id = toolCall.id\n          let functionToCall = availableFunctions[FunctionCallDefinition(rawValue: name!)!]!\n          let arguments = toolCall.function.arguments\n          let content = try await functionToCall(arguments)\n          let toolMessage = ChatCompletionParameters.Message(\n            role: .tool,\n            content: .text(content),\n            name: name,\n            toolCallID: id)\n          chatMessageParameters.append(toolMessage)\n        }\n\n        /// # get a new response from the model where it can see the function response\n        await continueChat()\n      }\n\n    } catch let error as APIError {\n      // If an error occurs, update the UI to display the error message.\n      await updateLastAssistantMessage(.init(content: .error(\"\\(error.displayDescription)\"), origin: .received(.gpt)))\n    }\n  }\n\n  func continueChat() async {\n    let systemMessage = ChatCompletionParameters.Message(\n      role: .system,\n      content: .text(\n        \"You are an artist powered by AI, if the messages has a tool message you will weight that bigger in 
order to create a response, and you are providing me an image, you always respond in readable language and never providing URLs of images, most of the times you add an emoji on your responses if makes sense, do not describe the image. also always offer more help\"))\n\n    chatMessageParameters.insert(systemMessage, at: 0)\n\n    let model: Model =\n      if let customModel, !customModel.isEmpty {\n        .custom(customModel)\n      } else {\n        .gpt41106Preview\n      }\n\n    let paramsForChat = ChatCompletionParameters(\n      messages: chatMessageParameters,\n      model: model)\n    do {\n      let chat = try await service.startChat(parameters: paramsForChat)\n      guard let assistantMessage = chat.choices?.first?.message else { return }\n      await updateLastAssistantMessage(.init(content: .content(.init(text: assistantMessage.content)), origin: .received(.gpt)))\n    } catch {\n      // If an error occurs, update the UI to display the error message.\n      await updateLastAssistantMessage(.init(content: .error(\"\\(error)\"), origin: .received(.gpt)))\n    }\n  }\n\n  func createUserMessage(\n    _ prompt: String)\n    -> ChatCompletionParameters.Message\n  {\n    .init(role: .user, content: .text(prompt))\n  }\n\n  // MARK: - Private Properties\n\n  private let service: OpenAIService\n  private var lastDisplayedMessageID: UUID?\n  /// To be used for a new request\n  private var chatMessageParameters = [ChatCompletionParameters.Message]()\n  private var availableFunctions = [FunctionCallDefinition: @MainActor (String) async throws -> String]()\n\n  // MARK: - Private Methods\n\n  @MainActor\n  private func startNewUserDisplayMessage(_ prompt: String) {\n    let startingMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: prompt)),\n      origin: .sent)\n    addMessage(startingMessage)\n  }\n\n  @MainActor\n  private func startNewAssistantEmptyDisplayMessage() {\n    let newMessage = ChatMessageDisplayModel(\n      content: 
.content(.init(text: \"\")),\n      origin: .received(.gpt))\n    addMessage(newMessage)\n  }\n\n  @MainActor\n  private func updateLastAssistantMessage(\n    _ message: ChatMessageDisplayModel)\n  {\n    guard let id = lastDisplayedMessageID, let index = chatDisplayMessages.firstIndex(where: { $0.id == id }) else { return }\n\n    var lastMessage = chatDisplayMessages[index]\n\n    switch message.content {\n    case .content(let newMedia):\n      switch lastMessage.content {\n      case .content(let lastMedia):\n        var updatedMedia = lastMedia\n        if\n          let newText = newMedia.text,\n          var lastMediaText = lastMedia.text\n        {\n          lastMediaText += newText\n          updatedMedia.text = lastMediaText\n        } else {\n          updatedMedia.text = \"\"\n        }\n        if let urls = newMedia.urls {\n          updatedMedia.urls = urls\n        }\n        lastMessage.content = .content(updatedMedia)\n\n      case .error:\n        break\n      }\n\n    case .error:\n      lastMessage.content = message.content\n    }\n\n    chatDisplayMessages[index] = ChatMessageDisplayModel(\n      id: id,\n      content: lastMessage.content,\n      origin: message.origin)\n  }\n\n  @MainActor\n  private func addMessage(_ message: ChatMessageDisplayModel) {\n    let newMessageId = message.id\n    lastDisplayedMessageID = newMessageId\n    withAnimation {\n      chatDisplayMessages.append(message)\n    }\n  }\n}\n\nextension String {\n  fileprivate func toDictionary() -> [String: Any]? {\n    guard let jsonData = data(using: .utf8) else {\n      print(\"Failed to convert JSON string to Data.\")\n      return nil\n    }\n\n    do {\n      return try JSONSerialization.jsonObject(with: jsonData, options: []) as? [String: Any]\n    } catch {\n      print(\"Failed to deserialize JSON: \\(error.localizedDescription)\")\n      return nil\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall/Stream/ChatFunctionsCallStreamProvider.swift",
    "content": "//\n//  ChatFunctionsCallStreamProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/6/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - FunctionCallStreamedResponse\n\n/// This is a demo in how to implement parallel function calling when using the completion API stream = true\n\nstruct FunctionCallStreamedResponse {\n  let name: String\n  let id: String\n  let toolCall: ToolCall\n  var argument: String\n}\n\n// MARK: - ChatFunctionsCallStreamProvider\n\n@Observable\nclass ChatFunctionsCallStreamProvider {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.service = service\n    self.customModel = customModel\n  }\n\n  // MARK: - Initializer\n\n  let customModel: String?\n\n  // MARK: - Public Properties\n\n  /// To be used for UI purposes.\n  var chatDisplayMessages = [ChatMessageDisplayModel]()\n\n  @MainActor\n  func generateImage(arguments: String) async throws -> String {\n    let dictionary = arguments.toDictionary()!\n    let prompt = dictionary[\"prompt\"] as! String\n    let count = (dictionary[\"count\"] as? Int) ?? 
1\n\n    // TODO: Improve the loading state\n    let assistantMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: \"Generating images...\")),\n      origin: .received(.gpt))\n    updateLastAssistantMessage(assistantMessage)\n\n    let urls = try await service.legacyCreateImages(\n      parameters: .init(prompt: prompt, model: .dalle2(.small), numberOfImages: count)).data.compactMap(\\.url)\n\n    let dalleAssistantMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: nil, urls: urls)),\n      origin: .received(.dalle))\n    updateLastAssistantMessage(dalleAssistantMessage)\n\n    return prompt\n  }\n\n  // MARK: - Public Methods\n\n  func chat(\n    prompt: String)\n    async throws\n  {\n    defer {\n      functionCallsMap = [:]\n      chatMessageParameters = []\n    }\n\n    await startNewUserDisplayMessage(prompt)\n\n    await startNewAssistantEmptyDisplayMessage()\n\n    let systemMessage = ChatCompletionParameters.Message(\n      role: .system,\n      content: .text(\n        \"You are an artist powered by AI, if the messages has a tool message you will weight that bigger in order to create a response, and you are providing me an image, you always respond in readable language and never providing URLs of images, most of the times you add an emoji on your responses if makes sense, do not describe the image. 
also always offer more help\"))\n    chatMessageParameters.append(systemMessage)\n\n    /// # Step 1: send the conversation and available functions to the model\n    let userMessage = createUserMessage(prompt)\n    chatMessageParameters.append(userMessage)\n\n    let tools = FunctionCallDefinition.allCases.map(\\.functionTool)\n\n    let model: Model =\n      if let customModel, !customModel.isEmpty {\n        .custom(customModel)\n      } else {\n        .gpt35Turbo1106\n      }\n\n    let parameters = ChatCompletionParameters(\n      messages: chatMessageParameters,\n      model: model,\n      toolChoice: ToolChoice.auto,\n      tools: tools)\n\n    do {\n      // Begin the chat stream with the updated parameters.\n      let stream = try await service.startStreamedChat(parameters: parameters)\n      for try await result in stream {\n        // Extract the first choice from the stream results, if none exist, exit the loop.\n        if let choice = result.choices?.first {\n          /// Because we are using the stream API we need to wait to populate\n          /// the needed values that comes from the streamed API to construct a valid tool call response.\n          /// This is not needed if the stream is set to false in the API completion request.\n          /// # Step 2: check if the model wanted to call a function\n          if let toolCalls = choice.delta?.toolCalls {\n            /// # Step 3: Define the available functions to be called\n            availableFunctions = [.createImage: generateImage(arguments:)]\n\n            mapStreamedToolCallsResponse(toolCalls)\n          }\n\n          /// The streamed content to display\n          if let newContent = choice.delta?.content {\n            await updateLastAssistantMessage(.init(\n              content: .content(.init(text: newContent)),\n              origin: .received(.gpt)))\n          }\n        }\n      }\n      // # extend conversation with assistant's reply\n      // Append the `assistantMessage` in to 
the `chatMessageParameters` to extend the conversation\n      if !functionCallsMap.isEmpty {\n        let assistantMessage = createAssistantMessage()\n        chatMessageParameters.append(assistantMessage)\n        /// # Step 4: send the info for each function call and function response to the model\n        let toolMessages = try await createToolsMessages()\n        chatMessageParameters.append(contentsOf: toolMessages)\n\n        // Lastly call the chat again\n        await continueChat()\n      }\n\n      // TUTORIAL\n    } catch {\n      // If an error occurs, update the UI to display the error message.\n      await updateLastAssistantMessage(.init(content: .error(\"\\(error)\"), origin: .received(.gpt)))\n    }\n  }\n\n  func mapStreamedToolCallsResponse(\n    _ toolCalls: [ToolCall])\n  {\n    for toolCall in toolCalls {\n      // Intentionally force unwrapped to catch errrors quickly on demo. // This should be properly handled.\n      let function = FunctionCallDefinition.allCases[toolCall.index!]\n      if var streamedFunctionCallResponse = functionCallsMap[function] {\n        streamedFunctionCallResponse.argument += toolCall.function.arguments\n        functionCallsMap[function] = streamedFunctionCallResponse\n      } else {\n        let streamedFunctionCallResponse = FunctionCallStreamedResponse(\n          name: toolCall.function.name!,\n          id: toolCall.id!,\n          toolCall: toolCall,\n          argument: toolCall.function.arguments)\n        functionCallsMap[function] = streamedFunctionCallResponse\n      }\n    }\n  }\n\n  func createUserMessage(\n    _ prompt: String)\n    -> ChatCompletionParameters.Message\n  {\n    .init(role: .user, content: .text(prompt))\n  }\n\n  func createAssistantMessage() -> ChatCompletionParameters.Message {\n    var toolCalls = [ToolCall]()\n    for (_, functionCallStreamedResponse) in functionCallsMap {\n      let toolCall = functionCallStreamedResponse.toolCall\n      // Intentionally force unwrapped to 
catch errrors quickly on demo. // This should be properly handled.\n      let messageToolCall = ToolCall(\n        id: toolCall.id!,\n        function: .init(arguments: toolCall.function.arguments, name: toolCall.function.name!))\n      toolCalls.append(messageToolCall)\n    }\n    return .init(role: .assistant, content: .text(\"\"), toolCalls: toolCalls)\n  }\n\n  func createToolsMessages() async throws\n    -> [ChatCompletionParameters.Message]\n  {\n    var toolMessages = [ChatCompletionParameters.Message]()\n    for (key, functionCallStreamedResponse) in functionCallsMap {\n      let name = functionCallStreamedResponse.name\n      let id = functionCallStreamedResponse.id\n      let functionToCall = availableFunctions[key]!\n      let arguments = functionCallStreamedResponse.argument\n      let content = try await functionToCall(arguments)\n      let toolMessage = ChatCompletionParameters.Message(\n        role: .tool,\n        content: .text(content),\n        name: name,\n        toolCallID: id)\n      toolMessages.append(toolMessage)\n    }\n    return toolMessages\n  }\n\n  func continueChat() async {\n    let paramsForChat = ChatCompletionParameters(\n      messages: chatMessageParameters,\n      model: .gpt41106Preview)\n    do {\n      // Begin the chat stream with the updated parameters.\n      let stream = try await service.startStreamedChat(parameters: paramsForChat)\n      for try await result in stream {\n        // Extract the first choice from the stream results, if none exist, exit the loop.\n        guard let choice = result.choices?.first else { return }\n\n        /// The streamed content to display\n        if let newContent = choice.delta?.content {\n          await updateLastAssistantMessage(.init(content: .content(.init(text: newContent)), origin: .received(.gpt)))\n        }\n      }\n    } catch {\n      // If an error occurs, update the UI to display the error message.\n      await updateLastAssistantMessage(.init(content: 
.error(\"\\(error)\"), origin: .received(.gpt)))\n    }\n  }\n\n  // MARK: - Private Properties\n\n  private let service: OpenAIService\n  private var lastDisplayedMessageID: UUID?\n  /// To be used for a new request\n  private var chatMessageParameters = [ChatCompletionParameters.Message]()\n  private var functionCallsMap = [FunctionCallDefinition: FunctionCallStreamedResponse]()\n  private var availableFunctions = [FunctionCallDefinition: @MainActor (String) async throws -> String]()\n\n  // MARK: - Private Methods\n\n  @MainActor\n  private func startNewUserDisplayMessage(_ prompt: String) {\n    let startingMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: prompt)),\n      origin: .sent)\n    addMessage(startingMessage)\n  }\n\n  @MainActor\n  private func startNewAssistantEmptyDisplayMessage() {\n    let newMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: \"\")),\n      origin: .received(.gpt))\n    addMessage(newMessage)\n  }\n\n  @MainActor\n  private func updateLastAssistantMessage(\n    _ message: ChatMessageDisplayModel)\n  {\n    guard let id = lastDisplayedMessageID, let index = chatDisplayMessages.firstIndex(where: { $0.id == id }) else { return }\n\n    var lastMessage = chatDisplayMessages[index]\n\n    switch message.content {\n    case .content(let newMedia):\n      switch lastMessage.content {\n      case .content(let lastMedia):\n        var updatedMedia = lastMedia\n        if\n          let newText = newMedia.text,\n          var lastMediaText = lastMedia.text\n        {\n          lastMediaText += newText\n          updatedMedia.text = lastMediaText\n        } else {\n          updatedMedia.text = \"\"\n        }\n        if let urls = newMedia.urls {\n          updatedMedia.urls = urls\n        }\n        lastMessage.content = .content(updatedMedia)\n\n      case .error:\n        break\n      }\n\n    case .error:\n      lastMessage.content = message.content\n    }\n\n    chatDisplayMessages[index] 
= ChatMessageDisplayModel(\n      id: id,\n      content: lastMessage.content,\n      origin: message.origin)\n  }\n\n  @MainActor\n  private func addMessage(_ message: ChatMessageDisplayModel) {\n    let newMessageId = message.id\n    lastDisplayedMessageID = newMessageId\n    withAnimation {\n      chatDisplayMessages.append(message)\n    }\n  }\n}\n\nextension String {\n  fileprivate func toDictionary() -> [String: Any]? {\n    guard let jsonData = data(using: .utf8) else {\n      print(\"Failed to convert JSON string to Data.\")\n      return nil\n    }\n\n    do {\n      return try JSONSerialization.jsonObject(with: jsonData, options: []) as? [String: Any]\n    } catch {\n      print(\"Failed to deserialize JSON: \\(error.localizedDescription)\")\n      return nil\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall/Stream/ChatFunctionsCalllStreamDemoView.swift",
    "content": "//\n//  ChatFunctionsCalllStreamDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/6/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ChatFunctionsCalllStreamDemoView: View {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.customModel = customModel\n    _chatProvider = State(initialValue: ChatFunctionsCallStreamProvider(service: service, customModel: customModel))\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollViewReader { proxy in\n      VStack {\n        List(chatProvider.chatDisplayMessages) { message in\n          ChatMessageView(message: message)\n            .listRowSeparator(.hidden)\n        }\n        .listStyle(.plain)\n        .onChange(of: chatProvider.chatDisplayMessages.last?.content) {\n          let lastMessage = chatProvider.chatDisplayMessages.last\n          if let id = lastMessage?.id {\n            proxy.scrollTo(id, anchor: .bottom)\n          }\n        }\n        textArea\n      }\n    }\n  }\n\n  var textArea: some View {\n    HStack(spacing: 0) {\n      VStack(alignment: .leading, spacing: 0) {\n        textField\n          .padding(.vertical, Sizes.spacingExtraSmall)\n          .padding(.horizontal, Sizes.spacingSmall)\n      }\n      .padding(.vertical, Sizes.spacingExtraSmall)\n      .padding(.horizontal, Sizes.spacingExtraSmall)\n      .background(\n        RoundedRectangle(cornerRadius: 20)\n          .stroke(.gray, lineWidth: 1))\n      .padding(.horizontal, Sizes.spacingMedium)\n      textAreSendButton\n    }\n    .padding(.horizontal)\n    .disabled(isLoading)\n  }\n\n  var textField: some View {\n    TextField(\n      \"How Can I help you today?\",\n      text: $prompt,\n      axis: .vertical)\n  }\n\n  var textAreSendButton: some View {\n    Button {\n      Task {\n        /// Loading UI\n        isLoading = true\n        defer { isLoading = false }\n        // Clears text field.\n        let userPrompt = prompt\n        prompt 
= \"\"\n        try await chatProvider.chat(prompt: userPrompt)\n      }\n    } label: {\n      Image(systemName: \"paperplane\")\n    }\n    .buttonStyle(.bordered)\n    .tint(ThemeColor.tintColor)\n    .disabled(prompt.isEmpty)\n  }\n\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var chatProvider: ChatFunctionsCallStreamProvider\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStreamFluidConversationDemo/ChatFluidConversationProvider.swift",
    "content": "//\n//  ChatFluidConversationProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/4/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass ChatFluidConversationProvider {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.service = service\n    self.customModel = customModel\n  }\n\n  // MARK: - Initializer\n\n  let customModel: String?\n\n  // MARK: - Public Properties\n\n  /// A collection of messages for display in the UI, representing the conversation.\n  var chatMessages = [ChatDisplayMessage]()\n\n  // MARK: - Public Methods\n\n  func startStreamedChat(\n    parameters: ChatCompletionParameters,\n    prompt: String)\n    async throws\n  {\n    // Displays the user message in the UI\n    await startNewUserDisplayMessage(prompt)\n    // Start a new assistant message that is initially empty.\n    await startNewAssistantEmptyDisplayMessage()\n\n    // Copy the provided parameters and update the messages for the chat stream.\n    var localParameters = parameters\n    localParameters.messages = parameterMessages\n\n    do {\n      // Begin the chat stream with the updated parameters.\n      let stream = try await service.startStreamedChat(parameters: localParameters)\n      for try await result in stream {\n        // Extract the first choice from the stream results, if none exist, exit the loop.\n        guard let choice = result.choices?.first else { return }\n\n        // Store initial `role` and `functionCall` data from the first `choice.delta` for UI display.\n        // This information is essential for maintaining context in the conversation and for updating\n        // the chat UI with proper role attributions for each message.\n        var newDelta = ChatDisplayMessage.Delta(role: \"\", content: \"\")\n        if let firstDelta = firstChatMessageResponseDelta[result.id ?? 
\"\"] {\n          // If we have already stored the first delta for this result ID, reuse its role.\n          newDelta.role = firstDelta.role!\n        } else {\n          // Otherwise, store the first delta received for future reference.\n          firstChatMessageResponseDelta[result.id ?? \"\"] = choice.delta\n        }\n        // Assign the content received in the current message to the newDelta.\n        newDelta.content = temporalReceivedMessageContent\n        // Update the UI with the latest assistant message and the corresponding delta.\n        updateLastAssistantMessage(content: choice.delta?.content ?? \"\", delta: newDelta)\n\n        // Evaluate the `finishReason` to determine if the conversation has reached a logical end.\n        // If so, package the accumulated data into a new message parameter that will be used\n        // to enrich context in subsequent API calls, leading to better conversation continuity.\n        if let finishReason = choice.finishReason {\n          debugPrint(\"FINISH_REASON \\(finishReason)\")\n          // Construct a new message parameter with the role and content derived from the delta.\n          // Intentionally force unwrapped, if fails is programming error.\n          let newMessage = ChatCompletionParameters.Message(\n            role: .init(rawValue: newDelta.role)!,\n            content: .text(newDelta.content))\n          // Append the new message parameter to the collection for future requests.\n          updateParameterMessagesArray(newMessage)\n        }\n      }\n    } catch {\n      // If an error occurs, update the UI to display the error message.\n      updateLastDisplayedMessage(.init(content: .error(\"\\(error)\"), type: .received, delta: nil))\n    }\n  }\n\n  /// Defines the maximum number of parameter messages to retain for context. 
A larger history can enrich\n  /// the language model's responses but be mindful as it will also increase the number of tokens sent in each request,\n  /// thus affecting API consumption. A balance is required; a count of 5 is a reasonable starting point.\n  private static var parameterMessagesMaxStorageCount = 5\n\n  // MARK: - Private Properties\n\n  private let service: OpenAIService\n\n  /// Accumulates the streamed message content for real-time display updates in the UI.\n  private var temporalReceivedMessageContent = \"\"\n  /// Tracks the identifier of the last message displayed, enabling updates in the from the streaming API response.\n  private var lastDisplayedMessageID: UUID?\n  /// Stores the initial chat message's delta, which uniquely includes metadata like `role`.\n  private var firstChatMessageResponseDelta = [String: ChatCompletionChunkObject.ChatChoice.Delta]()\n  /// Builds a history of messages sent and received, enhancing the chat's context for future requests.\n  private var parameterMessages = [ChatCompletionParameters.Message]()\n\n  // MARK: - Private Methods\n\n  @MainActor\n  private func startNewUserDisplayMessage(_ prompt: String) {\n    // Updates the UI with\n    let startingMessage = ChatDisplayMessage(\n      content: .text(prompt),\n      type: .sent, delta: nil)\n    addMessage(startingMessage)\n    // Stores a new\n    let newParameterMessage = ChatCompletionParameters.Message(role: .user, content: .text(prompt))\n    updateParameterMessagesArray(newParameterMessage)\n  }\n\n  @MainActor\n  private func startNewAssistantEmptyDisplayMessage() {\n    firstChatMessageResponseDelta = [:]\n    temporalReceivedMessageContent = \"\"\n    let newMessage = ChatDisplayMessage(content: .text(temporalReceivedMessageContent), type: .received, delta: nil)\n    let newMessageId = newMessage.id\n    lastDisplayedMessageID = newMessageId\n    addMessage(newMessage)\n  }\n\n  private func updateLastAssistantMessage(\n    content: String,\n    
delta: ChatDisplayMessage.Delta)\n  {\n    temporalReceivedMessageContent += content\n    guard let id = lastDisplayedMessageID, let index = chatMessages.firstIndex(where: { $0.id == id }) else { return }\n    chatMessages[index] = ChatDisplayMessage(\n      id: id,\n      content: .text(temporalReceivedMessageContent),\n      type: .received,\n      delta: delta)\n  }\n\n  @MainActor\n  private func addMessage(_ message: ChatDisplayMessage) {\n    withAnimation {\n      chatMessages.append(message)\n    }\n  }\n\n  private func updateParameterMessagesArray(_ message: ChatCompletionParameters.Message) {\n    parameterMessages.append(message)\n    debugPrint(\"NEWCOUNT \\(parameterMessages.count) message \\(message)\")\n    if parameterMessages.count > Self.parameterMessagesMaxStorageCount {\n      debugPrint(\"NEWCOUNT \\(parameterMessages.count) removed message \\(parameterMessages[0])\")\n      parameterMessages.removeFirst()\n    }\n  }\n\n  private func updateLastDisplayedMessage(_ message: ChatDisplayMessage) {\n    chatMessages[chatMessages.count - 1] = message\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStreamFluidConversationDemo/ChatStreamFluidConversationDemoView.swift",
    "content": "//\n//  ChatStreamFluidConversationDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/4/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ChatStreamFluidConversationDemoView: View {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.customModel = customModel\n    _chatProvider = State(initialValue: ChatFluidConversationProvider(service: service, customModel: customModel))\n  }\n\n  enum GPTModel: String, CaseIterable {\n    case gpt3dot5 = \"GPT-3.5\"\n    case gpt4 = \"GPT-4\"\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollViewReader { proxy in\n      VStack {\n        picker\n        List(chatProvider.chatMessages) { message in\n          ChatDisplayMessageView(message: message)\n            .listRowSeparator(.hidden)\n        }\n        .listStyle(.plain)\n        .onChange(of: chatProvider.chatMessages.last?.content) {\n          let lastMessage = chatProvider.chatMessages.last\n          if let id = lastMessage?.id {\n            proxy.scrollTo(id, anchor: .bottom)\n          }\n        }\n        textArea\n      }\n    }\n  }\n\n  var picker: some View {\n    Picker(\"\", selection: $selectedModel) {\n      ForEach(GPTModel.allCases, id: \\.self) { model in\n        Text(model.rawValue)\n          .font(.title)\n          .tag(model)\n      }\n    }\n    .pickerStyle(.segmented)\n    .padding()\n  }\n\n  var textArea: some View {\n    HStack(spacing: 0) {\n      TextField(\n        \"How Can I help you today?\",\n        text: $prompt,\n        axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      textAreButton\n    }\n    .padding(.horizontal)\n    .disabled(isLoading)\n  }\n\n  var textAreButton: some View {\n    Button {\n      Task {\n        isLoading = true\n        defer {\n          // ensure isLoading is set to false after the function executes.\n          isLoading = false\n          prompt = \"\"\n        }\n        /// 
Make the request\n        let model: Model =\n          if let customModel, !customModel.isEmpty {\n            .custom(customModel)\n          } else {\n            selectedModel == .gpt3dot5 ? .gpt35Turbo : .gpt4\n          }\n\n        try await chatProvider.startStreamedChat(parameters: .init(\n          messages: [.init(role: .user, content: .text(prompt))],\n          model: model), prompt: prompt)\n      }\n    } label: {\n      Image(systemName: \"paperplane\")\n    }\n    .buttonStyle(.bordered)\n  }\n\n  @State private var chatProvider: ChatFluidConversationProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var selectedModel = GPTModel.gpt3dot5\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructureOutputTool/ChatStructureOutputToolDemoView.swift",
    "content": "//\n//  ChatStructureOutputToolDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 8/11/24.\n//\n\nimport Foundation\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ChatStructureOutputToolDemoView: View {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.customModel = customModel\n    chatProvider = .init(service: service, customModel: customModel)\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollViewReader { proxy in\n      VStack {\n        List(chatProvider.chatDisplayMessages) { message in\n          ChatMessageView(message: message)\n            .listRowSeparator(.hidden)\n        }\n        .listStyle(.plain)\n        .onChange(of: chatProvider.chatDisplayMessages.last?.content) {\n          let lastMessage = chatProvider.chatDisplayMessages.last\n          if let id = lastMessage?.id {\n            proxy.scrollTo(id, anchor: .bottom)\n          }\n        }\n        textArea\n      }\n    }\n  }\n\n  var textArea: some View {\n    HStack(spacing: 0) {\n      VStack(alignment: .leading, spacing: 0) {\n        textField\n          .padding(.vertical, Sizes.spacingExtraSmall)\n          .padding(.horizontal, Sizes.spacingSmall)\n      }\n      .padding(.vertical, Sizes.spacingExtraSmall)\n      .padding(.horizontal, Sizes.spacingExtraSmall)\n      .background(\n        RoundedRectangle(cornerRadius: 20)\n          .stroke(.gray, lineWidth: 1))\n      .padding(.horizontal, Sizes.spacingMedium)\n      textAreSendButton\n    }\n    .padding(.horizontal)\n    .disabled(isLoading)\n  }\n\n  var textField: some View {\n    TextField(\n      \"How Can I help you today?\",\n      text: $prompt,\n      axis: .vertical)\n  }\n\n  var textAreSendButton: some View {\n    Button {\n      Task {\n        /// Loading UI\n        isLoading = true\n        defer { isLoading = false }\n        // Clears text field.\n        let userPrompt = prompt\n        prompt = \"\"\n        try await 
chatProvider.startChat(prompt: userPrompt)\n      }\n    } label: {\n      Image(systemName: \"paperplane\")\n    }\n    .buttonStyle(.bordered)\n    .tint(ThemeColor.tintColor)\n    .disabled(prompt.isEmpty)\n  }\n\n  @State private var chatProvider: ChatStructuredOutputToolProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructureOutputTool/ChatStructuredOutputToolProvider.swift",
    "content": "//\n//  ChatStructuredOutputToolProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 8/11/24.\n//\n\nimport Foundation\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - StructuredToolCall\n\nenum StructuredToolCall: String, CaseIterable {\n  case structureUI = \"structured_ui\"\n\n  var functionTool: ChatCompletionParameters.Tool {\n    switch self {\n    case .structureUI:\n      .init(\n        function: .init(\n          name: rawValue,\n          strict: true,\n          description: \"Dynamically generated UI\",\n          parameters: structureUISchema))\n    }\n  }\n\n  var structureUISchema: JSONSchema {\n    JSONSchema(\n      type: .object,\n      properties: [\n        \"type\": JSONSchema(\n          type: .string,\n          description: \"The type of the UI component\",\n          additionalProperties: false,\n          enum: [\"div\", \"button\", \"header\", \"section\", \"field\", \"form\"]),\n        \"label\": JSONSchema(\n          type: .string,\n          description: \"The label of the UI component, used for buttons or form fields\",\n          additionalProperties: false),\n        \"children\": JSONSchema(\n          type: .array,\n          description: \"Nested UI components\",\n          items: JSONSchema(ref: \"#\"),\n          additionalProperties: false),\n        \"attributes\": JSONSchema(\n          type: .array,\n          description: \"Arbitrary attributes for the UI component, suitable for any element\",\n          items: JSONSchema(\n            type: .object,\n            properties: [\n              \"name\": JSONSchema(\n                type: .string,\n                description: \"The name of the attribute, for example onClick or className\",\n                additionalProperties: false),\n              \"value\": JSONSchema(\n                type: .string,\n                description: \"The value of the attribute\",\n                additionalProperties: false),\n            ],\n   
         required: [\"name\", \"value\"],\n            additionalProperties: false),\n          additionalProperties: false),\n      ],\n      required: [\"type\", \"label\", \"children\", \"attributes\"],\n      additionalProperties: false)\n  }\n}\n\n// MARK: - ChatStructuredOutputToolProvider\n\n@Observable\nfinal class ChatStructuredOutputToolProvider {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.service = service\n    self.customModel = customModel\n  }\n\n  // MARK: - Init\n\n  let customModel: String?\n\n  var chatDisplayMessages = [ChatMessageDisplayModel]()\n  let systemMessage = ChatCompletionParameters.Message(role: .system, content: .text(\"You are a math tutor\"))\n\n  func startChat(\n    prompt: String)\n    async throws\n  {\n    await startNewUserDisplayMessage(prompt)\n    await startNewAssistantEmptyDisplayMessage()\n\n    let userMessage = createUserMessage(prompt)\n    chatMessageParameters.append(userMessage)\n\n    let model: Model =\n      if let customModel, !customModel.isEmpty {\n        .custom(customModel)\n      } else {\n        .gpt4o20240806\n      }\n\n    let parameters = ChatCompletionParameters(\n      messages: [systemMessage] + chatMessageParameters,\n      model: model,\n      tools: StructuredToolCall.allCases.map(\\.functionTool))\n\n    do {\n      let chat = try await service.startChat(parameters: parameters)\n      guard let assistantMessage = chat.choices?.first?.message else { return }\n      let content = assistantMessage.content ?? 
\"\"\n      await updateLastAssistantMessage(.init(content: .content(.init(text: content)), origin: .received(.gpt)))\n      if let toolCalls = assistantMessage.toolCalls {\n        availableFunctions = [.structureUI: getStructureOutput(arguments:)]\n        // Append the `assistantMessage` in to the `chatMessageParameters` to extend the conversation\n        let parameterAssistantMessage = ChatCompletionParameters.Message(\n          role: .assistant,\n          content: .text(content), toolCalls: assistantMessage.toolCalls)\n\n        chatMessageParameters.append(parameterAssistantMessage)\n\n        /// # Step 4: send the info for each function call and function response to the model\n        for toolCall in toolCalls {\n          let name = toolCall.function.name\n          let id = toolCall.id\n          let functionToCall = availableFunctions[StructuredToolCall(rawValue: name!)!]!\n          let arguments = toolCall.function.arguments\n          let content = functionToCall(arguments)\n          let toolMessage = ChatCompletionParameters.Message(\n            role: .tool,\n            content: .text(content),\n            name: name,\n            toolCallID: id)\n          chatMessageParameters.append(toolMessage)\n        }\n\n        /// # get a new response from the model where it can see the function response\n        await continueChat()\n      }\n\n    } catch let error as APIError {\n      // If an error occurs, update the UI to display the error message.\n      await updateLastAssistantMessage(.init(content: .error(\"\\(error.displayDescription)\"), origin: .received(.gpt)))\n    }\n  }\n\n  // MARK: Tool functions\n\n  func getStructureOutput(arguments: String) -> String {\n    arguments\n  }\n\n  private let service: OpenAIService\n  private var lastDisplayedMessageID: UUID?\n  private var chatMessageParameters = [ChatCompletionParameters.Message]()\n  private var availableFunctions = [StructuredToolCall: (String) -> String]()\n}\n\n// MARK: UI 
related\n\nextension ChatStructuredOutputToolProvider {\n  func createUserMessage(\n    _ prompt: String)\n    -> ChatCompletionParameters.Message\n  {\n    .init(role: .user, content: .text(prompt))\n  }\n\n  func continueChat() async {\n    let model: Model =\n      if let customModel, !customModel.isEmpty {\n        .custom(customModel)\n      } else {\n        .gpt4o\n      }\n\n    let paramsForChat = ChatCompletionParameters(\n      messages: chatMessageParameters,\n      model: model)\n    do {\n      let chat = try await service.startChat(parameters: paramsForChat)\n      guard let assistantMessage = chat.choices?.first?.message else { return }\n      await updateLastAssistantMessage(.init(content: .content(.init(text: assistantMessage.content)), origin: .received(.gpt)))\n    } catch {\n      // If an error occurs, update the UI to display the error message.\n      await updateLastAssistantMessage(.init(content: .error(\"\\(error)\"), origin: .received(.gpt)))\n    }\n  }\n\n  // MARK: - Private Methods\n\n  @MainActor\n  private func startNewUserDisplayMessage(_ prompt: String) {\n    let startingMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: prompt)),\n      origin: .sent)\n    addMessage(startingMessage)\n  }\n\n  @MainActor\n  private func startNewAssistantEmptyDisplayMessage() {\n    let newMessage = ChatMessageDisplayModel(\n      content: .content(.init(text: \"\")),\n      origin: .received(.gpt))\n    addMessage(newMessage)\n  }\n\n  @MainActor\n  private func updateLastAssistantMessage(\n    _ message: ChatMessageDisplayModel)\n  {\n    guard let id = lastDisplayedMessageID, let index = chatDisplayMessages.firstIndex(where: { $0.id == id }) else { return }\n\n    var lastMessage = chatDisplayMessages[index]\n\n    switch message.content {\n    case .content(let newMedia):\n      switch lastMessage.content {\n      case .content(let lastMedia):\n        var updatedMedia = lastMedia\n        if\n          let newText = 
newMedia.text,\n          var lastMediaText = lastMedia.text\n        {\n          lastMediaText += newText\n          updatedMedia.text = lastMediaText\n        } else {\n          updatedMedia.text = \"\"\n        }\n        if let urls = newMedia.urls {\n          updatedMedia.urls = urls\n        }\n        lastMessage.content = .content(updatedMedia)\n\n      case .error:\n        break\n      }\n\n    case .error:\n      lastMessage.content = message.content\n    }\n\n    chatDisplayMessages[index] = ChatMessageDisplayModel(\n      id: id,\n      content: lastMessage.content,\n      origin: message.origin)\n  }\n\n  @MainActor\n  private func addMessage(_ message: ChatMessageDisplayModel) {\n    let newMessageId = message.id\n    lastDisplayedMessageID = newMessageId\n    withAnimation {\n      chatDisplayMessages.append(message)\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructuredOutputs/ChatStructuredOutputDemoView.swift",
    "content": "//\n//  ChatStructuredOutputDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 8/10/24.\n//\n\nimport Foundation\nimport SwiftOpenAI\nimport SwiftUI\n\n/// Schema demo\n///\n/// https://openai.com/index/introducing-structured-outputs-in-the-api/\n///\n/// \"response_format\": {\n/// \"type\": \"json_schema\",\n/// \"json_schema\": {\n/// \"name\": \"math_response\",\n/// \"strict\": true,\n/// \"schema\": {\n/// \"type\": \"object\",\n/// \"properties\": {\n/// \"steps\": {\n/// \"type\": \"array\",\n/// \"items\": {\n/// \"type\": \"object\",\n/// \"properties\": {\n/// \"explanation\": {\n/// \"type\": \"string\"\n/// },\n/// \"output\": {\n/// \"type\": \"string\"\n/// }\n/// },\n/// \"required\": [\"explanation\", \"output\"],\n/// \"additionalProperties\": false\n/// }\n/// },\n/// \"final_answer\": {\n/// \"type\": \"string\"\n/// }\n/// },\n/// \"required\": [\"steps\", \"final_answer\"],\n/// \"additionalProperties\": false\n/// }\n/// }\n/// }\n\n// Steps to define the above Schema:\n\n// 1: Define the Step schema object\n\nlet stepSchema = JSONSchema(\n  type: .object,\n  properties: [\n    \"explanation\": JSONSchema(type: .string),\n    \"output\": JSONSchema(\n      type: .string),\n  ],\n  required: [\"explanation\", \"output\"],\n  additionalProperties: false)\n\n// 2. Define the steps Array schema.\n\nlet stepsArraySchema = JSONSchema(type: .array, items: stepSchema)\n\n/// 3. Define the final Answer schema.\nlet finalAnswerSchema = JSONSchema(type: .string)\n\n/// 4. 
Define the response format JSON schema.\nlet responseFormatSchema = JSONSchemaResponseFormat(\n  name: \"math_response\",\n  strict: true,\n  schema: JSONSchema(\n    type: .object,\n    properties: [\n      \"steps\": stepsArraySchema,\n      \"final_answer\": finalAnswerSchema,\n    ],\n    required: [\"steps\", \"final_answer\"],\n    additionalProperties: false))\n\n// MARK: - ChatStructuredOutputDemoView\n\n// We can also handle optional values.\n//\n// let weatherSchema = JSONSchemaResponseFormat(\n// name: \"get_weather\",\n// description: \"Fetches the weather in the given location\",\n// strict: true,\n// schema: JSONSchema(\n// type: .object,\n// properties: [\n// \"location\": JSONSchema(\n// type: .string,\n// description: \"The location to get the weather for\"\n// ),\n// \"unit\": JSONSchema(\n// type: .optional(.string),\n// description: \"The unit to return the temperature in\",\n// enum: [\"F\", \"C\"]\n// ),\n// \"thinking\": .init(\n// type: .object,\n// description: \"your thinking\",\n// properties: [\"step\": .init(type: .string)],\n// required: [\"step\"])\n// ],\n// required: [\"location\", \"unit\", \"thinking\"]\n// )\n// )\n\nstruct ChatStructuredOutputDemoView: View {\n  init(service: OpenAIService, customModel: String? 
= nil) {\n    self.customModel = customModel\n    _chatProvider = State(initialValue: ChatStructuredOutputProvider(service: service, customModel: customModel))\n  }\n\n  enum ChatConfig {\n    case chatCompletion\n    case chatCompeltionStream\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollView {\n      VStack {\n        picker\n        textArea\n        Text(chatProvider.errorMessage)\n          .foregroundColor(.red)\n        switch selectedSegment {\n        case .chatCompeltionStream:\n          streamedChatResultView\n        case .chatCompletion:\n          chatCompletionResultView\n        }\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  var picker: some View {\n    Picker(\"Options\", selection: $selectedSegment) {\n      Text(\"Chat Completion\").tag(ChatConfig.chatCompletion)\n      Text(\"Chat Completion stream\").tag(ChatConfig.chatCompeltionStream)\n    }\n    .pickerStyle(SegmentedPickerStyle())\n    .padding()\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the task completes\n\n          let content = ChatCompletionParameters.Message.ContentType.text(prompt)\n          prompt = \"\"\n          let parameters = ChatCompletionParameters(\n            messages: [\n              .init(role: .system, content: .text(\"You are a helpful math tutor.\")),\n              .init(\n                role: .user,\n                content: content),\n            ],\n            model: .gpt4o20240806,\n            responseFormat: .jsonSchema(responseFormatSchema))\n          switch selectedSegment {\n          case .chatCompletion:\n            try await 
chatProvider.startChat(parameters: parameters)\n          case .chatCompeltionStream:\n            try await chatProvider.startStreamedChat(parameters: parameters)\n          }\n        }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  /// stream = `false`\n  var chatCompletionResultView: some View {\n    ForEach(Array(chatProvider.messages.enumerated()), id: \\.offset) { _, val in\n      VStack(spacing: 0) {\n        Text(\"\\(val)\")\n      }\n    }\n  }\n\n  /// stream = `true`\n  var streamedChatResultView: some View {\n    VStack {\n      Button(\"Cancel stream\") {\n        chatProvider.cancelStream()\n      }\n      Text(chatProvider.message)\n    }\n  }\n\n  @State private var chatProvider: ChatStructuredOutputProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var selectedSegment = ChatConfig.chatCompeltionStream\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructuredOutputs/ChatStructuredOutputProvider.swift",
    "content": "//\n//  ChatStructuredOutputProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 8/10/24.\n//\n\nimport Foundation\nimport SwiftOpenAI\n\n// MARK: - ChatStructuredOutputProvider\n\n@Observable\nfinal class ChatStructuredOutputProvider {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.service = service\n    self.customModel = customModel\n  }\n\n  // MARK: - Initializer\n\n  let customModel: String?\n\n  var message = \"\"\n  var messages = [String]()\n  var errorMessage = \"\"\n\n  // MARK: - Public Methods\n\n  func startChat(\n    parameters: ChatCompletionParameters)\n    async throws\n  {\n    do {\n      let choices = try await service.startChat(parameters: parameters).choices ?? []\n      messages = choices.compactMap(\\.message?.content).map { $0.asJsonFormatted() }\n      assert(messages.count == 1)\n      errorMessage = choices.first?.message?.refusal ?? \"\"\n    } catch APIError.responseUnsuccessful(let description, let statusCode) {\n      self.errorMessage = \"Network error with status code: \\(statusCode) and description: \\(description)\"\n    } catch {\n      errorMessage = error.localizedDescription\n    }\n  }\n\n  func startStreamedChat(\n    parameters: ChatCompletionParameters)\n    async throws\n  {\n    streamTask = Task {\n      do {\n        let stream = try await service.startStreamedChat(parameters: parameters)\n        for try await result in stream {\n          let firstChoiceDelta = result.choices?.first?.delta\n          let content = firstChoiceDelta?.refusal ?? firstChoiceDelta?.content ?? 
\"\"\n          self.message += content\n          if result.choices?.first?.finishReason != nil {\n            self.message = self.message.asJsonFormatted()\n          }\n        }\n      } catch APIError.responseUnsuccessful(let description, let statusCode) {\n        self.errorMessage = \"Network error with status code: \\(statusCode) and description: \\(description)\"\n      } catch {\n        self.errorMessage = error.localizedDescription\n      }\n    }\n  }\n\n  func cancelStream() {\n    streamTask?.cancel()\n  }\n\n  private let service: OpenAIService\n  private var streamTask: Task<Void, Never>?\n}\n\n/// Helper that allows to display the JSON Schema.\nextension String {\n  func asJsonFormatted() -> String {\n    guard let data = data(using: .utf8) else { return self }\n    do {\n      // Parse JSON string to Any object\n      if let jsonObject = try JSONSerialization.jsonObject(with: data, options: []) as? [String: Any] {\n        // Convert back to data with pretty-printing\n        let prettyPrintedData = try JSONSerialization.data(withJSONObject: jsonObject, options: [.prettyPrinted, .sortedKeys])\n\n        // Convert formatted data back to string\n        return String(data: prettyPrintedData, encoding: .utf8) ?? self\n      }\n    } catch {\n      print(\"Error formatting JSON: \\(error)\")\n    }\n    return self\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/EmbeddingsDemo/EmbeddingsDemoView.swift",
    "content": "//\n//  EmbeddingsDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/23/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct EmbeddingsDemoView: View {\n  init(service: OpenAIService) {\n    _embeddingsProvider = State(initialValue: EmbeddingsProvider(service: service))\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the task completes\n          do {\n            try await embeddingsProvider.createEmbeddings(parameters: .init(\n              input: prompt,\n              model: .textEmbedding3Large,\n              encodingFormat: nil,\n              dimensions: nil))\n          } catch {\n            errorMessage = \"\\(error)\"\n          }\n        }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  var list: some View {\n    List {\n      ForEach(Array(embeddingsProvider.embeddings.enumerated()), id: \\.offset) { _, embeddingObject in\n        Section(header: Text(\"Section \\(embeddingObject.index) \\(embeddingObject.object)\")) {\n          ForEach(embeddingObject.embedding, id: \\.self) { embedding in\n            Text(\"Embedding Value \\(embedding)\")\n          }\n        }\n      }\n    }\n  }\n\n  var body: some View {\n    VStack {\n      textArea\n      if !errorMessage.isEmpty {\n        Text(\"Error \\(errorMessage)\")\n          .bold()\n      }\n      list\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  @State private var embeddingsProvider: EmbeddingsProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State 
private var errorMessage = \"\"\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/EmbeddingsDemo/Embeddingsprovider.swift",
    "content": "//\n//  Embeddingsprovider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/23/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass EmbeddingsProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var embeddings = [EmbeddingObject]()\n\n  func createEmbeddings(\n    parameters: EmbeddingParameter)\n    async throws\n  {\n    embeddings = try await service.createEmbeddings(parameters: parameters).data\n  }\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Files/AttachmentView.swift",
    "content": "//\n//  AttachmentView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 5/29/24.\n//\n\nimport SwiftUI\n\nstruct AttachmentView: View {\n  let fileName: String\n  @Binding var actionTrigger: Bool\n\n  let isLoading: Bool\n\n  var body: some View {\n    HStack(spacing: Sizes.spacingExtraSmall) {\n      HStack {\n        if isLoading == true {\n          ProgressView()\n            .frame(width: 10, height: 10)\n            .padding(.horizontal, Sizes.spacingExtraSmall)\n        } else {\n          Image(systemName: \"doc\")\n            .resizable()\n            .aspectRatio(contentMode: .fit)\n            .frame(width: 10)\n            .foregroundColor(.secondary)\n        }\n        Text(fileName)\n          .font(.caption2)\n      }\n      Button {\n        actionTrigger = true\n\n      } label: {\n        Image(systemName: \"xmark.circle.fill\")\n      }\n      .disabled(isLoading)\n    }\n    .padding(.leading, Sizes.spacingMedium)\n    .background(\n      RoundedRectangle(cornerRadius: 8)\n        .stroke(.gray.opacity(0.5), lineWidth: 0.5))\n  }\n}\n\n#Preview {\n  AttachmentView(fileName: \"Mydocument.pdf\", actionTrigger: .constant(true), isLoading: true)\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Files/FileAttachmentView.swift",
    "content": "//\n//  FileAttachmentView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 5/29/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - FileAttachmentView\n\nstruct FileAttachmentView: View {\n  init(\n    service: OpenAIService,\n    action: FilePickerAction,\n    fileUploadedCompletion: @escaping (_ file: FileObject) -> Void,\n    fileDeletedCompletion: @escaping (_ parameters: FilePickerAction, _ id: String) -> Void)\n  {\n    fileProvider = FilesPickerProvider(service: service)\n    self.action = action\n    self.fileUploadedCompletion = fileUploadedCompletion\n    self.fileDeletedCompletion = fileDeletedCompletion\n  }\n\n  var body: some View {\n    Group {\n      switch action {\n      case .request(let parameters):\n        newUploadedFileView(parameters: parameters)\n      case .retrieveAndDisplay(let id):\n        previousUploadedFileView(id: id)\n      }\n    }\n    .onChange(of: deleted) { oldValue, newValue in\n      if oldValue != newValue, newValue {\n        Task {\n          if let fileObject {\n            fileDeleteStatus = try await fileProvider.deleteFileWith(id: fileObject.id)\n          }\n        }\n      }\n    }\n    .onChange(of: fileDeleteStatus) { oldValue, newValue in\n      if oldValue != newValue, let newValue, newValue.deleted {\n        fileDeletedCompletion(action, newValue.id)\n      }\n    }\n  }\n\n  func newUploadedFileView(\n    parameters: FileParameters)\n    -> some View\n  {\n    AttachmentView(\n      fileName: (fileObject?.filename ?? parameters.fileName) ?? \"\",\n      actionTrigger: $deleted,\n      isLoading: fileObject == nil || deleted)\n      .disabled(fileObject == nil)\n      .opacity(fileObject == nil ? 
0.3 : 1)\n      .onFirstAppear {\n        Task {\n          fileObject = try await fileProvider.uploadFile(parameters: parameters)\n        }\n      }\n      .onChange(of: fileObject) { oldValue, newValue in\n        if oldValue != newValue, let newValue {\n          fileUploadedCompletion(newValue)\n        }\n      }\n  }\n\n  func previousUploadedFileView(\n    id: String)\n    -> some View\n  {\n    AttachmentView(fileName: fileObject?.filename ?? \"Document\", actionTrigger: $deleted, isLoading: fileObject == nil || deleted)\n      .onFirstAppear {\n        Task {\n          fileObject = try await fileProvider.retrieveFileWith(id: id)\n        }\n      }\n  }\n\n  @State private var fileObject: FileObject?\n  @State private var fileDeleteStatus: DeletionStatus?\n  @State private var deleted = false\n\n  private let fileProvider: FilesPickerProvider\n  private let fileUploadedCompletion: (_ file: FileObject) -> Void\n  private let fileDeletedCompletion: (_ action: FilePickerAction, _ id: String) -> Void\n  private let action: FilePickerAction\n}\n\n// MARK: - OnFirstAppear\n\nprivate struct OnFirstAppear: ViewModifier {\n  let perform: () -> Void\n\n  @State private var firstTime = true\n\n  func body(content: Content) -> some View {\n    content.onAppear {\n      if firstTime {\n        firstTime = false\n        perform()\n      }\n    }\n  }\n}\n\nextension View {\n  func onFirstAppear(perform: @escaping () -> Void) -> some View {\n    modifier(OnFirstAppear(perform: perform))\n  }\n}\n\n// MARK: - DeletionStatus + @retroactive Equatable\n\nextension DeletionStatus: @retroactive Equatable {\n  public static func ==(lhs: DeletionStatus, rhs: DeletionStatus) -> Bool {\n    lhs.id == rhs.id\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Files/FilesPicker.swift",
    "content": "//\n//  FilesPicker.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 5/29/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - FileObject + @retroactive Equatable\n\nextension FileObject: @retroactive Equatable {\n  public static func ==(lhs: FileObject, rhs: FileObject) -> Bool {\n    lhs.id == rhs.id\n  }\n}\n\n// MARK: - FileParameters + @retroactive Equatable, @retroactive Identifiable\n\nextension FileParameters: @retroactive Equatable, @retroactive Identifiable {\n  public var id: String {\n    fileName ?? \"\"\n  }\n\n  public static func ==(lhs: FileParameters, rhs: FileParameters) -> Bool {\n    lhs.file == rhs.file &&\n      lhs.fileName == rhs.fileName &&\n      lhs.purpose == rhs.purpose\n  }\n}\n\n// MARK: - FilePickerAction\n\nenum FilePickerAction: Identifiable, Equatable {\n  case request(FileParameters)\n  case retrieveAndDisplay(id: String)\n\n  var id: String {\n    switch self {\n    case .request(let fileParameters): fileParameters.id\n    case .retrieveAndDisplay(let id): id\n    }\n  }\n}\n\n// MARK: - FilesPicker\n\nstruct FilesPicker: View {\n  init(\n    service: OpenAIService,\n    sectionTitle: String? 
= nil,\n    actionTitle: String,\n    fileIDS: Binding<[String]>,\n    actions: Binding<[FilePickerAction]>)\n  {\n    self.service = service\n    self.sectionTitle = sectionTitle\n    self.actionTitle = actionTitle\n    _fileIDS = fileIDS\n    _actions = actions\n  }\n\n  var body: some View {\n    VStack(alignment: .leading) {\n      Group {\n        if let sectionTitle {\n          VStack {\n            Text(sectionTitle)\n            Button {\n              presentImporter = true\n            } label: {\n              Text(actionTitle)\n            }\n          }\n\n        } else {\n          Button {\n            presentImporter = true\n          } label: {\n            Text(actionTitle)\n          }\n        }\n      }\n      .fileImporter(\n        isPresented: $presentImporter,\n        allowedContentTypes: [.pdf, .text, .mp3, .mpeg],\n        allowsMultipleSelection: true)\n      { result in\n        switch result {\n        case .success(let files):\n          for file in files {\n            // gain access to the directory\n            let gotAccess = file.startAccessingSecurityScopedResource()\n            guard gotAccess else { continue }\n            if\n              let data = try? 
Data(contentsOf: file.absoluteURL)\n            {\n              let parameter = FileParameters(fileName: file.lastPathComponent, file: data, purpose: \"assistants\")\n              actions.append(.request(parameter))\n            }\n            file.stopAccessingSecurityScopedResource()\n          }\n\n        case .failure(let error):\n          print(error)\n        }\n      }\n      ForEach(actions, id: \\.id) { action in\n        FileAttachmentView(\n          service: service,\n          action: action)\n        { fileResponse in\n          fileIDS.append(fileResponse.id)\n        } fileDeletedCompletion: { actionToDelete, deletedFileID in\n          /// Remove file ids from network request.\n          fileIDS.removeAll(where: { id in\n            id == deletedFileID\n          })\n          /// Update UI\n          actions.removeAll { action in\n            actionToDelete.id == action.id\n          }\n        }\n      }\n    }\n    .frame(maxWidth: .infinity, alignment: .leading)\n  }\n\n  @State private var presentImporter = false\n  @Binding private var actions: [FilePickerAction]\n  @Binding private var fileIDS: [String]\n\n  private let service: OpenAIService\n  private let sectionTitle: String?\n  private let actionTitle: String\n}\n\n#Preview {\n  FilesPicker(\n    service: OpenAIServiceFactory.service(apiKey: \"\"),\n    sectionTitle: \"Knowledge\",\n    actionTitle: \"Upload File\",\n    fileIDS: .constant([\"s\"]),\n    actions: .constant(\n      [\n        .retrieveAndDisplay(id: \"id1\"),\n        .retrieveAndDisplay(id: \"id2\"),\n        .retrieveAndDisplay(id: \"id3\"),\n        .retrieveAndDisplay(id: \"id4\"),\n        .retrieveAndDisplay(id: \"id5\"),\n        .retrieveAndDisplay(id: \"id6\"),\n      ]))\n      .padding()\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Files/FilesPickerProvider.swift",
    "content": "//\n//  FilesPickerProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 5/29/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nfinal class FilesPickerProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var files = [FileObject]()\n  var uploadedFile: FileObject?\n  var deletedStatus: DeletionStatus?\n  var retrievedFile: FileObject?\n  var fileContent = [[String: Any]]()\n\n  func listFiles() async throws {\n    files = try await service.listFiles().data\n  }\n\n  func uploadFile(\n    parameters: FileParameters)\n    async throws -> FileObject?\n  {\n    try await service.uploadFile(parameters: parameters)\n  }\n\n  func deleteFileWith(\n    id: String)\n    async throws -> DeletionStatus?\n  {\n    try await service.deleteFileWith(id: id)\n  }\n\n  func retrieveFileWith(\n    id: String)\n    async throws -> FileObject?\n  {\n    try await service.retrieveFileWith(id: id)\n  }\n\n  func retrieveContentForFileWith(\n    id: String)\n    async throws\n  {\n    fileContent = try await service.retrieveContentForFileWith(id: id)\n  }\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/FilesDemo/FilesDemoView.swift",
    "content": "//\n//  FilesDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/23/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - FilesDemoView\n\nstruct FilesDemoView: View {\n  init(service: OpenAIService) {\n    _filesProvider = State(initialValue: FilesProvider(service: service))\n  }\n\n  enum Config {\n    case list\n    case moreOptions\n  }\n\n  var body: some View {\n    VStack {\n      picker\n      if !errorMessage.isEmpty {\n        Text(\"Error \\(errorMessage)\")\n          .bold()\n      }\n      switch selectedSegment {\n      case .list:\n        listView\n      case .moreOptions:\n        moreOptionsView\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  var picker: some View {\n    Picker(\"Options\", selection: $selectedSegment) {\n      Text(\"Shows List\").tag(Config.list)\n      Text(\"Show More options\").tag(Config.moreOptions)\n    }\n    .pickerStyle(SegmentedPickerStyle())\n    .padding()\n  }\n\n  var moreOptionsView: some View {\n    ScrollView {\n      VStack {\n        uploadFileButton\n        Text(\"This button will load a file that has been added to this app bundle.\")\n        if let uploadedFile = filesProvider.uploadedFile {\n          FileObjectView(file: uploadedFile)\n        }\n      }\n    }\n  }\n\n  var listView: some View {\n    VStack(spacing: 0) {\n      listFilesButton\n      list\n    }\n  }\n\n  var listFilesButton: some View {\n    Button(\"List Files\") {\n      Task {\n        isLoading = true\n        defer { isLoading = false } // ensure isLoading is set to false when the task completes\n        do {\n          try await filesProvider.listFiles()\n        } catch {\n          errorMessage = \"\\(error)\"\n        }\n      }\n    }\n    .buttonStyle(.borderedProminent)\n  }\n\n  var uploadFileButton: some View {\n    Button(\"Upload File\") {\n      Task {\n        
isLoading = true\n        defer { isLoading = false } // ensure isLoading is set to false when the task completes\n        do {\n          let fileData = try contentLoader.loadBundledContent(fromFileNamed: \"WorldCupData\", ext: \"jsonl\")\n          try await filesProvider.uploadFile(parameters: .init(fileName: \"WorldCupData\", file: fileData, purpose: \"fine-tune\"))\n        } catch {\n          errorMessage = \"\\(error)\"\n        }\n      }\n    }\n    .buttonStyle(.borderedProminent)\n  }\n\n  var list: some View {\n    List {\n      ForEach(Array(filesProvider.files.enumerated()), id: \\.offset) { _, file in\n        FileObjectView(file: file)\n      }\n    }\n  }\n\n  @State private var filesProvider: FilesProvider\n  @State private var isLoading = false\n  @State private var errorMessage = \"\"\n  @State private var selectedSegment = Config.list\n\n  private let contentLoader = ContentLoader()\n}\n\n// MARK: - FileObjectView\n\nstruct FileObjectView: View {\n  init(file: FileObject) {\n    self.file = file\n  }\n\n  var body: some View {\n    VStack(alignment: .leading, spacing: 4) {\n      Text(\"File name = \\(file.filename)\")\n        .font(.title2)\n      VStack(alignment: .leading, spacing: 2) {\n        Text(\"ID = \\(file.id)\")\n        Text(\"Created = \\(file.createdAt)\")\n        Text(\"Object = \\(file.object)\")\n        Text(\"Purpose = \\(file.purpose)\")\n        Text(\"Status = \\(file.status ?? \"NO STATUS\")\")\n        Text(\"Status Details = \\(file.statusDetails ?? \"NO DETAILS\")\")\n      }\n      .font(.callout)\n    }\n    .foregroundColor(.primary)\n    .padding()\n    .background(\n      RoundedRectangle(cornerSize: .init(width: 20, height: 20))\n        .foregroundColor(.mint))\n  }\n\n  private let file: FileObject\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/FilesDemo/FilesProvider.swift",
    "content": "//\n//  FilesProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/23/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass FilesProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var files = [FileObject]()\n  var uploadedFile: FileObject?\n  var deletedStatus: DeletionStatus?\n  var retrievedFile: FileObject?\n  var fileContent = [[String: Any]]()\n\n  func listFiles() async throws {\n    files = try await service.listFiles().data\n  }\n\n  func uploadFile(\n    parameters: FileParameters)\n    async throws\n  {\n    uploadedFile = try await service.uploadFile(parameters: parameters)\n  }\n\n  func deleteFileWith(\n    id: String)\n    async throws\n  {\n    deletedStatus = try await service.deleteFileWith(id: id)\n  }\n\n  func retrieveFileWith(\n    id: String)\n    async throws\n  {\n    retrievedFile = try await service.retrieveFileWith(id: id)\n  }\n\n  func retrieveContentForFileWith(\n    id: String)\n    async throws\n  {\n    fileContent = try await service.retrieveContentForFileWith(id: id)\n  }\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/FineTuningDemo/FineTuningJobDemoView.swift",
    "content": "//\n//  FineTuningJobDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/23/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - FineTuningJobDemoView\n\nstruct FineTuningJobDemoView: View {\n  init(service: OpenAIService) {\n    _fineTuningJobProvider = State(initialValue: FineTuningJobProvider(service: service))\n  }\n\n  var body: some View {\n    VStack {\n      Button(\"List Fine tuning jobs\") {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the\n          try await fineTuningJobProvider.listFineTuningJobs()\n        }\n      }\n      .buttonStyle(.borderedProminent)\n      List {\n        ForEach(Array(fineTuningJobProvider.fineTunedJobs.enumerated()), id: \\.offset) { _, job in\n          FineTuningObjectView(job: job)\n        }\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  @State private var fineTuningJobProvider: FineTuningJobProvider\n  @State private var isLoading = false\n}\n\n// MARK: - FineTuningObjectView\n\nstruct FineTuningObjectView: View {\n  init(job: FineTuningJobObject) {\n    self.job = job\n  }\n\n  var body: some View {\n    VStack(alignment: .leading, spacing: 4) {\n      Text(\"Fine Tuned Model - \\(job.fineTunedModel ?? 
\"NO MODEL\")\")\n        .font(.title2)\n      VStack(alignment: .leading, spacing: 2) {\n        Text(\"Model = \\(job.model)\")\n        Text(\"Object = \\(job.object)\")\n        Text(\"ID = \\(job.id)\")\n        Text(\"Created = \\(job.createdAt)\")\n        Text(\"Organization ID = \\(job.organizationId)\")\n        Text(\"Training file = \\(job.trainingFile)\")\n        Text(\"Status = \\(job.status)\")\n          .bold()\n      }\n      .font(.callout)\n    }\n    .foregroundColor(.primary)\n    .padding()\n    .background(\n      RoundedRectangle(cornerSize: .init(width: 20, height: 20))\n        .foregroundColor(.mint))\n  }\n\n  private let job: FineTuningJobObject\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/FineTuningDemo/FineTuningJobProvider.swift",
    "content": "//\n//  FineTuningJobProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/23/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass FineTuningJobProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var createdFineTuningJob: FineTuningJobObject?\n  var canceledFineTuningJob: FineTuningJobObject?\n  var retrievedFineTuningJob: FineTuningJobObject?\n  var fineTunedJobs = [FineTuningJobObject]()\n  var finteTuningEventObjects = [FineTuningJobEventObject]()\n\n  func createFineTuningJob(\n    parameters: FineTuningJobParameters)\n    async throws\n  {\n    createdFineTuningJob = try await service.createFineTuningJob(parameters: parameters)\n  }\n\n  func listFineTuningJobs()\n    async throws\n  {\n    fineTunedJobs = try await service.listFineTuningJobs(after: nil, limit: nil).data\n  }\n\n  func retrieveFineTuningJob(\n    id: String)\n    async throws\n  {\n    retrievedFineTuningJob = try await service.retrieveFineTuningJob(id: id)\n  }\n\n  func cancelFineTuningJob(\n    id: String)\n    async throws\n  {\n    canceledFineTuningJob = try await service.cancelFineTuningJobWith(id: id)\n  }\n\n  func listFineTuningEventsForJobWith(\n    id: String)\n    async throws\n  {\n    finteTuningEventObjects = try await service.listFineTuningEventsForJobWith(id: id, after: nil, limit: nil).data\n  }\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ImagesDemo/ImagesDemoView.swift",
    "content": "//\n//  ImagesDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/24/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ImagesDemoView: View {\n  init(service: OpenAIService) {\n    _imagesProvider = State(initialValue: ImagesProvider(service: service))\n  }\n\n  var body: some View {\n    ScrollView {\n      textArea\n      if !errorMessage.isEmpty {\n        Text(\"Error \\(errorMessage)\")\n          .bold()\n      }\n      ForEach(Array(imagesProvider.images.enumerated()), id: \\.offset) { _, url in\n        AsyncImage(url: url, scale: 1) { image in\n          image\n            .resizable()\n            .aspectRatio(contentMode: .fill)\n            .clipped()\n        } placeholder: {\n          EmptyView()\n        }\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the\n          do {\n            try await imagesProvider.createImages(parameters: .init(prompt: prompt, model: .dalle3(.largeSquare)))\n          } catch {\n            errorMessage = \"\\(error)\"\n          }\n        }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  @State private var imagesProvider: ImagesProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var errorMessage = \"\"\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ImagesDemo/ImagesProvider.swift",
    "content": "//\n//  ImagesProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/23/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass ImagesProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var images = [URL]()\n\n  func createImages(\n    parameters: ImageCreateParameters)\n    async throws\n  {\n    let urls = try await service.legacyCreateImages(\n      parameters: parameters).data.map(\\.url)\n    images = urls.compactMap(\\.self)\n  }\n\n  func editImages(\n    parameters: ImageEditParameters)\n    async throws\n  {\n    let urls = try await service.legacyEditImage(\n      parameters: parameters).data.map(\\.url)\n    images = urls.compactMap(\\.self)\n  }\n\n  func createImageVariations(\n    parameters: ImageVariationParameters)\n    async throws\n  {\n    let urls = try await service.legacyCreateImageVariations(parameters: parameters).data.map(\\.url)\n    images = urls.compactMap(\\.self)\n  }\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/LocalChatDemo/LocalChatDemoView.swift",
    "content": "//\n//  LocalChatDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 6/24/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n/// For more visit https://github.com/ollama/ollama/blob/main/docs/openai.md\n\n/// Important:\n/// Before using a model, pull it locally ollama pull:\n\n/// `ollama pull llama3`\n/// Default model names\n/// For tooling that relies on default OpenAI model names such as gpt-3.5-turbo, use ollama cp to copy an existing model name to a temporary name:\n\n/// `ollama cp llama3 gpt-3.5-turbo`\n/// Afterwards, this new model name can be specified the model field:\n\n/// ```curl http://localhost:11434/v1/chat/completions \\\n///    -H \"Content-Type: application/json\" \\\n///    -d '{\n///        \"model\": \"gpt-3.5-turbo\",\n///        \"messages\": [\n///           {\n///               \"role\": \"user\",\n///                \"content\": \"Hello!\"\n///            }\n///        ]\n///    }'```\n\nstruct LocalChatDemoView: View {\n  init(service: OpenAIService, customModel: String? 
= nil) {\n    self.customModel = customModel\n    _chatProvider = State(initialValue: ChatProvider(service: service))\n  }\n\n  enum ChatConfig {\n    case chatCompletion\n    case chatCompeltionStream\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollView {\n      VStack {\n        picker\n        textArea\n        Text(chatProvider.errorMessage)\n          .foregroundColor(.red)\n        switch selectedSegment {\n        case .chatCompeltionStream:\n          streamedChatResultView\n        case .chatCompletion:\n          chatCompletionResultView\n        }\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  var picker: some View {\n    Picker(\"Options\", selection: $selectedSegment) {\n      Text(\"Chat Completion\").tag(ChatConfig.chatCompletion)\n      Text(\"Chat Completion stream\").tag(ChatConfig.chatCompeltionStream)\n    }\n    .pickerStyle(SegmentedPickerStyle())\n    .padding()\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the\n\n          let content = ChatCompletionParameters.Message.ContentType.text(prompt)\n          prompt = \"\"\n          let model: Model =\n            if let customModel, !customModel.isEmpty {\n              .custom(customModel)\n            } else {\n              // Make sure you run `ollama pull llama3` in your terminal to download this model.\n              .custom(\"llama3\")\n            }\n\n          let parameters = ChatCompletionParameters(\n            messages: [.init(\n              role: .user,\n              content: content)],\n            model: model)\n          switch selectedSegment {\n      
    case .chatCompletion:\n            try await chatProvider.startChat(parameters: parameters)\n          case .chatCompeltionStream:\n            try await chatProvider.startStreamedChat(parameters: parameters)\n          }\n        }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  /// stream = `false`\n  var chatCompletionResultView: some View {\n    ForEach(Array(chatProvider.messages.enumerated()), id: \\.offset) { _, val in\n      VStack(spacing: 0) {\n        Text(\"\\(val)\")\n      }\n    }\n  }\n\n  /// stream = `true`\n  var streamedChatResultView: some View {\n    VStack {\n      Button(\"Cancel stream\") {\n        chatProvider.cancelStream()\n      }\n      Text(chatProvider.message)\n    }\n  }\n\n  @State private var chatProvider: ChatProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var selectedSegment = ChatConfig.chatCompeltionStream\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/LocalHostEntryView.swift",
    "content": "//\n//  LocalHostEntryView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 6/24/24.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct LocalHostEntryView: View {\n  var body: some View {\n    NavigationStack {\n      VStack {\n        Spacer()\n        TextField(\"Enter URL\", text: $url)\n          .padding()\n          .textFieldStyle(.roundedBorder)\n        NavigationLink(destination: OptionsListView(\n          openAIService: OpenAIServiceFactory.service(baseURL: url),\n          options: [.localChat]))\n        {\n          Text(\"Continue\")\n            .padding()\n            .padding(.horizontal, 48)\n            .foregroundColor(.white)\n            .background(\n              Capsule()\n                .foregroundColor(url.isEmpty ? .gray.opacity(0.2) : Color(red: 64 / 255, green: 195 / 255, blue: 125 / 255)))\n        }\n        .disabled(url.isEmpty)\n        Spacer()\n      }\n      .padding()\n      .navigationTitle(\"Enter URL\")\n    }\n  }\n\n  @State private var url = \"\"\n}\n\n#Preview {\n  ApiKeyIntroView()\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ModelsDemo/ModelsDemoView.swift",
    "content": "//\n//  ModelsDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/24/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ModelsDemoView: View {\n  init(service: OpenAIService) {\n    _modelsProvider = State(initialValue: ModelsProvider(service: service))\n  }\n\n  var body: some View {\n    VStack {\n      showModelsButton\n      list\n    }\n  }\n\n  var list: some View {\n    List {\n      ForEach(Array(modelsProvider.models.enumerated()), id: \\.offset) { _, model in\n        Text(\"\\(model.id)\")\n      }\n    }\n  }\n\n  var showModelsButton: some View {\n    Button(\"List models\") {\n      Task {\n        isLoading = true\n        defer { isLoading = false } // ensure isLoading is set to false when the\n        do {\n          try await modelsProvider.listModels()\n        } catch {\n          errorMessage = \"\\(error)\"\n        }\n      }\n    }\n    .buttonStyle(.bordered)\n  }\n\n  @State private var modelsProvider: ModelsProvider\n  @State private var isLoading = false\n  @State private var errorMessage = \"\"\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ModelsDemo/ModelsProvider.swift",
    "content": "//\n//  ModelsProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/24/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass ModelsProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var models = [ModelObject]()\n  var retrievedModel: ModelObject?\n  var deletionStatus: DeletionStatus?\n\n  func listModels() async throws {\n    models = try await service.listModels().data\n  }\n\n  func retrieveModelWith(\n    id: String)\n    async throws\n  {\n    retrievedModel = try await service.retrieveModelWith(id: id)\n  }\n\n  func deleteFineTuneModelWith(\n    id: String)\n    async throws\n  {\n    deletionStatus = try await service.deleteFineTuneModelWith(id: id)\n  }\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ModerationsDemo/ModerationDemoView.swift",
    "content": "//\n//  ModerationDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/24/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ModerationDemoView: View {\n  init(service: OpenAIService) {\n    _moderationProvider = State(initialValue: ModerationProvider(service: service))\n  }\n\n  var body: some View {\n    VStack {\n      textArea\n      if moderationProvider.isFlagged {\n        Text(\"That is not a nice thing to say.\")\n      }\n      if !errorMessage.isEmpty {\n        Text(\"Error \\(errorMessage)\")\n          .bold()\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the\n          do {\n            try await moderationProvider.createModerationFromText(parameters: .init(input: prompt))\n          } catch {\n            errorMessage = \"\\(error)\"\n          }\n        }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  @State private var moderationProvider: ModerationProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var errorMessage = \"\"\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ModerationsDemo/ModerationProvider.swift",
    "content": "//\n//  ModerationProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/24/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass ModerationProvider {\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  var isFlagged = false\n\n  func createModerationFromText(\n    parameters: ModerationParameter<String>)\n    async throws\n  {\n    isFlagged = try await service.createModerationFromText(parameters: parameters).isFlagged\n  }\n\n  func createModerationFromTexts(\n    parameters: ModerationParameter<[String]>)\n    async throws\n  {\n    isFlagged = try await service.createModerationFromTexts(parameters: parameters).isFlagged\n  }\n\n  private let service: OpenAIService\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/OptionsListView.swift",
    "content": "//\n//  OptionsListView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct OptionsListView: View {\n  /// https://platform.openai.com/docs/api-reference\n  enum APIOption: String, CaseIterable, Identifiable {\n    case audio = \"Audio\"\n    case chat = \"Chat\"\n    case chatPredictedOutput = \"Chat Predicted Output\"\n    case localChat = \"Local Chat\" // Ollama\n    case vision = \"Vision\"\n    case embeddings = \"Embeddings\"\n    case fineTuning = \"Fine Tuning\"\n    case files = \"Files\"\n    case images = \"Images\"\n    case models = \"Models\"\n    case moderations = \"Moderations\"\n    case chatHistoryConversation = \"Chat History Conversation\"\n    case chatFunctionCall = \"Chat Functions call\"\n    case chatFunctionsCallStream = \"Chat Functions call (Stream)\"\n    case chatStructuredOutput = \"Chat Structured Output\"\n    case chatStructuredOutputTool = \"Chat Structured Output Tools\"\n    case configureAssistant = \"Configure Assistant\"\n    case realTimeAPI = \"Real time API\"\n    case responseStream = \"Response Stream Demo\"\n\n    var id: String { rawValue }\n  }\n\n  var openAIService: OpenAIService\n\n  var options: [APIOption]\n\n  var body: some View {\n    VStack {\n      // Custom model input field\n      VStack(alignment: .leading, spacing: 8) {\n        Text(\"Custom Model (Optional)\")\n          .font(.caption)\n          .foregroundColor(.secondary)\n        TextField(\"e.g., grok-beta, claude-3-opus, etc.\", text: $customModel)\n          .textFieldStyle(.roundedBorder)\n          .autocapitalization(.none)\n          .disableAutocorrection(true)\n      }\n      .padding()\n      List(options, id: \\.self, selection: $selection) { option in\n        Text(option.rawValue)\n      }\n    }\n    .sheet(item: $selection) { selection in\n      VStack {\n        Text(selection.rawValue)\n          .font(.largeTitle)\n          
.padding()\n        switch selection {\n        case .audio:\n          AudioDemoView(service: openAIService)\n        case .chat:\n          ChatDemoView(service: openAIService, customModel: customModel)\n        case .chatPredictedOutput:\n          ChatPredictedOutputDemoView(service: openAIService, customModel: customModel)\n        case .vision:\n          ChatVisionDemoView(service: openAIService, customModel: customModel)\n        case .embeddings:\n          EmbeddingsDemoView(service: openAIService)\n        case .fineTuning:\n          FineTuningJobDemoView(service: openAIService)\n        case .files:\n          FilesDemoView(service: openAIService)\n        case .images:\n          ImagesDemoView(service: openAIService)\n        case .localChat:\n          LocalChatDemoView(service: openAIService, customModel: customModel)\n        case .models:\n          ModelsDemoView(service: openAIService)\n        case .moderations:\n          ModerationDemoView(service: openAIService)\n        case .chatHistoryConversation:\n          ChatStreamFluidConversationDemoView(service: openAIService, customModel: customModel)\n        case .chatFunctionCall:\n          ChatFunctionCallDemoView(service: openAIService)\n        case .chatFunctionsCallStream:\n          ChatFunctionsCalllStreamDemoView(service: openAIService, customModel: customModel)\n        case .chatStructuredOutput:\n          ChatStructuredOutputDemoView(service: openAIService, customModel: customModel)\n        case .chatStructuredOutputTool:\n          ChatStructureOutputToolDemoView(service: openAIService, customModel: customModel)\n        case .configureAssistant:\n          AssistantConfigurationDemoView(service: openAIService)\n        case .realTimeAPI:\n          Text(\"WIP\")\n        case .responseStream:\n          ResponseStreamDemoView(service: openAIService)\n        }\n      }\n    }\n  }\n\n  @State private var selection: APIOption? = nil\n  @State private var customModel = \"\"\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/PredictedOutputsDemo/ChatPredictedOutputDemoView.swift",
    "content": "//\n//  ChatPredictedOutputDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 1/3/25.\n//\n\nimport Foundation\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - ChatPredictedOutputDemoView\n\n/// https://platform.openai.com/docs/guides/predicted-outputs\nstruct ChatPredictedOutputDemoView: View {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.customModel = customModel\n    chatProvider = ChatProvider(service: service)\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollView {\n      VStack {\n        textArea\n        Text(chatProvider.errorMessage)\n          .foregroundColor(.red)\n        chatCompletionResultView\n      }\n    }\n    .overlay(\n      Group {\n        if isLoading {\n          ProgressView()\n        } else {\n          EmptyView()\n        }\n      })\n  }\n\n  var textArea: some View {\n    HStack(spacing: 4) {\n      TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .padding()\n      Button {\n        Task {\n          isLoading = true\n          defer { isLoading = false } // ensure isLoading is set to false when the\n\n          let content = ChatCompletionParameters.Message.ContentType.text(prompt)\n          prompt = \"\"\n          let model: Model =\n            if let customModel, !customModel.isEmpty {\n              .custom(customModel)\n            } else {\n              .gpt4o\n            }\n\n          let parameters = ChatCompletionParameters(\n            messages: [\n              .init(role: .system, content: .text(systemMessage)),\n              .init(role: .user, content: content),\n              .init(role: .user, content: .text(predictedCode)),\n            ], // Sending the predicted code as another user message.\n            model: model,\n            prediction: .init(content: .text(predictedCode)))\n          try await chatProvider.startChat(parameters: parameters)\n      
  }\n      } label: {\n        Image(systemName: \"paperplane\")\n      }\n      .buttonStyle(.bordered)\n    }\n    .padding()\n  }\n\n  /// stream = `false`\n  var chatCompletionResultView: some View {\n    ForEach(Array(chatProvider.messages.enumerated()), id: \\.offset) { _, val in\n      VStack(spacing: 0) {\n        Text(\"\\(val)\")\n      }\n    }\n  }\n\n  @State private var chatProvider: ChatProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n}\n\nlet systemMessage = \"\"\"\n  You are a code editor assistant. I only output code without any explanations, commentary, or additional text. I follow these rules:\n\n  1. Respond with code only, never any text or explanations\n  2. Use appropriate syntax highlighting/formatting \n  3. If the code needs to be modified/improved, output the complete updated code\n  4. Do not include caveats, introductions, or commentary\n  5. Do not ask questions or solicit feedback\n  6. Do not explain what changes were made\n  7. 
Assume the user knows what they want and will review the code themselves\n  \"\"\"\n\nlet predictedCode = \"\"\"\n  struct ChatPredictedOutputDemoView: View {\n\n     @State private var chatProvider: ChatProvider\n     @State private var isLoading = false\n     @State private var prompt = \"\"\n\n     init(service: OpenAIService) {\n        chatProvider = ChatProvider(service: service)\n     }\n\n     var body: some View {\n        ScrollView {\n           VStack {\n              textArea\n              Text(chatProvider.errorMessage)\n                 .foregroundColor(.red)\n              streamedChatResultView\n           }\n        }\n        .overlay(\n           Group {\n              if isLoading {\n                 ProgressView()\n              } else {\n                 EmptyView()\n              }\n           }\n        )\n     }\n\n     var textArea: some View {\n        HStack(spacing: 4) {\n           TextField(\"Enter prompt\", text: $prompt, axis: .vertical)\n              .textFieldStyle(.roundedBorder)\n              .padding()\n           Button {\n              Task {\n                 isLoading = true\n                 defer { isLoading = false }  // ensure isLoading is set to false when the\n\n                 let content: ChatCompletionParameters.Message.ContentType = .text(prompt)\n                 prompt = \"\"\n                 let parameters = ChatCompletionParameters(\n                    messages: [.init(\n                    role: .user,\n                    content: content)],\n                    model: .gpt4o)\n              }\n           } label: {\n              Image(systemName: \"paperplane\")\n           }\n           .buttonStyle(.bordered)\n        }\n        .padding()\n     }\n\n     /// stream = `true`\n     var streamedChatResultView: some View {\n        VStack {\n           Button(\"Cancel stream\") {\n              chatProvider.cancelStream()\n           }\n           Text(chatProvider.message)\n        }\n     }\n  }\n  
\"\"\"\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Preview Content/Preview Assets.xcassets/Contents.json",
    "content": "{\n  \"info\" : {\n    \"author\" : \"xcode\",\n    \"version\" : 1\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Resources/WorldCupData.jsonl",
    "content": "{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"Tell me about the World Cup winner in Spain 1982.\"}, {\"role\": \"assistant\", \"content\": \"The winner of the 1982 FIFA World Cup held in Spain was Italy. They defeated West Germany 3-1 in the final match.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"What about Mexico 1986?\"}, {\"role\": \"assistant\", \"content\": \"The 1986 FIFA World Cup in Mexico was won by Argentina. They beat West Germany 3-2 in the final.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"Who won in Italy 1990?\"}, {\"role\": \"assistant\", \"content\": \"West Germany won the 1990 World Cup in Italy. They defeated Argentina 1-0 in the final.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"What about USA 1994?\"}, {\"role\": \"assistant\", \"content\": \"Brazil won the 1994 World Cup held in the USA. They defeated Italy in a penalty shootout after a 0-0 draw in the final match.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"Tell me about France 1998.\"}, {\"role\": \"assistant\", \"content\": \"France won the 1998 World Cup held in their home country. 
They defeated Brazil 3-0 in the final.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"What happened in Korea-Japan 2002?\"}, {\"role\": \"assistant\", \"content\": \"Brazil won the 2002 World Cup held in South Korea and Japan. They defeated Germany 2-0 in the final match.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"Who won in Germany 2006?\"}, {\"role\": \"assistant\", \"content\": \"Italy won the 2006 World Cup held in Germany. They defeated France in a penalty shootout after a 1-1 draw in the final.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"Tell me about South Africa 2010.\"}, {\"role\": \"assistant\", \"content\": \"Spain won the 2010 World Cup held in South Africa. They defeated the Netherlands 1-0 in the final.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"Who won in Brazil 2014?\"}, {\"role\": \"assistant\", \"content\": \"Germany won the 2014 World Cup held in Brazil. They defeated Argentina 1-0 in the final.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"What about Russia 2018?\"}, {\"role\": \"assistant\", \"content\": \"France won the 2018 World Cup held in Russia. They defeated Croatia 4-2 in the final match.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant specializing in World Cup history.\"}, {\"role\": \"user\", \"content\": \"Tell me about Qatar 2022.\"}, {\"role\": \"assistant\", \"content\": \"I don't know\"}]}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ResponseAPIDemo/ResponseStreamDemoView.swift",
    "content": "//\n//  ResponseStreamDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 6/7/25.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n// MARK: - ResponseStreamDemoView\n\nstruct ResponseStreamDemoView: View {\n  init(service: OpenAIService) {\n    _provider = State(initialValue: ResponseStreamProvider(service: service))\n  }\n\n  @Environment(\\.colorScheme) var colorScheme\n\n  var body: some View {\n    VStack(spacing: 0) {\n      // Header\n      headerView\n\n      // Messages\n      ScrollViewReader { proxy in\n        ScrollView {\n          LazyVStack(spacing: 12) {\n            ForEach(provider.messages) { message in\n              MessageBubbleView(message: message)\n                .id(message.id)\n            }\n\n            if provider.isStreaming {\n              HStack {\n                LoadingIndicatorView()\n                  .frame(width: 30, height: 30)\n                Spacer()\n              }\n              .padding(.horizontal)\n            }\n          }\n          .padding()\n        }\n        .onChange(of: provider.messages.count) { _, _ in\n          withAnimation {\n            proxy.scrollTo(provider.messages.last?.id, anchor: .bottom)\n          }\n        }\n      }\n\n      // Error view\n      if let error = provider.error {\n        Text(error)\n          .foregroundColor(.red)\n          .font(.caption)\n          .padding(.horizontal)\n          .padding(.vertical, 8)\n          .background(Color.red.opacity(0.1))\n      }\n\n      // Input area\n      inputArea\n    }\n    .navigationTitle(\"Response Stream Demo\")\n    .navigationBarTitleDisplayMode(.inline)\n    .toolbar {\n      ToolbarItem(placement: .navigationBarTrailing) {\n        Button(\"Clear\") {\n          provider.clearConversation()\n        }\n        .disabled(provider.isStreaming)\n      }\n    }\n  }\n\n  @State private var provider: ResponseStreamProvider\n  @State private var inputText = \"\"\n  @FocusState private var 
isInputFocused: Bool\n\n  // MARK: - Subviews\n\n  private var headerView: some View {\n    VStack(alignment: .leading, spacing: 8) {\n      Text(\"Streaming Responses with Conversation State\")\n        .font(.headline)\n\n      Text(\"This demo uses the Responses API with streaming to maintain conversation context across multiple turns.\")\n        .font(.caption)\n        .foregroundColor(.secondary)\n\n      if provider.messages.isEmpty {\n        Label(\"Start a conversation below\", systemImage: \"bubble.left.and.bubble.right\")\n          .font(.caption)\n          .foregroundColor(.blue)\n          .padding(.top, 4)\n      }\n    }\n    .frame(maxWidth: .infinity, alignment: .leading)\n    .padding()\n    .background(Color(UIColor.secondarySystemBackground))\n  }\n\n  private var inputArea: some View {\n    HStack(spacing: 12) {\n      TextField(\"Type a message...\", text: $inputText, axis: .vertical)\n        .textFieldStyle(.roundedBorder)\n        .lineLimit(1 ... 5)\n        .focused($isInputFocused)\n        .disabled(provider.isStreaming)\n        .onSubmit {\n          sendMessage()\n        }\n\n      Button(action: sendMessage) {\n        Image(systemName: provider.isStreaming ? \"stop.circle.fill\" : \"arrow.up.circle.fill\")\n          .font(.title2)\n          .foregroundColor(provider.isStreaming ? .red : (inputText.isEmpty ? 
.gray : .blue))\n      }\n      .disabled(!provider.isStreaming && inputText.isEmpty)\n    }\n    .padding()\n    .background(Color(UIColor.systemBackground))\n    .overlay(\n      Rectangle()\n        .frame(height: 1)\n        .foregroundColor(Color(UIColor.separator)),\n      alignment: .top)\n  }\n\n  private func sendMessage() {\n    guard !inputText.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty else { return }\n\n    if provider.isStreaming {\n      provider.stopStreaming()\n    } else {\n      let message = inputText\n      inputText = \"\"\n      provider.sendMessage(message)\n    }\n  }\n}\n\n// MARK: - MessageBubbleView\n\nstruct MessageBubbleView: View {\n  let message: ResponseStreamProvider.ResponseMessage\n\n  @Environment(\\.colorScheme) var colorScheme\n\n  var body: some View {\n    HStack {\n      if message.role == .assistant {\n        messageContent\n          .background(backgroundGradient)\n          .cornerRadius(16)\n          .overlay(\n            RoundedRectangle(cornerRadius: 16)\n              .stroke(borderColor, lineWidth: 1))\n        Spacer(minLength: 60)\n      } else {\n        Spacer(minLength: 60)\n        messageContent\n          .background(Color.blue)\n          .cornerRadius(16)\n          .foregroundColor(.white)\n      }\n    }\n  }\n\n  private var messageContent: some View {\n    VStack(alignment: .leading, spacing: 4) {\n      if message.role == .assistant, message.isStreaming {\n        HStack(spacing: 4) {\n          Image(systemName: \"dot.radiowaves.left.and.right\")\n            .font(.caption2)\n            .foregroundColor(.blue)\n          Text(\"Streaming...\")\n            .font(.caption2)\n            .foregroundColor(.secondary)\n        }\n      }\n\n      Text(message.content.isEmpty && message.isStreaming ? 
\" \" : message.content)\n        .padding(.horizontal, 12)\n        .padding(.vertical, 8)\n\n      if message.role == .assistant, !message.isStreaming, message.responseId != nil {\n        Text(\"Response ID: \\(String(message.responseId?.prefix(8) ?? \"\"))\")\n          .font(.caption2)\n          .foregroundColor(.secondary)\n          .padding(.horizontal, 12)\n          .padding(.bottom, 4)\n      }\n    }\n  }\n\n  private var backgroundGradient: some View {\n    LinearGradient(\n      gradient: Gradient(colors: [\n        Color(UIColor.secondarySystemBackground),\n        Color(UIColor.tertiarySystemBackground),\n      ]),\n      startPoint: .topLeading,\n      endPoint: .bottomTrailing)\n  }\n\n  private var borderColor: Color {\n    colorScheme == .dark ? Color.white.opacity(0.1) : Color.black.opacity(0.1)\n  }\n}\n\n// MARK: - LoadingIndicatorView\n\nstruct LoadingIndicatorView: View {\n  var body: some View {\n    ZStack {\n      ForEach(0 ..< 3) { index in\n        Circle()\n          .fill(Color.blue)\n          .frame(width: 8, height: 8)\n          .offset(x: CGFloat(index - 1) * 12)\n          .opacity(0.8)\n          .scaleEffect(animationScale(for: index))\n      }\n    }\n    .onAppear {\n      withAnimation(\n        .easeInOut(duration: 0.8)\n          .repeatForever(autoreverses: true))\n      {\n        animationAmount = 1\n      }\n    }\n  }\n\n  @State private var animationAmount = 0.0\n\n  private func animationScale(for index: Int) -> Double {\n    let delay = Double(index) * 0.1\n    let progress = (animationAmount + delay).truncatingRemainder(dividingBy: 1.0)\n    return 0.5 + (0.5 * sin(progress * .pi))\n  }\n}\n\n// MARK: - Preview\n\n#Preview {\n  NavigationView {\n    ResponseStreamDemoView(service: OpenAIServiceFactory.service(apiKey: \"test\"))\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ResponseAPIDemo/ResponseStreamProvider.swift",
    "content": "//\n//  ResponseStreamProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 6/7/25.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@MainActor\n@Observable\nclass ResponseStreamProvider {\n  // MARK: - Initialization\n\n  init(service: OpenAIService) {\n    self.service = service\n  }\n\n  // MARK: - Message Model\n\n  struct ResponseMessage: Identifiable {\n    let id = UUID()\n    let role: MessageRole\n    var content: String\n    let timestamp: Date\n    var isStreaming = false\n    let responseId: String?\n\n    enum MessageRole {\n      case user\n      case assistant\n    }\n  }\n\n  var messages = [ResponseMessage]()\n  var isStreaming = false\n  var currentStreamingMessage: ResponseMessage?\n  var error: String?\n\n  // MARK: - Public Methods\n\n  func sendMessage(_ text: String) {\n    // Cancel any existing stream\n    streamTask?.cancel()\n\n    // Add user message\n    let userMessage = ResponseMessage(\n      role: .user,\n      content: text,\n      timestamp: Date(),\n      responseId: nil)\n    messages.append(userMessage)\n\n    // Start streaming response\n    streamTask = Task {\n      await streamResponse(for: text)\n    }\n  }\n\n  func stopStreaming() {\n    streamTask?.cancel()\n    streamTask = nil\n\n    // Finalize current streaming message\n    if var message = currentStreamingMessage {\n      message.isStreaming = false\n      if let index = messages.firstIndex(where: { $0.id == message.id }) {\n        messages[index] = message\n      }\n    }\n\n    currentStreamingMessage = nil\n    isStreaming = false\n  }\n\n  func clearConversation() {\n    stopStreaming()\n    messages.removeAll()\n    previousResponseId = nil\n    error = nil\n  }\n\n  private let service: OpenAIService\n  private var previousResponseId: String?\n  private var streamTask: Task<Void, Never>?\n\n  // MARK: - Private Methods\n\n  private func streamResponse(for userInput: String) async {\n    isStreaming = true\n    error = 
nil\n\n    // Create streaming message placeholder\n    let streamingMessage = ResponseMessage(\n      role: .assistant,\n      content: \"\",\n      timestamp: Date(),\n      isStreaming: true,\n      responseId: nil)\n    messages.append(streamingMessage)\n    currentStreamingMessage = streamingMessage\n\n    do {\n      // Build input array with conversation history\n      var inputArray = [InputItem]()\n\n      // Add conversation history\n      for message in messages.dropLast(2) { // Exclude current user message and streaming placeholder\n        let content = message.content\n        switch message.role {\n        case .user:\n          inputArray.append(.message(InputMessage(role: \"user\", content: .text(content))))\n        case .assistant:\n          // Assistant messages in conversation history should be sent as simple text\n          inputArray.append(.message(InputMessage(\n            role: \"assistant\",\n            content: .text(content))))\n        }\n      }\n\n      // Add current user message\n      inputArray.append(.message(InputMessage(role: \"user\", content: .text(userInput))))\n\n      let parameters = ModelResponseParameter(\n        input: .array(inputArray),\n        model: .gpt5,\n        instructions: \"You are a helpful assistant. 
Use the conversation history to provide contextual responses.\",\n        maxOutputTokens: 1000,\n        previousResponseId: previousResponseId,\n        tools: [.imageGeneration(.init())])\n\n      let stream = try await service.responseCreateStream(parameters)\n      var accumulatedText = \"\"\n\n      for try await event in stream {\n        guard !Task.isCancelled else { break }\n\n        switch event {\n        case .responseCreated:\n          // Response created event - we'll get the ID in responseCompleted\n          break\n\n        case .outputTextDelta(let delta):\n          accumulatedText += delta.delta\n          updateStreamingMessage(with: accumulatedText)\n\n        case .responseCompleted(let completed):\n          // Update previous response ID for conversation continuity\n          previousResponseId = completed.response.id\n\n          // Finalize the message\n          finalizeStreamingMessage(\n            with: accumulatedText,\n            responseId: completed.response.id)\n\n        case .responseFailed(let failed):\n          throw APIError.requestFailed(\n            description: failed.response.error?.message ?? 
\"Stream failed\")\n\n        case .error(let errorEvent):\n          throw APIError.requestFailed(\n            description: errorEvent.message)\n\n        default:\n          // Handle other events as needed\n          break\n        }\n      }\n\n    } catch {\n      self.error = error.localizedDescription\n\n      // Remove streaming message on error\n      if let streamingId = currentStreamingMessage?.id {\n        messages.removeAll { $0.id == streamingId }\n      }\n    }\n\n    currentStreamingMessage = nil\n    isStreaming = false\n  }\n\n  private func updateStreamingMessage(with content: String) {\n    guard\n      let messageId = currentStreamingMessage?.id,\n      let index = messages.firstIndex(where: { $0.id == messageId })\n    else {\n      return\n    }\n\n    messages[index].content = content\n  }\n\n  private func finalizeStreamingMessage(with content: String, responseId: String) {\n    guard\n      let messageId = currentStreamingMessage?.id,\n      let index = messages.firstIndex(where: { $0.id == messageId })\n    else {\n      return\n    }\n\n    messages[index].content = content\n    messages[index].isStreaming = false\n    messages[index] = ResponseMessage(\n      role: .assistant,\n      content: content,\n      timestamp: messages[index].timestamp,\n      isStreaming: false,\n      responseId: responseId)\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/ServiceSelectionView.swift",
    "content": "//\n//  ServiceSelectionView.swift\n//  SwiftOpenAIExample\n//\n//  Created by Lou Zell on 3/27/24.\n//\n\nimport SwiftUI\n\nstruct ServiceSelectionView: View {\n  var body: some View {\n    NavigationStack {\n      List {\n        Section(\"Select Service\") {\n          NavigationLink(destination: ApiKeyIntroView()) {\n            VStack(alignment: .leading) {\n              Text(\"Default OpenAI Service\")\n                .padding(.bottom, 10)\n              Group {\n                Text(\"Use this service to test SwiftOpenAI functionality by providing your own OpenAI key.\")\n              }\n              .font(.caption)\n              .fontWeight(.light)\n            }\n          }\n\n          NavigationLink(destination: AIProxyIntroView()) {\n            VStack(alignment: .leading) {\n              Text(\"AIProxy Service\")\n                .padding(.bottom, 10)\n              Group {\n                Text(\n                  \"Use this service to test SwiftOpenAI functionality with requests proxied through AIProxy for key protection.\")\n              }\n              .font(.caption)\n              .fontWeight(.light)\n            }\n          }\n\n          NavigationLink(destination: LocalHostEntryView()) {\n            VStack(alignment: .leading) {\n              Text(\"Ollama\")\n                .padding(.bottom, 10)\n              Group {\n                Text(\"Use this service to test SwiftOpenAI functionality by providing your own local host.\")\n              }\n              .font(.caption)\n              .fontWeight(.light)\n            }\n          }\n        }\n      }\n    }\n  }\n}\n\n#Preview {\n  ServiceSelectionView()\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SharedModels/ChatDisplayMessage.swift",
    "content": "//\n//  ChatDisplayMessage.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/4/23.\n//\n\nimport Foundation\nimport SwiftOpenAI\n\nstruct ChatDisplayMessage: Identifiable {\n  init(\n    id: UUID = UUID(),\n    content: DisplayContent,\n    type: DisplayMessageType,\n    delta: ChatDisplayMessage.Delta?)\n  {\n    self.id = id\n    self.content = content\n    self.type = type\n    self.delta = delta\n  }\n\n  struct Delta {\n    var role: String\n    var content: String\n    var functionCallName: String?\n    var functionCallArguments: String?\n  }\n\n  enum DisplayContent: Equatable {\n    case text(String)\n    case images([URL])\n    case content([ChatCompletionParameters.Message.ContentType.MessageContent])\n    case error(String)\n\n    static func ==(lhs: DisplayContent, rhs: DisplayContent) -> Bool {\n      switch (lhs, rhs) {\n      case (.images(let a), .images(let b)):\n        a == b\n      case (.content(let a), .content(let b)):\n        a == b\n      case (.error(let a), .error(let b)):\n        a == b\n      default:\n        false\n      }\n    }\n  }\n\n  enum DisplayMessageType {\n    case received, sent\n  }\n\n  let id: UUID\n  let content: DisplayContent\n  let type: DisplayMessageType\n  let delta: Delta?\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SharedUI/ChatDisplayMessageView.swift",
    "content": "//\n//  ChatDisplayMessageView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/4/23.\n//\n\nimport SwiftUI\n\nstruct ChatDisplayMessageView: View {\n  let message: ChatDisplayMessage\n\n  var body: some View {\n    VStack(alignment: .leading, spacing: 8) {\n      headerFor(message: message)\n      Group {\n        switch message.content {\n        case .content(let content):\n          let text = content.compactMap { contentItem -> String? in\n            if case .text(let text) = contentItem {\n              return text\n            } else {\n              return nil\n            }\n          }.first ?? \"\"\n\n          let urls = content.compactMap { contentItem -> URL? in\n            if case .imageUrl(let imageDetail) = contentItem {\n              return imageDetail.url\n            } else {\n              return nil\n            }\n          }\n          VStack(alignment: .leading, spacing: 8) {\n            chatImagesViewFrom(urls: urls)\n            chatMessageViewWith(text)\n          }\n\n        case .error(let error):\n          Text(error)\n            .padding()\n            .font(.callout)\n            .background(\n              RoundedRectangle(cornerRadius: 20)\n                .foregroundColor(.red.opacity(0.7)))\n\n        case .text(let text):\n          chatMessageViewWith(text)\n\n        case .images(let urls):\n          chatImagesViewFrom(urls: urls)\n        }\n      }\n      .padding(.leading, 23)\n    }\n  }\n\n  @ViewBuilder\n  func chatMessageViewWith(\n    _ text: String)\n    -> some View\n  {\n    if text.isEmpty {\n      ChatMessageLoadingView(animationDuration: 0.5)\n        .frame(width: 10, height: 10)\n    } else {\n      Text(text)\n        .font(.body)\n    }\n  }\n\n  func headerFor(\n    message: ChatDisplayMessage)\n    -> some View\n  {\n    HStack {\n      Image(systemName: message.type == .sent ? 
\"person.circle\" : \"wand.and.stars\")\n        .resizable()\n        .frame(width: 15, height: 15)\n      Text(message.type == .sent ? \"USER\" : \"CHATGPT\")\n        .font(.caption2)\n    }\n    .foregroundColor(.gray.opacity(0.9))\n  }\n\n  func chatImagesViewFrom(\n    urls: [URL])\n    -> some View\n  {\n    ScrollView(.horizontal, showsIndicators: false) {\n      HStack(spacing: 8) {\n        ForEach(urls, id: \\.self) { url in\n          URLImageView(url: url)\n        }\n      }\n    }\n  }\n\n  @State private var urls = [URL]()\n}\n\n#Preview {\n  VStack(alignment: .leading) {\n    ChatDisplayMessageView(message: .init(content: .text(\"How are you?\"), type: .sent, delta: nil))\n    ChatDisplayMessageView(message: .init(content: .text(\"I am ok\"), type: .received, delta: nil))\n    ChatDisplayMessageView(message: .init(content: .images([]), type: .received, delta: nil))\n  }\n  .padding()\n  .frame(maxWidth: .infinity, alignment: .leading)\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SharedUI/ChatMessageLoadingView.swift",
    "content": "//\n//  ChatMessageLoadingView.swift\n//\n//\n//  Created by James Rochabrun on 3/28/24.\n//\n\nimport Foundation\nimport SwiftUI\n\nstruct ChatMessageLoadingView: View {\n  var animationDuration: Double\n  @State private var isScaledUp = false\n\n  var body: some View {\n    Circle()\n      .scaleEffect(isScaledUp ? 1.5 : 1) // 1.5 is 150% size, 1 is 100% size\n      .onAppear {\n        withAnimation(Animation.easeInOut(duration: animationDuration).repeatForever(autoreverses: true)) {\n          isScaledUp.toggle()\n        }\n      }\n  }\n}\n\n#Preview {\n  ChatMessageLoadingView(animationDuration: 0.2)\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SharedUI/LoadingView.swift",
    "content": "//\n//  LoadingView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/4/23.\n//\n\nimport SwiftUI\n\nstruct LoadingView: View {\n  let timer = Timer.publish(every: 0.5, on: .main, in: .common).autoconnect()\n\n  var body: some View {\n    HStack {\n      Text(\"\\(getDots())\")\n        .font(.title)\n        .onReceive(timer) { _ in\n          withAnimation {\n            dotsCount = (dotsCount + 1) % 4\n          }\n        }\n    }\n    .frame(minHeight: 40)\n  }\n\n  func getDots() -> String {\n    String(repeating: \".\", count: dotsCount)\n  }\n\n  @State private var dotsCount = 0\n\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SharedUI/Theme/Sizes.swift",
    "content": "//\n//  Sizes.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/13/23.\n//\n\nimport Foundation\n\n// MARK: - Sizes\n\nenum Sizes { }\n\nextension Sizes {\n  static let spacingExtraSmall: CGFloat = 4.0\n  static let spacingSmall: CGFloat = 6\n  static let spacingMedium: CGFloat = 8\n  static let spacingLarge: CGFloat = 12\n  static let spacingExtraLarge: CGFloat = 16\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SharedUI/Theme/ThemeColor.swift",
    "content": "//\n//  ThemeColor.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/13/23.\n//\n\nimport SwiftUI\n\n// MARK: - ThemeColor\n\nenum ThemeColor { }\n\nextension ThemeColor {\n  static let tintColor = Color.purple\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SharedUI/URLImageView.swift",
    "content": "//\n//  URLImageView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/4/23.\n//\n\nimport SwiftUI\n\n// MARK: - URLImageView\n\nstruct URLImageView: View {\n  let url: URL\n\n  var body: some View {\n    AsyncImage(\n      url: url,\n      transaction: Transaction(animation: .easeInOut))\n    { phase in\n      switch phase {\n      case .empty:\n        ProgressView()\n\n      case .success(let image):\n        image\n          .resizable()\n          .frame(width: 100, height: 100)\n          .transition(.opacity)\n\n      case .failure:\n        Image(systemName: \"wifi.slash\")\n\n      @unknown default:\n        EmptyView()\n      }\n    }\n    .frame(width: 100, height: 100)\n    .background(Color.gray)\n    .clipShape(RoundedRectangle(cornerRadius: 10))\n  }\n}\n\nlet urlImageViewMockURL =\n  URL(\n    string: \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\")!\n\n#Preview {\n  ScrollView {\n    VStack(spacing: 40) {\n      URLImageView(url: urlImageViewMockURL)\n      URLImageView(url: urlImageViewMockURL)\n        .clipShape(Circle())\n        .overlay(Circle().stroke(Color.white, lineWidth: 4))\n        .shadow(radius: 10)\n      URLImageView(url: urlImageViewMockURL)\n        .clipShape(Circle())\n        .overlay(Circle().stroke(Color.white, lineWidth: 4))\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SwiftOpenAIExample.entitlements",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n    <key>com.apple.security.app-sandbox</key>\n    <true/>\n    <key>com.apple.security.files.user-selected.read-only</key>\n    <true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/SwiftOpenAIExampleApp.swift",
    "content": "//\n//  SwiftOpenAIExampleApp.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport SwiftUI\n\n@main\nstruct SwiftOpenAIExampleApp: App {\n  var body: some Scene {\n    WindowGroup {\n      ServiceSelectionView()\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Utilities/ContentLoader.swift",
    "content": "//\n//  ContentLoader.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport Foundation\n\nstruct ContentLoader {\n  enum Error: Swift.Error {\n    case fileNotFound(name: String)\n    case fileDecodingFailed(name: String, Swift.Error)\n  }\n\n  func urlFromAsset(fromFileNamed name: String, ext: String) -> URL? {\n    guard\n      let url = Bundle.main.url(\n        forResource: name,\n        withExtension: ext)\n    else {\n      return nil\n    }\n    return url\n  }\n\n  func loadBundledContent(fromFileNamed name: String, ext: String) throws -> Data {\n    guard let url = urlFromAsset(fromFileNamed: name, ext: ext) else {\n      throw Error.fileNotFound(name: name)\n    }\n\n    do {\n      return try Data(contentsOf: url)\n    } catch {\n      throw Error.fileDecodingFailed(name: name, error)\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Vision/ChatVisionDemoView.swift",
    "content": "//\n//  ChatVisionDemoView.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/8/23.\n//\n\nimport PhotosUI\nimport SwiftOpenAI\nimport SwiftUI\n\nstruct ChatVisionDemoView: View {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.customModel = customModel\n    _chatProvider = State(initialValue: ChatVisionProvider(service: service, customModel: customModel))\n  }\n\n  let customModel: String?\n\n  var body: some View {\n    ScrollViewReader { proxy in\n      VStack {\n        List(chatProvider.chatMessages) { message in\n          ChatDisplayMessageView(message: message)\n            .listRowSeparator(.hidden)\n        }\n        .listStyle(.plain)\n        .onChange(of: chatProvider.chatMessages.last?.content) {\n          let lastMessage = chatProvider.chatMessages.last\n          if let id = lastMessage?.id {\n            proxy.scrollTo(id, anchor: .bottom)\n          }\n        }\n        textArea\n      }\n    }\n  }\n\n  var textArea: some View {\n    HStack(spacing: 0) {\n      photoPicker\n      VStack(alignment: .leading, spacing: 0) {\n        if !selectedImages.isEmpty {\n          selectedImagesView\n          Divider()\n            .foregroundColor(.gray)\n        }\n        textField\n          .padding(6)\n      }\n      .padding(.vertical, 2)\n      .padding(.horizontal, 2)\n      .animation(.bouncy, value: selectedImages.isEmpty)\n      .background(\n        RoundedRectangle(cornerRadius: 16)\n          .stroke(.gray, lineWidth: 1))\n      .padding(.horizontal, 8)\n      textAreSendButton\n    }\n    .padding(.horizontal)\n    .disabled(isLoading)\n  }\n\n  var textField: some View {\n    TextField(\n      \"How Can I help you today?\",\n      text: $prompt,\n      axis: .vertical)\n  }\n\n  var textAreSendButton: some View {\n    Button {\n      Task {\n        isLoading = true\n        defer {\n          // ensure isLoading is set to false after the function executes.\n          isLoading 
= false\n        }\n        /// Make the request\n        let content: [ChatCompletionParameters.Message.ContentType.MessageContent] = [\n          .text(prompt),\n        ] + selectedImageURLS.map { .imageUrl(.init(url: $0)) }\n        resetInput()\n        let model: Model =\n          if let customModel, !customModel.isEmpty {\n            .custom(customModel)\n          } else {\n            .gpt4o\n          }\n\n        try await chatProvider.startStreamedChat(parameters: .init(\n          messages: [.init(role: .user, content: .contentArray(content))],\n          model: model, maxTokens: 300), content: content)\n      }\n    } label: {\n      Image(systemName: \"paperplane\")\n    }\n    .buttonStyle(.bordered)\n    .disabled(prompt.isEmpty)\n  }\n\n  var photoPicker: some View {\n    PhotosPicker(selection: $selectedItems, matching: .images) {\n      Image(systemName: \"photo\")\n    }\n    .onChange(of: selectedItems) {\n      Task {\n        selectedImages.removeAll()\n        for item in selectedItems {\n          if let data = try? 
await item.loadTransferable(type: Data.self) {\n            let base64String = data.base64EncodedString()\n            let url = URL(string: \"data:image/jpeg;base64,\\(base64String)\")!\n            selectedImageURLS.append(url)\n            if let uiImage = UIImage(data: data) {\n              let image = Image(uiImage: uiImage)\n              selectedImages.append(image)\n            }\n          }\n        }\n      }\n    }\n  }\n\n  var selectedImagesView: some View {\n    HStack(spacing: 0) {\n      ForEach(0 ..< selectedImages.count, id: \\.self) { i in\n        selectedImages[i]\n          .resizable()\n          .frame(width: 60, height: 60)\n          .clipShape(RoundedRectangle(cornerRadius: 12))\n          .padding(4)\n      }\n    }\n  }\n\n  @State private var chatProvider: ChatVisionProvider\n  @State private var isLoading = false\n  @State private var prompt = \"\"\n  @State private var selectedItems = [PhotosPickerItem]()\n  @State private var selectedImages = [Image]()\n  @State private var selectedImageURLS = [URL]()\n\n  /// Called when the user taps on the send button. Clears the selected images and prompt.\n  private func resetInput() {\n    prompt = \"\"\n    selectedImages = []\n    selectedItems = []\n    selectedImageURLS = []\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample/Vision/ChatVisionProvider.swift",
    "content": "//\n//  ChatVisionProvider.swift\n//  SwiftOpenAIExample\n//\n//  Created by James Rochabrun on 11/9/23.\n//\n\nimport SwiftOpenAI\nimport SwiftUI\n\n@Observable\nclass ChatVisionProvider {\n  init(service: OpenAIService, customModel: String? = nil) {\n    self.service = service\n    self.customModel = customModel\n  }\n\n  // MARK: - Initializer\n\n  let customModel: String?\n\n  // MARK: - Public Properties\n\n  /// A collection of messages for display in the UI, representing the conversation.\n  var chatMessages = [ChatDisplayMessage]()\n\n  // MARK: - Public Methods\n\n  func startStreamedChat(\n    parameters: ChatCompletionParameters,\n    content: [ChatCompletionParameters.Message.ContentType.MessageContent])\n    async throws\n  {\n    // Displays the user message in the UI\n    await startNewUserDisplayMessage(content)\n    // Start a new assistant message that is initially empty.\n    await startNewAssistantEmptyDisplayMessage()\n\n    do {\n      // Begin the chat stream with the updated parameters.\n      let stream = try await service.startStreamedChat(parameters: parameters)\n      for try await result in stream {\n        // Extract the first choice from the stream results, if none exist, exit the loop.\n        guard let choice = result.choices?.first else { return }\n\n        // Store initial `role` and `functionCall` data from the first `choice.delta` for UI display.\n        // This information is essential for maintaining context in the conversation and for updating\n        // the chat UI with proper role attributions for each message.\n        var newDelta = ChatDisplayMessage.Delta(role: \"\", content: \"\")\n        if let firstDelta = firstChatMessageResponseDelta[result.id ?? 
\"\"] {\n          // If we have already stored the first delta for this result ID, reuse its role.\n          newDelta.role = firstDelta.role!\n        } else {\n          // Otherwise, store the first delta received for future reference.\n          firstChatMessageResponseDelta[result.id ?? \"\"] = choice.delta\n        }\n        // Assign the content received in the current message to the newDelta.\n        newDelta.content = temporalReceivedMessageContent\n        // Update the UI with the latest assistant message and the corresponding delta.\n        await updateLastAssistantMessage(content: choice.delta?.content ?? \"\", delta: newDelta)\n      }\n    } catch {\n      // If an error occurs, update the UI to display the error message.\n      updateLastDisplayedMessage(.init(content: .error(\"\\(error)\"), type: .received, delta: nil))\n    }\n  }\n\n  // MARK: - Private Properties\n\n  private let service: OpenAIService\n\n  /// Accumulates the streamed message content for real-time display updates in the UI.\n  private var temporalReceivedMessageContent = \"\"\n  /// Tracks the identifier of the last message displayed, enabling updates in the UI from the streaming API response.\n  private var lastDisplayedMessageID: UUID?\n  /// Stores the initial chat message's delta, which uniquely includes metadata like `role`.\n  private var firstChatMessageResponseDelta = [String: ChatCompletionChunkObject.ChatChoice.Delta]()\n\n  // MARK: - Private Methods\n\n  @MainActor\n  private func startNewUserDisplayMessage(_ content: [ChatCompletionParameters.Message.ContentType.MessageContent]) {\n    // Updates the UI with the user's message.\n    let startingMessage = ChatDisplayMessage(\n      content: .content(content),\n      type: .sent, delta: nil)\n    addMessage(startingMessage)\n  }\n\n  @MainActor\n  private func startNewAssistantEmptyDisplayMessage() {\n    firstChatMessageResponseDelta = [:]\n    temporalReceivedMessageContent = \"\"\n    let newMessage = ChatDisplayMessage(content: 
.text(temporalReceivedMessageContent), type: .received, delta: nil)\n    let newMessageId = newMessage.id\n    lastDisplayedMessageID = newMessageId\n    addMessage(newMessage)\n  }\n\n  @MainActor\n  private func updateLastAssistantMessage(\n    content: String,\n    delta: ChatDisplayMessage.Delta)\n  {\n    temporalReceivedMessageContent += content\n    guard let id = lastDisplayedMessageID, let index = chatMessages.firstIndex(where: { $0.id == id }) else { return }\n    chatMessages[index] = ChatDisplayMessage(\n      id: id,\n      content: .text(temporalReceivedMessageContent),\n      type: .received,\n      delta: delta)\n  }\n\n  @MainActor\n  private func addMessage(_ message: ChatDisplayMessage) {\n    withAnimation {\n      chatMessages.append(message)\n    }\n  }\n\n  private func updateLastDisplayedMessage(_ message: ChatDisplayMessage) {\n    chatMessages[chatMessages.count - 1] = message\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample.xcodeproj/project.pbxproj",
    "content": "// !$*UTF8*$!\n{\n\tarchiveVersion = 1;\n\tclasses = {\n\t};\n\tobjectVersion = 60;\n\tobjects = {\n\n/* Begin PBXBuildFile section */\n\t\t0DEE5DC12BB40643004894AD /* SwiftOpenAI in Frameworks */ = {isa = PBXBuildFile; productRef = 0DEE5DC02BB40643004894AD /* SwiftOpenAI */; };\n\t\t0DF957842BB53BEF00DD2013 /* ServiceSelectionView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0DF957832BB53BEF00DD2013 /* ServiceSelectionView.swift */; };\n\t\t0DF957862BB543F100DD2013 /* AIProxyIntroView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0DF957852BB543F100DD2013 /* AIProxyIntroView.swift */; };\n\t\t7B029E372C6893FD0025681A /* ChatStructuredOutputProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B029E362C6893FD0025681A /* ChatStructuredOutputProvider.swift */; };\n\t\t7B029E392C68940D0025681A /* ChatStructuredOutputDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B029E382C68940D0025681A /* ChatStructuredOutputDemoView.swift */; };\n\t\t7B029E3C2C69BE990025681A /* ChatStructuredOutputToolProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B029E3B2C69BE990025681A /* ChatStructuredOutputToolProvider.swift */; };\n\t\t7B029E3E2C69BEA70025681A /* ChatStructureOutputToolDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B029E3D2C69BEA70025681A /* ChatStructureOutputToolDemoView.swift */; };\n\t\t7B1268052B08246400400694 /* AssistantConfigurationDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B1268042B08246400400694 /* AssistantConfigurationDemoView.swift */; };\n\t\t7B1268072B08247C00400694 /* AssistantConfigurationProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B1268062B08247C00400694 /* AssistantConfigurationProvider.swift */; };\n\t\t7B2B6D562DF434670059B4BB /* ResponseStreamDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B2B6D552DF434670059B4BB /* ResponseStreamDemoView.swift */; };\n\t\t7B2B6D582DF4347E0059B4BB /* ResponseStreamProvider.swift in 
Sources */ = {isa = PBXBuildFile; fileRef = 7B2B6D572DF4347E0059B4BB /* ResponseStreamProvider.swift */; };\n\t\t7B3DDCC52BAAA722004B5C96 /* AssistantsListDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B3DDCC42BAAA722004B5C96 /* AssistantsListDemoView.swift */; };\n\t\t7B3DDCC72BAAAD34004B5C96 /* AssistantThreadConfigurationProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B3DDCC62BAAAD34004B5C96 /* AssistantThreadConfigurationProvider.swift */; };\n\t\t7B3DDCC92BAAAF96004B5C96 /* AssistantStreamDemoScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B3DDCC82BAAAF96004B5C96 /* AssistantStreamDemoScreen.swift */; };\n\t\t7B413DFE2BB6846D009684CD /* ChatMessageLoadingView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B413DFD2BB6846D009684CD /* ChatMessageLoadingView.swift */; };\n\t\t7B436B962AE24A04003CE281 /* OptionsListView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436B952AE24A04003CE281 /* OptionsListView.swift */; };\n\t\t7B436B992AE25052003CE281 /* ContentLoader.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436B982AE25052003CE281 /* ContentLoader.swift */; };\n\t\t7B436B9B2AE25094003CE281 /* narcos.m4a in Resources */ = {isa = PBXBuildFile; fileRef = 7B436B9A2AE25093003CE281 /* narcos.m4a */; };\n\t\t7B436B9E2AE256BC003CE281 /* german.m4a in Resources */ = {isa = PBXBuildFile; fileRef = 7B436B9D2AE256BC003CE281 /* german.m4a */; };\n\t\t7B436BA12AE25958003CE281 /* ChatProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BA02AE25958003CE281 /* ChatProvider.swift */; };\n\t\t7B436BA32AE25962003CE281 /* ChatDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BA22AE25962003CE281 /* ChatDemoView.swift */; };\n\t\t7B436BA62AE77F37003CE281 /* Embeddingsprovider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BA52AE77F37003CE281 /* Embeddingsprovider.swift */; };\n\t\t7B436BA82AE77F41003CE281 /* EmbeddingsDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef 
= 7B436BA72AE77F41003CE281 /* EmbeddingsDemoView.swift */; };\n\t\t7B436BAB2AE788F1003CE281 /* FineTuningJobProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BAA2AE788F1003CE281 /* FineTuningJobProvider.swift */; };\n\t\t7B436BAD2AE788FB003CE281 /* FineTuningJobDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BAC2AE788FB003CE281 /* FineTuningJobDemoView.swift */; };\n\t\t7B436BB02AE79369003CE281 /* FilesDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BAF2AE79369003CE281 /* FilesDemoView.swift */; };\n\t\t7B436BB22AE79370003CE281 /* FilesProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BB12AE79370003CE281 /* FilesProvider.swift */; };\n\t\t7B436BB42AE79481003CE281 /* WorldCupData.jsonl in Resources */ = {isa = PBXBuildFile; fileRef = 7B436BB32AE79481003CE281 /* WorldCupData.jsonl */; };\n\t\t7B436BB72AE7A2EA003CE281 /* ImagesProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BB62AE7A2EA003CE281 /* ImagesProvider.swift */; };\n\t\t7B436BB92AE7A2F2003CE281 /* ImagesDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BB82AE7A2F2003CE281 /* ImagesDemoView.swift */; };\n\t\t7B436BBC2AE7ABD3003CE281 /* ModelsProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BBB2AE7ABD3003CE281 /* ModelsProvider.swift */; };\n\t\t7B436BBE2AE7ABDA003CE281 /* ModelsDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BBD2AE7ABDA003CE281 /* ModelsDemoView.swift */; };\n\t\t7B436BC12AE7B01F003CE281 /* ModerationProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BC02AE7B01F003CE281 /* ModerationProvider.swift */; };\n\t\t7B436BC32AE7B027003CE281 /* ModerationDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B436BC22AE7B027003CE281 /* ModerationDemoView.swift */; };\n\t\t7B50DD282C2A9A390070A64D /* LocalHostEntryView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B50DD272C2A9A390070A64D /* LocalHostEntryView.swift */; 
};\n\t\t7B50DD2B2C2A9D2F0070A64D /* LocalChatDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B50DD2A2C2A9D2F0070A64D /* LocalChatDemoView.swift */; };\n\t\t7B7239A02AF625F200646679 /* ChatFluidConversationProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B72399F2AF625F200646679 /* ChatFluidConversationProvider.swift */; };\n\t\t7B7239A22AF6260D00646679 /* ChatDisplayMessage.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B7239A12AF6260D00646679 /* ChatDisplayMessage.swift */; };\n\t\t7B7239A42AF6289900646679 /* ChatStreamFluidConversationDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B7239A32AF6289900646679 /* ChatStreamFluidConversationDemoView.swift */; };\n\t\t7B7239A62AF628F800646679 /* ChatDisplayMessageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B7239A52AF628F800646679 /* ChatDisplayMessageView.swift */; };\n\t\t7B7239A82AF6292100646679 /* LoadingView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B7239A72AF6292100646679 /* LoadingView.swift */; };\n\t\t7B7239AB2AF6294C00646679 /* URLImageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B7239AA2AF6294C00646679 /* URLImageView.swift */; };\n\t\t7B7239AE2AF9FF0000646679 /* ChatFunctionsCallStreamProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B7239AD2AF9FF0000646679 /* ChatFunctionsCallStreamProvider.swift */; };\n\t\t7B7239B12AF9FF3C00646679 /* ChatFunctionsCalllStreamDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B7239B02AF9FF3C00646679 /* ChatFunctionsCalllStreamDemoView.swift */; };\n\t\t7B99C2E72C0718DE00E701B3 /* FilesPicker.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B99C2E62C0718DE00E701B3 /* FilesPicker.swift */; };\n\t\t7B99C2E92C0718FF00E701B3 /* FileAttachmentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B99C2E82C0718FF00E701B3 /* FileAttachmentView.swift */; };\n\t\t7B99C2EB2C07191200E701B3 /* AttachmentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 
7B99C2EA2C07191200E701B3 /* AttachmentView.swift */; };\n\t\t7B99C2ED2C071B1600E701B3 /* FilesPickerProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7B99C2EC2C071B1600E701B3 /* FilesPickerProvider.swift */; };\n\t\t7BA788CD2AE23A48008825D5 /* SwiftOpenAIExampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BA788CC2AE23A48008825D5 /* SwiftOpenAIExampleApp.swift */; };\n\t\t7BA788CF2AE23A48008825D5 /* ApiKeyIntroView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BA788CE2AE23A48008825D5 /* ApiKeyIntroView.swift */; };\n\t\t7BA788D12AE23A49008825D5 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 7BA788D02AE23A49008825D5 /* Assets.xcassets */; };\n\t\t7BA788D52AE23A49008825D5 /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 7BA788D42AE23A49008825D5 /* Preview Assets.xcassets */; };\n\t\t7BA788E92AE23A49008825D5 /* SwiftOpenAIExampleUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BA788E82AE23A49008825D5 /* SwiftOpenAIExampleUITests.swift */; };\n\t\t7BA788EB2AE23A49008825D5 /* SwiftOpenAIExampleUITestsLaunchTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BA788EA2AE23A49008825D5 /* SwiftOpenAIExampleUITestsLaunchTests.swift */; };\n\t\t7BA788FC2AE23B42008825D5 /* AudioDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BA788FB2AE23B42008825D5 /* AudioDemoView.swift */; };\n\t\t7BA788FE2AE23B95008825D5 /* AudioProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BA788FD2AE23B95008825D5 /* AudioProvider.swift */; };\n\t\t7BBE7E912AFCA52A0096A693 /* ChatVisionDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BBE7E902AFCA52A0096A693 /* ChatVisionDemoView.swift */; };\n\t\t7BBE7E942AFCC9640096A693 /* ChatVisionProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BBE7E932AFCC9640096A693 /* ChatVisionProvider.swift */; };\n\t\t7BBE7EA52B02E8A70096A693 /* Sizes.swift in Sources */ = {isa = PBXBuildFile; fileRef = 
7BBE7EA42B02E8A70096A693 /* Sizes.swift */; };\n\t\t7BBE7EA72B02E8AC0096A693 /* ThemeColor.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BBE7EA62B02E8AC0096A693 /* ThemeColor.swift */; };\n\t\t7BBE7EA92B02E8E50096A693 /* ChatMessageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BBE7EA82B02E8E50096A693 /* ChatMessageView.swift */; };\n\t\t7BBE7EAB2B02E8FC0096A693 /* ChatMessageDisplayModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BBE7EAA2B02E8FC0096A693 /* ChatMessageDisplayModel.swift */; };\n\t\t7BBE7EDE2B03718E0096A693 /* ChatFunctionCallProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BBE7EDD2B03718E0096A693 /* ChatFunctionCallProvider.swift */; };\n\t\t7BBE7EE02B0372550096A693 /* ChatFunctionCallDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BBE7EDF2B0372550096A693 /* ChatFunctionCallDemoView.swift */; };\n\t\t7BE802592D2878170080E06A /* ChatPredictedOutputDemoView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BE802582D2878170080E06A /* ChatPredictedOutputDemoView.swift */; };\n\t\t7BE9A5AF2B0B33E600CE8103 /* SwiftOpenAIExampleTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7BA788DE2AE23A49008825D5 /* SwiftOpenAIExampleTests.swift */; };\n/* End PBXBuildFile section */\n\n/* Begin PBXContainerItemProxy section */\n\t\t7BA788DB2AE23A49008825D5 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 7BA788C12AE23A48008825D5 /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 7BA788C82AE23A48008825D5;\n\t\t\tremoteInfo = SwiftOpenAIExample;\n\t\t};\n\t\t7BA788E52AE23A49008825D5 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 7BA788C12AE23A48008825D5 /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 7BA788C82AE23A48008825D5;\n\t\t\tremoteInfo = SwiftOpenAIExample;\n\t\t};\n/* End PBXContainerItemProxy section */\n\n/* Begin PBXFileReference section 
*/\n\t\t0DF957832BB53BEF00DD2013 /* ServiceSelectionView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ServiceSelectionView.swift; sourceTree = \"<group>\"; };\n\t\t0DF957852BB543F100DD2013 /* AIProxyIntroView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AIProxyIntroView.swift; sourceTree = \"<group>\"; };\n\t\t7B029E362C6893FD0025681A /* ChatStructuredOutputProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatStructuredOutputProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B029E382C68940D0025681A /* ChatStructuredOutputDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatStructuredOutputDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B029E3B2C69BE990025681A /* ChatStructuredOutputToolProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatStructuredOutputToolProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B029E3D2C69BEA70025681A /* ChatStructureOutputToolDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatStructureOutputToolDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B1268042B08246400400694 /* AssistantConfigurationDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AssistantConfigurationDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B1268062B08247C00400694 /* AssistantConfigurationProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AssistantConfigurationProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B2B6D552DF434670059B4BB /* ResponseStreamDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ResponseStreamDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B2B6D572DF4347E0059B4BB /* ResponseStreamProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = 
ResponseStreamProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B3DDCC42BAAA722004B5C96 /* AssistantsListDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AssistantsListDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B3DDCC62BAAAD34004B5C96 /* AssistantThreadConfigurationProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AssistantThreadConfigurationProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B3DDCC82BAAAF96004B5C96 /* AssistantStreamDemoScreen.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AssistantStreamDemoScreen.swift; sourceTree = \"<group>\"; };\n\t\t7B413DFD2BB6846D009684CD /* ChatMessageLoadingView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageLoadingView.swift; sourceTree = \"<group>\"; };\n\t\t7B436B952AE24A04003CE281 /* OptionsListView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = OptionsListView.swift; sourceTree = \"<group>\"; };\n\t\t7B436B982AE25052003CE281 /* ContentLoader.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentLoader.swift; sourceTree = \"<group>\"; };\n\t\t7B436B9A2AE25093003CE281 /* narcos.m4a */ = {isa = PBXFileReference; lastKnownFileType = file; path = narcos.m4a; sourceTree = \"<group>\"; };\n\t\t7B436B9D2AE256BC003CE281 /* german.m4a */ = {isa = PBXFileReference; lastKnownFileType = file; path = german.m4a; sourceTree = \"<group>\"; };\n\t\t7B436BA02AE25958003CE281 /* ChatProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B436BA22AE25962003CE281 /* ChatDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B436BA52AE77F37003CE281 /* Embeddingsprovider.swift */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.swift; path = Embeddingsprovider.swift; sourceTree = \"<group>\"; };\n\t\t7B436BA72AE77F41003CE281 /* EmbeddingsDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EmbeddingsDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B436BAA2AE788F1003CE281 /* FineTuningJobProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FineTuningJobProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B436BAC2AE788FB003CE281 /* FineTuningJobDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FineTuningJobDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B436BAF2AE79369003CE281 /* FilesDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FilesDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B436BB12AE79370003CE281 /* FilesProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FilesProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B436BB32AE79481003CE281 /* WorldCupData.jsonl */ = {isa = PBXFileReference; lastKnownFileType = text; path = WorldCupData.jsonl; sourceTree = \"<group>\"; };\n\t\t7B436BB62AE7A2EA003CE281 /* ImagesProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ImagesProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B436BB82AE7A2F2003CE281 /* ImagesDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ImagesDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B436BBB2AE7ABD3003CE281 /* ModelsProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ModelsProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B436BBD2AE7ABDA003CE281 /* ModelsDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ModelsDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B436BC02AE7B01F003CE281 /* ModerationProvider.swift */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.swift; path = ModerationProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B436BC22AE7B027003CE281 /* ModerationDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ModerationDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B50DD272C2A9A390070A64D /* LocalHostEntryView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LocalHostEntryView.swift; sourceTree = \"<group>\"; };\n\t\t7B50DD2A2C2A9D2F0070A64D /* LocalChatDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LocalChatDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B72399F2AF625F200646679 /* ChatFluidConversationProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatFluidConversationProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B7239A12AF6260D00646679 /* ChatDisplayMessage.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatDisplayMessage.swift; sourceTree = \"<group>\"; };\n\t\t7B7239A32AF6289900646679 /* ChatStreamFluidConversationDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatStreamFluidConversationDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B7239A52AF628F800646679 /* ChatDisplayMessageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatDisplayMessageView.swift; sourceTree = \"<group>\"; };\n\t\t7B7239A72AF6292100646679 /* LoadingView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LoadingView.swift; sourceTree = \"<group>\"; };\n\t\t7B7239AA2AF6294C00646679 /* URLImageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = URLImageView.swift; sourceTree = \"<group>\"; };\n\t\t7B7239AD2AF9FF0000646679 /* ChatFunctionsCallStreamProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = 
ChatFunctionsCallStreamProvider.swift; sourceTree = \"<group>\"; };\n\t\t7B7239B02AF9FF3C00646679 /* ChatFunctionsCalllStreamDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatFunctionsCalllStreamDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7B99C2E62C0718DE00E701B3 /* FilesPicker.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FilesPicker.swift; sourceTree = \"<group>\"; };\n\t\t7B99C2E82C0718FF00E701B3 /* FileAttachmentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FileAttachmentView.swift; sourceTree = \"<group>\"; };\n\t\t7B99C2EA2C07191200E701B3 /* AttachmentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AttachmentView.swift; sourceTree = \"<group>\"; };\n\t\t7B99C2EC2C071B1600E701B3 /* FilesPickerProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FilesPickerProvider.swift; sourceTree = \"<group>\"; };\n\t\t7BA788C92AE23A48008825D5 /* SwiftOpenAIExample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = SwiftOpenAIExample.app; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t7BA788CC2AE23A48008825D5 /* SwiftOpenAIExampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SwiftOpenAIExampleApp.swift; sourceTree = \"<group>\"; };\n\t\t7BA788CE2AE23A48008825D5 /* ApiKeyIntroView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ApiKeyIntroView.swift; sourceTree = \"<group>\"; };\n\t\t7BA788D02AE23A49008825D5 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = \"<group>\"; };\n\t\t7BA788D22AE23A49008825D5 /* SwiftOpenAIExample.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = SwiftOpenAIExample.entitlements; sourceTree = \"<group>\"; 
};\n\t\t7BA788D42AE23A49008825D5 /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = \"Preview Assets.xcassets\"; sourceTree = \"<group>\"; };\n\t\t7BA788DA2AE23A49008825D5 /* SwiftOpenAIExampleTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = SwiftOpenAIExampleTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t7BA788DE2AE23A49008825D5 /* SwiftOpenAIExampleTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SwiftOpenAIExampleTests.swift; sourceTree = \"<group>\"; };\n\t\t7BA788E42AE23A49008825D5 /* SwiftOpenAIExampleUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = SwiftOpenAIExampleUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t7BA788E82AE23A49008825D5 /* SwiftOpenAIExampleUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SwiftOpenAIExampleUITests.swift; sourceTree = \"<group>\"; };\n\t\t7BA788EA2AE23A49008825D5 /* SwiftOpenAIExampleUITestsLaunchTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SwiftOpenAIExampleUITestsLaunchTests.swift; sourceTree = \"<group>\"; };\n\t\t7BA788FB2AE23B42008825D5 /* AudioDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AudioDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7BA788FD2AE23B95008825D5 /* AudioProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AudioProvider.swift; sourceTree = \"<group>\"; };\n\t\t7BBE7E902AFCA52A0096A693 /* ChatVisionDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatVisionDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7BBE7E932AFCC9640096A693 /* ChatVisionProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatVisionProvider.swift; sourceTree = 
\"<group>\"; };\n\t\t7BBE7EA42B02E8A70096A693 /* Sizes.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Sizes.swift; sourceTree = \"<group>\"; };\n\t\t7BBE7EA62B02E8AC0096A693 /* ThemeColor.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ThemeColor.swift; sourceTree = \"<group>\"; };\n\t\t7BBE7EA82B02E8E50096A693 /* ChatMessageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatMessageView.swift; sourceTree = \"<group>\"; };\n\t\t7BBE7EAA2B02E8FC0096A693 /* ChatMessageDisplayModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatMessageDisplayModel.swift; sourceTree = \"<group>\"; };\n\t\t7BBE7EDD2B03718E0096A693 /* ChatFunctionCallProvider.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatFunctionCallProvider.swift; sourceTree = \"<group>\"; };\n\t\t7BBE7EDF2B0372550096A693 /* ChatFunctionCallDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatFunctionCallDemoView.swift; sourceTree = \"<group>\"; };\n\t\t7BE802582D2878170080E06A /* ChatPredictedOutputDemoView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatPredictedOutputDemoView.swift; sourceTree = \"<group>\"; };\n/* End PBXFileReference section */\n\n/* Begin PBXFrameworksBuildPhase section */\n\t\t7BA788C62AE23A48008825D5 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t0DEE5DC12BB40643004894AD /* SwiftOpenAI in Frameworks */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t7BA788D72AE23A49008825D5 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t7BA788E12AE23A49008825D5 /* Frameworks */ = {\n\t\t\tisa = 
PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXFrameworksBuildPhase section */\n\n/* Begin PBXGroup section */\n\t\t7B029E352C6893BF0025681A /* ChatStructuredOutputs */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B029E362C6893FD0025681A /* ChatStructuredOutputProvider.swift */,\n\t\t\t\t7B029E382C68940D0025681A /* ChatStructuredOutputDemoView.swift */,\n\t\t\t);\n\t\t\tpath = ChatStructuredOutputs;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B029E3A2C69BE750025681A /* ChatStructureOutputTool */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B029E3B2C69BE990025681A /* ChatStructuredOutputToolProvider.swift */,\n\t\t\t\t7B029E3D2C69BEA70025681A /* ChatStructureOutputToolDemoView.swift */,\n\t\t\t);\n\t\t\tpath = ChatStructureOutputTool;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B1268032B08241200400694 /* Assistants */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B1268042B08246400400694 /* AssistantConfigurationDemoView.swift */,\n\t\t\t\t7B1268062B08247C00400694 /* AssistantConfigurationProvider.swift */,\n\t\t\t\t7B3DDCC42BAAA722004B5C96 /* AssistantsListDemoView.swift */,\n\t\t\t\t7B3DDCC62BAAAD34004B5C96 /* AssistantThreadConfigurationProvider.swift */,\n\t\t\t\t7B3DDCC82BAAAF96004B5C96 /* AssistantStreamDemoScreen.swift */,\n\t\t\t);\n\t\t\tpath = Assistants;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B2B6D542DF434550059B4BB /* ResponseAPIDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B2B6D552DF434670059B4BB /* ResponseStreamDemoView.swift */,\n\t\t\t\t7B2B6D572DF4347E0059B4BB /* ResponseStreamProvider.swift */,\n\t\t\t);\n\t\t\tpath = ResponseAPIDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436B972AE25045003CE281 /* Utilities */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436B982AE25052003CE281 /* ContentLoader.swift */,\n\t\t\t);\n\t\t\tpath = 
Utilities;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436B9C2AE2515C003CE281 /* Resources */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436B9D2AE256BC003CE281 /* german.m4a */,\n\t\t\t\t7B436B9A2AE25093003CE281 /* narcos.m4a */,\n\t\t\t\t7B436BB32AE79481003CE281 /* WorldCupData.jsonl */,\n\t\t\t);\n\t\t\tpath = Resources;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436B9F2AE2593D003CE281 /* ChatDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436BA02AE25958003CE281 /* ChatProvider.swift */,\n\t\t\t\t7B436BA22AE25962003CE281 /* ChatDemoView.swift */,\n\t\t\t);\n\t\t\tpath = ChatDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436BA42AE77EF9003CE281 /* EmbeddingsDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436BA52AE77F37003CE281 /* Embeddingsprovider.swift */,\n\t\t\t\t7B436BA72AE77F41003CE281 /* EmbeddingsDemoView.swift */,\n\t\t\t);\n\t\t\tpath = EmbeddingsDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436BA92AE788CA003CE281 /* FineTuningDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436BAA2AE788F1003CE281 /* FineTuningJobProvider.swift */,\n\t\t\t\t7B436BAC2AE788FB003CE281 /* FineTuningJobDemoView.swift */,\n\t\t\t);\n\t\t\tpath = FineTuningDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436BAE2AE7935C003CE281 /* FilesDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436BAF2AE79369003CE281 /* FilesDemoView.swift */,\n\t\t\t\t7B436BB12AE79370003CE281 /* FilesProvider.swift */,\n\t\t\t);\n\t\t\tpath = FilesDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436BB52AE7A2DA003CE281 /* ImagesDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436BB62AE7A2EA003CE281 /* ImagesProvider.swift */,\n\t\t\t\t7B436BB82AE7A2F2003CE281 /* ImagesDemoView.swift */,\n\t\t\t);\n\t\t\tpath = ImagesDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436BBA2AE7ABC4003CE281 /* ModelsDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = 
(\n\t\t\t\t7B436BBB2AE7ABD3003CE281 /* ModelsProvider.swift */,\n\t\t\t\t7B436BBD2AE7ABDA003CE281 /* ModelsDemoView.swift */,\n\t\t\t);\n\t\t\tpath = ModelsDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B436BBF2AE7B003003CE281 /* ModerationsDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B436BC02AE7B01F003CE281 /* ModerationProvider.swift */,\n\t\t\t\t7B436BC22AE7B027003CE281 /* ModerationDemoView.swift */,\n\t\t\t);\n\t\t\tpath = ModerationsDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B50DD292C2A9D1D0070A64D /* LocalChatDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B50DD2A2C2A9D2F0070A64D /* LocalChatDemoView.swift */,\n\t\t\t);\n\t\t\tpath = LocalChatDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B72399E2AF625B700646679 /* ChatStreamFluidConversationDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B72399F2AF625F200646679 /* ChatFluidConversationProvider.swift */,\n\t\t\t\t7B7239A32AF6289900646679 /* ChatStreamFluidConversationDemoView.swift */,\n\t\t\t);\n\t\t\tpath = ChatStreamFluidConversationDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B7239A92AF6294200646679 /* SharedUI */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BBE7EA32B02E89B0096A693 /* Theme */,\n\t\t\t\t7B7239A72AF6292100646679 /* LoadingView.swift */,\n\t\t\t\t7B7239AA2AF6294C00646679 /* URLImageView.swift */,\n\t\t\t\t7B7239A52AF628F800646679 /* ChatDisplayMessageView.swift */,\n\t\t\t\t7B413DFD2BB6846D009684CD /* ChatMessageLoadingView.swift */,\n\t\t\t);\n\t\t\tpath = SharedUI;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B7239AC2AF9FEC300646679 /* ChatFunctionsCall */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BBE7EDC2B0371670096A693 /* Completion */,\n\t\t\t\t7BBE7EDB2B0370E60096A693 /* Stream */,\n\t\t\t\t7BBE7EAA2B02E8FC0096A693 /* ChatMessageDisplayModel.swift */,\n\t\t\t\t7BBE7EA82B02E8E50096A693 /* ChatMessageView.swift */,\n\t\t\t);\n\t\t\tpath = ChatFunctionsCall;\n\t\t\tsourceTree 
= \"<group>\";\n\t\t};\n\t\t7B7239AF2AF9FF1D00646679 /* SharedModels */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B7239A12AF6260D00646679 /* ChatDisplayMessage.swift */,\n\t\t\t);\n\t\t\tpath = SharedModels;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7B99C2E52C0718CD00E701B3 /* Files */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B99C2E62C0718DE00E701B3 /* FilesPicker.swift */,\n\t\t\t\t7B99C2E82C0718FF00E701B3 /* FileAttachmentView.swift */,\n\t\t\t\t7B99C2EA2C07191200E701B3 /* AttachmentView.swift */,\n\t\t\t\t7B99C2EC2C071B1600E701B3 /* FilesPickerProvider.swift */,\n\t\t\t);\n\t\t\tpath = Files;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BA788C02AE23A48008825D5 = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BA788CB2AE23A48008825D5 /* SwiftOpenAIExample */,\n\t\t\t\t7BA788DD2AE23A49008825D5 /* SwiftOpenAIExampleTests */,\n\t\t\t\t7BA788E72AE23A49008825D5 /* SwiftOpenAIExampleUITests */,\n\t\t\t\t7BA788CA2AE23A48008825D5 /* Products */,\n\t\t\t);\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BA788CA2AE23A48008825D5 /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BA788C92AE23A48008825D5 /* SwiftOpenAIExample.app */,\n\t\t\t\t7BA788DA2AE23A49008825D5 /* SwiftOpenAIExampleTests.xctest */,\n\t\t\t\t7BA788E42AE23A49008825D5 /* SwiftOpenAIExampleUITests.xctest */,\n\t\t\t);\n\t\t\tname = Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BA788CB2AE23A48008825D5 /* SwiftOpenAIExample */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B2B6D542DF434550059B4BB /* ResponseAPIDemo */,\n\t\t\t\t7BA788CC2AE23A48008825D5 /* SwiftOpenAIExampleApp.swift */,\n\t\t\t\t7BE802572D2877D30080E06A /* PredictedOutputsDemo */,\n\t\t\t\t7B50DD292C2A9D1D0070A64D /* LocalChatDemo */,\n\t\t\t\t7B99C2E52C0718CD00E701B3 /* Files */,\n\t\t\t\t7B7239AF2AF9FF1D00646679 /* SharedModels */,\n\t\t\t\t7B7239A92AF6294200646679 /* SharedUI */,\n\t\t\t\t7B1268032B08241200400694 /* Assistants 
*/,\n\t\t\t\t7BA788FA2AE23B27008825D5 /* AudioDemo */,\n\t\t\t\t7B436B9F2AE2593D003CE281 /* ChatDemo */,\n\t\t\t\t7B7239AC2AF9FEC300646679 /* ChatFunctionsCall */,\n\t\t\t\t7B029E352C6893BF0025681A /* ChatStructuredOutputs */,\n\t\t\t\t7B029E3A2C69BE750025681A /* ChatStructureOutputTool */,\n\t\t\t\t7B72399E2AF625B700646679 /* ChatStreamFluidConversationDemo */,\n\t\t\t\t7B436BA42AE77EF9003CE281 /* EmbeddingsDemo */,\n\t\t\t\t7B436BA92AE788CA003CE281 /* FineTuningDemo */,\n\t\t\t\t7B436BAE2AE7935C003CE281 /* FilesDemo */,\n\t\t\t\t7B436BB52AE7A2DA003CE281 /* ImagesDemo */,\n\t\t\t\t7B436BBA2AE7ABC4003CE281 /* ModelsDemo */,\n\t\t\t\t7B436BBF2AE7B003003CE281 /* ModerationsDemo */,\n\t\t\t\t7B436B9C2AE2515C003CE281 /* Resources */,\n\t\t\t\t7B436B972AE25045003CE281 /* Utilities */,\n\t\t\t\t7BBE7E922AFCC9300096A693 /* Vision */,\n\t\t\t\t7BA788CE2AE23A48008825D5 /* ApiKeyIntroView.swift */,\n\t\t\t\t7B50DD272C2A9A390070A64D /* LocalHostEntryView.swift */,\n\t\t\t\t0DF957852BB543F100DD2013 /* AIProxyIntroView.swift */,\n\t\t\t\t7B436B952AE24A04003CE281 /* OptionsListView.swift */,\n\t\t\t\t0DF957832BB53BEF00DD2013 /* ServiceSelectionView.swift */,\n\t\t\t\t7BA788D02AE23A49008825D5 /* Assets.xcassets */,\n\t\t\t\t7BA788D22AE23A49008825D5 /* SwiftOpenAIExample.entitlements */,\n\t\t\t\t7BA788D32AE23A49008825D5 /* Preview Content */,\n\t\t\t);\n\t\t\tpath = SwiftOpenAIExample;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BA788D32AE23A49008825D5 /* Preview Content */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BA788D42AE23A49008825D5 /* Preview Assets.xcassets */,\n\t\t\t);\n\t\t\tpath = \"Preview Content\";\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BA788DD2AE23A49008825D5 /* SwiftOpenAIExampleTests */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BA788DE2AE23A49008825D5 /* SwiftOpenAIExampleTests.swift */,\n\t\t\t);\n\t\t\tpath = SwiftOpenAIExampleTests;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BA788E72AE23A49008825D5 /* 
SwiftOpenAIExampleUITests */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BA788E82AE23A49008825D5 /* SwiftOpenAIExampleUITests.swift */,\n\t\t\t\t7BA788EA2AE23A49008825D5 /* SwiftOpenAIExampleUITestsLaunchTests.swift */,\n\t\t\t);\n\t\t\tpath = SwiftOpenAIExampleUITests;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BA788FA2AE23B27008825D5 /* AudioDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BA788FB2AE23B42008825D5 /* AudioDemoView.swift */,\n\t\t\t\t7BA788FD2AE23B95008825D5 /* AudioProvider.swift */,\n\t\t\t);\n\t\t\tpath = AudioDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BBE7E922AFCC9300096A693 /* Vision */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BBE7E902AFCA52A0096A693 /* ChatVisionDemoView.swift */,\n\t\t\t\t7BBE7E932AFCC9640096A693 /* ChatVisionProvider.swift */,\n\t\t\t);\n\t\t\tpath = Vision;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BBE7EA32B02E89B0096A693 /* Theme */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BBE7EA42B02E8A70096A693 /* Sizes.swift */,\n\t\t\t\t7BBE7EA62B02E8AC0096A693 /* ThemeColor.swift */,\n\t\t\t);\n\t\t\tpath = Theme;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BBE7EDB2B0370E60096A693 /* Stream */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7B7239AD2AF9FF0000646679 /* ChatFunctionsCallStreamProvider.swift */,\n\t\t\t\t7B7239B02AF9FF3C00646679 /* ChatFunctionsCalllStreamDemoView.swift */,\n\t\t\t);\n\t\t\tpath = Stream;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BBE7EDC2B0371670096A693 /* Completion */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BBE7EDD2B03718E0096A693 /* ChatFunctionCallProvider.swift */,\n\t\t\t\t7BBE7EDF2B0372550096A693 /* ChatFunctionCallDemoView.swift */,\n\t\t\t);\n\t\t\tpath = Completion;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t7BE802572D2877D30080E06A /* PredictedOutputsDemo */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t7BE802582D2878170080E06A /* ChatPredictedOutputDemoView.swift 
*/,\n\t\t\t);\n\t\t\tpath = PredictedOutputsDemo;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXGroup section */\n\n/* Begin PBXNativeTarget section */\n\t\t7BA788C82AE23A48008825D5 /* SwiftOpenAIExample */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 7BA788EE2AE23A49008825D5 /* Build configuration list for PBXNativeTarget \"SwiftOpenAIExample\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t7BA788C52AE23A48008825D5 /* Sources */,\n\t\t\t\t7BA788C62AE23A48008825D5 /* Frameworks */,\n\t\t\t\t7BA788C72AE23A48008825D5 /* Resources */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t);\n\t\t\tname = SwiftOpenAIExample;\n\t\t\tpackageProductDependencies = (\n\t\t\t\t0DEE5DC02BB40643004894AD /* SwiftOpenAI */,\n\t\t\t);\n\t\t\tproductName = SwiftOpenAIExample;\n\t\t\tproductReference = 7BA788C92AE23A48008825D5 /* SwiftOpenAIExample.app */;\n\t\t\tproductType = \"com.apple.product-type.application\";\n\t\t};\n\t\t7BA788D92AE23A49008825D5 /* SwiftOpenAIExampleTests */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 7BA788F12AE23A49008825D5 /* Build configuration list for PBXNativeTarget \"SwiftOpenAIExampleTests\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t7BA788D62AE23A49008825D5 /* Sources */,\n\t\t\t\t7BA788D72AE23A49008825D5 /* Frameworks */,\n\t\t\t\t7BA788D82AE23A49008825D5 /* Resources */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t7BA788DC2AE23A49008825D5 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = SwiftOpenAIExampleTests;\n\t\t\tproductName = SwiftOpenAIExampleTests;\n\t\t\tproductReference = 7BA788DA2AE23A49008825D5 /* SwiftOpenAIExampleTests.xctest */;\n\t\t\tproductType = \"com.apple.product-type.bundle.unit-test\";\n\t\t};\n\t\t7BA788E32AE23A49008825D5 /* SwiftOpenAIExampleUITests */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 7BA788F42AE23A49008825D5 /* Build configuration list for PBXNativeTarget \"SwiftOpenAIExampleUITests\" 
*/;\n\t\t\tbuildPhases = (\n\t\t\t\t7BA788E02AE23A49008825D5 /* Sources */,\n\t\t\t\t7BA788E12AE23A49008825D5 /* Frameworks */,\n\t\t\t\t7BA788E22AE23A49008825D5 /* Resources */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t7BA788E62AE23A49008825D5 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = SwiftOpenAIExampleUITests;\n\t\t\tproductName = SwiftOpenAIExampleUITests;\n\t\t\tproductReference = 7BA788E42AE23A49008825D5 /* SwiftOpenAIExampleUITests.xctest */;\n\t\t\tproductType = \"com.apple.product-type.bundle.ui-testing\";\n\t\t};\n/* End PBXNativeTarget section */\n\n/* Begin PBXProject section */\n\t\t7BA788C12AE23A48008825D5 /* Project object */ = {\n\t\t\tisa = PBXProject;\n\t\t\tattributes = {\n\t\t\t\tBuildIndependentTargetsInParallel = 1;\n\t\t\t\tLastSwiftUpdateCheck = 1500;\n\t\t\t\tLastUpgradeCheck = 1500;\n\t\t\t\tTargetAttributes = {\n\t\t\t\t\t7BA788C82AE23A48008825D5 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 15.0;\n\t\t\t\t\t};\n\t\t\t\t\t7BA788D92AE23A49008825D5 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 15.0;\n\t\t\t\t\t\tTestTargetID = 7BA788C82AE23A48008825D5;\n\t\t\t\t\t};\n\t\t\t\t\t7BA788E32AE23A49008825D5 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 15.0;\n\t\t\t\t\t\tTestTargetID = 7BA788C82AE23A48008825D5;\n\t\t\t\t\t};\n\t\t\t\t};\n\t\t\t};\n\t\t\tbuildConfigurationList = 7BA788C42AE23A48008825D5 /* Build configuration list for PBXProject \"SwiftOpenAIExample\" */;\n\t\t\tcompatibilityVersion = \"Xcode 14.0\";\n\t\t\tdevelopmentRegion = en;\n\t\t\thasScannedForEncodings = 0;\n\t\t\tknownRegions = (\n\t\t\t\ten,\n\t\t\t\tBase,\n\t\t\t);\n\t\t\tmainGroup = 7BA788C02AE23A48008825D5;\n\t\t\tpackageReferences = (\n\t\t\t\t0DEE5DBF2BB40643004894AD /* XCLocalSwiftPackageReference \"../..\" */,\n\t\t\t);\n\t\t\tproductRefGroup = 7BA788CA2AE23A48008825D5 /* Products */;\n\t\t\tprojectDirPath = \"\";\n\t\t\tprojectRoot = \"\";\n\t\t\ttargets = (\n\t\t\t\t7BA788C82AE23A48008825D5 /* SwiftOpenAIExample 
*/,\n\t\t\t\t7BA788D92AE23A49008825D5 /* SwiftOpenAIExampleTests */,\n\t\t\t\t7BA788E32AE23A49008825D5 /* SwiftOpenAIExampleUITests */,\n\t\t\t);\n\t\t};\n/* End PBXProject section */\n\n/* Begin PBXResourcesBuildPhase section */\n\t\t7BA788C72AE23A48008825D5 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t7B436B9E2AE256BC003CE281 /* german.m4a in Resources */,\n\t\t\t\t7BA788D52AE23A49008825D5 /* Preview Assets.xcassets in Resources */,\n\t\t\t\t7B436BB42AE79481003CE281 /* WorldCupData.jsonl in Resources */,\n\t\t\t\t7B436B9B2AE25094003CE281 /* narcos.m4a in Resources */,\n\t\t\t\t7BA788D12AE23A49008825D5 /* Assets.xcassets in Resources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t7BA788D82AE23A49008825D5 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t7BA788E22AE23A49008825D5 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXResourcesBuildPhase section */\n\n/* Begin PBXSourcesBuildPhase section */\n\t\t7BA788C52AE23A48008825D5 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t7BBE7EA92B02E8E50096A693 /* ChatMessageView.swift in Sources */,\n\t\t\t\t7BE802592D2878170080E06A /* ChatPredictedOutputDemoView.swift in Sources */,\n\t\t\t\t7B7239AE2AF9FF0000646679 /* ChatFunctionsCallStreamProvider.swift in Sources */,\n\t\t\t\t7B436BA12AE25958003CE281 /* ChatProvider.swift in Sources */,\n\t\t\t\t7B436BC32AE7B027003CE281 /* ModerationDemoView.swift in Sources */,\n\t\t\t\t7B7239AB2AF6294C00646679 /* URLImageView.swift in Sources */,\n\t\t\t\t7B7239B12AF9FF3C00646679 /* ChatFunctionsCalllStreamDemoView.swift in Sources 
*/,\n\t\t\t\t7B50DD282C2A9A390070A64D /* LocalHostEntryView.swift in Sources */,\n\t\t\t\t7BBE7EAB2B02E8FC0096A693 /* ChatMessageDisplayModel.swift in Sources */,\n\t\t\t\t7B99C2E92C0718FF00E701B3 /* FileAttachmentView.swift in Sources */,\n\t\t\t\t7BBE7EA52B02E8A70096A693 /* Sizes.swift in Sources */,\n\t\t\t\t7B7239A22AF6260D00646679 /* ChatDisplayMessage.swift in Sources */,\n\t\t\t\t0DF957862BB543F100DD2013 /* AIProxyIntroView.swift in Sources */,\n\t\t\t\t7B1268052B08246400400694 /* AssistantConfigurationDemoView.swift in Sources */,\n\t\t\t\t7B436BB72AE7A2EA003CE281 /* ImagesProvider.swift in Sources */,\n\t\t\t\t7B436B962AE24A04003CE281 /* OptionsListView.swift in Sources */,\n\t\t\t\t7BBE7EDE2B03718E0096A693 /* ChatFunctionCallProvider.swift in Sources */,\n\t\t\t\t7B7239A62AF628F800646679 /* ChatDisplayMessageView.swift in Sources */,\n\t\t\t\t7B99C2ED2C071B1600E701B3 /* FilesPickerProvider.swift in Sources */,\n\t\t\t\t7B7239A02AF625F200646679 /* ChatFluidConversationProvider.swift in Sources */,\n\t\t\t\t7BA788CF2AE23A48008825D5 /* ApiKeyIntroView.swift in Sources */,\n\t\t\t\t7BA788CD2AE23A48008825D5 /* SwiftOpenAIExampleApp.swift in Sources */,\n\t\t\t\t7B3DDCC52BAAA722004B5C96 /* AssistantsListDemoView.swift in Sources */,\n\t\t\t\t7B3DDCC72BAAAD34004B5C96 /* AssistantThreadConfigurationProvider.swift in Sources */,\n\t\t\t\t7B3DDCC92BAAAF96004B5C96 /* AssistantStreamDemoScreen.swift in Sources */,\n\t\t\t\t7B029E3E2C69BEA70025681A /* ChatStructureOutputToolDemoView.swift in Sources */,\n\t\t\t\t7B413DFE2BB6846D009684CD /* ChatMessageLoadingView.swift in Sources */,\n\t\t\t\t7B436BB92AE7A2F2003CE281 /* ImagesDemoView.swift in Sources */,\n\t\t\t\t7B436BB22AE79370003CE281 /* FilesProvider.swift in Sources */,\n\t\t\t\t7BBE7E942AFCC9640096A693 /* ChatVisionProvider.swift in Sources */,\n\t\t\t\t0DF957842BB53BEF00DD2013 /* ServiceSelectionView.swift in Sources */,\n\t\t\t\t7B436BAD2AE788FB003CE281 /* FineTuningJobDemoView.swift in Sources 
*/,\n\t\t\t\t7B436BB02AE79369003CE281 /* FilesDemoView.swift in Sources */,\n\t\t\t\t7BBE7E912AFCA52A0096A693 /* ChatVisionDemoView.swift in Sources */,\n\t\t\t\t7B99C2EB2C07191200E701B3 /* AttachmentView.swift in Sources */,\n\t\t\t\t7B50DD2B2C2A9D2F0070A64D /* LocalChatDemoView.swift in Sources */,\n\t\t\t\t7B029E3C2C69BE990025681A /* ChatStructuredOutputToolProvider.swift in Sources */,\n\t\t\t\t7B436BAB2AE788F1003CE281 /* FineTuningJobProvider.swift in Sources */,\n\t\t\t\t7B7239A42AF6289900646679 /* ChatStreamFluidConversationDemoView.swift in Sources */,\n\t\t\t\t7BA788FC2AE23B42008825D5 /* AudioDemoView.swift in Sources */,\n\t\t\t\t7B99C2E72C0718DE00E701B3 /* FilesPicker.swift in Sources */,\n\t\t\t\t7B1268072B08247C00400694 /* AssistantConfigurationProvider.swift in Sources */,\n\t\t\t\t7B029E392C68940D0025681A /* ChatStructuredOutputDemoView.swift in Sources */,\n\t\t\t\t7B436BBE2AE7ABDA003CE281 /* ModelsDemoView.swift in Sources */,\n\t\t\t\t7B029E372C6893FD0025681A /* ChatStructuredOutputProvider.swift in Sources */,\n\t\t\t\t7B436BA32AE25962003CE281 /* ChatDemoView.swift in Sources */,\n\t\t\t\t7B7239A82AF6292100646679 /* LoadingView.swift in Sources */,\n\t\t\t\t7B436B992AE25052003CE281 /* ContentLoader.swift in Sources */,\n\t\t\t\t7B436BC12AE7B01F003CE281 /* ModerationProvider.swift in Sources */,\n\t\t\t\t7B436BBC2AE7ABD3003CE281 /* ModelsProvider.swift in Sources */,\n\t\t\t\t7B2B6D562DF434670059B4BB /* ResponseStreamDemoView.swift in Sources */,\n\t\t\t\t7B2B6D582DF4347E0059B4BB /* ResponseStreamProvider.swift in Sources */,\n\t\t\t\t7B436BA62AE77F37003CE281 /* Embeddingsprovider.swift in Sources */,\n\t\t\t\t7BBE7EA72B02E8AC0096A693 /* ThemeColor.swift in Sources */,\n\t\t\t\t7BA788FE2AE23B95008825D5 /* AudioProvider.swift in Sources */,\n\t\t\t\t7B436BA82AE77F41003CE281 /* EmbeddingsDemoView.swift in Sources */,\n\t\t\t\t7BBE7EE02B0372550096A693 /* ChatFunctionCallDemoView.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing 
= 0;\n\t\t};\n\t\t7BA788D62AE23A49008825D5 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t7BE9A5AF2B0B33E600CE8103 /* SwiftOpenAIExampleTests.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t7BA788E02AE23A49008825D5 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t7BA788E92AE23A49008825D5 /* SwiftOpenAIExampleUITests.swift in Sources */,\n\t\t\t\t7BA788EB2AE23A49008825D5 /* SwiftOpenAIExampleUITestsLaunchTests.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXSourcesBuildPhase section */\n\n/* Begin PBXTargetDependency section */\n\t\t7BA788DC2AE23A49008825D5 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 7BA788C82AE23A48008825D5 /* SwiftOpenAIExample */;\n\t\t\ttargetProxy = 7BA788DB2AE23A49008825D5 /* PBXContainerItemProxy */;\n\t\t};\n\t\t7BA788E62AE23A49008825D5 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 7BA788C82AE23A48008825D5 /* SwiftOpenAIExample */;\n\t\t\ttargetProxy = 7BA788E52AE23A49008825D5 /* PBXContainerItemProxy */;\n\t\t};\n/* End PBXTargetDependency section */\n\n/* Begin XCBuildConfiguration section */\n\t\t7BA788EC2AE23A49008825D5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++20\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_WEAK = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = 
YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = dwarf;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_TESTABILITY = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu17;\n\t\t\t\tGCC_DYNAMIC_NO_PIC = NO;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_OPTIMIZATION_LEVEL = 0;\n\t\t\t\tGCC_PREPROCESSOR_DEFINITIONS = (\n\t\t\t\t\t\"DEBUG=1\",\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t);\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tLOCALIZATION_PREFERS_STRING_CATALOGS = YES;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;\n\t\t\t\tMTL_FAST_MATH = YES;\n\t\t\t\tONLY_ACTIVE_ARCH = YES;\n\t\t\t\tSWIFT_ACTIVE_COMPILATION_CONDITIONS = \"DEBUG $(inherited)\";\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t7BA788ED2AE23A49008825D5 /* Release 
*/ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++20\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_WEAK = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu17;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = 
YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tLOCALIZATION_PREFERS_STRING_CATALOGS = YES;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = NO;\n\t\t\t\tMTL_FAST_MATH = YES;\n\t\t\t\tSWIFT_COMPILATION_MODE = wholemodule;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t7BA788EF2AE23A49008825D5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = SwiftOpenAIExample/SwiftOpenAIExample.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tDEVELOPMENT_ASSET_PATHS = \"\\\"SwiftOpenAIExample/Preview Content\\\"\";\n\t\t\t\tDEVELOPMENT_TEAM = CQ45U4X9K3;\n\t\t\t\tENABLE_HARDENED_RUNTIME = YES;\n\t\t\t\tENABLE_PREVIEWS = YES;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tINFOPLIST_KEY_NSMicrophoneUsageDescription = \"Microphone access is needed for voice chat\";\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphoneos*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphonesimulator*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphoneos*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphonesimulator*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphoneos*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphonesimulator*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIStatusBarStyle[sdk=iphoneos*]\" = UIStatusBarStyleDefault;\n\t\t\t\t\"INFOPLIST_KEY_UIStatusBarStyle[sdk=iphonesimulator*]\" = UIStatusBarStyleDefault;\n\t\t\t\tINFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = \"UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft 
UIInterfaceOrientationLandscapeRight\";\n\t\t\t\tINFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = \"UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight\";\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 17.0;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = \"@executable_path/Frameworks\";\n\t\t\t\t\"LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]\" = \"@executable_path/../Frameworks\";\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 14.0;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = jamesRochabrun.SwiftOpenAIExample;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSDKROOT = auto;\n\t\t\t\tSUPPORTED_PLATFORMS = \"iphoneos iphonesimulator macosx\";\n\t\t\t\tSWIFT_EMIT_LOC_STRINGS = YES;\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t7BA788F02AE23A49008825D5 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = SwiftOpenAIExample/SwiftOpenAIExample.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tDEVELOPMENT_ASSET_PATHS = \"\\\"SwiftOpenAIExample/Preview Content\\\"\";\n\t\t\t\tDEVELOPMENT_TEAM = CQ45U4X9K3;\n\t\t\t\tENABLE_HARDENED_RUNTIME = YES;\n\t\t\t\tENABLE_PREVIEWS = YES;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tINFOPLIST_KEY_NSMicrophoneUsageDescription = \"Microphone access is needed for voice chat\";\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphoneos*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphonesimulator*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphoneos*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphonesimulator*]\" = 
YES;\n\t\t\t\t\"INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphoneos*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphonesimulator*]\" = YES;\n\t\t\t\t\"INFOPLIST_KEY_UIStatusBarStyle[sdk=iphoneos*]\" = UIStatusBarStyleDefault;\n\t\t\t\t\"INFOPLIST_KEY_UIStatusBarStyle[sdk=iphonesimulator*]\" = UIStatusBarStyleDefault;\n\t\t\t\tINFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = \"UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight\";\n\t\t\t\tINFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = \"UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight\";\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 17.0;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = \"@executable_path/Frameworks\";\n\t\t\t\t\"LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]\" = \"@executable_path/../Frameworks\";\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 14.0;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = jamesRochabrun.SwiftOpenAIExample;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSDKROOT = auto;\n\t\t\t\tSUPPORTED_PLATFORMS = \"iphoneos iphonesimulator macosx\";\n\t\t\t\tSWIFT_EMIT_LOC_STRINGS = YES;\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t7BA788F22AE23A49008825D5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES;\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tDEVELOPMENT_TEAM = CQ45U4X9K3;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 17.0;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 14.0;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = jamesRochabrun.SwiftOpenAIExampleTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSDKROOT = 
auto;\n\t\t\t\tSUPPORTED_PLATFORMS = \"iphoneos iphonesimulator macosx\";\n\t\t\t\tSWIFT_EMIT_LOC_STRINGS = NO;\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/SwiftOpenAIExample.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/SwiftOpenAIExample\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t7BA788F32AE23A49008825D5 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES;\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tDEVELOPMENT_TEAM = CQ45U4X9K3;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 17.0;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 14.0;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = jamesRochabrun.SwiftOpenAIExampleTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSDKROOT = auto;\n\t\t\t\tSUPPORTED_PLATFORMS = \"iphoneos iphonesimulator macosx\";\n\t\t\t\tSWIFT_EMIT_LOC_STRINGS = NO;\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/SwiftOpenAIExample.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/SwiftOpenAIExample\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t7BA788F52AE23A49008825D5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tDEVELOPMENT_TEAM = CQ45U4X9K3;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 17.0;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 14.0;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = jamesRochabrun.SwiftOpenAIExampleUITests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSDKROOT = auto;\n\t\t\t\tSUPPORTED_PLATFORMS = \"iphoneos iphonesimulator 
macosx\";\n\t\t\t\tSWIFT_EMIT_LOC_STRINGS = NO;\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tTEST_TARGET_NAME = SwiftOpenAIExample;\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t7BA788F62AE23A49008825D5 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tDEVELOPMENT_TEAM = CQ45U4X9K3;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 17.0;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 14.0;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = jamesRochabrun.SwiftOpenAIExampleUITests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSDKROOT = auto;\n\t\t\t\tSUPPORTED_PLATFORMS = \"iphoneos iphonesimulator macosx\";\n\t\t\t\tSWIFT_EMIT_LOC_STRINGS = NO;\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tTEST_TARGET_NAME = SwiftOpenAIExample;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n/* End XCBuildConfiguration section */\n\n/* Begin XCConfigurationList section */\n\t\t7BA788C42AE23A48008825D5 /* Build configuration list for PBXProject \"SwiftOpenAIExample\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t7BA788EC2AE23A49008825D5 /* Debug */,\n\t\t\t\t7BA788ED2AE23A49008825D5 /* Release */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t7BA788EE2AE23A49008825D5 /* Build configuration list for PBXNativeTarget \"SwiftOpenAIExample\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t7BA788EF2AE23A49008825D5 /* Debug */,\n\t\t\t\t7BA788F02AE23A49008825D5 /* Release */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t7BA788F12AE23A49008825D5 /* Build configuration list for PBXNativeTarget \"SwiftOpenAIExampleTests\" */ = 
{\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t7BA788F22AE23A49008825D5 /* Debug */,\n\t\t\t\t7BA788F32AE23A49008825D5 /* Release */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t7BA788F42AE23A49008825D5 /* Build configuration list for PBXNativeTarget \"SwiftOpenAIExampleUITests\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t7BA788F52AE23A49008825D5 /* Debug */,\n\t\t\t\t7BA788F62AE23A49008825D5 /* Release */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n/* End XCConfigurationList section */\n\n/* Begin XCLocalSwiftPackageReference section */\n\t\t0DEE5DBF2BB40643004894AD /* XCLocalSwiftPackageReference \"../..\" */ = {\n\t\t\tisa = XCLocalSwiftPackageReference;\n\t\t\trelativePath = ../..;\n\t\t};\n/* End XCLocalSwiftPackageReference section */\n\n/* Begin XCSwiftPackageProductDependency section */\n\t\t0DEE5DC02BB40643004894AD /* SwiftOpenAI */ = {\n\t\t\tisa = XCSwiftPackageProductDependency;\n\t\t\tproductName = SwiftOpenAI;\n\t\t};\n/* End XCSwiftPackageProductDependency section */\n\t};\n\trootObject = 7BA788C12AE23A48008825D5 /* Project object */;\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Workspace\n   version = \"1.0\">\n   <FileRef\n      location = \"self:\">\n   </FileRef>\n</Workspace>\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExample.xcodeproj/xcshareddata/xcschemes/SwiftOpenAIExample.xcscheme",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Scheme\n   LastUpgradeVersion = \"1500\"\n   version = \"2.2\">\n   <BuildAction\n      parallelizeBuildables = \"YES\"\n      buildImplicitDependencies = \"YES\">\n      <BuildActionEntries>\n         <BuildActionEntry\n            buildForTesting = \"YES\"\n            buildForRunning = \"YES\"\n            buildForProfiling = \"YES\"\n            buildForArchiving = \"YES\"\n            buildForAnalyzing = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"7BA788C82AE23A48008825D5\"\n               BuildableName = \"SwiftOpenAIExample.app\"\n               BlueprintName = \"SwiftOpenAIExample\"\n               ReferencedContainer = \"container:SwiftOpenAIExample.xcodeproj\">\n            </BuildableReference>\n         </BuildActionEntry>\n         <BuildActionEntry\n            buildForTesting = \"YES\"\n            buildForRunning = \"YES\"\n            buildForProfiling = \"YES\"\n            buildForArchiving = \"YES\"\n            buildForAnalyzing = \"YES\">\n            <AutocreatedTestPlanReference>\n            </AutocreatedTestPlanReference>\n         </BuildActionEntry>\n      </BuildActionEntries>\n   </BuildAction>\n   <TestAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\"\n      shouldAutocreateTestPlan = \"YES\">\n      <Testables>\n         <TestableReference\n            skipped = \"NO\"\n            parallelizable = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"7BA788D92AE23A49008825D5\"\n               BuildableName = \"SwiftOpenAIExampleTests.xctest\"\n               BlueprintName = \"SwiftOpenAIExampleTests\"\n             
  ReferencedContainer = \"container:SwiftOpenAIExample.xcodeproj\">\n            </BuildableReference>\n         </TestableReference>\n         <TestableReference\n            skipped = \"NO\"\n            parallelizable = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"7BA788E32AE23A49008825D5\"\n               BuildableName = \"SwiftOpenAIExampleUITests.xctest\"\n               BlueprintName = \"SwiftOpenAIExampleUITests\"\n               ReferencedContainer = \"container:SwiftOpenAIExample.xcodeproj\">\n            </BuildableReference>\n         </TestableReference>\n      </Testables>\n   </TestAction>\n   <LaunchAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      launchStyle = \"0\"\n      useCustomWorkingDirectory = \"NO\"\n      ignoresPersistentStateOnLaunch = \"NO\"\n      debugDocumentVersioning = \"YES\"\n      debugServiceExtension = \"internal\"\n      allowLocationSimulation = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"7BA788C82AE23A48008825D5\"\n            BuildableName = \"SwiftOpenAIExample.app\"\n            BlueprintName = \"SwiftOpenAIExample\"\n            ReferencedContainer = \"container:SwiftOpenAIExample.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </LaunchAction>\n   <ProfileAction\n      buildConfiguration = \"Release\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\"\n      savedToolIdentifier = \"\"\n      useCustomWorkingDirectory = \"NO\"\n      debugDocumentVersioning = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            
BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"7BA788C82AE23A48008825D5\"\n            BuildableName = \"SwiftOpenAIExample.app\"\n            BlueprintName = \"SwiftOpenAIExample\"\n            ReferencedContainer = \"container:SwiftOpenAIExample.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </ProfileAction>\n   <AnalyzeAction\n      buildConfiguration = \"Debug\">\n   </AnalyzeAction>\n   <ArchiveAction\n      buildConfiguration = \"Release\"\n      revealArchiveInOrganizer = \"YES\">\n   </ArchiveAction>\n</Scheme>\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExampleTests/SwiftOpenAIExampleTests.swift",
    "content": "//\n//  SwiftOpenAIExampleTests.swift\n//  SwiftOpenAIExampleTests\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport XCTest\n\nfinal class SwiftOpenAIExampleTests: XCTestCase {\n  override func setUpWithError() throws {\n    // Put setup code here. This method is called before the invocation of each test method in the class.\n  }\n\n  override func tearDownWithError() throws {\n    // Put teardown code here. This method is called after the invocation of each test method in the class.\n  }\n\n  func testExample() throws {\n    // This is an example of a functional test case.\n    // Use XCTAssert and related functions to verify your tests produce the correct results.\n    // Any test you write for XCTest can be annotated as throws and async.\n    // Mark your test throws to produce an unexpected failure when your test encounters an uncaught error.\n    // Mark your test async to allow awaiting for asynchronous code to complete. Check the results with assertions afterwards.\n  }\n\n  func testPerformanceExample() throws {\n    // This is an example of a performance test case.\n    measure {\n      // Put the code you want to measure the time of here.\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExampleUITests/SwiftOpenAIExampleUITests.swift",
    "content": "//\n//  SwiftOpenAIExampleUITests.swift\n//  SwiftOpenAIExampleUITests\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport XCTest\n\nfinal class SwiftOpenAIExampleUITests: XCTestCase {\n  override func setUpWithError() throws {\n    // Put setup code here. This method is called before the invocation of each test method in the class.\n\n    // In UI tests it is usually best to stop immediately when a failure occurs.\n    continueAfterFailure = false\n\n    // In UI tests it’s important to set the initial state - such as interface orientation - required for your tests before they run. The setUp method is a good place to do this.\n  }\n\n  override func tearDownWithError() throws {\n    // Put teardown code here. This method is called after the invocation of each test method in the class.\n  }\n\n  func testExample() throws {\n    // UI tests must launch the application that they test.\n    let app = XCUIApplication()\n    app.launch()\n\n    // Use XCTAssert and related functions to verify your tests produce the correct results.\n  }\n\n  func testLaunchPerformance() throws {\n    if #available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 7.0, *) {\n      // This measures how long it takes to launch your application.\n      measure(metrics: [XCTApplicationLaunchMetric()]) {\n        XCUIApplication().launch()\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "Examples/SwiftOpenAIExample/SwiftOpenAIExampleUITests/SwiftOpenAIExampleUITestsLaunchTests.swift",
    "content": "//\n//  SwiftOpenAIExampleUITestsLaunchTests.swift\n//  SwiftOpenAIExampleUITests\n//\n//  Created by James Rochabrun on 10/19/23.\n//\n\nimport XCTest\n\nfinal class SwiftOpenAIExampleUITestsLaunchTests: XCTestCase {\n  override class var runsForEachTargetApplicationUIConfiguration: Bool {\n    true\n  }\n\n  override func setUpWithError() throws {\n    continueAfterFailure = false\n  }\n\n  func testLaunch() throws {\n    let app = XCUIApplication()\n    app.launch()\n\n    // Insert steps here to perform after app launch but before taking a screenshot,\n    // such as logging into a test account or navigating somewhere in the app\n\n    let attachment = XCTAttachment(screenshot: app.screenshot())\n    attachment.name = \"Launch Screen\"\n    attachment.lifetime = .keepAlways\n    add(attachment)\n  }\n}\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 James Rochabrun\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Package.swift",
    "content": "// swift-tools-version: 5.9\n// The swift-tools-version declares the minimum version of Swift required to build this package.\n\nimport PackageDescription\n\nlet package = Package(\n  name: \"SwiftOpenAI\",\n  platforms: [\n    .iOS(.v15),\n    .macOS(.v12),\n    .watchOS(.v9),\n  ],\n  products: [\n    // Products define the executables and libraries a package produces, making them visible to other packages.\n    .library(\n      name: \"SwiftOpenAI\",\n      targets: [\"SwiftOpenAI\"]),\n  ],\n  dependencies: [\n    .package(url: \"https://github.com/swift-server/async-http-client.git\", from: \"1.25.2\"),\n  ],\n  targets: [\n    // Targets are the basic building blocks of a package, defining a module or a test suite.\n    // Targets can depend on other targets in this package and products from dependencies.\n    .target(\n      name: \"SwiftOpenAI\",\n      dependencies: [\n        .product(name: \"AsyncHTTPClient\", package: \"async-http-client\", condition: .when(platforms: [.linux])),\n      ]),\n    .testTarget(\n      name: \"SwiftOpenAITests\",\n      dependencies: [\"SwiftOpenAI\"]),\n  ])\n"
  },
  {
    "path": "README.md",
    "content": "# SwiftOpenAI\n<img width=\"1090\" alt=\"repoOpenAI\" src=\"https://github.com/jamesrochabrun/SwiftOpenAI/assets/5378604/51bc5736-a32f-4a9f-922e-209d950e28f7\">\n\n![iOS 15+](https://img.shields.io/badge/iOS-15%2B-blue.svg)\n![macOS 13+](https://img.shields.io/badge/macOS-13%2B-blue.svg)\n![watchOS 9+](https://img.shields.io/badge/watchOS-9%2B-blue.svg)\n![Linux](https://img.shields.io/badge/Linux-blue.svg)\n[![MIT license](https://img.shields.io/badge/License-MIT-blue.svg)](https://lbesson.mit-license.org/)\n[![swift-version](https://img.shields.io/badge/swift-5.9-brightgreen.svg)](https://github.com/apple/swift)\n[![swiftui-version](https://img.shields.io/badge/swiftui-brightgreen)](https://developer.apple.com/documentation/swiftui)\n[![xcode-version](https://img.shields.io/badge/xcode-15%20-brightgreen)](https://developer.apple.com/xcode/)\n[![swift-package-manager](https://img.shields.io/badge/package%20manager-compatible-brightgreen.svg?logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iNjJweCIgaGVpZ2h0PSI0OXB4IiB2aWV3Qm94PSIwIDAgNjIgNDkiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDYzLjEgKDkyNDUyKSAtIGh0dHBzOi8vc2tldGNoLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cDwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJHcm91cCIgZmlsbC1ydWxlPSJub256ZXJvIj4KICAgICAgICAgICAgPHBvbHlnb24gaWQ9IlBhdGgiIGZpbGw9IiNEQkI1NTEiIHBvaW50cz0iNTEuMzEwMzQ0OCAwIDEwLjY4OTY1NTIgMCAwIDEzLjUxNzI0MTQgMCA0OSA2MiA0OSA2MiAxMy41MTcyNDE0Ij48L3BvbHlnb24+CiAgICAgICAgICAgIDxwb2x5Z29uIGlkPSJQYXRoIiBmaWxsPSIjRjdFM0FGIiBwb2ludHM9IjI3IDI1IDMxIDI1IDM1IDI1IDM3IDI1IDM3IDE0IDI1IDE0IDI1IDI1Ij48L3BvbHlnb24+CiAgICAgICAgICAgIDxwb2x5Z29uIGlkPSJQYXRoIiBmaWxsPSIjRUZDNzVFIiBwb2ludHM9IjEwLjY4OTY1NTIgMCAwIDE0I
DYyIDE0IDUxLjMxMDM0NDggMCI+PC9wb2x5Z29uPgogICAgICAgICAgICA8cG9seWdvbiBpZD0iUmVjdGFuZ2xlIiBmaWxsPSIjRjdFM0FGIiBwb2ludHM9IjI3IDAgMzUgMCAzNyAxNCAyNSAxNCI+PC9wb2x5Z29uPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+)](https://github.com/apple/swift-package-manager)\n[![Buy me a coffee](https://img.shields.io/badge/Buy%20me%20a%20coffee-048754?logo=buymeacoffee)](https://buymeacoffee.com/jamesrochabrun)\n\nAn open-source Swift package designed for effortless interaction with OpenAI's public API. \n\n🚀 Now also available as [CLI](https://github.com/jamesrochabrun/SwiftOpenAICLI) and also as [MCP](https://github.com/jamesrochabrun/SwiftOpenAIMCP)\n\n## Table of Contents\n- [Description](#description)\n- [Getting an API Key](#getting-an-api-key)\n- [Installation](#installation)\n- [Compatibility](#compatibility)\n- [Usage](#usage)\n- [Collaboration](#collaboration)\n\n## Description\n\n`SwiftOpenAI` is an open-source Swift package that streamlines interactions with **all** OpenAI's API endpoints, now with added support for Azure, AIProxy, Assistant stream APIs, and the new **Realtime API** for low-latency bidirectional voice conversations.\n\n### OpenAI ENDPOINTS\n\n- [Audio](#audio)\n   - [Transcriptions](#audio-transcriptions)\n   - [Translations](#audio-translations)\n   - [Speech](#audio-Speech)\n   - [Realtime](#audio-realtime)\n- [Chat](#chat)\n   - [Function Calling](#function-calling)\n   - [Structured Outputs](#structured-outputs)\n   - [Vision](#vision)\n- [Response](#response)\n   - [Streaming Responses](#streaming-responses)\n- [Embeddings](#embeddings)\n- [Fine-tuning](#fine-tuning)\n- [Batch](#batch)\n- [Files](#files)\n- [Images](#images)\n- [Models](#models)\n- [Moderations](#moderations)\n\n### **BETA**\n- [Assistants](#assistants)\n   - [Assistants File Object](#assistants-file-object)\n- [Threads](#threads)\n- [Messages](#messages)\n   - [Message File Object](#message-file-object)\n- [Runs](#runs)\n   - [Run Step object](#run-step-object)\n   - [Run Step 
details](#run-step-details)\n- [Assistants Streaming](#assistants-streaming)\n   - [Message Delta Object](#message-delta-object)\n   - [Run Step Delta Object](#run-step-delta-object)\n- [Vector Stores](#vector-stores)\n   - [Vector store File](#vector-store-file)\n   - [Vector store File Batch](#vector-store-file-batch)\n\n## Getting an API Key\n\n⚠️ **Important**\n\nTo interact with OpenAI services, you'll need an API key. Follow these steps to obtain one:\n\n1. Visit [OpenAI](https://www.openai.com/).\n2. Sign up for an [account](https://platform.openai.com/signup) or [log in](https://platform.openai.com/login) if you already have one.\n3. Navigate to the [API key page](https://platform.openai.com/account/api-keys) and follow the instructions to generate a new API key.\n\nFor more information, consult OpenAI's [official documentation](https://platform.openai.com/docs/).\n\n⚠️  Please take precautions to keep your API key secure per [OpenAI's guidance](https://platform.openai.com/docs/api-reference/authentication):\n\n> Remember that your API key is a secret! Do not share it with others or expose\n> it in any client-side code (browsers, apps). Production requests must be\n> routed through your backend server where your API key can be securely\n> loaded from an environment variable or key management service.\n\nSwiftOpenAI has built-in support for AIProxy, which is a backend for AI apps, to satisfy this requirement.\nTo configure AIProxy, see the instructions [here](#aiproxy).\n\n\n## Installation\n\n### Swift Package Manager\n\n1. Open your Swift project in Xcode.\n2. Go to `File` ->  `Add Package Dependency`.\n3. In the search bar, enter [this URL](https://github.com/jamesrochabrun/SwiftOpenAI).\n4. Choose the version you'd like to install (see the note below).\n5. Click `Add Package`.\n\nNote: Xcode has a quirk where it defaults an SPM package's upper limit to 2.0.0. This package is beyond that\nlimit, so you should not accept the defaults that Xcode proposes. 
Instead, enter the lower bound of the\n[release version](https://github.com/jamesrochabrun/SwiftOpenAI/releases) that you'd like to support, and then\ntab out of the input box for Xcode to adjust the upper bound. Alternatively, you may select `branch` -> `main`\nto stay on the bleeding edge.\n\n## Compatibility\n\n### Platform Support\n\nSwiftOpenAI supports both Apple platforms and Linux.\n- **Apple platforms** include iOS 15+, macOS 13+, and watchOS 9+.\n- **Linux**: SwiftOpenAI on Linux uses AsyncHTTPClient to work around URLSession bugs in Apple's Foundation framework, and can be used with the [Vapor](https://vapor.codes/) server framework.\n\n### OpenAI-Compatible Providers\n\nSwiftOpenAI supports various providers that are OpenAI-compatible, including but not limited to:\n\n- [Azure OpenAI](#azure-openai)\n- [Anthropic](#anthropic)\n- [Gemini](#gemini)\n- [Ollama](#ollama)\n- [Groq](#groq)\n- [xAI](#xai)\n- [OpenRouter](#openRouter)\n- [DeepSeek](#deepseek)\n- [AIProxy](#aiproxy)\n\nCheck OpenAIServiceFactory for convenience initializers that you can use to provide custom URLs.\n\n## Usage\n\nTo use SwiftOpenAI in your project, first import the package:\n\n```swift\nimport SwiftOpenAI\n```\n\nThen, initialize the service using your OpenAI API key:\n\n```swift\nlet apiKey = \"your_openai_api_key_here\"\nlet service = OpenAIServiceFactory.service(apiKey: apiKey)\n```\n\nYou can optionally specify an organization name if needed.\n\n```swift\nlet apiKey = \"your_openai_api_key_here\"\nlet organizationID = \"your_organization_id\"\nlet service = OpenAIServiceFactory.service(apiKey: apiKey, organizationID: organizationID)\n```\n\nhttps://developer.apple.com/documentation/foundation/nsurlsessionconfiguration/1408259-timeoutintervalforrequest\n\nFor reasoning models, ensure that you extend the timeoutIntervalForRequest in the URL session configuration to a higher value. 
The default is 60 seconds, which may be insufficient, as requests to reasoning models can take longer to process and respond.\n\nTo configure it:\n\n```swift\nlet apiKey = \"your_openai_api_key_here\"\nlet organizationID = \"your_organization_id\"\nlet session = URLSession.shared\nsession.configuration.timeoutIntervalForRequest = 360 // e.g., 360 seconds or more.\nlet httpClient = URLSessionHTTPClientAdapter(urlSession: session)\nlet service = OpenAIServiceFactory.service(apiKey: apiKey, organizationID: organizationID, httpClient: httpClient)\n```\n\nThat's all you need to begin accessing the full range of OpenAI endpoints.\n\n### How to get the status code of network errors\n\nYou may want to build UI around the type of error that the API returns.\nFor example, a `429` means that your requests are being rate limited.\nThe `APIError` type has a case `responseUnsuccessful` with two associated values: a `description` and `statusCode`.\nHere is a usage example using the chat completion API:\n\n```swift\nlet service = OpenAIServiceFactory.service(apiKey: apiKey)\nlet parameters = ChatCompletionParameters(messages: [.init(role: .user, content: .text(\"hello world\"))],\n                                          model: .gpt4o)\ndo {\n   let choices = try await service.startChat(parameters: parameters).choices\n   // Work with choices\n} catch APIError.responseUnsuccessful(let description, let statusCode) {\n   print(\"Network error with status code: \\(statusCode) and description: \\(description)\")\n} catch {\n   print(error.localizedDescription)\n}\n```\n\n\n### Audio\n\n### Audio Transcriptions\nParameters\n```swift\npublic struct AudioTranscriptionParameters: Encodable {\n   \n   /// The name of the file asset is not documented in OpenAI's official documentation; however, it is essential for constructing the multipart request.\n   let fileName: String\n   /// The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, 
ogg, wav, or webm.\n   let file: Data\n   /// ID of the model to use. Only whisper-1 is currently available.\n   let model: String\n   /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.\n   let language: String?\n   /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language.\n   let prompt: String?\n   /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. Defaults to json\n   let responseFormat: String?\n   /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. Defaults to 0\n   let temperature: Double?\n   \n   public enum Model {\n      case whisperOne \n      case custom(model: String)\n   }\n   \n   public init(\n      fileName: String,\n      file: Data,\n      model: Model = .whisperOne,\n      prompt: String? = nil,\n      responseFormat: String? = nil,\n      temperature: Double? = nil,\n      language: String? 
= nil)\n   {\n      self.fileName = fileName\n      self.file = file\n      self.model = model.rawValue\n      self.prompt = prompt\n      self.responseFormat = responseFormat\n      self.temperature = temperature\n      self.language = language\n   }\n}\n```\n\nResponse\n```swift\npublic struct AudioObject: Decodable {\n   \n   /// The transcribed text if the request uses the `transcriptions` API, or the translated text if the request uses the `translations` endpoint.\n   public let text: String\n}\n```\n\nUsage\n```swift\nlet fileName = \"narcos.m4a\"\nlet data = Data(contentsOfURL:_) // Data retrieved from the file named \"narcos.m4a\".\nlet parameters = AudioTranscriptionParameters(fileName: fileName, file: data) // **Important**: in the file name always provide the file extension.\nlet audioObject =  try await service.createTranscription(parameters: parameters)\n```\n### Audio Translations\nParameters\n```swift\npublic struct AudioTranslationParameters: Encodable {\n   \n   /// The name of the file asset is not documented in OpenAI's official documentation; however, it is essential for constructing the multipart request.\n   let fileName: String\n   /// The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n   let file: Data\n   /// ID of the model to use. Only whisper-1 is currently available.\n   let model: String\n   /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language.\n   let prompt: String?\n   /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. Defaults to json\n   let responseFormat: String?\n   /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. Defaults to 0\n   let temperature: Double?\n   \n   public enum Model {\n      case whisperOne \n      case custom(model: String)\n   }\n   \n   public init(\n      fileName: String,\n      file: Data,\n      model: Model = .whisperOne,\n      prompt: String? = nil,\n      responseFormat: String? = nil,\n      temperature: Double? = nil)\n   {\n      self.fileName = fileName\n      self.file = file\n      self.model = model.rawValue\n      self.prompt = prompt\n      self.responseFormat = responseFormat\n      self.temperature = temperature\n   }\n}\n```\n\nResponse\n```swift\npublic struct AudioObject: Decodable {\n   \n   /// The transcribed text if the request uses the `transcriptions` API, or the translated text if the request uses the `translations` endpoint.\n   public let text: String\n}\n```\n\nUsage\n```swift\nlet fileName = \"german.m4a\"\nlet data = Data(contentsOfURL:_) // Data retrieved from the file named \"german.m4a\".\nlet parameters = AudioTranslationParameters(fileName: fileName, file: data) // **Important**: in the file name always provide the file extension.\nlet audioObject = try await service.createTranslation(parameters: parameters)\n```\n\n### Audio Speech\nParameters\n```swift\n/// [Generates audio from the input text.](https://platform.openai.com/docs/api-reference/audio/createSpeech)\npublic struct AudioSpeechParameters: Encodable {\n\n   /// One of the available [TTS models](https://platform.openai.com/docs/models/tts): tts-1 or tts-1-hd\n   let model: String\n   /// The text to generate audio for. The maximum length is 4096 characters.\n   let input: String\n   /// The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. 
Previews of the voices are available in the [Text to speech guide.](https://platform.openai.com/docs/guides/text-to-speech/voice-options)\n   let voice: String\n   /// Defaults to mp3, The format to return audio in. Supported formats are mp3, opus, aac, and flac.\n   let responseFormat: String?\n   /// Defaults to 1,  The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.\n   let speed: Double?\n\n   public enum TTSModel: String {\n      case tts1 = \"tts-1\"\n      case tts1HD = \"tts-1-hd\"\n   }\n\n   public enum Voice: String {\n      case alloy\n      case echo\n      case fable\n      case onyx\n      case nova\n      case shimmer\n   }\n\n   public enum ResponseFormat: String {\n      case mp3\n      case opus\n      case aac\n      case flac\n   }\n   \n   public init(\n      model: TTSModel,\n      input: String,\n      voice: Voice,\n      responseFormat: ResponseFormat? = nil,\n      speed: Double? = nil)\n   {\n       self.model = model.rawValue\n       self.input = input\n       self.voice = voice.rawValue\n       self.responseFormat = responseFormat?.rawValue\n       self.speed = speed\n   }\n}\n```\n\nResponse\n```swift\n/// The [audio speech](https://platform.openai.com/docs/api-reference/audio/createSpeech) response.\npublic struct AudioSpeechObject: Decodable {\n\n   /// The audio file content data.\n   public let output: Data\n}\n```\n\nUsage\n```swift\nlet prompt = \"Hello, how are you today?\"\nlet parameters = AudioSpeechParameters(model: .tts1, input: prompt, voice: .shimmer)\nlet audioObjectData = try await service.createSpeech(parameters: parameters).output\nplayAudio(from: audioObjectData)\n\n// Play data\n private func playAudio(from data: Data) {\n       do {\n           // Initialize the audio player with the data\n           audioPlayer = try AVAudioPlayer(data: data)\n           audioPlayer?.prepareToPlay()\n           audioPlayer?.play()\n       } catch {\n           // Handle errors\n           print(\"Error 
playing audio: \\(error.localizedDescription)\")\n       }\n   }\n```\n\n### Audio Realtime\n\nThe [Realtime API](https://platform.openai.com/docs/api-reference/realtime) enables bidirectional voice conversations with OpenAI's models using WebSockets and low-latency audio streaming. The API supports both audio-to-audio and text-to-audio interactions with built-in voice activity detection, transcription, and function calling.\n\n**Platform Requirements:** iOS 15+, macOS 13+, watchOS 9+. Requires AVFoundation (not available on Linux).\n\n**Permissions Required:**\n- Add `NSMicrophoneUsageDescription` to your Info.plist\n- On macOS: Enable sandbox entitlements for microphone access and outgoing network connections\n\nParameters\n```swift\n/// Configuration for creating a realtime session\npublic struct OpenAIRealtimeSessionConfiguration: Encodable, Sendable {\n\n   /// The input audio format. Options: .pcm16, .g711_ulaw, .g711_alaw. Default is .pcm16\n   let inputAudioFormat: AudioFormat?\n   /// Configuration for input audio transcription using Whisper\n   let inputAudioTranscription: InputAudioTranscription?\n   /// System instructions for the model. Recommended default provided\n   let instructions: String?\n   /// Maximum tokens for response output. Can be .value(Int) or .infinite\n   let maxResponseOutputTokens: MaxResponseOutputTokens?\n   /// Output modalities: [.audio, .text] or [.text] only. Default is [.audio, .text]\n   let modalities: [Modality]?\n   /// The output audio format. Options: .pcm16, .g711_ulaw, .g711_alaw. Default is .pcm16\n   let outputAudioFormat: AudioFormat?\n   /// Audio playback speed. Range: 0.25 to 4.0. Default is 1.0\n   let speed: Double?\n   /// Sampling temperature for model responses. Range: 0.6 to 1.2. 
Default is 0.8\n   let temperature: Double?\n   /// Array of tools/functions available for the model to call\n   let tools: [Tool]?\n   /// Tool selection mode: .none, .auto, .required, or .specific(functionName: String)\n   let toolChoice: ToolChoice?\n   /// Voice activity detection configuration. Options: .serverVAD or .semanticVAD\n   let turnDetection: TurnDetection?\n   /// The voice to use. Options: \"alloy\", \"ash\", \"ballad\", \"coral\", \"echo\", \"sage\", \"shimmer\", \"verse\"\n   let voice: String?\n\n   /// Available audio formats\n   public enum AudioFormat: String, Encodable, Sendable {\n      case pcm16\n      case g711_ulaw = \"g711-ulaw\"\n      case g711_alaw = \"g711-alaw\"\n   }\n\n   /// Output modalities\n   public enum Modality: String, Encodable, Sendable {\n      case audio\n      case text\n   }\n\n   /// Turn detection configuration\n   public struct TurnDetection: Encodable, Sendable {\n      /// Server-based VAD with customizable timing\n      public static func serverVAD(\n         prefixPaddingMs: Int = 300,\n         silenceDurationMs: Int = 500,\n         threshold: Double = 0.5\n      ) -> TurnDetection\n\n      /// Semantic VAD with eagerness level\n      public static func semanticVAD(eagerness: Eagerness = .medium) -> TurnDetection\n\n      public enum Eagerness: String, Encodable, Sendable {\n         case low, medium, high\n      }\n   }\n}\n```\n\nResponse\n```swift\n/// Messages received from the realtime API\npublic enum OpenAIRealtimeMessage: Sendable {\n   case error(String?)                    
// Error occurred\n   case sessionCreated                    // Session successfully created\n   case sessionUpdated                    // Configuration updated\n   case responseCreated                   // Model started generating response\n   case responseAudioDelta(String)        // Audio chunk (base64 PCM16)\n   case inputAudioBufferSpeechStarted     // User started speaking (VAD detected)\n   case responseFunctionCallArgumentsDone(name: String, arguments: String, callId: String)\n   case responseTranscriptDelta(String)   // Partial AI transcript\n   case responseTranscriptDone(String)    // Complete AI transcript\n   case inputAudioBufferTranscript(String)           // User audio transcript\n   case inputAudioTranscriptionDelta(String)         // Partial user transcription\n   case inputAudioTranscriptionCompleted(String)     // Complete user transcription\n}\n```\n\nSupporting Types\n```swift\n/// Manages microphone input and audio playback for realtime conversations.\n/// Audio played through AudioController does not interfere with mic input (the model won't hear itself).\n@RealtimeActor\npublic final class AudioController {\n\n   /// Initialize with specified modes\n   /// - Parameter modes: Array of .record (for microphone) and/or .playback (for audio output)\n   public init(modes: [Mode]) async throws\n\n   public enum Mode {\n      case record   // Enable microphone streaming\n      case playback // Enable audio playback\n   }\n\n   /// Returns an AsyncStream of microphone audio buffers\n   /// - Throws: OpenAIError if .record mode wasn't enabled during initialization\n   public func micStream() throws -> AsyncStream<AVAudioPCMBuffer>\n\n   /// Plays base64-encoded PCM16 audio from the model\n   /// - Parameter base64String: Base64-encoded PCM16 audio data\n   public func playPCM16Audio(base64String: String)\n\n   /// Interrupts current audio playback (useful when user starts speaking)\n   public func interruptPlayback()\n\n   /// Stops all audio 
operations\n   public func stop()\n}\n\n/// Utility for encoding audio buffers to base64\npublic enum AudioUtils {\n   /// Converts AVAudioPCMBuffer to base64 string for transmission\n   public static func base64EncodeAudioPCMBuffer(from buffer: AVAudioPCMBuffer) -> String?\n\n   /// Checks if headphones are connected\n   public static var headphonesConnected: Bool\n}\n```\n\nUsage\n```swift\n// 1. Create session configuration\nlet configuration = OpenAIRealtimeSessionConfiguration(\n   voice: \"alloy\",\n   instructions: \"You are a helpful AI assistant. Be concise and friendly.\",\n   turnDetection: .serverVAD(\n      prefixPaddingMs: 300,\n      silenceDurationMs: 500,\n      threshold: 0.5\n   ),\n   inputAudioTranscription: .init(model: \"whisper-1\")\n)\n\n// 2. Create realtime session\nlet session = try await service.realtimeSession(\n   model: \"gpt-4o-mini-realtime-preview-2024-12-17\",\n   configuration: configuration\n)\n\n// 3. Initialize audio controller for recording and playback\nlet audioController = try await AudioController(modes: [.record, .playback])\n\n// 4. 
Handle incoming messages from OpenAI\nTask {\n   for await message in session.receiver {\n      switch message {\n      case .responseAudioDelta(let audio):\n         // Play audio from the model\n         audioController.playPCM16Audio(base64String: audio)\n\n      case .inputAudioBufferSpeechStarted:\n         // User started speaking - interrupt model's audio\n         audioController.interruptPlayback()\n\n      case .responseTranscriptDelta(let text):\n         // Display partial model transcript\n         print(\"Model (partial): \\(text)\")\n\n      case .responseTranscriptDone(let text):\n         // Display complete model transcript\n         print(\"Model: \\(text)\")\n\n      case .inputAudioTranscriptionCompleted(let text):\n         // Display user's transcribed speech\n         print(\"User: \\(text)\")\n\n      case .responseFunctionCallArgumentsDone(let name, let args, let callId):\n         // Handle function call from model\n         print(\"Function call: \\(name) with args: \\(args)\")\n         // Execute function and send result back\n\n      case .error(let error):\n         print(\"Error: \\(error ?? \"Unknown error\")\")\n\n      default:\n         break\n      }\n   }\n}\n\n// 5. Stream microphone audio to OpenAI\nTask {\n   do {\n      for try await buffer in audioController.micStream() {\n         // Encode audio buffer to base64\n         guard let base64Audio = AudioUtils.base64EncodeAudioPCMBuffer(from: buffer) else {\n            continue\n         }\n\n         // Send audio to OpenAI\n         try await session.sendMessage(\n            OpenAIRealtimeInputAudioBufferAppend(audio: base64Audio)\n         )\n      }\n   } catch {\n      print(\"Microphone error: \\(error)\")\n   }\n}\n\n// 6. Manually trigger a response (optional - usually VAD handles this)\ntry await session.sendMessage(\n   OpenAIRealtimeResponseCreate()\n)\n\n// 7. 
Update session configuration mid-conversation (optional)\nlet newConfig = OpenAIRealtimeSessionConfiguration(\n   voice: \"shimmer\",\n   temperature: 0.9\n)\ntry await session.sendMessage(\n   OpenAIRealtimeSessionUpdate(sessionConfig: newConfig)\n)\n\n// 8. Cleanup when done\naudioController.stop()\nsession.disconnect()\n```\n\nFunction Calling\n```swift\n// Define tools in configuration\nlet tools: [OpenAIRealtimeSessionConfiguration.Tool] = [\n   .init(\n      name: \"get_weather\",\n      description: \"Get the current weather in a location\",\n      parameters: [\n         \"type\": \"object\",\n         \"properties\": [\n            \"location\": [\n               \"type\": \"string\",\n               \"description\": \"City name, e.g. San Francisco\"\n            ]\n         ],\n         \"required\": [\"location\"]\n      ]\n   )\n]\n\nlet config = OpenAIRealtimeSessionConfiguration(\n   voice: \"alloy\",\n   tools: tools,\n   toolChoice: .auto\n)\n\n// Handle function calls in message receiver\ncase .responseFunctionCallArgumentsDone(let name, let args, let callId):\n   if name == \"get_weather\" {\n      // Parse arguments and execute function\n      let result = getWeather(arguments: args)\n\n      // Send result back to model\n      try await session.sendMessage(\n         OpenAIRealtimeConversationItemCreate(\n            item: .functionCallOutput(\n               callId: callId,\n               output: result\n            )\n         )\n      )\n   }\n```\n\nAdvanced Features\n- **Voice Activity Detection (VAD):** Choose between server-based VAD (with configurable timing) or semantic VAD (with eagerness levels)\n- **Transcription:** Enable Whisper transcription for both user input and model output\n- **Session Updates:** Change voice, instructions, or tools mid-conversation without reconnecting\n- **Response Triggers:** Manually trigger model responses or rely on automatic VAD\n- **Platform-Specific Behavior:** Automatically selects optimal audio 
API based on platform and headphone connection\n\nFor a complete implementation example, see `Examples/RealtimeExample/RealtimeExample.swift` in the repository.\n\n### Chat\nParameters\n```swift\npublic struct ChatCompletionParameters: Encodable {\n   \n   /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models)\n   public var messages: [Message]\n   /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/how-we-use-your-data) table for details on which models work with the Chat API.\n   /// Supports GPT-4, GPT-4o, GPT-5, and other models. For GPT-5 family: .gpt5, .gpt5Mini, .gpt5Nano\n   public var model: String\n   /// Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products.\n   /// Defaults to false\n   public var store: Bool?\n   /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. Defaults to 0\n   /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)\n   public var frequencyPenalty: Double?\n   /// Controls how the model responds to function calls. none means the model does not call a function, and responds to the end-user. auto means the model can pick between an end-user or calling a function. Specifying a particular function via {\"name\": \"my_function\"} forces the model to call that function. none is the default when no functions are present. 
auto is the default if functions are present.\n   @available(*, deprecated, message: \"Deprecated in favor of tool_choice.\")\n   public var functionCall: FunctionCall?\n   /// Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message. \n   /// auto means the model can pick between generating a message or calling a function. Specifying a particular function via `{\"type: \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that function.\n   /// `none` is the default when no functions are present. auto is the default if functions are present.\n   public var toolChoice: ToolChoice?\n   /// A list of functions the model may generate JSON inputs for.\n   @available(*, deprecated, message: \"Deprecated in favor of tools.\")\n   public var functions: [ChatFunction]?\n   /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n   public var tools: [Tool]?\n   /// Whether to enable parallel function calling during tool use. Defaults to true.\n   public var parallelToolCalls: Bool?\n   /// Modify the likelihood of specified tokens appearing in the completion.\n   /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. Defaults to null.\n   public var logitBias: [Int: Double]?\n   /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. 
This option is currently not available on the gpt-4-vision-preview model. Defaults to false.\n   public var logprobs: Bool?\n   /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.\n   public var topLogprobs: Int?\n   /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\n   /// This value is now deprecated in favor of max_completion_tokens, and is not compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning)\n   public var maxTokens: Int?\n   /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning)\n   public var maCompletionTokens: Int?\n   /// How many chat completion choices to generate for each input message. Defaults to 1.\n   public var n: Int?\n   /// Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default:\n   /// [\"text\"]\n   ///The gpt-4o-audio-preview model can also be used to [generate audio](https://platform.openai.com/docs/guides/audio). To request that this model generate both text and audio responses, you can use:\n   /// [\"text\", \"audio\"]\n   public var modalities: [String]?\n   /// Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. [Learn more.](https://platform.openai.com/docs/guides/audio)\n   public var audio: Audio?\n   /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
Defaults to 0\n   /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)\n   public var presencePenalty: Double?\n   /// An object specifying the format that the model must output. Used to enable JSON mode.\n   /// Setting to `{ type: \"json_object\" }` enables `JSON` mode, which guarantees the message the model generates is valid JSON.\n   ///Important: when using `JSON` mode you must still instruct the model to produce `JSON` yourself via some conversation message, for example via your system message. If you don't do this, the model may generate an unending stream of whitespace until the generation reaches the token limit, which may take a lot of time and give the appearance of a \"stuck\" request. Also note that the message content may be partial (i.e. cut off) if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n   public var responseFormat: ResponseFormat?\n   /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n   /// If set to 'auto', the system will utilize scale tier credits until they are exhausted.\n   /// If set to 'default', the request will be processed in the shared cluster.\n   /// When this parameter is set, the response body will include the service_tier utilized.\n   public var serviceTier: String?\n   /// This feature is in `Beta`. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n   /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n   public var seed: Int?\n   /// Up to 4 sequences where the API will stop generating further tokens. 
Defaults to null.\n   public var stop: [String]?\n   /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format) as they become available, with the stream terminated by a data: [DONE] message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions ).\n   /// Defaults to false.\n   var stream: Bool? = nil\n   /// Options for streaming response. Only set this when you set stream: true\n   var streamOptions: StreamOptions?\n   /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n   /// We generally recommend altering this or `top_p` but not both. Defaults to 1.\n   public var temperature: Double?\n   /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n   /// We generally recommend altering this or `temperature` but not both. Defaults to 1\n   public var topP: Double?\n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n   /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).\n   public var user: String?\n   \n   public struct Message: Encodable {\n      \n      /// The role of the messages author. One of system, user, assistant, or tool message.\n      let role: String\n      /// The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n      let content: ContentType\n      /// The name of the author of this message. 
name is required if role is function, and it should be the name of the function whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n      let name: String?\n      /// The name and arguments of a function that should be called, as generated by the model.\n      @available(*, deprecated, message: \"Deprecated and replaced by `tool_calls`\")\n      let functionCall: FunctionCall?\n      /// The tool calls generated by the model, such as function calls.\n      let toolCalls: [ToolCall]?\n      /// Tool call that this message is responding to.\n      let toolCallID: String?\n      \n      public enum ContentType: Encodable {\n         \n         case text(String)\n         case contentArray([MessageContent])\n         \n         public func encode(to encoder: Encoder) throws {\n            var container = encoder.singleValueContainer()\n            switch self {\n            case .text(let text):\n               try container.encode(text)\n            case .contentArray(let contentArray):\n               try container.encode(contentArray)\n            }\n         }\n         \n         public enum MessageContent: Encodable, Equatable, Hashable {\n            \n            case text(String)\n            case imageUrl(ImageDetail)\n            \n            public struct ImageDetail: Encodable, Equatable, Hashable {\n               \n               public let url: URL\n               public let detail: String?\n               \n               enum CodingKeys: String, CodingKey {\n                  case url\n                  case detail\n               }\n               \n               public func encode(to encoder: Encoder) throws {\n                  var container = encoder.container(keyedBy: CodingKeys.self)\n                  try container.encode(url, forKey: .url)\n                  try container.encode(detail, forKey: .detail)\n               }\n               \n               public init(url: URL, 
detail: String? = nil) {\n                  self.url = url\n                  self.detail = detail\n               }\n            }\n            \n            enum CodingKeys: String, CodingKey {\n               case type\n               case text\n               case imageUrl = \"image_url\"\n            }\n            \n            public func encode(to encoder: Encoder) throws {\n               var container = encoder.container(keyedBy: CodingKeys.self)\n               switch self {\n               case .text(let text):\n                  try container.encode(\"text\", forKey: .type)\n                  try container.encode(text, forKey: .text)\n               case .imageUrl(let imageDetail):\n                  try container.encode(\"image_url\", forKey: .type)\n                  try container.encode(imageDetail, forKey: .imageUrl)\n               }\n            }\n            \n            public func hash(into hasher: inout Hasher) {\n               switch self {\n               case .text(let string):\n                  hasher.combine(string)\n               case .imageUrl(let imageDetail):\n                  hasher.combine(imageDetail)\n               }\n            }\n            \n            public static func ==(lhs: MessageContent, rhs: MessageContent) -> Bool {\n               switch (lhs, rhs) {\n               case let (.text(a), .text(b)):\n                  return a == b\n               case let (.imageUrl(a), .imageUrl(b)):\n                  return a == b\n               default:\n                  return false\n               }\n            }\n         }\n      }\n      \n      public enum Role: String {\n         case system // content, role\n         case user // content, role\n         case assistant // content, role, tool_calls\n         case tool // content, role, tool_call_id\n      }\n      \n      enum CodingKeys: String, CodingKey {\n         case role\n         case content\n         case name\n         case functionCall = 
\"function_call\"\n         case toolCalls = \"tool_calls\"\n         case toolCallID = \"tool_call_id\"\n      }\n      \n      public init(\n         role: Role,\n         content: ContentType,\n         name: String? = nil,\n         functionCall: FunctionCall? = nil,\n         toolCalls: [ToolCall]? = nil,\n         toolCallID: String? = nil)\n      {\n         self.role = role.rawValue\n         self.content = content\n         self.name = name\n         self.functionCall = functionCall\n         self.toolCalls = toolCalls\n         self.toolCallID = toolCallID\n      }\n   }\n   \n   @available(*, deprecated, message: \"Deprecated in favor of ToolChoice.\")\n   public enum FunctionCall: Encodable, Equatable {\n      case none\n      case auto\n      case function(String)\n      \n      enum CodingKeys: String, CodingKey {\n         case none = \"none\"\n         case auto = \"auto\"\n         case function = \"name\"\n      }\n      \n      public func encode(to encoder: Encoder) throws {\n         switch self {\n         case .none:\n            var container = encoder.singleValueContainer()\n            try container.encode(CodingKeys.none.rawValue)\n         case .auto:\n            var container = encoder.singleValueContainer()\n            try container.encode(CodingKeys.auto.rawValue)\n         case .function(let name):\n            var container = encoder.container(keyedBy: CodingKeys.self)\n            try container.encode(name, forKey: .function)\n         }\n      }\n   }\n   \n   /// [Documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)\n   public struct Tool: Encodable {\n      \n      /// The type of the tool. 
Currently, only `function` is supported.\n      let type: String\n      /// object\n      let function: ChatFunction\n      \n      public init(\n         type: String = \"function\",\n         function: ChatFunction)\n      {\n         self.type = type\n         self.function = function\n      }\n   }\n   \n   public struct ChatFunction: Codable, Equatable {\n      \n      /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n      let name: String\n      /// A description of what the function does, used by the model to choose when and how to call the function.\n      let description: String?\n      /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format.\n      /// Omitting parameters defines a function with an empty parameter list.\n      let parameters: JSONSchema?\n      /// Defaults to false. Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the [function calling guide](https://platform.openai.com/docs/guides/function-calling).\n      let strict: Bool?\n      \n      public init(\n         name: String,\n         strict: Bool?,\n         description: String?,\n         parameters: JSONSchema?)\n      {\n         self.name = name\n         self.strict = strict\n         self.description = description\n         self.parameters = parameters\n      }\n   }\n   \n   public enum ServiceTier: String, Encodable {\n      /// Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers subscribed to the scale tier service:\n      /// If set to 'auto', the system will utilize scale tier credits until they are exhausted.\n      /// If set to 'default', the request will be processed in the shared cluster.\n      /// When this parameter is set, the response body will include the service_tier utilized.\n      case auto\n      case `default`\n   }\n   \n   public struct StreamOptions: Encodable {\n      /// If set, an additional chunk will be streamed before the data: [DONE] message.\n      /// The usage field on this chunk shows the token usage statistics for the entire request,\n      /// and the choices field will always be an empty array. All other chunks will also include\n      /// a usage field, but with a null value.\n      let includeUsage: Bool\n\n      enum CodingKeys: String, CodingKey {\n          case includeUsage = \"include_usage\"\n      }\n   }\n   \n   /// Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]\n   /// [Learn more.](https://platform.openai.com/docs/guides/audio)\n   public struct Audio: Encodable {\n      /// Specifies the voice type. Supported voices are alloy, echo, fable, onyx, nova, and shimmer.\n      public let voice: String\n      /// Specifies the output audio format. 
Must be one of wav, mp3, flac, opus, or pcm16.\n      public let format: String\n      \n      public init(\n         voice: String,\n         format: String)\n      {\n         self.voice = voice\n         self.format = format\n      }\n   }\n\n   enum CodingKeys: String, CodingKey {\n      case messages\n      case model\n      case store\n      case frequencyPenalty = \"frequency_penalty\"\n      case toolChoice = \"tool_choice\"\n      case functionCall = \"function_call\"\n      case tools\n      case parallelToolCalls = \"parallel_tool_calls\"\n      case functions\n      case logitBias = \"logit_bias\"\n      case logprobs\n      case topLogprobs = \"top_logprobs\"\n      case maxTokens = \"max_tokens\"\n      case maCompletionTokens = \"max_completion_tokens\"\n      case n\n      case modalities\n      case audio\n      case responseFormat = \"response_format\"\n      case presencePenalty = \"presence_penalty\"\n      case seed\n      case serviceTier = \"service_tier\"\n      case stop\n      case stream\n      case streamOptions = \"stream_options\"\n      case temperature\n      case topP = \"top_p\"\n      case user\n   }\n   \n   public init(\n      messages: [Message],\n      model: Model,\n      store: Bool? = nil,\n      frequencyPenalty: Double? = nil,\n      functionCall: FunctionCall? = nil,\n      toolChoice: ToolChoice? = nil,\n      functions: [ChatFunction]? = nil,\n      tools: [Tool]? = nil,\n      parallelToolCalls: Bool? = nil,\n      logitBias: [Int: Double]? = nil,\n      logProbs: Bool? = nil,\n      topLogprobs: Int? = nil,\n      maxTokens: Int? = nil,\n      n: Int? = nil,\n      modalities: [String]? = nil,\n      audio: Audio? = nil,\n      responseFormat: ResponseFormat? = nil,\n      presencePenalty: Double? = nil,\n      serviceTier: ServiceTier? = nil,\n      seed: Int? = nil,\n      stop: [String]? = nil,\n      temperature: Double? = nil,\n      topProbability: Double? = nil,\n      user: String? 
= nil)\n   {\n      self.messages = messages\n      self.model = model.value\n      self.store = store\n      self.frequencyPenalty = frequencyPenalty\n      self.functionCall = functionCall\n      self.toolChoice = toolChoice\n      self.functions = functions\n      self.tools = tools\n      self.parallelToolCalls = parallelToolCalls\n      self.logitBias = logitBias\n      self.logprobs = logProbs\n      self.topLogprobs = topLogprobs\n      self.maxTokens = maxTokens\n      self.n = n\n      self.modalities = modalities\n      self.audio = audio\n      self.responseFormat = responseFormat\n      self.presencePenalty = presencePenalty\n      self.serviceTier = serviceTier?.rawValue\n      self.seed = seed\n      self.stop = stop\n      self.temperature = temperature\n      self.topP = topProbability\n      self.user = user\n   }\n}\n```\n\nResponse\n### Chat completion object\n```swift\n/// Represents a chat [completion](https://platform.openai.com/docs/api-reference/chat/object) response returned by model, based on the provided input.\npublic struct ChatCompletionObject: Decodable {\n   \n   /// A unique identifier for the chat completion.\n   public let id: String\n   /// A list of chat completion choices. Can be more than one if n is greater than 1.\n   public let choices: [ChatChoice]\n   /// The Unix timestamp (in seconds) of when the chat completion was created.\n   public let created: Int\n   /// The model used for the chat completion.\n   public let model: String\n   /// The service tier used for processing the request. 
This field is only included if the service_tier parameter is specified in the request.\n   public let serviceTier: String?\n   /// This fingerprint represents the backend configuration that the model runs with.\n   /// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n   public let systemFingerprint: String?\n   /// The object type, which is always chat.completion.\n   public let object: String\n   /// Usage statistics for the completion request.\n   public let usage: ChatUsage\n   \n   public struct ChatChoice: Decodable {\n      \n      /// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.\n      public let finishReason: IntOrStringValue?\n      /// The index of the choice in the list of choices.\n      public let index: Int\n      /// A chat completion message generated by the model.\n      public let message: ChatMessage   \n      /// Log probability information for the choice.\n      public let logprobs: LogProb?\n      \n      public struct ChatMessage: Decodable {\n         \n         /// The contents of the message.\n         public let content: String?\n         /// The tool calls generated by the model, such as function calls.\n         public let toolCalls: [ToolCall]?\n         /// The name and arguments of a function that should be called, as generated by the model.\n         @available(*, deprecated, message: \"Deprecated and replaced by `tool_calls`\")\n         public let functionCall: FunctionCall?\n         /// The role of the author of this message.\n         public let role: String\n         /// Provided by the Vision 
API.\n         public let finishDetails: FinishDetails?\n         /// The refusal message generated by the model.\n         public let refusal: String?\n         /// If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).\n         public let audio: Audio?\n         \n         /// Provided by the Vision API.\n         public struct FinishDetails: Decodable {\n            let type: String\n         }\n         \n         public struct Audio: Decodable {\n            /// Unique identifier for this audio response.\n            public let id: String\n            /// The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.\n            public let expiresAt: Int\n            /// Base64 encoded audio bytes generated by the model, in the format specified in the request.\n            public let data: String\n            /// Transcript of the audio generated by the model.\n            public let transcript: String\n            \n            enum CodingKeys: String, CodingKey {\n               case id\n               case expiresAt = \"expires_at\"\n               case data\n               case transcript\n            }\n         }\n      }\n      \n      public struct LogProb: Decodable {\n         /// A list of message content tokens with log probability information.\n         let content: [TokenDetail]\n      }\n      \n      public struct TokenDetail: Decodable {\n         /// The token.\n         let token: String\n         /// The log probability of this token.\n         let logprob: Double\n         /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. 
Can be null if there is no bytes representation for the token.\n         let bytes: [Int]?\n         /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.\n         let topLogprobs: [TopLogProb]\n         \n         enum CodingKeys: String, CodingKey {\n            case token, logprob, bytes\n            case topLogprobs = \"top_logprobs\"\n         }\n         \n         struct TopLogProb: Decodable {\n            /// The token.\n            let token: String\n            /// The log probability of this token.\n            let logprob: Double\n            /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.\n            let bytes: [Int]?\n         }\n      }\n   }\n   \n   public struct ChatUsage: Decodable {\n      \n      /// Number of tokens in the generated completion.\n      public let completionTokens: Int\n      /// Number of tokens in the prompt.\n      public let promptTokens: Int\n      /// Total number of tokens used in the request (prompt + completion).\n      public let totalTokens: Int\n   }\n}\n```\n\nUsage\n```swift\nlet prompt = \"Tell me a joke\"\nlet parameters = ChatCompletionParameters(messages: [.init(role: .user, content: .text(prompt))], model: .gpt4o)\nlet chatCompletionObject = try await service.startChat(parameters: parameters)\n```\n\nResponse\n### Chat completion chunk object\n```swift\n/// Represents a [streamed](https://platform.openai.com/docs/api-reference/chat/streaming) chunk of a chat completion response returned by model, based on the provided input.\npublic struct ChatCompletionChunkObject: Decodable {\n   \n   /// A unique identifier for the chat completion chunk.\n   
public let id: String\n   /// A list of chat completion choices. Can be more than one if n is greater than 1.\n   public let choices: [ChatChoice]\n   /// The Unix timestamp (in seconds) of when the chat completion chunk was created.\n   public let created: Int\n   /// The model to generate the completion.\n   public let model: String\n   /// The service tier used for processing the request. This field is only included if the service_tier parameter is specified in the request.\n   public let serviceTier: String?\n   /// This fingerprint represents the backend configuration that the model runs with.\n   /// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n   public let systemFingerprint: String?\n   /// The object type, which is always chat.completion.chunk.\n   public let object: String\n   \n   public struct ChatChoice: Decodable {\n      \n      /// A chat completion delta generated by streamed model responses.\n      public let delta: Delta\n      /// The reason the model stopped generating tokens. 
This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.\n      public let finishReason: IntOrStringValue?\n      /// The index of the choice in the list of choices.\n      public let index: Int\n      /// Provided by the Vision API.\n      public let finishDetails: FinishDetails?\n      \n      public struct Delta: Decodable {\n         \n         /// The contents of the chunk message.\n         public let content: String?\n         /// The tool calls generated by the model, such as function calls.\n         public let toolCalls: [ToolCall]?\n         /// The name and arguments of a function that should be called, as generated by the model.\n         @available(*, deprecated, message: \"Deprecated and replaced by `tool_calls`\")\n         public let functionCall: FunctionCall?\n         /// The role of the author of this message.\n         public let role: String?\n      }\n      \n      public struct LogProb: Decodable {\n         /// A list of message content tokens with log probability information.\n         let content: [TokenDetail]\n      }\n      \n      public struct TokenDetail: Decodable {\n         /// The token.\n         let token: String\n         /// The log probability of this token.\n         let logprob: Double\n         /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.\n         let bytes: [Int]?\n         /// List of the most likely tokens and their log probability, at this token position. 
In rare cases, there may be fewer than the number of requested top_logprobs returned.\n         let topLogprobs: [TopLogProb]\n         \n         enum CodingKeys: String, CodingKey {\n            case token, logprob, bytes\n            case topLogprobs = \"top_logprobs\"\n         }\n         \n         struct TopLogProb: Decodable {\n            /// The token.\n            let token: String\n            /// The log probability of this token.\n            let logprob: Double\n            /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.\n            let bytes: [Int]?\n         }\n      }\n      \n      /// Provided by the Vision API.\n      public struct FinishDetails: Decodable {\n         let type: String\n      }\n   }\n}\n```\nUsage\n```swift\nlet prompt = \"Tell me a joke\"\nlet parameters = ChatCompletionParameters(messages: [.init(role: .user, content: .text(prompt))], model: .gpt4o)\nlet chatCompletionObject = try await service.startStreamedChat(parameters: parameters)\n```\n\n### Function Calling\n\nChat Completion also supports [Function Calling](https://platform.openai.com/docs/guides/function-calling) and [Parallel Function Calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling). `functions` has been deprecated in favor of `tools` check [OpenAI Documentation](https://platform.openai.com/docs/api-reference/chat/create) for more.\n\n```swift\npublic struct ToolCall: Codable {\n\n   public let index: Int\n   /// The ID of the tool call.\n   public let id: String?\n   /// The type of the tool. 
Currently, only `function` is supported.\n   public let type: String?\n   /// The function that the model called.\n   public let function: FunctionCall\n\n   public init(\n      index: Int,\n      id: String,\n      type: String = \"function\",\n      function: FunctionCall)\n   {\n      self.index = index\n      self.id = id\n      self.type = type\n      self.function = function\n   }\n}\n\npublic struct FunctionCall: Codable {\n\n   /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.\n   let arguments: String\n   /// The name of the function to call.\n   let name: String\n\n   public init(\n      arguments: String,\n      name: String)\n   {\n      self.arguments = arguments\n      self.name = name\n   }\n}\n```\n\nUsage\n```swift\n/// Define a `ToolCall`\nvar tool: ToolCall {\n   .init(\n      type: \"function\", // The type of the tool. 
Currently, only \"function\" is supported.\n      function: .init(\n         name: \"create_image\",\n         description: \"Call this function if the request asks to generate an image\",\n         parameters: .init(\n            type: .object,\n            properties: [\n               \"prompt\": .init(type: .string, description: \"The exact prompt passed in.\"),\n               \"count\": .init(type: .integer, description: \"The number of images requested\")\n            ],\n            required: [\"prompt\", \"count\"])))\n}\n\nlet prompt = \"Show me an image of a unicorn eating ice cream\"\nlet content: ChatCompletionParameters.Message.ContentType = .text(prompt)\nlet parameters = ChatCompletionParameters(messages: [.init(role: .user, content: content)], model: .gpt41106Preview, tools: [tool])\nlet chatCompletionObject = try await service.startStreamedChat(parameters: parameters)\n```\nFor more details about how to also upload base 64 encoded images in iOS check the [ChatFunctionsCall](https://github.com/jamesrochabrun/SwiftOpenAI/tree/main/Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall) demo on the Examples section of this package.\n\n### Structured Outputs\n\n#### Documentation:\n\n- [Structured Outputs Guides](https://platform.openai.com/docs/guides/structured-outputs/structured-outputs)\n- [Examples](https://platform.openai.com/docs/guides/structured-outputs/examples)\n- [How to use](https://platform.openai.com/docs/guides/structured-outputs/how-to-use)\n- [Supported schemas](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas)\n\nMust-knows:\n\n- [All fields must be required](https://platform.openai.com/docs/guides/structured-outputs/all-fields-must-be-required), To use Structured Outputs, all fields or function parameters must be specified as required.\n- Although all fields must be required (and the model will return a value for each parameter), it is possible to emulate an optional parameter by 
using a union type with null.\n- [Objects have limitations on nesting depth and size](https://platform.openai.com/docs/guides/structured-outputs/objects-have-limitations-on-nesting-depth-and-size), A schema may have up to 100 object properties total, with up to 5 levels of nesting.\n\n- [additionalProperties](https://platform.openai.com/docs/guides/structured-outputs/additionalproperties-false-must-always-be-set-in-objects): false must always be set in objects\nadditionalProperties controls whether it is allowable for an object to contain additional keys / values that were not defined in the JSON Schema.\nStructured Outputs only supports generating specified keys / values, so we require developers to set additionalProperties: false to opt into Structured Outputs.\n- [Key ordering](https://platform.openai.com/docs/guides/structured-outputs/key-ordering), When using Structured Outputs, outputs will be produced in the same order as the ordering of keys in the schema.\n- [Recursive schemas are supported](https://platform.openai.com/docs/guides/structured-outputs/recursive-schemas-are-supported)\n\n#### How to use Structured Outputs in SwiftOpenAI\n\n1. Function calling: Structured Outputs via tools is available by setting strict: true within your function definition. This feature works with all models that support tools, including all models gpt-4-0613 and gpt-3.5-turbo-0613 and later. 
When Structured Outputs are enabled, model outputs will match the supplied tool definition.\n\nUsing this schema:\n\n```json\n{\n  \"schema\": {\n    \"type\": \"object\",\n    \"properties\": {\n      \"steps\": {\n        \"type\": \"array\",\n        \"items\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"explanation\": {\n              \"type\": \"string\"\n            },\n            \"output\": {\n              \"type\": \"string\"\n            }\n          },\n          \"required\": [\"explanation\", \"output\"],\n          \"additionalProperties\": false\n        }\n      },\n      \"final_answer\": {\n        \"type\": \"string\"\n      }\n    },\n    \"required\": [\"steps\", \"final_answer\"],\n    \"additionalProperties\": false\n  }\n}\n```\n\nYou can use the convenient `JSONSchema` object like this:\n\n```swift\n// 1: Define the Step schema object\n\nlet stepSchema = JSONSchema(\n   type: .object,\n   properties: [\n      \"explanation\": JSONSchema(type: .string),\n      \"output\": JSONSchema(\n         type: .string)\n   ],\n   required: [\"explanation\", \"output\"],\n   additionalProperties: false\n)\n\n// 2. Define the steps Array schema.\n\nlet stepsArraySchema = JSONSchema(type: .array, items: stepSchema)\n\n// 3. Define the final Answer schema.\n\nlet finalAnswerSchema = JSONSchema(type: .string)\n\n// 4. 
Define math response JSON schema.\n\nlet mathResponseSchema = JSONSchema(\n      type: .object,\n      properties: [\n         \"steps\": stepsArraySchema,\n         \"final_answer\": finalAnswerSchema\n      ],\n      required: [\"steps\", \"final_answer\"],\n      additionalProperties: false\n)\n\nlet tool = ChatCompletionParameters.Tool(\n            function: .init(\n               name: \"math_response\",\n               strict: true,\n               parameters: mathResponseSchema))\n\nlet prompt = \"solve 8x + 31 = 2\"\nlet systemMessage = ChatCompletionParameters.Message(role: .system, content: .text(\"You are a math tutor\"))\nlet userMessage = ChatCompletionParameters.Message(role: .user, content: .text(prompt))\nlet parameters = ChatCompletionParameters(\n   messages: [systemMessage, userMessage],\n   model: .gpt4o20240806,\n   tools: [tool])\n\nlet chat = try await service.startChat(parameters: parameters)\n```\n\n2. A new option for the `response_format` parameter: developers can now supply a JSON Schema via `json_schema`, a new option for the response_format parameter. This is useful when the model is not calling a tool, but rather, responding to the user in a structured way. This feature works with our newest GPT-4o models: `gpt-4o-2024-08-06`, released today, and `gpt-4o-mini-2024-07-18`. When a response_format is supplied with strict: true, model outputs will match the supplied schema.\n\nUsing the previous schema, this is how you can implement it as json schema using the convenient `JSONSchemaResponseFormat` object:\n\n```swift\n// 1: Define the Step schema object\n\nlet stepSchema = JSONSchema(\n   type: .object,\n   properties: [\n      \"explanation\": JSONSchema(type: .string),\n      \"output\": JSONSchema(\n         type: .string)\n   ],\n   required: [\"explanation\", \"output\"],\n   additionalProperties: false\n)\n\n// 2. Define the steps Array schema.\n\nlet stepsArraySchema = JSONSchema(type: .array, items: stepSchema)\n\n// 3. 
Define the final Answer schema.\n\nlet finalAnswerSchema = JSONSchema(type: .string)\n\n// 4. Define the response format JSON schema.\n\nlet responseFormatSchema = JSONSchemaResponseFormat(\n   name: \"math_response\",\n   strict: true,\n   schema: JSONSchema(\n      type: .object,\n      properties: [\n         \"steps\": stepsArraySchema,\n         \"final_answer\": finalAnswerSchema\n      ],\n      required: [\"steps\", \"final_answer\"],\n      additionalProperties: false\n   )\n)\n\nlet prompt = \"solve 8x + 31 = 2\"\nlet systemMessage = ChatCompletionParameters.Message(role: .system, content: .text(\"You are a math tutor\"))\nlet userMessage = ChatCompletionParameters.Message(role: .user, content: .text(prompt))\nlet parameters = ChatCompletionParameters(\n   messages: [systemMessage, userMessage],\n   model: .gpt4o20240806,\n   responseFormat: .jsonSchema(responseFormatSchema))\n```\n\nSwiftOpenAI Structured Outputs supports:\n\n- [x] Tools Structured output.\n- [x] Response format Structured output.\n- [x] Recursive Schema.\n- [x] Optional values Schema.\n- [ ] Pydantic models.\n\nWe don't support Pydantic models, users need to manually create Schemas using `JSONSchema` or `JSONSchemaResponseFormat` objects.\n\nPro tip 🔥 Use [iosAICodeAssistant GPT](https://chatgpt.com/g/g-qj7RuW7PY-iosai-code-assistant) to construct SwiftOpenAI schemas. 
Just paste your JSON schema and ask the GPT to create SwiftOpenAI schemas for tools and response format.\n\nFor more details visit the Demo project for [tools](https://github.com/jamesrochabrun/SwiftOpenAI/tree/main/Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructureOutputTool) and [response format](https://github.com/jamesrochabrun/SwiftOpenAI/tree/main/Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructuredOutputs).\n\n### Vision\n\n[Vision](https://platform.openai.com/docs/guides/vision) API is available for use; developers must access it through the chat completions API, specifically using the gpt-4-vision-preview model or gpt-4o model. Using any other model will not provide an image description.\n\nUsage\n```swift\nlet imageURL = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\nlet prompt = \"What is this?\"\nlet messageContent: [ChatCompletionParameters.Message.ContentType.MessageContent] = [.text(prompt), .imageUrl(.init(url: imageURL))] // Users can add as many `.imageUrl` instances to the service.\nlet parameters = ChatCompletionParameters(messages: [.init(role: .user, content: .contentArray(messageContent))], model: .gpt4o)\nlet chatCompletionObject = try await service.startStreamedChat(parameters: parameters)\n```\n\n![Simulator Screen Recording - iPhone 15 - 2023-11-09 at 17 12 06](https://github.com/jamesrochabrun/SwiftOpenAI/assets/5378604/db2cbb3b-0c80-4ac8-8fe5-dbb782b270da)\n\nFor more details about how to also upload base 64 encoded images in iOS check the [ChatVision](https://github.com/jamesrochabrun/SwiftOpenAI/tree/main/Examples/SwiftOpenAIExample/SwiftOpenAIExample/Vision) demo on the Examples section of this package.\n\n### Response\n\nOpenAI's most advanced interface for generating model responses. Supports text and image inputs, and text outputs. 
Create stateful interactions with the model, using the output of previous responses as input. Extend the model's capabilities with built-in tools for file search, web search, computer use, and more. Allow the model access to external systems and data using function calling.\n\n- Full streaming support with `responseCreateStream` method\n- Comprehensive `ResponseStreamEvent` enum covering 40+ event types\n- Enhanced `InputMessage` with `id` field for response ID tracking\n- Improved conversation state management with `previousResponseId`\n- Real-time text streaming, function calls, and tool usage events\n- Support for reasoning summaries, web search, file search, and image generation events\n- **NEW**: Support for GPT-5 models (gpt-5, gpt-5-mini, gpt-5-nano)\n- **NEW**: Verbosity parameter for controlling response detail level\n\n#### ModelResponseParameter\n\nThe `ModelResponseParameter` provides a comprehensive interface for creating model responses:\n\n```swift\nlet parameters = ModelResponseParameter(\n    input: .text(\"What is the answer to life, the universe, and everything?\"),\n    model: .gpt5,  // Support for GPT-5, GPT-5-mini, GPT-5-nano\n    text: TextConfiguration(\n        format: .text,\n        verbosity: \"low\"  // NEW: Control response verbosity (\"low\", \"medium\", \"high\")\n    ),\n    temperature: 0.7\n)\n\nlet response = try await service.responseCreate(parameters)\n```\n\n#### Available GPT-5 Models\n\n```swift\npublic enum Model {\n    case gpt5        // Complex reasoning, broad world knowledge, and code-heavy or multi-step agentic tasks\n    case gpt5Mini    // Cost-optimized reasoning and chat; balances speed, cost, and capability\n    case gpt5Nano    // High-throughput tasks, especially simple instruction-following or classification\n    // ... 
other models\n}\n```\n\n#### TextConfiguration with Verbosity\n\n```swift\n// Create a text configuration with verbosity control\nlet textConfig = TextConfiguration(\n    format: .text,       // Can be .text, .jsonObject, or .jsonSchema\n    verbosity: \"medium\"  // Controls response detail level\n)\n```\n\nRelated guides:\n\n- [Quickstart](https://platform.openai.com/docs/quickstart?api-mode=responses)\n- [Text inputs and outputs](https://platform.openai.com/docs/guides/text?api-mode=responses)\n- [Image inputs](https://platform.openai.com/docs/guides/images?api-mode=responses)\n- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses)\n- [Function calling](https://platform.openai.com/docs/guides/function-calling?api-mode=responses)\n- [Conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses)\n- [Extend the models with tools](https://platform.openai.com/docs/guides/tools?api-mode=responses)\n\nParameters\n```swift\n/// [Creates a model response.](https://platform.openai.com/docs/api-reference/responses/create)\npublic struct ModelResponseParameter: Codable {\n\n   /// Text, image, or file inputs to the model, used to generate a response.\n   /// A text input to the model, equivalent to a text input with the user role.\n   /// A list of one or many input items to the model, containing different content types.\n   public var input: InputType\n\n   /// Model ID used to generate the response, like gpt-4o or o1. OpenAI offers a wide range of models with\n   /// different capabilities, performance characteristics, and price points.\n   /// Refer to the model guide to browse and compare available models.\n   public var model: String\n\n   /// Specify additional output data to include in the model response. 
Currently supported values are:\n   /// file_search_call.results : Include the search results of the file search tool call.\n   /// message.input_image.image_url : Include image urls from the input message.\n   /// computer_call_output.output.image_url : Include image urls from the computer call output.\n   public var include: [String]?\n\n   /// Inserts a system (or developer) message as the first item in the model's context.\n   /// When used along with previous_response_id, the instructions from a previous response will not be\n   /// carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses.\n   public var instructions: String?\n\n   /// An upper bound for the number of tokens that can be generated for a response, including visible output tokens\n   /// and reasoning tokens.\n   public var maxOutputTokens: Int?\n\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information\n   /// about the object in a structured format, and querying for objects via API or the dashboard.\n   /// Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.\n   public var metadata: [String: String]?\n\n   /// Whether to allow the model to run tool calls in parallel.\n   /// Defaults to true\n   public var parallelToolCalls: Bool?\n\n   /// The unique ID of the previous response to the model. 
Use this to create multi-turn conversations.\n   /// Learn more about conversation state.\n   public var previousResponseId: String?\n\n   /// o-series models only\n   /// Configuration options for reasoning models.\n   public var reasoning: Reasoning?\n\n   /// Whether to store the generated model response for later retrieval via API.\n   /// Defaults to true\n   public var store: Bool?\n\n   /// If set to true, the model response data will be streamed to the client as it is generated using server-sent events.\n   public var stream: Bool?\n\n   /// What sampling temperature to use, between 0 and 2.\n   /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n   /// We generally recommend altering this or top_p but not both.\n   /// Defaults to 1\n   public var temperature: Double?\n\n   /// Configuration options for a text response from the model. Can be plain text or structured JSON data.\n   public var text: TextConfiguration?\n\n   /// How the model should select which tool (or tools) to use when generating a response.\n   /// See the tools parameter to see how to specify which tools the model can call.\n   public var toolChoice: ToolChoiceMode?\n\n   /// An array of tools the model may call while generating a response. 
You can specify which tool to use by setting the tool_choice parameter.\n   public var tools: [Tool]?\n\n   /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n   /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n   /// We generally recommend altering this or temperature but not both.\n   /// Defaults to 1\n   public var topP: Double?\n\n   /// The truncation strategy to use for the model response.\n   /// Defaults to disabled\n   public var truncation: String?\n\n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n   public var user: String?\n}\n```\n\n[The Response object](https://platform.openai.com/docs/api-reference/responses/object)\n\n```swift\n/// The Response object returned when retrieving a model response\npublic struct ResponseModel: Decodable {\n\n   /// Unix timestamp (in seconds) of when this Response was created.\n   public let createdAt: Int\n\n   /// An error object returned when the model fails to generate a Response.\n   public let error: ErrorObject?\n\n   /// Unique identifier for this Response.\n   public let id: String\n\n   /// Details about why the response is incomplete.\n   public let incompleteDetails: IncompleteDetails?\n\n   /// Inserts a system (or developer) message as the first item in the model's context.\n   public let instructions: String?\n\n   /// An upper bound for the number of tokens that can be generated for a response, including visible output tokens\n   /// and reasoning tokens.\n   public let maxOutputTokens: Int?\n\n   /// Set of 16 key-value pairs that can be attached to an object.\n   public let metadata: [String: String]\n\n   /// Model ID used to generate the response, like gpt-4o or o1.\n   public let model: String\n\n   /// The object type of this resource - always set to response.\n   public let object: String\n\n   
/// An array of content items generated by the model.\n   public let output: [OutputItem]\n\n   /// Whether to allow the model to run tool calls in parallel.\n   public let parallelToolCalls: Bool\n\n   /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n   public let previousResponseId: String?\n\n   /// Configuration options for reasoning models.\n   public let reasoning: Reasoning?\n\n   /// The status of the response generation. One of completed, failed, in_progress, or incomplete.\n   public let status: String\n\n   /// What sampling temperature to use, between 0 and 2.\n   public let temperature: Double?\n\n   /// Configuration options for a text response from the model.\n   public let text: TextConfiguration\n\n   /// How the model should select which tool (or tools) to use when generating a response.\n   public let toolChoice: ToolChoiceMode\n\n   /// An array of tools the model may call while generating a response.\n   public let tools: [Tool]\n\n   /// An alternative to sampling with temperature, called nucleus sampling.\n   public let topP: Double?\n\n   /// The truncation strategy to use for the model response.\n   public let truncation: String?\n\n   /// Represents token usage details.\n   public let usage: Usage?\n\n   /// A unique identifier representing your end-user.\n   public let user: String?\n   \n   /// Convenience property that aggregates all text output from output_text items in the output array.\n   /// Similar to the outputText property in Python and JavaScript SDKs.\n   public var outputText: String? 
\n}\n```\n\nInput Types\n```swift\n// InputType represents the input to the Response API\npublic enum InputType: Codable {\n    case string(String)  // Simple text input\n    case array([InputItem])  // Array of input items for complex conversations\n}\n\n// InputItem represents different types of input\npublic enum InputItem: Codable {\n    case message(InputMessage)  // User, assistant, system messages\n    case functionToolCall(FunctionToolCall)  // Function calls\n    case functionToolCallOutput(FunctionToolCallOutput)  // Function outputs\n    // ... other input types\n}\n\n// InputMessage structure with support for response IDs\npublic struct InputMessage: Codable {\n    public let role: String  // \"user\", \"assistant\", \"system\"\n    public let content: MessageContent\n    public let type: String?  // Always \"message\"\n    public let status: String?  // \"completed\" for assistant messages\n    public let id: String?  // Response ID for assistant messages\n}\n\n// MessageContent can be text or array of content items\npublic enum MessageContent: Codable {\n    case text(String)\n    case array([ContentItem])  // For multimodal content\n}\n```\n\nUsage\n\nSimple text input\n```swift\nlet prompt = \"What is the capital of France?\"\nlet parameters = ModelResponseParameter(input: .string(prompt), model: .gpt4o)\nlet response = try await service.responseCreate(parameters)\n```\n\nText input with reasoning\n```swift\nlet prompt = \"How much wood would a woodchuck chuck?\"\nlet parameters = ModelResponseParameter(\n    input: .string(prompt),\n    model: .o3Mini,\n    reasoning: Reasoning(effort: \"high\")\n)\nlet response = try await service.responseCreate(parameters)\n```\n\nImage input\n```swift\nlet textPrompt = \"What is in this image?\"\nlet imageUrl = \"https://example.com/path/to/image.jpg\"\nlet imageContent = ContentItem.imageUrl(ImageUrlContent(imageUrl: imageUrl))\nlet textContent = ContentItem.text(TextContent(text: textPrompt))\nlet message = 
InputItem(role: \"user\", content: [textContent, imageContent])\nlet parameters = ModelResponseParameter(input: .array([message]), model: .gpt4o)\nlet response = try await service.responseCreate(parameters)\n```\n\nUsing tools (web search)\n```swift\nlet prompt = \"What was a positive news story from today?\"\nlet parameters = ModelResponseParameter(\n    input: .string(prompt),\n    model: .gpt4o,\n    tools: [Tool(type: \"web_search_preview\", function: nil)]\n)\nlet response = try await service.responseCreate(parameters)\n```\n\nUsing tools (file search)\n```swift\nlet prompt = \"What are the key points in the document?\"\nlet parameters = ModelResponseParameter(\n    input: .string(prompt),\n    model: .gpt4o,\n    tools: [\n        Tool(\n            type: \"file_search\",\n            function: ChatCompletionParameters.ChatFunction(\n                name: \"file_search\",\n                strict: false,\n                description: \"Search through files\",\n                parameters: JSONSchema(\n                    type: .object,\n                    properties: [\n                        \"vector_store_ids\": JSONSchema(\n                            type: .array,\n                            items: JSONSchema(type: .string)\n                        ),\n                        \"max_num_results\": JSONSchema(type: .integer)\n                    ],\n                    required: [\"vector_store_ids\"],\n                    additionalProperties: false\n                )\n            )\n        )\n    ]\n)\nlet response = try await service.responseCreate(parameters)\n```\n\nFunction calling\n```swift\nlet prompt = \"What is the weather like in Boston today?\"\nlet parameters = ModelResponseParameter(\n    input: .string(prompt),\n    model: .gpt4o,\n    tools: [\n        Tool(\n            type: \"function\",\n            function: ChatCompletionParameters.ChatFunction(\n                name: \"get_current_weather\",\n                strict: false,\n         
       description: \"Get the current weather in a given location\",\n                parameters: JSONSchema(\n                    type: .object,\n                    properties: [\n                        \"location\": JSONSchema(\n                            type: .string,\n                            description: \"The city and state, e.g. San Francisco, CA\"\n                        ),\n                        \"unit\": JSONSchema(\n                            type: .string,\n                            enum: [\"celsius\", \"fahrenheit\"]\n                        )\n                    ],\n                    required: [\"location\", \"unit\"],\n                    additionalProperties: false\n                )\n            )\n        )\n    ],\n    toolChoice: .auto\n)\nlet response = try await service.responseCreate(parameters)\n```\n\nRetrieving a response\n```swift\nlet responseId = \"resp_abc123\"\nlet response = try await service.responseModel(id: responseId)\n```\n\n#### Streaming Responses\n\nThe Response API supports streaming responses using Server-Sent Events (SSE). This allows you to receive partial responses as they are generated, enabling real-time UI updates and better user experience.\n\nStream Events\n```swift\n// The ResponseStreamEvent enum represents all possible streaming events\npublic enum ResponseStreamEvent: Decodable {\n  case responseCreated(ResponseCreatedEvent)\n  case responseInProgress(ResponseInProgressEvent)\n  case responseCompleted(ResponseCompletedEvent)\n  case responseFailed(ResponseFailedEvent)\n  case outputItemAdded(OutputItemAddedEvent)\n  case outputTextDelta(OutputTextDeltaEvent)\n  case outputTextDone(OutputTextDoneEvent)\n  case functionCallArgumentsDelta(FunctionCallArgumentsDeltaEvent)\n  case reasoningSummaryTextDelta(ReasoningSummaryTextDeltaEvent)\n  case error(ErrorEvent)\n  // ... 
and many more event types\n}\n```\n\nBasic Streaming Example\n```swift\n// Enable streaming by setting stream: true\nlet parameters = ModelResponseParameter(\n    input: .string(\"Tell me a story\"),\n    model: .gpt4o,\n    stream: true\n)\n\n// Create a stream\nlet stream = try await service.responseCreateStream(parameters)\n\n// Process events as they arrive\nfor try await event in stream {\n    switch event {\n    case .outputTextDelta(let delta):\n        // Append text chunk to your UI\n        print(delta.delta, terminator: \"\")\n        \n    case .responseCompleted(let completed):\n        // Response is complete\n        print(\"\\nResponse ID: \\(completed.response.id)\")\n        \n    case .error(let error):\n        // Handle errors\n        print(\"Error: \\(error.message)\")\n        \n    default:\n        // Handle other events as needed\n        break\n    }\n}\n```\n\nStreaming with Conversation State\n```swift\n// Maintain conversation continuity with previousResponseId\nvar previousResponseId: String? 
= nil\nvar messages: [(role: String, content: String)] = []\n\n// First message\nlet firstParams = ModelResponseParameter(\n    input: .string(\"Hello!\"),\n    model: .gpt4o,\n    stream: true\n)\n\nlet firstStream = try await service.responseCreateStream(firstParams)\nvar firstResponse = \"\"\n\nfor try await event in firstStream {\n    switch event {\n    case .outputTextDelta(let delta):\n        firstResponse += delta.delta\n        \n    case .responseCompleted(let completed):\n        previousResponseId = completed.response.id\n        messages.append((role: \"user\", content: \"Hello!\"))\n        messages.append((role: \"assistant\", content: firstResponse))\n        \n    default:\n        break\n    }\n}\n\n// Follow-up message with conversation context\nvar inputArray: [InputItem] = []\n\n// Add conversation history\nfor message in messages {\n    inputArray.append(.message(InputMessage(\n        role: message.role,\n        content: .text(message.content)\n    )))\n}\n\n// Add new user message\ninputArray.append(.message(InputMessage(\n    role: \"user\",\n    content: .text(\"How are you?\")\n)))\n\nlet followUpParams = ModelResponseParameter(\n    input: .array(inputArray),\n    model: .gpt4o,\n    previousResponseId: previousResponseId,\n    stream: true\n)\n\nlet followUpStream = try await service.responseCreateStream(followUpParams)\n// Process the follow-up stream...\n```\n\nStreaming with Tools and Function Calling\n```swift\nlet parameters = ModelResponseParameter(\n    input: .string(\"What's the weather in San Francisco?\"),\n    model: .gpt4o,\n    tools: [\n        Tool(\n            type: \"function\",\n            function: ChatCompletionParameters.ChatFunction(\n                name: \"get_weather\",\n                description: \"Get current weather\",\n                parameters: JSONSchema(\n                    type: .object,\n                    properties: [\n                        \"location\": JSONSchema(type: .string)\n         
           ],\n                    required: [\"location\"]\n                )\n            )\n        )\n    ],\n    stream: true\n)\n\nlet stream = try await service.responseCreateStream(parameters)\nvar functionCallArguments = \"\"\n\nfor try await event in stream {\n    switch event {\n    case .functionCallArgumentsDelta(let delta):\n        // Accumulate function call arguments\n        functionCallArguments += delta.delta\n        \n    case .functionCallArgumentsDone(let done):\n        // Function call is complete\n        print(\"Function: \\(done.name)\")\n        print(\"Arguments: \\(functionCallArguments)\")\n        \n    case .outputTextDelta(let delta):\n        // Regular text output\n        print(delta.delta, terminator: \"\")\n        \n    default:\n        break\n    }\n}\n```\n\nCanceling a Stream\n```swift\n// Streams can be canceled using Swift's task cancellation\nlet streamTask = Task {\n    let stream = try await service.responseCreateStream(parameters)\n    \n    for try await event in stream {\n        // Check if task is cancelled\n        if Task.isCancelled {\n            break\n        }\n        \n        // Process events...\n    }\n}\n\n// Cancel the stream when needed\nstreamTask.cancel()\n```\n\nComplete Streaming Implementation Example\n```swift\n@MainActor\n@Observable\nclass ResponseStreamProvider {\n    var messages: [Message] = []\n    var isStreaming = false\n    var error: String?\n    \n    private let service: OpenAIService\n    private var previousResponseId: String?\n    private var streamTask: Task<Void, Never>?\n    \n    init(service: OpenAIService) {\n        self.service = service\n    }\n    \n    func sendMessage(_ text: String) {\n        streamTask?.cancel()\n        \n        // Add user message\n        messages.append(Message(role: .user, content: text))\n        \n        // Start streaming\n        streamTask = Task {\n            await streamResponse(for: text)\n        }\n    }\n    \n    private 
func streamResponse(for userInput: String) async {\n        isStreaming = true\n        error = nil\n        \n        // Create streaming message placeholder\n        let streamingMessage = Message(role: .assistant, content: \"\", isStreaming: true)\n        messages.append(streamingMessage)\n        \n        do {\n            // Build conversation history\n            var inputArray: [InputItem] = []\n            for message in messages.dropLast(2) {\n                inputArray.append(.message(InputMessage(\n                    role: message.role.rawValue,\n                    content: .text(message.content)\n                )))\n            }\n            inputArray.append(.message(InputMessage(\n                role: \"user\",\n                content: .text(userInput)\n            )))\n            \n            let parameters = ModelResponseParameter(\n                input: .array(inputArray),\n                model: .gpt4o,\n                previousResponseId: previousResponseId,\n                stream: true\n            )\n            \n            let stream = try await service.responseCreateStream(parameters)\n            var accumulatedText = \"\"\n            \n            for try await event in stream {\n                guard !Task.isCancelled else { break }\n                \n                switch event {\n                case .outputTextDelta(let delta):\n                    accumulatedText += delta.delta\n                    updateStreamingMessage(with: accumulatedText)\n                    \n                case .responseCompleted(let completed):\n                    previousResponseId = completed.response.id\n                    finalizeStreamingMessage(with: accumulatedText, responseId: completed.response.id)\n                    \n                case .error(let errorEvent):\n                    throw APIError.requestFailed(description: errorEvent.message)\n                    \n                default:\n                    break\n            
    }\n            }\n        } catch {\n            self.error = error.localizedDescription\n            messages.removeLast() // Remove streaming message on error\n        }\n        \n        isStreaming = false\n    }\n    \n    private func updateStreamingMessage(with content: String) {\n        if let index = messages.lastIndex(where: { $0.isStreaming }) {\n            messages[index].content = content\n        }\n    }\n    \n    private func finalizeStreamingMessage(with content: String, responseId: String) {\n        if let index = messages.lastIndex(where: { $0.isStreaming }) {\n            messages[index].content = content\n            messages[index].isStreaming = false\n            messages[index].responseId = responseId\n        }\n    }\n}\n```\n\n### Embeddings\nParameters\n```swift\n/// [Creates](https://platform.openai.com/docs/api-reference/embeddings/create) an embedding vector representing the input text.\npublic struct EmbeddingParameter: Encodable {\n   \n   /// ID of the model to use. You can use the List models API to see all of your available models, or see our [Model overview ](https://platform.openai.com/docs/models/overview) for descriptions of them.\n   let model: String\n   /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or an array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for text-embedding-ada-002) and cannot be an empty string. [How to Count Tokens with `tiktoken`](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)\n   let input: String\n   \n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more.](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids)\n   let user: String?\n   \n   public enum Model: String {\n      case textEmbeddingAda002 = \"text-embedding-ada-002\"\n   }\n   \n   public init(\n      model: Model = .textEmbeddingAda002,\n      input: String,\n      user: String? = nil)\n   {\n      self.model = model.rawValue\n      self.input = input\n      self.user = user\n   }\n}\n```\nResponse\n```swift\n/// [Represents an embedding vector returned by embedding endpoint.](https://platform.openai.com/docs/api-reference/embeddings/object)\npublic struct EmbeddingObject: Decodable {\n   \n   /// The object type, which is always \"embedding\".\n   public let object: String\n   /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.[https://platform.openai.com/docs/guides/embeddings]\n   public let embedding: [Float]\n   /// The index of the embedding in the list of embeddings.\n   public let index: Int\n}\n```\n\nUsage\n```swift\nlet prompt = \"Hello world.\"\nlet parameters = EmbeddingParameter(input: prompt)\nlet embeddingObjects = try await service.createEmbeddings(parameters: parameters).data\n```\n\n### Fine-tuning\nParameters\n```swift\n/// [Creates a job](https://platform.openai.com/docs/api-reference/fine-tuning/create) that fine-tunes a specified model from a given dataset.\n///Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\npublic struct FineTuningJobParameters: Encodable {\n   \n   /// The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/models/overview).\n   let model: String\n   /// The ID of an uploaded file that contains training data.\n   /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file.\n   /// Your dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with the purpose fine-tune.\n   /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.\n   let trainingFile: String\n   /// The hyperparameters used for the fine-tuning job.\n   let hyperparameters: HyperParameters?\n   /// A string of up to 18 characters that will be added to your fine-tuned model name.\n   /// For example, a suffix of \"custom-model-name\" would produce a model name like ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel.\n   /// Defaults to null.\n   let suffix: String?\n   /// The ID of an uploaded file that contains validation data.\n   /// If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files.\n   /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose fine-tune.\n   /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.\n   let validationFile: String?\n   /// A list of integrations to enable for your fine-tuning job.\n   let integrations: [Integration]?\n   /// The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. 
If a seed is not specified, one will be generated for you.\n   let seed: Int?\n   \n   /// Fine-tuning is [currently available](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned) for the following models:\n   /// gpt-3.5-turbo-0613 (recommended)\n   /// babbage-002\n   /// davinci-002\n   /// OpenAI expects gpt-3.5-turbo to be the right model for most users in terms of results and ease of use, unless you are migrating a legacy fine-tuned model.\n   public enum Model: String {\n      case gpt35 = \"gpt-3.5-turbo-0613\" /// recommended\n      case babbage002 = \"babbage-002\"\n      case davinci002 = \"davinci-002\"\n   }\n   \n   public struct HyperParameters: Encodable {\n      /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n      /// Defaults to auto.\n      let nEpochs: Int?\n      \n      public init(\n         nEpochs: Int?)\n      {\n         self.nEpochs = nEpochs\n      }\n   }\n   \n   public init(\n      model: Model,\n      trainingFile: String,\n      hyperparameters: HyperParameters? = nil,\n      suffix: String? = nil,\n      validationFile: String? 
= nil)\n   {\n      self.model = model.rawValue\n      self.trainingFile = trainingFile\n      self.hyperparameters = hyperparameters\n      self.suffix = suffix\n      self.validationFile = validationFile\n   }\n}\n```\nResponse\n```swift\n/// The fine_tuning.job object represents a [fine-tuning job](https://platform.openai.com/docs/api-reference/fine-tuning/object) that has been created through the API.\npublic struct FineTuningJobObject: Decodable {\n   \n   /// The object identifier, which can be referenced in the API endpoints.\n   public let id: String\n   /// The Unix timestamp (in seconds) for when the fine-tuning job was created.\n   public let createdAt: Int\n  /// For fine-tuning jobs that have failed, this will contain more information on the cause of the failure.\n   public let error: OpenAIErrorResponse.Error?\n   /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running.\n   public let fineTunedModel: String?\n   /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running.\n   public let finishedAt: Int?\n   /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)  for more details.\n   public let hyperparameters: HyperParameters\n   /// The base model that is being fine-tuned.\n   public let model: String\n   /// The object type, which is always \"fine_tuning.job\".\n   public let object: String\n   /// The organization that owns the fine-tuning job.\n   public let organizationId: String\n   /// The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n   public let resultFiles: [String]\n   /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.\n   public let status: String\n   /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running.\n   public let trainedTokens: Int?\n   \n   /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n   public let trainingFile: String\n   /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n   public let validationFile: String?\n   \n   public enum Status: String {\n      case validatingFiles = \"validating_files\"\n      case queued\n      case running\n      case succeeded\n      case failed\n      case cancelled\n   }\n   \n   public struct HyperParameters: Decodable {\n      /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. \"auto\" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs.\n      public let nEpochs: IntOrStringValue\n   }\n}\n```\n\nUsage\nList fine-tuning jobs\n```swift\nlet fineTuningJobs = try await service.listFineTuningJobs()\n```\nCreate fine-tuning job\n```swift\nlet trainingFileID = \"file-Atc9okK0MOuQwQzDJCZXnrh6\" // The id of the file that has been uploaded using the `Files` API. 
https://platform.openai.com/docs/api-reference/fine-tuning/create#fine-tuning/create-training_file\nlet parameters = FineTuningJobParameters(model: .gpt35, trainingFile: trainingFileID)\nlet fineTuningJob = try await service.createFineTuningJob(parameters: parameters)\n```\nRetrieve fine-tuning job\n```swift\nlet fineTuningJobID = \"ftjob-abc123\"\nlet fineTuningJob = try await service.retrieveFineTuningJob(id: fineTuningJobID)\n```\nCancel fine-tuning job\n```swift\nlet fineTuningJobID = \"ftjob-abc123\"\nlet canceledFineTuningJob = try await service.cancelFineTuningJobWith(id: fineTuningJobID)\n```\n#### Fine-tuning job event object\nResponse\n```swift\n/// [Fine-tuning job event object](https://platform.openai.com/docs/api-reference/fine-tuning/event-object)\npublic struct FineTuningJobEventObject: Decodable {\n   \n   public let id: String\n   \n   public let createdAt: Int\n   \n   public let level: String\n   \n   public let message: String\n   \n   public let object: String\n   \n   public let type: String?\n   \n   public let data: Data?\n   \n   public struct Data: Decodable {\n      public let step: Int\n      public let trainLoss: Double\n      public let trainMeanTokenAccuracy: Double\n   }\n}\n```\nUsage\n```swift\nlet fineTuningJobID = \"ftjob-abc123\"\nlet jobEvents = try await service.listFineTuningEventsForJobWith(id: fineTuningJobID, after: nil, limit: nil).data\n```\n\n### Batch\nParameters\n```swift\npublic struct BatchParameter: Encodable {\n   \n   /// The ID of an uploaded file that contains requests for the new batch.\n   /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file.\n   /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), and must be uploaded with the purpose batch.\n   let inputFileID: String\n   /// The endpoint to be used for all requests in the batch. 
Currently only /v1/chat/completions is supported.\n   let endpoint: String\n   /// The time frame within which the batch should be processed. Currently only 24h is supported.\n   let completionWindow: String\n   /// Optional custom metadata for the batch.\n   let metadata: [String: String]?\n   \n   enum CodingKeys: String, CodingKey {\n      case inputFileID = \"input_file_id\"\n      case endpoint\n      case completionWindow = \"completion_window\"\n      case metadata\n   }\n}\n```\nResponse\n```swift\npublic struct BatchObject: Decodable {\n   \n   let id: String\n   /// The object type, which is always batch.\n   let object: String\n   /// The OpenAI API endpoint used by the batch.\n   let endpoint: String\n   \n   let errors: Error\n   /// The ID of the input file for the batch.\n   let inputFileID: String\n   /// The time frame within which the batch should be processed.\n   let completionWindow: String\n   /// The current status of the batch.\n   let status: String\n   /// The ID of the file containing the outputs of successfully executed requests.\n   let outputFileID: String\n   /// The ID of the file containing the outputs of requests with errors.\n   let errorFileID: String\n   /// The Unix timestamp (in seconds) for when the batch was created.\n   let createdAt: Int\n   /// The Unix timestamp (in seconds) for when the batch started processing.\n   let inProgressAt: Int\n   /// The Unix timestamp (in seconds) for when the batch will expire.\n   let expiresAt: Int\n   /// The Unix timestamp (in seconds) for when the batch started finalizing.\n   let finalizingAt: Int\n   /// The Unix timestamp (in seconds) for when the batch was completed.\n   let completedAt: Int\n   /// The Unix timestamp (in seconds) for when the batch failed.\n   let failedAt: Int\n   /// The Unix timestamp (in seconds) for when the batch expired.\n   let expiredAt: Int\n   /// The Unix timestamp (in seconds) for when the batch started cancelling.\n   let cancellingAt: Int\n   /// 
The Unix timestamp (in seconds) for when the batch was cancelled.\n   let cancelledAt: Int\n   /// The request counts for different statuses within the batch.\n   let requestCounts: RequestCount\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n   let metadata: [String: String]\n   \n   public struct Error: Decodable {\n      \n      let object: String\n      let data: [Data]\n\n      public struct Data: Decodable {\n         \n         /// An error code identifying the error type.\n         let code: String\n         /// A human-readable message providing more details about the error.\n         let message: String\n         /// The name of the parameter that caused the error, if applicable.\n         let param: String?\n         /// The line number of the input file where the error occurred, if applicable.\n         let line: Int?\n      }\n   }\n   \n   public struct RequestCount: Decodable {\n      \n      /// Total number of requests in the batch.\n      let total: Int\n      /// Number of requests that have been completed successfully.\n      let completed: Int\n      /// Number of requests that have failed.\n      let failed: Int\n   }\n}\n```\nUsage\n\nCreate batch\n```swift\nlet inputFileID = \"file-abc123\"\nlet endpoint = \"/v1/chat/completions\"\nlet completionWindow = \"24h\"\nlet parameters = BatchParameter(inputFileID: inputFileID, endpoint: endpoint, completionWindow: completionWindow, metadata: nil)\nlet batch = try await service.createBatch(parameters: parameters)\n```\n\nRetrieve batch\n```swift\nlet batchID = \"batch_abc123\"\nlet batch = try await service.retrieveBatch(id: batchID)\n```\n\nCancel batch\n```swift\nlet batchID = \"batch_abc123\"\nlet batch = try await service.cancelBatch(id: batchID)\n```\n\nList batch\n```swift\nlet batches 
= try await service.listBatch(after: nil, limit: nil)\n```\n\n### Files\nParameters\n```swift\n/// [Upload a file](https://platform.openai.com/docs/api-reference/files/create) that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.\npublic struct FileParameters: Encodable {\n   \n   /// The name of the file asset is not documented in OpenAI's official documentation; however, it is essential for constructing the multipart request.\n   let fileName: String\n   /// The file object (not file name) to be uploaded.\n   /// If the purpose is set to \"fine-tune\", the file will be used for fine-tuning.\n   let file: Data\n   /// The intended purpose of the uploaded file.\n   /// Use \"fine-tune\" for [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning.\n   let purpose: String\n   \n   public init(\n      fileName: String,\n      file: Data,\n      purpose: String)\n   {\n      self.fileName = fileName\n      self.file = file\n      self.purpose = purpose\n   }\n}\n```\nResponse\n```swift\n/// The [File object](https://platform.openai.com/docs/api-reference/files/object) represents a document that has been uploaded to OpenAI.\npublic struct FileObject: Decodable {\n   \n   /// The file identifier, which can be referenced in the API endpoints.\n   public let id: String\n   /// The size of the file in bytes.\n   public let bytes: Int\n   /// The Unix timestamp (in seconds) for when the file was created.\n   public let createdAt: Int\n   /// The name of the file.\n   public let filename: String\n   /// The object type, which is always \"file\".\n   public let object: String\n   /// The intended purpose of the file. 
Currently, only \"fine-tune\" is supported.\n   public let purpose: String\n   /// The current status of the file, which can be either uploaded, processed, pending, error, deleting or deleted.\n   public let status: String\n   /// Additional details about the status of the file. If the file is in the error state, this will include a message describing the error.\n   public let statusDetails: String?\n   \n   public enum Status: String {\n      case uploaded\n      case processed\n      case pending\n      case error\n      case deleting\n      case deleted\n   }\n\n   public init(\n      id: String,\n      bytes: Int,\n      createdAt: Int,\n      filename: String,\n      object: String,\n      purpose: String,\n      status: Status,\n      statusDetails: String?)\n   {\n      self.id = id\n      self.bytes = bytes\n      self.createdAt = createdAt\n      self.filename = filename\n      self.object = object\n      self.purpose = purpose\n      self.status = status.rawValue\n      self.statusDetails = statusDetails\n   }\n}\n```\nUsage\nList files\n```swift\nlet files = try await service.listFiles().data\n```\n### Upload file\n```swift\nlet fileName = \"worldCupData.jsonl\"\nlet data = Data(contentsOfURL:_) // Data retrieved from the file named \"worldCupData.jsonl\".\nlet parameters = FileParameters(fileName: \"WorldCupData\", file: data, purpose: \"fine-tune\") // Important: make sure to provide a file name.\nlet uploadedFile =  try await service.uploadFile(parameters: parameters) \n```\nDelete file\n```swift\nlet fileID = \"file-abc123\"\nlet deletedStatus = try await service.deleteFileWith(id: fileID)\n```\nRetrieve file\n```swift\nlet fileID = \"file-abc123\"\nlet retrievedFile = try await service.retrieveFileWith(id: fileID)\n```\nRetrieve file content\n```swift\nlet fileID = \"file-abc123\"\nlet fileContent = try await service.retrieveContentForFileWith(id: fileID)\n```\n\n### Images\n\nThis library supports latest OpenAI Image generation\n\n- Parameters 
Create\n\n```swift\n/// 'Create Image':\n/// https://platform.openai.com/docs/api-reference/images/create\npublic struct CreateImageParameters: Encodable {\n   \n   /// A text description of the desired image(s).\n   /// The maximum length is 32000 characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.\n   public let prompt: String\n   \n   // MARK: - Optional properties\n   \n   /// Allows to set transparency for the background of the generated image(s).\n   /// This parameter is only supported for `gpt-image-1`.\n   /// Must be one of `transparent`, `opaque` or `auto` (default value).\n   /// When `auto` is used, the model will automatically determine the best background for the image.\n   /// If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`.\n   public let background: Background?\n   \n   /// The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`.\n   /// Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.\n   public let model: Model?\n   \n   /// Control the content-moderation level for images generated by `gpt-image-1`.\n   /// Must be either low for less restrictive filtering or auto (default value).\n   public let moderation: Moderation?\n   \n   /// The number of images to generate. Must be between 1 and 10. 
For `dall-e-3`, only `n=1` is supported.\n   /// Defaults to `1`\n   public let n: Int?\n   \n   /// The compression level (0-100%) for the generated images.\n   /// This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.\n   public let outputCompression: Int?\n   \n   /// The format in which the generated images are returned.\n   /// This parameter is only supported for `gpt-image-1`.\n   /// Must be one of `png`, `jpeg`, or `webp`.\n   public let outputFormat: OutputFormat?\n   \n   /// The quality of the image that will be generated.\n   /// - `auto` (default value) will automatically select the best quality for the given model.\n   /// - `high`, `medium` and `low` are supported for gpt-image-1.\n   /// - `hd` and `standard` are supported for dall-e-3.\n   /// - `standard` is the only option for dall-e-2.\n   public let quality: Quality?\n   \n   /// The format in which generated images with dall-e-2 and dall-e-3 are returned.\n   /// Must be one of `url` or `b64_json`.\n   /// URLs are only valid for 60 minutes after the image has been generated.\n   /// This parameter isn't supported for `gpt-image-1` which will always return base64-encoded images.\n   public let responseFormat: ResponseFormat?\n   \n   /// The size of the generated images.\n   /// - For gpt-image-1, one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default value)\n   /// - For dall-e-3, one of `1024x1024`, `1792x1024`, or `1024x1792`\n   /// - For dall-e-2, one of `256x256`, `512x512`, or `1024x1024`\n   public let size: String?\n   \n   /// The style of the generated images.\n   /// This parameter is only supported for `dall-e-3`.\n   /// Must be one of `vivid` or `natural`.\n   /// Vivid causes the model to lean towards generating hyper-real and dramatic images.\n   /// Natural causes the model to produce more natural, less hyper-real looking images.\n   public let style: Style?\n   \n   /// A unique 
identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n   public let user: String?\n}\n```\n\n- Parameters Edit\n\n```swift\n/// Creates an edited or extended image given one or more source images and a prompt.\n/// This endpoint only supports `gpt-image-1` and `dall-e-2`.\npublic struct CreateImageEditParameters: Encodable {\n   \n   /// The image(s) to edit.\n   /// For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 25MB.\n   /// For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB.\n   let image: [Data]\n   \n   /// A text description of the desired image(s).\n   /// The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.\n   let prompt: String\n   \n   /// An additional image whose fully transparent areas indicate where `image` should be edited.\n   /// If there are multiple images provided, the mask will be applied on the first image.\n   /// Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.\n   let mask: Data?\n   \n   /// The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported.\n   /// Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.\n   let model: String?\n   \n   /// The number of images to generate. 
Must be between 1 and 10.\n   /// Defaults to 1.\n   let n: Int?\n   \n   /// The quality of the image that will be generated.\n   /// `high`, `medium` and `low` are only supported for `gpt-image-1`.\n   /// `dall-e-2` only supports `standard` quality.\n   /// Defaults to `auto`.\n   let quality: String?\n   \n   /// The format in which the generated images are returned.\n   /// Must be one of `url` or `b64_json`.\n   /// URLs are only valid for 60 minutes after the image has been generated.\n   /// This parameter is only supported for `dall-e-2`, as `gpt-image-1` will always return base64-encoded images.\n   let responseFormat: String?\n   \n   /// The size of the generated images.\n   /// Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`,\n   /// and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.\n   let size: String?\n   \n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n   let user: String?\n}\n```\n\n- Parameters Variations\n\n```swift\n/// Creates a variation of a given image.\n/// This endpoint only supports `dall-e-2`.\npublic struct CreateImageVariationParameters: Encodable {\n   \n   /// The image to use as the basis for the variation(s).\n   /// Must be a valid PNG file, less than 4MB, and square.\n   let image: Data\n   \n   /// The model to use for image generation. Only `dall-e-2` is supported at this time.\n   /// Defaults to `dall-e-2`.\n   let model: String?\n   \n   /// The number of images to generate. 
Must be between 1 and 10.\n   /// Defaults to 1.\n   let n: Int?\n   \n   /// The format in which the generated images are returned.\n   /// Must be one of `url` or `b64_json`.\n   /// URLs are only valid for 60 minutes after the image has been generated.\n   /// Defaults to `url`.\n   let responseFormat: String?\n   \n   /// The size of the generated images.\n   /// Must be one of `256x256`, `512x512`, or `1024x1024`.\n   /// Defaults to `1024x1024`.\n   let size: String?\n   \n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n   let user: String?\n}\n```\n\n- Request example\n\n```swift\nimport SwiftOpenAI\n\nlet service = OpenAIServiceFactory.service(apiKey: \"<YOUR_KEY>\")\n\n// ❶ Describe the image you want\nlet prompt = \"A watercolor dragon-unicorn hybrid flying above snowy mountains\"\n\n// ❷ Build parameters with the brand-new types (commit 880a15c)\nlet params = CreateImageParameters(\n    prompt: prompt,\n    model:  .gptImage1,      // .dallE3 / .dallE2 also valid\n    n:      1,               // 1-10  (only 1 for DALL-E 3)\n    quality: .high,          // .hd / .standard for DALL-E 3\n    size:   \"1024x1024\"      // use \"1792x1024\" or \"1024x1792\" for wide / tall\n)\n\ndo {\n    // ❸ Fire the request – returns a `CreateImageResponse`\n    let result = try await service.createImages(parameters: params)\n    let url    = result.data?.first?.url          // or `b64Json` for base-64\n    print(\"Image URL:\", url ?? \"none\")\n} catch {\n    print(\"Generation failed:\", error)\n}\n```\n\nFor a sample app example go to the `Examples/SwiftOpenAIExample` project on this repo.\n\n⚠️ This library also keeps compatibility with previous image generation.\n\n\nFor handling image sizes, we utilize the `Dalle` model. 
An enum with associated values has been defined to represent its size constraints accurately.\n\n [DALL·E](https://platform.openai.com/docs/models/dall-e)\n \n DALL·E is an AI system that can create realistic images and art from a description in natural language. DALL·E 3 currently supports the ability, given a prompt, to create a new image with a specific size. DALL·E 2 also supports the ability to edit an existing image, or create variations of a user provided image.\n \n DALL·E 3 is available through our Images API along with DALL·E 2. You can try DALL·E 3 through ChatGPT Plus.\n \n \n | MODEL     | DESCRIPTION                                                  |\n |-----------|--------------------------------------------------------------|\n | dall-e-3  | DALL·E 3 New                                                 |\n |           | The latest DALL·E model released in Nov 2023. Learn more.    |\n | dall-e-2  | The previous DALL·E model released in Nov 2022.              |\n |           | The 2nd iteration of DALL·E with more realistic, accurate,   |\n |           | and 4x greater resolution images than the original model.    
|\n\n```swift\npublic enum Dalle {\n   \n   case dalle2(Dalle2ImageSize)\n   case dalle3(Dalle3ImageSize)\n   \n   public enum Dalle2ImageSize: String {\n      case small = \"256x256\"\n      case medium = \"512x512\"\n      case large = \"1024x1024\"\n   }\n   \n   public enum Dalle3ImageSize: String {\n      case largeSquare = \"1024x1024\"\n      case landscape  = \"1792x1024\"\n      case portrait = \"1024x1792\"\n   }\n   \n   var model: String {\n      switch self {\n      case .dalle2: return Model.dalle2.rawValue\n      case .dalle3: return Model.dalle3.rawValue\n      }\n   }\n   \n   var size: String {\n      switch self {\n      case .dalle2(let dalle2ImageSize):\n         return dalle2ImageSize.rawValue\n      case .dalle3(let dalle3ImageSize):\n         return dalle3ImageSize.rawValue\n      }\n   }\n}\n```\n\n#### Image create\nParameters\n```swift\npublic struct ImageCreateParameters: Encodable {\n   \n   /// A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n   let prompt: String\n   /// The model to use for image generation. Defaults to dall-e-2\n   let model: String?\n   /// The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported.\n   let n: Int?\n   /// The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3. Defaults to standard\n   let quality: String?\n   /// The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url\n   let responseFormat: String?\n   /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. Defaults to 1024x1024\n   let size: String?\n   /// The style of the generated images. Must be one of vivid or natural. 
Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3. Defaults to vivid\n   let style: String?\n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices)\n   let user: String?\n   \n   public init(\n      prompt: String,\n      model: Dalle,\n      numberOfImages: Int = 1,\n      quality: String? = nil,\n      responseFormat: ImageResponseFormat? = nil,\n      style: String? = nil,\n      user: String? = nil)\n   {\n   self.prompt = prompt\n   self.model = model.model\n   self.n = numberOfImages\n   self.quality = quality\n   self.responseFormat = responseFormat?.rawValue\n   self.size = model.size\n   self.style = style\n   self.user = user\n   }   \n}\n```\n#### Image Edit \nParameters\n```swift\n/// [Creates an edited or extended image given an original image and a prompt.](https://platform.openai.com/docs/api-reference/images/createEdit)\npublic struct ImageEditParameters: Encodable {\n   \n   /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.\n   let image: Data\n   /// A text description of the desired image(s). The maximum length is 1000 characters.\n   let prompt: String\n   /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.\n   let mask: Data?\n   /// The model to use for image generation. Only dall-e-2 is supported at this time. Defaults to dall-e-2\n   let model: String?\n   /// The number of images to generate. Must be between 1 and 10. Defaults to 1\n   let n: Int?\n   /// The size of the generated images. 
Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024\n   let size: String?\n   /// The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url\n   let responseFormat: String?\n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices)\n   let user: String?\n   \n   public init(\n      image: UIImage,\n      model: Dalle? = nil,\n      mask: UIImage? = nil,\n      prompt: String,\n      numberOfImages: Int? = nil,\n      responseFormat: ImageResponseFormat? = nil,\n      user: String? = nil)\n   {\n      if (image.pngData() == nil) {\n         assertionFailure(\"Failed to get PNG data from image\")\n      }\n      if let mask, mask.pngData() == nil {\n         assertionFailure(\"Failed to get PNG data from mask\")\n      }\n      if let model, model.model != Model.dalle2.rawValue {\n         assertionFailure(\"Only dall-e-2 is supported at this time [https://platform.openai.com/docs/api-reference/images/createEdit]\")\n      }\n      self.image = image.pngData()!\n      self.model = model?.model\n      self.mask = mask?.pngData()\n      self.prompt = prompt\n      self.n = numberOfImages\n      self.size = model?.size\n      self.responseFormat = responseFormat?.rawValue\n      self.user = user\n   }\n}\n```\n#### Image variation\nParameters\n```swift\n/// [Creates a variation of a given image.](https://platform.openai.com/docs/api-reference/images/createVariation)\npublic struct ImageVariationParameters: Encodable {\n   \n   /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.\n   let image: Data\n   /// The model to use for image generation. Only dall-e-2 is supported at this time. Defaults to dall-e-2\n   let model: String?\n   /// The number of images to generate. Must be between 1 and 10. 
Defaults to 1\n   let n: Int?\n   /// The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url\n   let responseFormat: String?\n   /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024\n   let size: String?\n   /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices)\n   let user: String?\n   \n   public init(\n      image: UIImage,\n      model: Dalle? = nil,\n      numberOfImages: Int? = nil,\n      responseFormat: ImageResponseFormat? = nil,\n      user: String? = nil)\n   {\n      if let model, model.model != Model.dalle2.rawValue {\n         assertionFailure(\"Only dall-e-2 is supported at this time [https://platform.openai.com/docs/api-reference/images/createEdit]\")\n      }\n      self.image = image.pngData()!\n      self.n = numberOfImages\n      self.model = model?.model\n      self.size = model?.size\n      self.responseFormat = responseFormat?.rawValue\n      self.user = user\n   }\n}\n```\nResponse\n```swift\n/// [Represents the url or the content of an image generated by the OpenAI API.](https://platform.openai.com/docs/api-reference/images/object)\npublic struct ImageObject: Decodable {\n   /// The URL of the generated image, if response_format is url (default).\n   public let url: URL?\n   /// The base64-encoded JSON of the generated image, if response_format is b64_json.\n   public let b64Json: String?\n   /// The prompt that was used to generate the image, if there was any revision to the prompt.\n   public let revisedPrompt: String?\n}\n```\n\nUsage\n```swift\n/// Create image\nlet prompt = \"A mix of a dragon and an unicorn\"\nlet createParameters = ImageCreateParameters(prompt: prompt, model: .dalle3(.largeSquare))\nlet imageURLS = try await service.legacyCreateImages(parameters: 
createParameters).data.map(\\.url)\n```\n```swift\n/// Edit image\nlet data = Data(contentsOfURL:_) // the data from an image.\nlet image = UIImage(data: data)\nlet prompt = \"Add a background filled with pink balloons.\"\nlet editParameters = ImageEditParameters(image: image, prompt: prompt, numberOfImages: 4)  \nlet imageURLS = try await service.legacyEditImage(parameters: editParameters).data.map(\\.url)\n```\n```swift\n/// Image variations\nlet data = Data(contentsOfURL:_) // the data from an image.\nlet image = UIImage(data: data)\nlet variationParameters = ImageVariationParameters(image: image, numberOfImages: 4)\nlet imageURLS = try await service.legacyCreateImageVariations(parameters: variationParameters).data.map(\\.url)\n```\n\n### Models\nResponse\n```swift\n\n/// Describes an OpenAI [model](https://platform.openai.com/docs/api-reference/models/object) offering that can be used with the API.\npublic struct ModelObject: Decodable {\n   \n   /// The model identifier, which can be referenced in the API endpoints.\n   public let id: String\n   /// The Unix timestamp (in seconds) when the model was created.\n   public let created: Int\n   /// The object type, which is always \"model\".\n   public let object: String\n   /// The organization that owns the model.\n   public let ownedBy: String\n   /// An array representing the current permissions of a model. Each element in the array corresponds to a specific permission setting. 
If there are no permissions or if the data is unavailable, the array may be nil.\n   public let permission: [Permission]?\n   \n   public struct Permission: Decodable {\n      public let id: String?\n      public let object: String?\n      public let created: Int?\n      public let allowCreateEngine: Bool?\n      public let allowSampling: Bool?\n      public let allowLogprobs: Bool?\n      public let allowSearchIndices: Bool?\n      public let allowView: Bool?\n      public let allowFineTuning: Bool?\n      public let organization: String?\n      public let group: String?\n      public let isBlocking: Bool?\n   }\n   \n   /// Represents the response from the [delete](https://platform.openai.com/docs/api-reference/models/delete) fine-tuning API\n   public struct DeletionStatus: Decodable {\n      \n      public let id: String\n      public let object: String\n      public let deleted: Bool\n   }\n}\n```\nUsage\n```swift\n/// List models\nlet models = try await service.listModels().data\n```\n```swift\n/// Retrieve model\nlet modelID = \"gpt-3.5-turbo-instruct\"\nlet retrievedModel = try await service.retrieveModelWith(id: modelID)\n```\n```swift\n/// Delete fine tuned model\nlet modelID = \"fine-tune-model-id\"\nlet deletionStatus = try await service.deleteFineTuneModelWith(id: modelID)\n```\n### Moderations\nParameters\n```swift\n/// [Classifies if text violates OpenAI's Content Policy.](https://platform.openai.com/docs/api-reference/moderations/create)\npublic struct ModerationParameter<Input: Encodable>: Encodable {\n   \n   /// The input text to classify, string or array.\n   let input: Input\n   /// Two content moderations models are available: text-moderation-stable and text-moderation-latest.\n   /// The default is text-moderation-latest which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use text-moderation-stable, we will provide advanced notice before updating the model. 
Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.\n   let model: String?\n   \n   enum Model: String {\n      case stable = \"text-moderation-stable\"\n      case latest = \"text-moderation-latest\"\n   }\n   \n   init(\n      input: Input,\n      model: Model? = nil)\n   {\n      self.input = input\n      self.model = model?.rawValue\n   }\n}\n```\nResponse\n```swift\n/// The [moderation object](https://platform.openai.com/docs/api-reference/moderations/object). Represents policy compliance report by OpenAI's content moderation model against a given input.\npublic struct ModerationObject: Decodable {\n   \n   /// The unique identifier for the moderation request.\n   public let id: String\n   /// The model used to generate the moderation results.\n   public let model: String\n   /// A list of moderation objects.\n   public let results: [Moderation]\n   \n   public struct Moderation: Decodable {\n      \n      /// Whether the content violates OpenAI's usage policies.\n      public let flagged: Bool\n      /// A list of the categories, and whether they are flagged or not.\n      public let categories: Category<Bool>\n      /// A list of the categories along with their scores as predicted by model.\n      public let categoryScores: Category<Double>\n      \n      public struct Category<T: Decodable>: Decodable {\n         \n         /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. 
Hateful content aimed at non-protected groups (e.g., chess players) is harassment.\n         public let hate: T\n         /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.\n         public let hateThreatening: T\n         /// Content that expresses, incites, or promotes harassing language towards any target.\n         public let harassment: T\n         /// Harassment content that also includes violence or serious harm towards any target.\n         public let harassmentThreatening: T\n         /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.\n         public let selfHarm: T\n         /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.\n         public let selfHarmIntent: T\n         /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.\n         public let selfHarmInstructions: T\n         /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).\n         public let sexual: T\n         /// Sexual content that includes an individual who is under 18 years old.\n         public let sexualMinors: T\n         /// Content that depicts death, violence, or physical injury.\n         public let violence: T\n         /// Content that depicts death, violence, or physical injury in graphic detail.\n         public let violenceGraphic: T\n      }\n   }\n}\n```\nUsage\n```swift\n/// Single prompt\nlet prompt = \"I am going to kill him\"\nlet parameters = ModerationParameter(input: prompt)\nlet isFlagged = try await 
service.createModerationFromText(parameters: parameters)\n```\n```swift\n/// Multiple prompts\nlet prompts = [\"I am going to kill him\", \"I am going to die\"]\nlet parameters = ModerationParameter(input: prompts)\nlet isFlagged = try await service.createModerationFromTexts(parameters: parameters)\n```\n\n### **BETA**\n### Assistants\nParameters\n```swift\n/// Create an [assistant](https://platform.openai.com/docs/api-reference/assistants/createAssistant) with a model and instructions.\n/// Modifies an [assistant](https://platform.openai.com/docs/api-reference/assistants/modifyAssistant).\npublic struct AssistantParameters: Encodable {\n   \n   /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.\n   public var model: String?\n   /// The name of the assistant. The maximum length is 256 characters.\n   public var name: String?\n   /// The description of the assistant. The maximum length is 512 characters.\n   public var description: String?\n   /// The system instructions that the assistant uses. The maximum length is 32768 characters.\n   public var instructions: String?\n   /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types code_interpreter, retrieval, or function. Defaults to []\n   public var tools: [AssistantObject.Tool] = []\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public var metadata: [String: String]?\n   /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n   /// Defaults to 1\n   public var temperature: Double?\n   /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or temperature but not both.\n   /// Defaults to 1\n   public var topP: Double?\n   /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.\n   /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n   /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n   /// Defaults to `auto`\n   public var responseFormat: ResponseFormat?\n   \n   public enum Action {\n      case create(model: String) // model is required on creation of assistant.\n      case modify(model: String?) // model is optional on modification of assistant.\n      \n      var model: String? 
{\n         switch self {\n         case .create(let model): return model\n         case .modify(let model): return model\n         }\n      }\n   }\n}\n```\nResponse\n```swift\n/// Represents an [assistant](https://platform.openai.com/docs/api-reference/assistants) that can call the model and use tools.\npublic struct AssistantObject: Decodable {\n   \n   /// The identifier, which can be referenced in API endpoints.\n   public let id: String\n   /// The object type, which is always \"assistant\".\n   public let object: String\n   /// The Unix timestamp (in seconds) for when the assistant was created.\n   public let createdAt: Int\n   /// The name of the assistant. The maximum length is 256 characters.\n   public let name: String?\n   /// The description of the assistant. The maximum length is 512 characters.\n   public let description: String?\n   /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.\n   public let model: String\n   /// The system instructions that the assistant uses. The maximum length is 32768 characters.\n   public let instructions: String?\n   /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types code_interpreter, retrieval, or function.\n   public let tools: [Tool]\n   /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order.\n   /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n   public let toolResources: ToolResources?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public let metadata: [String: String]?\n   /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n   /// Defaults to 1\n   public var temperature: Double?\n   /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or temperature but not both.\n   /// Defaults to 1\n   public var topP: Double?\n   /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.\n   /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n   /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n   /// Defaults to `auto`\n   public var responseFormat: ResponseFormat?\n\n   public struct Tool: Codable {\n      \n      /// The type of tool being defined.\n      public let type: String\n      public let function: ChatCompletionParameters.ChatFunction?\n      \n      public enum ToolType: String, CaseIterable {\n         case codeInterpreter = \"code_interpreter\"\n         case fileSearch = \"file_search\"\n         case function\n      }\n      \n      /// Helper.\n      public var displayToolType: ToolType? { .init(rawValue: type) }\n      \n      public init(\n         type: ToolType,\n         function: ChatCompletionParameters.ChatFunction? = nil)\n      {\n         self.type = type.rawValue\n         self.function = function\n      }\n   }\n   \n   public struct DeletionStatus: Decodable {\n      public let id: String\n      public let object: String\n      public let deleted: Bool\n   }\n}\n```\n\nUsage\n\nCreate Assistant\n```swift\nlet parameters = AssistantParameters(action: .create(model: Model.gpt41106Preview.rawValue), name: \"Math tutor\")\nlet assistant = try await service.createAssistant(parameters: parameters)\n```\nRetrieve Assistant\n```swift\nlet assistantID = \"asst_abc123\"\nlet assistant = try await service.retrieveAssistant(id: assistantID)\n```\nModify Assistant\n```swift\nlet assistantID = \"asst_abc123\"\nlet parameters = AssistantParameters(action: .modify, name: \"Math tutor for kids\")\nlet assistant = try await service.modifyAssistant(id: assistantID, parameters: parameters)\n```\nDelete Assistant\n```swift\nlet assistantID = \"asst_abc123\"\nlet deletionStatus = try await service.deleteAssistant(id: assistantID)\n```\nList Assistants\n```swift\nlet assistants = try await service.listAssistants()\n```\n\n### 
Threads\nParameters\n```swift\n/// Create a [Thread](https://platform.openai.com/docs/api-reference/threads/createThread)\npublic struct CreateThreadParameters: Encodable {\n   \n   /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with.\n   public var messages: [MessageObject]?\n      /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n   public var toolResources: ToolResources?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public var metadata: [String: String]?\n}\n```\nResponse\n```swift\n/// A [thread object](https://platform.openai.com/docs/api-reference/threads) represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages).\npublic struct ThreadObject: Decodable {\n   \n   /// The identifier, which can be referenced in API endpoints.\n   public let id: String\n   /// The object type, which is always thread.\n   public let object: String\n   /// The Unix timestamp (in seconds) for when the thread was created.\n   public let createdAt: Int\n   /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n   public var toolResources: ToolResources?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n   public let metadata: [String: String]\n   \n}\n```\n\nUsage\n\nCreate thread.\n```swift\nlet parameters = CreateThreadParameters()\nlet thread = try await service.createThread(parameters: parameters)\n```\nRetrieve thread.\n```swift\nlet threadID = \"thread_abc123\"\nlet thread = try await service.retrieveThread(id: threadID)\n```\nModify thread.\n```swift\nlet threadID = \"thread_abc123\"\nlet parameters = CreateThreadParameters(metadata: [\"modified\": \"true\", \"user\": \"abc123\"])\nlet thread = try await service.modifyThread(id: threadID, parameters: parameters)\n```\nDelete thread.\n```swift\nlet threadID = \"thread_abc123\"\nlet thread = try await service.deleteThread(id: threadID)\n```\n\n### Messages\nParameters\n[Create a Message](https://platform.openai.com/docs/api-reference/messages/createMessage)\n```swift\npublic struct MessageParameter: Encodable {\n   \n   /// The role of the entity that is creating the message. Allowed values include:\n   /// user: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n   /// assistant: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.\n   let role: String\n   /// The content of the message, which can be a string or an array of content parts (text, image URL, image file).\n   let content: Content\n   /// A list of files attached to the message, and the tools they should be added to.\n   let attachments: [MessageAttachment]?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   let metadata: [String: String]?\n}\n```\n[Modify a Message](https://platform.openai.com/docs/api-reference/messages/modifyMessage))\n```swift\npublic struct ModifyMessageParameters: Encodable {\n   \n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public var metadata: [String: String]\n}\n```\nResponse\n```swift\n/// Represents a [message](https://platform.openai.com/docs/api-reference/messages) within a [thread](https://platform.openai.com/docs/api-reference/threads).\npublic struct MessageObject: Codable {\n   \n   /// The identifier, which can be referenced in API endpoints.\n   public let id: String\n   /// The object type, which is always thread.message.\n   public let object: String\n   /// The Unix timestamp (in seconds) for when the message was created.\n   public let createdAt: Int\n   /// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to.\n   public let threadID: String\n   /// The status of the message, which can be either in_progress, incomplete, or completed.\n   public let status: String\n   /// On an incomplete message, details about why the message is incomplete.\n   public let incompleteDetails: IncompleteDetails?\n   /// The Unix timestamp (in seconds) for when the message was completed.\n   public let completedAt: Int\n   /// The entity that produced the message. 
One of user or assistant.\n   public let role: String\n   /// The content of the message in array of text and/or images.\n   public let content: [MessageContent]\n   /// If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) that authored this message.\n   public let assistantID: String?\n   /// If applicable, the ID of the [run](https://platform.openai.com/docs/api-reference/runs) associated with the authoring of this message.\n   public let runID: String?\n   /// A list of files attached to the message, and the tools they were added to.\n   public let attachments: [MessageAttachment]?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public let metadata: [String: String]?\n   \n   enum Role: String {\n      case user\n      case assistant\n   }\n}\n\n// MARK: MessageContent\n\npublic enum MessageContent: Codable {\n   \n   case imageFile(ImageFile)\n   case text(Text)\n}\n\n// MARK: Image File\n\npublic struct ImageFile: Codable {\n   /// Always image_file.\n   public let type: String\n   \n   /// References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.\n   public let imageFile: ImageFileContent\n   \n   public struct ImageFileContent: Codable {\n      \n      /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content.\n      public let fileID: String\n   }\n}\n\n// MARK: Text\n\npublic struct Text: Codable {\n   \n   /// Always text.\n   public let type: String\n   /// The text content that is part of a message.\n   public let text: TextContent\n   \n   public struct TextContent: Codable {\n      // The data that makes up the text.\n      public let value: String\n      \n      public let annotations: 
[Annotation]\n   }\n}\n\n// MARK: Annotation\n\npublic enum Annotation: Codable {\n   \n   case fileCitation(FileCitation)\n   case filePath(FilePath)\n}\n\n// MARK: FileCitation\n\n/// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the \"retrieval\" tool to search files.\npublic struct FileCitation: Codable {\n   \n   /// Always file_citation.\n   public let type: String\n   /// The text in the message content that needs to be replaced.\n   public let text: String\n   public let fileCitation: FileCitation\n   public let startIndex: Int\n   public let endIndex: Int\n   \n   public struct FileCitation: Codable {\n      \n      /// The ID of the specific File the citation is from.\n      public let fileID: String\n      /// The specific quote in the file.\n      public let quote: String\n\n   }\n}\n\n// MARK: FilePath\n\n/// A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.\npublic struct FilePath: Codable {\n   \n   /// Always file_path\n   public let type: String\n   /// The text in the message content that needs to be replaced.\n   public let text: String\n   public let filePath: FilePath\n   public let startIndex: Int\n   public let endIndex: Int\n   \n   public struct FilePath: Codable {\n      /// The ID of the file that was generated.\n      public let fileID: String\n   }\n}\n```\n\nUsage\n\nCreate Message.\n```swift\nlet threadID = \"thread_abc123\"\nlet prompt = \"Give me some ideas for a birthday party.\"\nlet parameters = MessageParameter(role: \"user\", content: .stringContent(prompt))\nlet message = try await service.createMessage(threadID: threadID, parameters: parameters)\n```\n\nRetrieve Message.\n```swift\nlet threadID = \"thread_abc123\"\nlet messageID = \"msg_abc123\"\nlet message = try await service.retrieveMessage(threadID: threadID, messageID: messageID)\n```\n\nModify 
Message.\n```swift\nlet threadID = \"thread_abc123\"\nlet messageID = \"msg_abc123\"\nlet parameters = ModifyMessageParameters(metadata: [\"modified\": \"true\", \"user\": \"abc123\"])\nlet message = try await service.modifyMessage(threadID: threadID, messageID: messageID, parameters: parameters)\n```\n\nList Messages\n```swift\nlet threadID = \"thread_abc123\"\nlet messages = try await service.listMessages(threadID: threadID, limit: nil, order: nil, after: nil, before: nil) \n```\n\n### Runs\nParameters\n\n[Create a run](https://platform.openai.com/docs/api-reference/runs/createRun)\n```swift\npublic struct RunParameter: Encodable {\n   \n   /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run.\n   let assistantID: String\n   /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.\n   let model: String?\n   /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.\n   let instructions: String?\n   /// Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions.\n   let additionalInstructions: String?\n   /// Adds additional messages to the thread before creating the run.\n   let additionalMessages: [MessageParameter]?\n   /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.\n   let tools: [AssistantObject.Tool]?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n   let metadata: [String: String]?\n   /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n   /// Optional Defaults to 1\n   let temperature: Double?\n   /// If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message.\n   var stream: Bool\n   /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status complete. See incomplete_details for more info.\n   let maxPromptTokens: Int?\n   /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status complete. See incomplete_details for more info.\n   let maxCompletionTokens: Int?\n   /// Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.\n   let truncationStrategy: TruncationStrategy?\n   /// Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling a tool. 
Specifying a particular tool like {\"type\": \"file_search\"} or {\"type\": \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that tool.\n   let toolChoice: ToolChoice?\n   /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.\n   /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n   /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n   let responseFormat: ResponseFormat?\n}\n```\n[Modify a Run](https://platform.openai.com/docs/api-reference/runs/modifyRun)\n```swift\npublic struct ModifyRunParameters: Encodable {\n   \n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public var metadata: [String: String]\n   \n   public init(\n      metadata: [String : String])\n   {\n      self.metadata = metadata\n   }\n}\n```\n[Creates a Thread and Runs.](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun)\n```swift\npublic struct CreateThreadAndRunParameter: Encodable {\n   \n   /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run.\n   let assistantId: String\n   /// A thread to create.\n   let thread: CreateThreadParameters?\n   /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.\n   let model: String?\n   /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.\n   let instructions: String?\n   /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.\n   let tools: [AssistantObject.Tool]?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   let metadata: [String: String]?\n   /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n   /// Defaults to 1\n   let temperature: Double?\n   /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n   /// We generally recommend altering this or temperature but not both.\n   let topP: Double?\n   /// If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message.\n   var stream: Bool = false\n   /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status incomplete. See incomplete_details for more info.\n   let maxPromptTokens: Int?\n   /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status complete. See incomplete_details for more info.\n   let maxCompletionTokens: Int?\n   /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run.\n   let truncationStrategy: TruncationStrategy?\n   /// Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling a tool. Specifying a particular tool like {\"type\": \"file_search\"} or {\"type\": \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that tool.\n   let toolChoice: ToolChoice?\n   /// Specifies the format that the model must output. 
Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.\n   /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n   /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n   let responseFormat: ResponseFormat?\n}\n```\n[Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)\n```swift\npublic struct RunToolsOutputParameter: Encodable {\n   \n   /// A list of tools for which the outputs are being submitted.\n   public let toolOutputs: [ToolOutput]\n   /// If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message.\n   public let stream: Bool\n}\n```\n   \nResponse\n```swift\npublic struct RunObject: Decodable {\n   \n   /// The identifier, which can be referenced in API endpoints.\n   public let id: String\n   /// The object type, which is always thread.run.\n   public let object: String\n   /// The Unix timestamp (in seconds) for when the run was created.\n   public let createdAt: Int?\n   /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run.\n   public let threadID: String\n   /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run.\n   public let assistantID: String\n   /// The status of the run, which can be either queued, in_progress, 
requires_action, cancelling, cancelled, failed, completed, or expired.\n   public let status: String\n   /// Details on the action required to continue the run. Will be null if no action is required.\n   public let requiredAction: RequiredAction?\n   /// The last error associated with this run. Will be null if there are no errors.\n   public let lastError: LastError?\n   /// The Unix timestamp (in seconds) for when the run will expire.\n   public let expiresAt: Int?\n   /// The Unix timestamp (in seconds) for when the run was started.\n   public let startedAt: Int?\n   /// The Unix timestamp (in seconds) for when the run was cancelled.\n   public let cancelledAt: Int?\n   /// The Unix timestamp (in seconds) for when the run failed.\n   public let failedAt: Int?\n   /// The Unix timestamp (in seconds) for when the run was completed.\n   public let completedAt: Int?\n   /// Details on why the run is incomplete. Will be null if the run is not incomplete.\n   public let incompleteDetails: IncompleteDetails?\n   /// The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run.\n   public let model: String\n   /// The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run.\n   public let instructions: String?\n   /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run.\n   public let tools: [AssistantObject.Tool]\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public let metadata: [String: String]\n   /// Usage statistics related to the run. This value will be null if the run is not in a terminal state (i.e. 
in_progress, queued, etc.).\n   public let usage: Usage?\n   /// The sampling temperature used for this run. If not set, defaults to 1.\n   public let temperature: Double?\n   /// The nucleus sampling value used for this run. If not set, defaults to 1.\n   public let topP: Double?\n   /// The maximum number of prompt tokens specified to have been used over the course of the run.\n   public let maxPromptTokens: Int?\n   /// The maximum number of completion tokens specified to have been used over the course of the run.\n   public let maxCompletionTokens: Int?\n   /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run.\n   public let truncationStrategy: TruncationStrategy?\n   /// Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling a tool. Specifying a particular tool like {\"type\": \"TOOL_TYPE\"} or {\"type\": \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that tool.\n   public let toolChoice: ToolChoice?\n   /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.\n   /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n   /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n   public let responseFormat: ResponseFormat?\n}\n```\nUsage\n\nCreate a Run\n```swift\nlet threadID = \"thread_abc123\"\nlet assistantID = \"asst_abc123\"\nlet parameters = RunParameter(assistantID: assistantID)\nlet run = try await service.createRun(threadID: threadID, parameters: parameters)\n```\nRetrieve a Run\n```swift\nlet threadID = \"thread_abc123\"\nlet runID = \"run_abc123\"\nlet run = try await service.retrieveRun(threadID: threadID, runID: runID)\n```\nModify a Run\n```swift\nlet threadID = \"thread_abc123\"\nlet runID = \"run_abc123\"\nlet parameters = ModifyRunParameters(metadata: [\"modified\": \"true\", \"user\": \"abc123\"])\nlet run = try await service.modifyRun(threadID: threadID, runID: runID, parameters: parameters)\n```\nList runs\n```swift\nlet threadID = \"thread_abc123\"\nlet runs = try await service.listRuns(threadID: threadID, limit: nil, order: nil, after: nil, before: nil) \n```\nSubmit tool outputs to Run\n```swift\nlet threadID = \"thread_abc123\"\nlet runID = \"run_abc123\"\nlet toolCallID = \"call_abc123\"\nlet output = \"28C\"\nlet parameters = RunToolsOutputParameter(toolOutputs: [.init(toolCallId: toolCallID, output: output)])\nlet run = try await service.submitToolOutputsToRun(threadID: threadID, runID: runID, parameters: parameters)\n```\nCancel a Run\n```swift\n/// Cancels a run that is in_progress.\nlet threadID = \"thread_abc123\"\nlet runID = \"run_abc123\"\nlet run = try await service.cancelRun(threadID: threadID, runID: runID)\n```\nCreate thread and Run\n```swift\nlet assistantID = \"asst_abc123\"\nlet parameters = CreateThreadAndRunParameter(assistantID: assistantID)\nlet run = try await service.createThreadAndRun(parameters: parameters)\n```\n\n### Run Step Object\nRepresents a [step](https://platform.openai.com/docs/api-reference/runs/step-object) in 
execution of a run.\nResponse\n```swift\npublic struct RunStepObject: Decodable {\n   \n   /// The identifier of the run step, which can be referenced in API endpoints.\n   public let id: String\n   /// The object type, which is always `thread.run.step``.\n   public let object: String\n   /// The Unix timestamp (in seconds) for when the run step was created.\n   public let createdAt: Int\n   /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step.\n   public let assistantId: String\n   /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run.\n   public let threadId: String\n   ///The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a part of.\n   public let runId: String\n   /// The type of run step, which can be either message_creation or tool_calls.\n   public let type: String\n   /// The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.\n   public let status: String\n   /// The details of the run step.\n   public let stepDetails: RunStepDetails\n   /// The last error associated with this run step. Will be null if there are no errors.\n   public let lastError: RunObject.LastError?\n   /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.\n   public let expiredAt: Int?\n   /// The Unix timestamp (in seconds) for when the run step was cancelled.\n   public let cancelledAt: Int?\n   /// The Unix timestamp (in seconds) for when the run step failed.\n   public let failedAt: Int?\n   /// The Unix timestamp (in seconds) for when the run step completed.\n   public let completedAt: Int?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   public let metadata: [String: String]?\n   /// Usage statistics related to the run step. This value will be null while the run step's status is in_progress.\n   public let usage: Usage?\n}\n```\nUsage\nRetrieve a Run step\n```swift\nlet threadID = \"thread_abc123\"\nlet runID = \"run_abc123\"\nlet stepID = \"step_abc123\"\nlet runStep = try await service.retrieveRunstep(threadID: threadID, runID: runID, stepID: stepID)\n```\nList run steps\n```swift\nlet threadID = \"thread_abc123\"\nlet runID = \"run_abc123\"\nlet runSteps = try await service.listRunSteps(threadID: threadID, runID: runID, limit: nil, order: nil, after: nil, before: nil) \n```\n\n### Run Step Detail\n\nThe details of the run step.\n\n```swift\npublic struct RunStepDetails: Codable {\n   \n   /// `message_creation` or `tool_calls`\n   public let type: String\n   /// Details of the message creation by the run step.\n   public let messageCreation: MessageCreation?\n   /// Details of the tool call.\n   public let toolCalls: [ToolCall]?\n}\n```\n\n### Assistants Streaming\n\nAssistants API [streaming.](https://platform.openai.com/docs/api-reference/assistants-streaming)\n\nStream the result of executing a Run or resuming a Run after submitting tool outputs.\n\nYou can stream events from the [Create Thread and Run](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun), [Create Run](https://platform.openai.com/docs/api-reference/runs/createRun), and [Submit Tool Outputs](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoints by passing \"stream\": true. 
The response will be a Server-Sent events stream.\n\n[OpenAI Python tutorial](https://platform.openai.com/docs/assistants/overview?context=with-streaming)\n\n### Message Delta Object\n\n[MessageDeltaObject](https://platform.openai.com/docs/api-reference/assistants-streaming/message-delta-object) Represents a message delta i.e. any changed fields on a message during streaming.\n\n```swift\npublic struct MessageDeltaObject: Decodable {\n   \n   /// The identifier of the message, which can be referenced in API endpoints.\n   public let id: String\n   /// The object type, which is always thread.message.delta.\n   public let object: String\n   /// The delta containing the fields that have changed on the Message.\n   public let delta: Delta\n   \n   public struct Delta: Decodable {\n      \n      /// The entity that produced the message. One of user or assistant.\n      public let role: String\n      /// The content of the message in array of text and/or images.\n      public let content: [MessageContent]\n   }\n}\n```\n\n### Run Step Delta Object\n\nRepresents a [run step delta](https://platform.openai.com/docs/api-reference/assistants-streaming/run-step-delta-object) i.e. 
any changed fields on a run step during streaming.\n\n```swift\npublic struct RunStepDeltaObject: Decodable {\n   \n   /// The identifier of the run step, which can be referenced in API endpoints.\n   public let id: String\n   /// The object type, which is always thread.run.step.delta.\n   public let object: String\n   /// The delta containing the fields that have changed on the run step.\n   public let delta: Delta\n   \n   public struct Delta: Decodable {\n      \n      /// The details of the run step.\n      public let stepDetails: RunStepDetails\n      \n      private enum CodingKeys: String, CodingKey {\n         case stepDetails = \"step_details\"\n      }\n   }\n}\n```\n\n⚠️ To utilize the `createRunAndStreamMessage`, first create an assistant and initiate a thread.\n\nUsage\n[Create Run](https://platform.openai.com/docs/api-reference/runs/createRun) with stream.\n\nThe `createRunAndStreamMessage` streams [events](https://platform.openai.com/docs/api-reference/assistants-streaming/events), You can decide which one you need for your implementation. 
For example, this is how you can access message delta and run step delta objects\n\n```swift\nlet assistantID = \"asst_abc123\"\nlet threadID = \"thread_abc123\"\nlet messageParameter = MessageParameter(role: .user, content: \"Tell me the square root of 1235\")\nlet message = try await service.createMessage(threadID: threadID, parameters: messageParameter)\nlet runParameters = RunParameter(assistantID: assistantID)\nlet stream = try await service.createRunAndStreamMessage(threadID: threadID, parameters: runParameters)\n\n         for try await result in stream {\n            switch result {\n            case .threadMessageDelta(let messageDelta):\n               let content = messageDelta.delta.content.first\n               switch content {\n               case .imageFile, nil:\n                  break\n               case .text(let textContent):\n                  print(textContent.text.value) // this will print the streamed response for a message.\n               }\n               \n            case .threadRunStepDelta(let runStepDelta):\n               if let toolCall = runStepDelta.delta.stepDetails.toolCalls?.first?.toolCall {\n                  switch toolCall {\n                  case .codeInterpreterToolCall(let toolCall):\n                     print(toolCall.input ?? 
\"\") // this will print the streamed response for code interpreter tool call.\n                  case .fileSearchToolCall(let toolCall):\n                     print(\"File search tool call\")\n                  case .functionToolCall(let toolCall):\n                     print(\"Function tool call\")\n                  case nil:\n                     break\n                  }\n               }\n            }\n         }\n```\n\nYou can go to the [Examples folder](https://github.com/jamesrochabrun/SwiftOpenAI/tree/main/Examples/SwiftOpenAIExample/SwiftOpenAIExample) in this package, navigate to the 'Configure Assistants' tab, create an assistant, and follow the subsequent steps.\n\n### Stream support has also been added to:\n\n[Create Thread and Run](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun):\n\n```swift\n   /// Creates a thread and run with stream enabled.\n   ///\n   /// - Parameter parameters: The parameters needed to create a thread and run.\n   /// - Returns: An AsyncThrowingStream of [AssistantStreamEvent](https://platform.openai.com/docs/api-reference/assistants-streaming/events) objects.\n   /// - Throws: An error if the request fails.\n   ///\n   /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun).\n   func createThreadAndRunStream(\n      parameters: CreateThreadAndRunParameter)\n   async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n```\n\n[Submit Tool Outputs](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs):\n\n```swift\n   /// When a run has the status: \"requires_action\" and required_action.type is submit_tool_outputs, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request. 
Stream enabled\n   ///\n   /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) to which this run belongs.\n   /// - Parameter runID: The ID of the run that requires the tool output submission.\n   /// - Parameter parameters: The parameters needed for the run tools output.\n   /// - Returns: An AsyncThrowingStream of [AssistantStreamEvent](https://platform.openai.com/docs/api-reference/assistants-streaming/events) objects.\n   /// - Throws: An error if the request fails.\n   ///\n   /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs).\n   func submitToolOutputsToRunStream(\n      threadID: String,\n      runID: String,\n      parameters: RunToolsOutputParameter)\n   async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n```\n\n### Vector Stores\nParameters\n```swift\npublic struct VectorStoreParameter: Encodable {\n   \n   /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like file_search that can access files.\n   let fileIDS: [String]?\n   /// The name of the vector store.\n   let name: String?\n   /// The expiration policy for a vector store.\n   let expiresAfter: ExpirationPolicy?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   let metadata: [String: String]?\n}\n```\nResponse\n```swift\npublic struct VectorStoreObject: Decodable {\n   \n   /// The identifier, which can be referenced in API endpoints.\n   let id: String\n   /// The object type, which is always vector_store.\n   let object: String\n   /// The Unix timestamp (in seconds) for when the vector store was created.\n   let createdAt: Int\n   /// The name of the vector store.\n   let name: String\n   /// The total number of bytes used by the files in the vector store.\n   let usageBytes: Int\n   \n   let fileCounts: FileCount\n   /// The status of the vector store, which can be either expired, in_progress, or completed. A status of completed indicates that the vector store is ready for use.\n   let status: String\n   /// The expiration policy for a vector store.\n   let expiresAfter: ExpirationPolicy?\n   /// The Unix timestamp (in seconds) for when the vector store will expire.\n   let expiresAt: Int?\n   /// The Unix timestamp (in seconds) for when the vector store was last active.\n   let lastActiveAt: Int?\n   /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n   let metadata: [String: String]\n   \n   public struct FileCount: Decodable {\n      \n      /// The number of files that are currently being processed.\n      let inProgress: Int\n      /// The number of files that have been successfully processed.\n      let completed: Int\n      /// The number of files that have failed to process.\n      let failed: Int\n      /// The number of files that were cancelled.\n      let cancelled: Int\n      /// The total number of files.\n      let total: Int\n   }\n}\n```\nUsage\n[Create vector Store](https://platform.openai.com/docs/api-reference/vector-stores/create)\n```swift\nlet name = \"Support FAQ\"\nlet parameters = VectorStoreParameter(name: name)\ntry vectorStore = try await service.createVectorStore(parameters: parameters)\n```\n\n[List Vector stores](https://platform.openai.com/docs/api-reference/vector-stores/list)\n```swift\nlet vectorStores = try await service.listVectorStores(limit: nil, order: nil, after: nil, before: nil)\n```\n\n[Retrieve Vector store](https://platform.openai.com/docs/api-reference/vector-stores/retrieve)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet vectorStore = try await service.retrieveVectorStore(id: vectorStoreID)\n```\n\n[Modify Vector store](https://platform.openai.com/docs/api-reference/vector-stores/modify)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet vectorStore = try await service.modifyVectorStore(id: vectorStoreID)\n```\n\n[Delete Vector store](https://platform.openai.com/docs/api-reference/vector-stores/delete)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet deletionStatus = try await service.deleteVectorStore(id: vectorStoreID)\n```\n\n### Vector Store File\nParameters\n```swift\npublic struct VectorStoreFileParameter: Encodable {\n   \n   /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. 
Useful for tools like file_search that can access files.\n   let fileID: String\n}\n```\nResponse\n```swift\npublic struct VectorStoreFileObject: Decodable {\n   \n   /// The identifier, which can be referenced in API endpoints.\n   let id: String\n   /// The object type, which is always vector_store.file.\n   let object: String\n   /// The total vector store usage in bytes. Note that this may be different from the original file size.\n   let usageBytes: Int\n   /// The Unix timestamp (in seconds) for when the vector store file was created.\n   let createdAt: Int\n   /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to.\n   let vectorStoreID: String\n   /// The status of the vector store file, which can be either in_progress, completed, cancelled, or failed. The status completed indicates that the vector store file is ready for use.\n   let status: String\n   /// The last error associated with this vector store file. 
Will be null if there are no errors.\n   let lastError: LastError?\n}\n```\n\nUsage\n[Create vector store file](https://platform.openai.com/docs/api-reference/vector-stores-files/createFile)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet fileID = \"file-abc123\"\nlet parameters = VectorStoreFileParameter(fileID: fileID)\nlet vectoreStoreFile = try await service.createVectorStoreFile(vectorStoreID: vectorStoreID, parameters: parameters)\n```\n\n[List vector store files](https://platform.openai.com/docs/api-reference/vector-stores-files/listFiles)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet vectorStoreFiles = try await service.listVectorStoreFiles(vectorStoreID: vectorStoreID, limit: nil, order: nil, aftre: nil, before: nil, filter: nil)\n```\n\n[Retrieve vector store file](https://platform.openai.com/docs/api-reference/vector-stores-files/getFile)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet fileID = \"file-abc123\"\nlet vectoreStoreFile = try await service.retrieveVectorStoreFile(vectorStoreID: vectorStoreID, fileID: fileID)\n```\n\n[Delete vector store file](https://platform.openai.com/docs/api-reference/vector-stores-files/deleteFile)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet fileID = \"file-abc123\"\nlet deletionStatus = try await service.deleteVectorStoreFile(vectorStoreID: vectorStoreID, fileID: fileID)\n```\n\n### Vector Store File Batch\nParameters\n```swift\npublic struct VectorStoreFileBatchParameter: Encodable {\n   \n   /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. 
Useful for tools like file_search that can access files.\n   let fileIDS: [String]\n}\n```\nResponse\n```swift\npublic struct VectorStoreFileBatchObject: Decodable {\n   \n   /// The identifier, which can be referenced in API endpoints.\n   let id: String\n   /// The object type, which is always vector_store.file_batch.\n   let object: String\n   /// The Unix timestamp (in seconds) for when the vector store files batch was created.\n   let createdAt: Int\n   /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to.\n   let vectorStoreID: String\n   /// The status of the vector store files batch, which can be either in_progress, completed, cancelled or failed.\n   let status: String\n   \n   let fileCounts: FileCount\n}\n```\nUsage\n\n[Create vector store file batch](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet fileIDS = [\"file-abc123\", \"file-abc456\"]\nlet parameters = VectorStoreFileBatchParameter(fileIDS: fileIDS)\nlet vectorStoreFileBatch = try await service.\n   createVectorStoreFileBatch(vectorStoreID: vectorStoreID, parameters: parameters)\n```\n\n[Retrieve vector store file batch](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/getBatch)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet batchID = \"vsfb_abc123\"\nlet vectorStoreFileBatch = try await service.retrieveVectorStoreFileBatch(vectorStoreID: vectorStoreID, batchID: batchID)\n```\n\n[Cancel vector store file batch](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/cancelBatch)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet batchID = \"vsfb_abc123\"\nlet vectorStoreFileBatch = try await service.cancelVectorStoreFileBatch(vectorStoreID: vectorStoreID, batchID: batchID)\n```\n\n[List vector store files in a 
batch](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/listBatchFiles)\n```swift\nlet vectorStoreID = \"vs_abc123\"\nlet batchID = \"vsfb_abc123\"\nlet vectorStoreFiles = try await service.listVectorStoreFilesInABatch(vectorStoreID: vectorStoreID, batchID: batchID)\n```\n\n⚠️ We currently support only Assistants Beta 2. If you need support for Assistants V1, you can access it in the jroch-supported-branch-for-assistants-v1 branch or in the v2.3 release. [Check OpenAI Documentation for details on migration.](https://platform.openai.com/docs/assistants/migration)\n\n## Anthropic\n\nAnthropic provides OpenAI compatibility, for more, visit the [documentation](https://docs.anthropic.com/en/api/openai-sdk#getting-started-with-the-openai-sdk)\n\nTo use Claude models with `SwiftOpenAI` you can:\n\n```swift\nlet anthropicApiKey = \"\"\nlet openAIService = OpenAIServiceFactory.service(apiKey: anthropicApiKey, \n                     overrideBaseURL: \"https://api.anthropic.com\", \n                     overrideVersion: \"v1\")\n```\n\nNow you can create the completion parameters like this:\n\n```swift\nlet parameters = ChatCompletionParameters(\n   messages: [.init(\n   role: .user,\n   content: \"Are you Claude?\")],\n   model: .custom(\"claude-3-7-sonnet-20250219\"))\n```\n\nFor a more complete Anthropic Swift Package, you can use [SwiftAnthropic](https://github.com/jamesrochabrun/SwiftAnthropic)\n\n## Azure OpenAI\n\nThis library provides support for both chat completions and chat stream completions through Azure OpenAI. 
Currently, `DefaultOpenAIAzureService` supports chat completions, including both streamed and non-streamed options.\n\nFor more information about Azure configuration refer to the [documentation.](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference)\n\nTo instantiate `DefaultOpenAIAzureService` you need to provide an `AzureOpenAIConfiguration`\n\n```swift\nlet azureConfiguration = AzureOpenAIConfiguration(\n                           resourceName: \"YOUR_RESOURCE_NAME\", \n                           openAIAPIKey: .apiKey(\"YOUR_OPENAI_APIKEY\"), \n                           apiVersion: \"THE_API_VERSION\")\n                           \nlet service = OpenAIServiceFactory.service(azureConfiguration: azureConfiguration)           \n```\n\nSupported API versions can be found in the Azure [documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#completions)\n\nCurrent Supported versions\n\n```2022-12-01```\n```2023-03-15-preview```\n```2023-05-15```\n```2023-06-01-preview```\n```2023-07-01-preview```\n```2023-08-01-preview```\n```2023-09-01-preview```\n\n### Usage on [Chat completions](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions):\n\n```swift\nlet parameters = ChatCompletionParameters(\n                     messages: [.init(role: .user, content: .text(prompt))], \n                     model: .custom(\"DEPLOYMENT_NAME\") /// The deployment name you chose when you deployed the model. 
e.g: \"gpt-35-turbo-0613\"\n)\nlet completionObject = try await service.startChat(parameters: parameters)\n```\n\n## AIProxy\n\n### What is it?\n\n[AIProxy](https://www.aiproxy.pro) is a backend for iOS apps that proxies requests from your app to OpenAI.\nUsing a proxy keeps your OpenAI key secret, protecting you from unexpectedly high bills due to key theft.\nRequests are only proxied if they pass your defined rate limits and Apple's [DeviceCheck](https://developer.apple.com/documentation/devicecheck) verification.\nWe offer AIProxy support so you can safely distribute apps built with SwiftOpenAI.\n\n### How does my SwiftOpenAI code change?\n\nProxy requests through AIProxy with two changes to your Xcode project:\n\n1. Instead of initializing `service` with:\n\n        let apiKey = \"your_openai_api_key_here\"\n        let service = OpenAIServiceFactory.service(apiKey: apiKey)\n\nUse:\n\n        let service = OpenAIServiceFactory.service(\n            aiproxyPartialKey: \"your_partial_key_goes_here\",\n            aiproxyServiceURL: \"your_service_url_goes_here\"\n        )\n\nThe `aiproxyPartialKey` and `aiproxyServiceURL` values are provided to you on the [AIProxy developer dashboard](https://developer.aiproxy.pro)\n\n2. Add an `AIPROXY_DEVICE_CHECK_BYPASS` env variable to Xcode. This token is provided to you in the AIProxy\n   developer dashboard, and is necessary for the iOS simulator to communicate with the AIProxy backend.\n    - Type `cmd shift ,` to open up the \"Edit Schemes\" menu in Xcode\n    - Select `Run` in the sidebar\n    - Select `Arguments` from the top nav\n    - Add to the \"Environment Variables\" section (not the \"Arguments Passed on Launch\" section) an env\n      variable with name `AIPROXY_DEVICE_CHECK_BYPASS` and value that we provided you in the AIProxy dashboard\n\n\n⚠️  The `AIPROXY_DEVICE_CHECK_BYPASS` is intended for the simulator only. Do not let it leak into\na distribution build of your app (including a TestFlight distribution). 
If you follow the steps above,\nthen the constant won't leak because env variables are not packaged into the app bundle.\n\n#### What is the `AIPROXY_DEVICE_CHECK_BYPASS` constant?\n\nAIProxy uses Apple's [DeviceCheck](https://developer.apple.com/documentation/devicecheck) to ensure\nthat requests received by the backend originated from your app on a legitimate Apple device.\nHowever, the iOS simulator cannot produce DeviceCheck tokens. Rather than requiring you to\nconstantly build and run on device during development, AIProxy provides a way to skip the\nDeviceCheck integrity check. The token is intended for use by developers only. If an attacker gets\nthe token, they can make requests to your AIProxy project without including a DeviceCheck token, and\nthus remove one level of protection.\n\n#### What is the `aiproxyPartialKey` constant?\n\nThis constant is safe to include in distributed version of your app. It is one part of an\nencrypted representation of your real secret key. The other part resides on AIProxy's backend.\nAs your app makes requests to AIProxy, the two encrypted parts are paired, decrypted, and used\nto fulfill the request to OpenAI.\n\n#### How to setup my project on AIProxy?\n\nPlease see the [AIProxy integration guide](https://www.aiproxy.pro/docs/integration-guide.html)\n\n\n### ⚠️  Disclaimer\n\nContributors of SwiftOpenAI shall not be liable for any damages or losses caused by third parties.\nContributors of this library provide third party integrations as a convenience. 
Any use of a third\nparty's services are assumed at your own risk.\n\n\n## Ollama\n\nOllama now has built-in compatibility with the OpenAI [Chat Completions API](https://github.com/ollama/ollama/blob/main/docs/openai.md), making it possible to use more tooling and applications with Ollama locally.\n\n<img width=\"783\" alt=\"Screenshot 2024-06-24 at 11 52 35 PM\" src=\"https://github.com/jamesrochabrun/SwiftOpenAI/assets/5378604/db2264cb-408c-471d-b65b-912795c082ed\">\n\n### ⚠️ Important\n\nRemember that these models run locally, so you need to download them. If you want to use llama3, you can open the terminal and run the following command:\n\n```python\nollama pull llama3\n```\n\nyou can follow [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/openai.md) for more.\n\n### How to use this models locally using SwiftOpenAI?\n\nTo use local models with an `OpenAIService` in your application, you need to provide a URL. \n\n```swift\nlet service = OpenAIServiceFactory.service(baseURL: \"http://localhost:11434\")\n```\n\nThen you can use the completions API as follows:\n\n```swift\nlet prompt = \"Tell me a joke\"\nlet parameters = ChatCompletionParameters(messages: [.init(role: .user, content: .text(prompt))], model: .custom(\"llama3\"))\nlet chatCompletionObject = service.startStreamedChat(parameters: parameters)\n```\n\n⚠️ Note: You can probably use the `OpenAIServiceFactory.service(apiKey:overrideBaseURL:proxyPath)` for any OpenAI compatible service.\n\n### Resources:\n\n[Ollama OpenAI compatibility docs.](https://github.com/ollama/ollama/blob/main/docs/openai.md)\n[Ollama OpenAI compatibility blog post.](https://ollama.com/blog/openai-compatibility)\n\n### Notes\n\nYou can also use this service constructor to provide any URL or apiKey if you need.\n\n```swift\nlet service = OpenAIServiceFactory.service(apiKey: \"YOUR_API_KEY\", baseURL: \"http://localhost:11434\")\n```\n\n## Groq\n\n<img width=\"792\" alt=\"Screenshot 2024-10-11 at 11 49 04 PM\" 
src=\"https://github.com/user-attachments/assets/7afb36a2-b2d8-4f89-9592-f4cece20d469\">\n\nGroq API is mostly compatible with OpenAI's client libraries like `SwiftOpenAI`. To use Groq with this library, you just need to create an instance of `OpenAIService` like this:\n\n```swift\nlet apiKey = \"your_api_key\"\nlet service = OpenAIServiceFactory.service(apiKey: apiKey, overrideBaseURL: \"https://api.groq.com/\", proxyPath: \"openai\")\n\n```\n\nFor supported APIs using Groq visit its [documentation](https://console.groq.com/docs/openai).\n\n## xAI\n\n<img width=\"792\" alt=\"xAI Grok\" src=\"https://github.com/user-attachments/assets/596ef28a-b8ea-4868-b37c-36ae28d77a30\">\n\nxAI provides an OpenAI-compatible completion API to its Grok models. You can use the OpenAI SDK to access these models.\n\n```swift\nlet apiKey = \"your_api_xai_key\"\nlet service = OpenAIServiceFactory.service(apiKey: apiKey, overrideBaseURL: \"https://api.x.ai\", overrideVersion: \"v1\")\n```\n\nFor more information about the `xAI` api visit its [documentation](https://docs.x.ai/docs/overview).\n\n## OpenRouter\n\n<img width=\"734\" alt=\"Image\" src=\"https://github.com/user-attachments/assets/2d658d07-0b41-4b5f-a094-ec7856f6fe98\" />\n\n[OpenRouter](https://openrouter.ai/docs/quick-start) provides an OpenAI-compatible completion API to 314 models & providers that you can call directly, or using the OpenAI SDK. Additionally, some third-party SDKs are available.\n\n```swift\n\n// Creating the service\n\nlet apiKey = \"your_api_key\"\nlet service = OpenAIServiceFactory.service(apiKey: apiKey, \n   overrideBaseURL: \"https://openrouter.ai\", \n   proxyPath: \"api\",\n   extraHeaders: [\n      \"HTTP-Referer\": \"<YOUR_SITE_URL>\", // Optional. Site URL for rankings on openrouter.ai.\n         \"X-Title\": \"<YOUR_SITE_NAME>\"  // Optional. 
Site title for rankings on openrouter.ai.\n   ])\n\n// Making a request\n\nlet prompt = \"What is the Manhattan project?\"\nlet parameters = ChatCompletionParameters(messages: [.init(role: .user, content: .text(prompt))], model: .custom(\"deepseek/deepseek-r1:free\"))\nlet stream = service.startStreamedChat(parameters: parameters)\n```\n\nFor more inofrmation about the `OpenRouter` api visit its [documentation](https://openrouter.ai/docs/quick-start).\n\n## DeepSeek\n\n![Image](https://github.com/user-attachments/assets/7733f011-691a-4de7-b715-c090e3647304)\n\nThe [DeepSeek](https://api-docs.deepseek.com/) API uses an API format compatible with OpenAI. By modifying the configuration, you can use SwiftOpenAI to access the DeepSeek API.\n\nCreating the service\n\n```swift\n\nlet apiKey = \"your_api_key\"\nlet service = OpenAIServiceFactory.service(\n   apiKey: apiKey,\n   overrideBaseURL: \"https://api.deepseek.com\")\n```\n\nNon-Streaming Example\n\n```swift\nlet prompt = \"What is the Manhattan project?\"\nlet parameters = ChatCompletionParameters(\n    messages: [.init(role: .user, content: .text(prompt))],\n    model: .custom(\"deepseek-reasoner\")\n)\n\ndo {\n    let result = try await service.chat(parameters: parameters)\n    \n    // Access the response content\n    if let content = result.choices.first?.message.content {\n        print(\"Response: \\(content)\")\n    }\n    \n    // Access reasoning content if available\n    if let reasoning = result.choices.first?.message.reasoningContent {\n        print(\"Reasoning: \\(reasoning)\")\n    }\n} catch {\n    print(\"Error: \\(error)\")\n}\n```\n\nStreaming Example\n\n```swift\nlet prompt = \"What is the Manhattan project?\"\nlet parameters = ChatCompletionParameters(\n    messages: [.init(role: .user, content: .text(prompt))],\n    model: .custom(\"deepseek-reasoner\")\n)\n\n// Start the stream\ndo {\n    let stream = try await service.startStreamedChat(parameters: parameters)\n    for try await result in 
stream {\n        let content = result.choices.first?.delta.content ?? \"\"\n        self.message += content\n        \n        // Optional: Handle reasoning content if available\n        if let reasoning = result.choices.first?.delta.reasoningContent {\n            self.reasoningMessage += reasoning\n        }\n    }\n} catch APIError.responseUnsuccessful(let description, let statusCode) {\n    self.errorMessage = \"Network error with status code: \\(statusCode) and description: \\(description)\"\n} catch {\n    self.errorMessage = error.localizedDescription\n}\n```\n\nNotes\n\n- The DeepSeek API is compatible with OpenAI's format but uses different model names\n- Use .custom(\"deepseek-reasoner\") to specify the DeepSeek model\n- The `reasoningContent` field is optional and specific to DeepSeek's API\n- Error handling follows the same pattern as standard OpenAI requests.\n\nFor more inofrmation about the `DeepSeek` api visit its [documentation](https://api-docs.deepseek.com).\n\n## Gemini\n\n<img width=\"982\" alt=\"Screenshot 2024-11-12 at 10 53 43 AM\" src=\"https://github.com/user-attachments/assets/cebc18fe-b96d-4ffe-912e-77d625249cf2\">\n\nGemini is now accessible from the OpenAI Library. Announcement .\n`SwiftOpenAI` support all OpenAI endpoints, however Please refer to Gemini documentation to understand which API's are currently compatible' \n\nGemini is now accessible through the OpenAI Library. See the announcement [here](https://developers.googleblog.com/en/gemini-is-now-accessible-from-the-openai-library/).\nSwiftOpenAI supports all OpenAI endpoints. 
However, please refer to the [Gemini documentation](https://ai.google.dev/gemini-api/docs/openai) to understand which APIs are currently compatible.\n\n\nYou can instantiate an `OpenAIService` using your Gemini token like this...\n\n```swift\nlet geminiAPIKey = \"your_api_key\"\nlet baseURL = \"https://generativelanguage.googleapis.com\"\nlet version = \"v1beta\"\n\nlet service = OpenAIServiceFactory.service(\n   apiKey: geminiAPIKey, \n   overrideBaseURL: baseURL, \n   overrideVersion: version)\n```\n\nYou can now create a chat request using the `.custom` model parameter and pass the model name as a string.\n\n```swift\nlet parameters = ChatCompletionParameters(\n      messages: [.init(\n      role: .user,\n      content: content)],\n      model: .custom(\"gemini-1.5-flash\"))\n\nlet stream = try await service.startStreamedChat(parameters: parameters)\n```\n\n## Collaboration\nOpen a PR for any proposed change pointing it to `main` branch. Unit tests are highly appreciated ❤️\n\n"
  },
  {
    "path": "Sources/OpenAI/AIProxy/AIProxyCertificatePinning.swift",
    "content": "//\n//  AIProxyCertificatePinning.swift\n//\n//\n//  Created by Lou Zell on 6/23/24.\n//\n#if !os(Linux)\nimport Foundation\nimport OSLog\n\nprivate let aiproxyLogger = Logger(\n  subsystem: Bundle.main.bundleIdentifier ?? \"UnknownApp\",\n  category: \"SwiftOpenAI+AIProxyCertificatePinning\")\n\n// MARK: - AIProxyCertificatePinningDelegate\n\n/// ## About\n/// Use this class in conjunction with a URLSession to adopt certificate pinning in your app.\n/// Cert pinning greatly reduces the ability for an attacker to snoop on your traffic.\n///\n/// A common misunderstanding about https is that it's hard for an attacker to read your traffic.\n/// Unfortunately, that is only true if you, as the developer, control both sides of the pipe.\n/// As an app developer, this is almost never the case. You ship your apps to the app store, and\n/// attackers install them. When an attacker has your app on hardware they control (e.g. an iPhone),\n/// it is trivial for them to MITM your app and read encrypted traffic.\n///\n/// Certificate pinning adds an additional layer of security by only allowing the TLS handshake to\n/// succeed if your app recognizes the public key from the other side. I have baked in several AIProxy\n/// public keys to this implementation.\n///\n/// This also functions as a reference implementation for any other libraries that want to interact\n/// with the aiproxy.pro service using certificate pinning.\n///\n/// ## Implementor's note, and a gotcha\n/// Use an instance of this class as the delegate to URLSession. For example:\n///\n///     let mySession = URLSession(\n///        configuration: .default,\n///        delegate: AIProxyCertificatePinningDelegate(),\n///        delegateQueue: nil\n///     )\n///\n/// In a perfect world, this would be all that is required of you. 
In fact, it is all that is required to protect requests made\n/// with `await mySession.data(for:)`, because Foundation calls `urlSession:didReceiveChallenge:`\n/// internally. However, `await mySession.bytes(for:)` is not protected, which is rather odd. As a workaround,\n/// change your callsites from:\n///\n///     await mySession.bytes(for: request)\n///\n/// to:\n///\n///     await mySession.bytes(\n///         for: request,\n///         delegate: mySession.delegate as? URLSessionTaskDelegate\n///     )\n///\n/// If you encounter other calls in the wild that do not invoke `urlSession:didReceiveChallenge:` on this class,\n/// please report them to me.\nfinal class AIProxyCertificatePinningDelegate: NSObject, URLSessionDelegate, URLSessionTaskDelegate {\n  func urlSession(\n    _: URLSession,\n    task _: URLSessionTask,\n    didReceive challenge: URLAuthenticationChallenge)\n    async -> (URLSession.AuthChallengeDisposition, URLCredential?)\n  {\n    answerChallenge(challenge)\n  }\n\n  func urlSession(\n    _: URLSession,\n    didReceive challenge: URLAuthenticationChallenge)\n    async -> (URLSession.AuthChallengeDisposition, URLCredential?)\n  {\n    answerChallenge(challenge)\n  }\n\n  private func answerChallenge(\n    _ challenge: URLAuthenticationChallenge)\n    -> (URLSession.AuthChallengeDisposition, URLCredential?)\n  {\n    guard let secTrust = challenge.protectionSpace.serverTrust else {\n      aiproxyLogger.error(\"Could not access the server's security space\")\n      return (.cancelAuthenticationChallenge, nil)\n    }\n\n    guard let certificate = getServerCert(secTrust: secTrust) else {\n      aiproxyLogger.error(\"Could not access the server's TLS cert\")\n      return (.cancelAuthenticationChallenge, nil)\n    }\n\n    let serverPublicKey = SecCertificateCopyKey(certificate)!\n    let serverPublicKeyData = SecKeyCopyExternalRepresentation(serverPublicKey, nil)!\n\n    for publicKeyData in publicKeysAsData {\n      if serverPublicKeyData as 
Data == publicKeyData {\n        let credential = URLCredential(trust: secTrust)\n        return (.useCredential, credential)\n      }\n    }\n    return (.cancelAuthenticationChallenge, nil)\n  }\n}\n\n// MARK: - Private\n\nprivate var publicKeysAsData: [Data] = publicKeysAsHex.map { publicKeyAsHex in\n  let keyData = Data(publicKeyAsHex)\n\n  let attributes: [String: Any] = [\n    kSecAttrKeyType as String: kSecAttrKeyTypeECSECPrimeRandom,\n    kSecAttrKeyClass as String: kSecAttrKeyClassPublic,\n    kSecAttrKeySizeInBits as String: 256,\n  ]\n\n  var error: Unmanaged<CFError>?\n  let publicKey = SecKeyCreateWithData(keyData as CFData, attributes as CFDictionary, &error)!\n\n  let localPublicKeyData = SecKeyCopyExternalRepresentation(publicKey, nil)! as Data\n\n  if let error {\n    print(\"Failed to create public key: \\(error.takeRetainedValue() as Error)\")\n    fatalError()\n  }\n  return localPublicKeyData\n}\n\nprivate let publicKeysAsHex: [[UInt8]] = [\n  // live on api.aiproxy.com\n  [\n    0x04, 0x4A, 0x42, 0x12, 0xE7, 0xED, 0x36, 0xB4, 0xA9, 0x1F, 0x96, 0x7E, 0xCF, 0xBD, 0xE0,\n    0x9D, 0xEA, 0x4B, 0xFB, 0xAF, 0xE7, 0xC6, 0x93, 0xF0, 0xBF, 0x92, 0x0F, 0x12, 0x7A, 0x22,\n    0x7D, 0x00, 0x77, 0x81, 0xA5, 0x06, 0x26, 0x06, 0x5C, 0x47, 0x8F, 0x57, 0xEF, 0x41, 0x39,\n    0x0B, 0x3D, 0x41, 0x72, 0x68, 0x33, 0x86, 0x69, 0x14, 0x2A, 0x36, 0x4D, 0x74, 0x7D, 0xBC,\n    0x60, 0x91, 0xFF, 0xCC, 0x29,\n  ],\n\n  // live on api.aiproxy.pro\n  [\n    0x04, 0x25, 0xA2, 0xD1, 0x81, 0xC0, 0x38, 0xCE, 0x57, 0xAA, 0x6E, 0xF0, 0x5A, 0xC3, 0x6A,\n    0xA7, 0xC4, 0x69, 0x69, 0xCB, 0xEB, 0x24, 0xE5, 0x20, 0x7D, 0x06, 0xCB, 0xC7, 0x49, 0xD5,\n    0x0C, 0xAC, 0xE6, 0x96, 0xC5, 0xC9, 0x28, 0x00, 0x8E, 0x69, 0xFF, 0x9D, 0x32, 0x01, 0x53,\n    0x74, 0xAB, 0xFD, 0x46, 0x03, 0x32, 0xED, 0x93, 0x7F, 0x0F, 0xE9, 0xD9, 0xC3, 0xAF, 0xE7,\n    0xA5, 0xCB, 0xC1, 0x29, 0x35,\n  ],\n\n  // live on beta-api.aiproxy.pro\n  [\n    0x04, 0xAF, 0xB2, 0xCC, 0xE2, 0x51, 0x92, 0xCF, 0xB8, 0x01, 
0x25, 0xC1, 0xB8, 0xDA, 0x29,\n    0x51, 0x9F, 0x91, 0x4C, 0xAA, 0x09, 0x66, 0x3D, 0x81, 0xD7, 0xAD, 0x6F, 0xDB, 0x78, 0x10,\n    0xD4, 0xBE, 0xCD, 0x4F, 0xE3, 0xAF, 0x4F, 0xB6, 0xD2, 0xCA, 0x85, 0xB6, 0xC7, 0x3E, 0xB4,\n    0x61, 0x62, 0xE1, 0xFC, 0x90, 0xD6, 0x84, 0x1F, 0x98, 0xCA, 0x83, 0x60, 0x8B, 0x65, 0xCB,\n    0x1A, 0x57, 0x6E, 0x32, 0x35,\n  ],\n\n  // backup-EC-key-A.key\n  [\n    0x04, 0x2C, 0x25, 0x74, 0xBC, 0x7E, 0x18, 0x10, 0x27, 0xBD, 0x03, 0x56, 0x4A, 0x7B, 0x32,\n    0xD2, 0xC1, 0xB0, 0x2E, 0x58, 0x85, 0x9A, 0xB0, 0x7D, 0xCD, 0x7E, 0x23, 0x33, 0x88, 0x2F,\n    0xC0, 0xFE, 0xCE, 0x2E, 0xBF, 0x36, 0x67, 0xC6, 0x81, 0xF6, 0x52, 0x2B, 0x9B, 0xAF, 0x97,\n    0x3C, 0xAC, 0x00, 0x39, 0xD8, 0xCC, 0x43, 0x6B, 0x1D, 0x65, 0xA5, 0xAD, 0xD1, 0x57, 0x4B,\n    0xAD, 0xB1, 0x17, 0xD3, 0x10,\n  ],\n\n  // backup-EC-key-B.key\n  [\n    0x04, 0x34, 0xAE, 0x84, 0x94, 0xE9, 0x02, 0xF0, 0x78, 0x0E, 0xEE, 0xE6, 0x4E, 0x39, 0x7F,\n    0xB4, 0x84, 0xF6, 0xEC, 0x55, 0x20, 0x0D, 0x36, 0xE9, 0xA6, 0x44, 0x6B, 0x9B, 0xE1, 0xEF,\n    0x19, 0xE7, 0x90, 0x5B, 0xF4, 0xA3, 0x29, 0xF3, 0x56, 0x7C, 0x60, 0x97, 0xF0, 0xC6, 0x61,\n    0x83, 0x31, 0x5D, 0x2D, 0xC9, 0xCC, 0x40, 0x43, 0xAD, 0x81, 0x63, 0xFD, 0xCF, 0xE2, 0x8E,\n    0xFA, 0x07, 0x09, 0xF6, 0xF2,\n  ],\n\n  // backup-EC-key-C.key\n  [\n    0x04, 0x84, 0x4E, 0x33, 0xC8, 0x60, 0xE7, 0x78, 0xAA, 0xA2, 0xB6, 0x0B, 0xCF, 0x7A, 0x52,\n    0x43, 0xD1, 0x6D, 0x58, 0xFF, 0x17, 0xB8, 0xEA, 0x8A, 0x39, 0x53, 0xFB, 0x8B, 0x66, 0x7D,\n    0x10, 0x39, 0x80, 0x2C, 0x8D, 0xC9, 0xC3, 0x34, 0x33, 0x98, 0x14, 0xEB, 0x88, 0x7B, 0xF5,\n    0x4D, 0x1F, 0x07, 0xAE, 0x6A, 0x02, 0x6B, 0xF5, 0x9B, 0xA8, 0xC6, 0x55, 0x5C, 0x27, 0xCD,\n    0x1B, 0xC0, 0x27, 0x2D, 0x82,\n  ],\n]\n\nprivate func getServerCert(secTrust: SecTrust) -> SecCertificate? {\n  if #available(macOS 12.0, iOS 15.0, *) {\n    guard let certs = SecTrustCopyCertificateChain(secTrust) as? 
[SecCertificate] else {\n      return nil\n    }\n    return certs[0]\n  } else {\n    return SecTrustGetCertificateAtIndex(secTrust, 0)\n  }\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/AIProxy/AIProxyService.swift",
    "content": "//\n//  AIProxyService.swift\n//\n//\n//  Created by Lou Zell on 3/27/24.\n//\n#if !os(Linux)\nimport Foundation\n\nprivate let aiproxySecureDelegate = AIProxyCertificatePinningDelegate()\n\n// MARK: - AIProxyService\n\nstruct AIProxyService: OpenAIService {\n  /// Initializes an instance of the OpenAI service with the required configurations.\n  ///\n  /// - Parameters:\n  ///   - partialKey: Your partial key provided during the integration process at `dashboard.aiproxy.pro`.\n  ///                 Refer to the [integration guide](https://www.aiproxy.pro/docs/integration-guide.html)\n  ///                 for details on acquiring your partial key. This is required.\n  ///   - serviceURL: Your service URL, also provided during the integration process. If you integrated before\n  ///                 July 22nd, 2024, you can leave this parameter blank, and it will default to\n  ///                 `\"https://api.aiproxy.pro\"`. This is optional.\n  ///   - clientID: An optional client ID to annotate requests in the AIProxy developer dashboard.\n  ///               If left blank, AIProxy generates client IDs for you. Most users can safely leave this blank.\n  ///   - organizationID: An optional OpenAI organization ID. Refer to the [organization documentation](https://platform.openai.com/docs/api-reference/organization-optional)\n  ///                     for details on its usage. Defaults to `nil`.\n  ///   - debugEnabled: A flag to enable printing request events during DEBUG builds. Set this to `true` for debugging.\n  init(\n    partialKey: String,\n    serviceURL: String? = nil,\n    clientID: String? = nil,\n    organizationID: String? = nil,\n    debugEnabled: Bool)\n  {\n    decoder = JSONDecoder()\n    self.partialKey = partialKey\n    self.clientID = clientID\n    self.organizationID = organizationID\n    self.debugEnabled = debugEnabled\n    openAIEnvironment = .init(baseURL: serviceURL ?? 
\"https://api.aiproxy.pro\", proxyPath: nil, version: \"v1\")\n    httpClient = URLSessionHTTPClientAdapter(\n      urlSession: URLSession(\n        configuration: .default,\n        delegate: aiproxySecureDelegate,\n        delegateQueue: nil))\n  }\n\n  let httpClient: HTTPClient\n  let decoder: JSONDecoder\n  let openAIEnvironment: OpenAIEnvironment\n\n  // MARK: Audio\n\n  func createTranscription(\n    parameters: AudioTranscriptionParameters)\n    async throws -> AudioObject\n  {\n    let request = try await OpenAIAPI.audio(.transcriptions).multiPartRequest(\n      aiproxyPartialKey: partialKey,\n      openAIEnvironment: openAIEnvironment,\n      clientID: clientID,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: AudioObject.self, with: request)\n  }\n\n  func createTranslation(\n    parameters: AudioTranslationParameters)\n    async throws -> AudioObject\n  {\n    let request = try await OpenAIAPI.audio(.translations).multiPartRequest(\n      aiproxyPartialKey: partialKey,\n      openAIEnvironment: openAIEnvironment,\n      clientID: clientID,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: AudioObject.self, with: request)\n  }\n\n  func createSpeech(\n    parameters: AudioSpeechParameters)\n    async throws -> AudioSpeechObject\n  {\n    let request = try await OpenAIAPI.audio(.speech).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    let data = try await fetchAudio(with: request)\n    return AudioSpeechObject(output: data)\n  }\n\n  #if canImport(AVFoundation)\n  func realtimeSession(\n    model _: String,\n    configuration _: OpenAIRealtimeSessionConfiguration)\n    async throws -> 
OpenAIRealtimeSession\n  {\n    fatalError(\"Realtime API is not yet supported for AIProxy. Please use DefaultOpenAIService instead.\")\n  }\n  #endif\n\n  // MARK: Chat\n\n  func startChat(\n    parameters: ChatCompletionParameters)\n    async throws -> ChatCompletionObject\n  {\n    var chatParameters = parameters\n    chatParameters.stream = false\n    let request = try await OpenAIAPI.chat.request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: chatParameters)\n    return try await fetch(debugEnabled: debugEnabled, type: ChatCompletionObject.self, with: request)\n  }\n\n  func startStreamedChat(\n    parameters: ChatCompletionParameters)\n    async throws -> AsyncThrowingStream<ChatCompletionChunkObject, Error>\n  {\n    var chatParameters = parameters\n    chatParameters.stream = true\n    chatParameters.streamOptions = .init(includeUsage: true)\n    let request = try await OpenAIAPI.chat.request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: chatParameters)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ChatCompletionChunkObject.self, with: request)\n  }\n\n  // MARK: Embeddings\n\n  func createEmbeddings(\n    parameters: EmbeddingParameter)\n    async throws -> OpenAIResponse<EmbeddingObject>\n  {\n    let request = try await OpenAIAPI.embeddings.request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<EmbeddingObject>.self, with: request)\n  }\n\n  // MARK: Fine-tuning\n\n  func createFineTuningJob(\n    parameters: 
FineTuningJobParameters)\n    async throws -> FineTuningJobObject\n  {\n    let request = try await OpenAIAPI.fineTuning(.create).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: FineTuningJobObject.self, with: request)\n  }\n\n  func listFineTuningJobs(\n    after lastJobID: String? = nil,\n    limit: Int? = nil)\n    async throws -> OpenAIResponse<FineTuningJobObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let lastJobID, let limit {\n      queryItems = [.init(name: \"after\", value: lastJobID), .init(name: \"limit\", value: \"\\(limit)\")]\n    } else if let lastJobID {\n      queryItems = [.init(name: \"after\", value: lastJobID)]\n    } else if let limit {\n      queryItems = [.init(name: \"limit\", value: \"\\(limit)\")]\n    }\n\n    let request = try await OpenAIAPI.fineTuning(.list).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<FineTuningJobObject>.self, with: request)\n  }\n\n  func retrieveFineTuningJob(\n    id: String)\n    async throws -> FineTuningJobObject\n  {\n    let request = try await OpenAIAPI.fineTuning(.retrieve(jobID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get)\n    return try await fetch(debugEnabled: debugEnabled, type: FineTuningJobObject.self, with: request)\n  }\n\n  func cancelFineTuningJobWith(\n    id: String)\n    async throws -> FineTuningJobObject\n  {\n    let request = try await OpenAIAPI.fineTuning(.cancel(jobID: 
id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post)\n    return try await fetch(debugEnabled: debugEnabled, type: FineTuningJobObject.self, with: request)\n  }\n\n  func listFineTuningEventsForJobWith(\n    id: String,\n    after lastEventId: String? = nil,\n    limit: Int? = nil)\n    async throws -> OpenAIResponse<FineTuningJobEventObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let lastEventId, let limit {\n      queryItems = [.init(name: \"after\", value: lastEventId), .init(name: \"limit\", value: \"\\(limit)\")]\n    } else if let lastEventId {\n      queryItems = [.init(name: \"after\", value: lastEventId)]\n    } else if let limit {\n      queryItems = [.init(name: \"limit\", value: \"\\(limit)\")]\n    }\n    let request = try await OpenAIAPI.fineTuning(.events(jobID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<FineTuningJobEventObject>.self, with: request)\n  }\n\n  // MARK: Files\n\n  func listFiles()\n    async throws -> OpenAIResponse<FileObject>\n  {\n    let request = try await OpenAIAPI.file(.list).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<FileObject>.self, with: request)\n  }\n\n  func uploadFile(\n    parameters: FileParameters)\n    async throws -> FileObject\n  {\n    let request = try await OpenAIAPI.file(.upload).multiPartRequest(\n      aiproxyPartialKey: partialKey,\n      openAIEnvironment: openAIEnvironment,\n      clientID: clientID,\n      
organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: FileObject.self, with: request)\n  }\n\n  func deleteFileWith(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try await OpenAIAPI.file(.delete(fileID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .delete)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func retrieveFileWith(\n    id: String)\n    async throws -> FileObject\n  {\n    let request = try await OpenAIAPI.file(.retrieve(fileID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get)\n    return try await fetch(debugEnabled: debugEnabled, type: FileObject.self, with: request)\n  }\n\n  func retrieveContentForFileWith(\n    id: String)\n    async throws -> [[String: Any]]\n  {\n    let request = try await OpenAIAPI.file(.retrieveFileContent(fileID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get)\n    return try await fetchContentsOfFile(request: request)\n  }\n\n  // MARK: Images\n\n  func legacyCreateImages(\n    parameters: ImageCreateParameters)\n    async throws -> OpenAIResponse<ImageObject>\n  {\n    let request = try await OpenAIAPI.images(.generations).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ImageObject>.self, with: request)\n  }\n\n  func 
legacyEditImage(\n    parameters: ImageEditParameters)\n    async throws -> OpenAIResponse<ImageObject>\n  {\n    let request = try await OpenAIAPI.images(.edits).multiPartRequest(\n      aiproxyPartialKey: partialKey,\n      openAIEnvironment: openAIEnvironment,\n      clientID: clientID,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ImageObject>.self, with: request)\n  }\n\n  func legacyCreateImageVariations(\n    parameters: ImageVariationParameters)\n    async throws -> OpenAIResponse<ImageObject>\n  {\n    let request = try await OpenAIAPI.images(.variations).multiPartRequest(\n      aiproxyPartialKey: partialKey,\n      openAIEnvironment: openAIEnvironment,\n      clientID: clientID,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ImageObject>.self, with: request)\n  }\n\n  func createImages(\n    parameters: CreateImageParameters)\n    async throws -> CreateImageResponse\n  {\n    let request = try await OpenAIAPI.images(.generations).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: CreateImageResponse.self, with: request)\n  }\n\n  func editImage(\n    parameters: CreateImageEditParameters)\n    async throws -> CreateImageResponse\n  {\n    let request = try await OpenAIAPI.images(.edits).multiPartRequest(\n      aiproxyPartialKey: partialKey,\n      openAIEnvironment: openAIEnvironment,\n      clientID: clientID,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: CreateImageResponse.self, with: request)\n  }\n\n 
 func createImageVariations(\n    parameters: CreateImageVariationParameters)\n    async throws -> CreateImageResponse\n  {\n    let request = try await OpenAIAPI.images(.variations).multiPartRequest(\n      aiproxyPartialKey: partialKey,\n      openAIEnvironment: openAIEnvironment,\n      clientID: clientID,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: CreateImageResponse.self, with: request)\n  }\n\n  // MARK: Models\n\n  func listModels()\n    async throws -> OpenAIResponse<ModelObject>\n  {\n    let request = try await OpenAIAPI.model(.list).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ModelObject>.self, with: request)\n  }\n\n  func retrieveModelWith(\n    id: String)\n    async throws -> ModelObject\n  {\n    let request = try await OpenAIAPI.model(.retrieve(modelID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get)\n    return try await fetch(debugEnabled: debugEnabled, type: ModelObject.self, with: request)\n  }\n\n  func deleteFineTuneModelWith(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try await OpenAIAPI.model(.deleteFineTuneModel(modelID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .delete)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Moderations\n\n  func createModerationFromText(\n    parameters: ModerationParameter<String>)\n    async throws -> ModerationObject\n  {\n    
let request = try await OpenAIAPI.moderations.request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: ModerationObject.self, with: request)\n  }\n\n  func createModerationFromTexts(\n    parameters: ModerationParameter<[String]>)\n    async throws -> ModerationObject\n  {\n    let request = try await OpenAIAPI.moderations.request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: ModerationObject.self, with: request)\n  }\n\n  // MARK: Assistants [BETA]\n\n  func createAssistant(\n    parameters: AssistantParameters)\n    async throws -> AssistantObject\n  {\n    let request = try await OpenAIAPI.assistant(.create).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  func retrieveAssistant(\n    id: String)\n    async throws -> AssistantObject\n  {\n    let request = try await OpenAIAPI.assistant(.retrieve(assistantID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  func modifyAssistant(\n    id: String,\n    parameters: AssistantParameters)\n    async throws 
-> AssistantObject\n  {\n    let request = try await OpenAIAPI.assistant(.modify(assistantID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  func deleteAssistant(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try await OpenAIAPI.assistant(.delete(assistantID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func listAssistants(\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? 
= nil)\n    async throws -> OpenAIResponse<AssistantObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try await OpenAIAPI.assistant(.list).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<AssistantObject>.self, with: request)\n  }\n\n  // MARK: Thread [BETA]\n\n  func createThread(\n    parameters: CreateThreadParameters)\n    async throws -> ThreadObject\n  {\n    let request = try await OpenAIAPI.thread(.create).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  func retrieveThread(id: String)\n    async throws -> ThreadObject\n  {\n    let request = try await OpenAIAPI.thread(.retrieve(threadID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  func modifyThread(\n    id: String,\n    parameters: 
ModifyThreadParameters)\n    async throws -> ThreadObject\n  {\n    let request = try await OpenAIAPI.thread(.modify(threadID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  func deleteThread(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try await OpenAIAPI.thread(.delete(threadID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Message [BETA]\n\n  func createMessage(\n    threadID: String,\n    parameters: MessageParameter)\n    async throws -> MessageObject\n  {\n    let request = try await OpenAIAPI.message(.create(threadID: threadID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  func retrieveMessage(\n    threadID: String,\n    messageID: String)\n    async throws -> MessageObject\n  {\n    let request = try await OpenAIAPI.message(.retrieve(threadID: threadID, messageID: messageID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    
return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  func modifyMessage(\n    threadID: String,\n    messageID: String,\n    parameters: ModifyMessageParameters)\n    async throws -> MessageObject\n  {\n    let request = try await OpenAIAPI.message(.modify(threadID: threadID, messageID: messageID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  func deleteMessage(\n    threadID: String,\n    messageID: String)\n    async throws -> DeletionStatus\n  {\n    let request = try await OpenAIAPI.message(.delete(threadID: threadID, messageID: messageID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func listMessages(\n    threadID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil,\n    runID _: String? 
= nil)\n    async throws -> OpenAIResponse<MessageObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try await OpenAIAPI.message(.list(threadID: threadID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<MessageObject>.self, with: request)\n  }\n\n  // MARK: Run [BETA]\n\n  func createRun(\n    threadID: String,\n    parameters: RunParameter)\n    async throws -> RunObject\n  {\n    let request = try await OpenAIAPI.run(.create(threadID: threadID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func retrieveRun(\n    threadID: String,\n    runID: String)\n    async throws -> RunObject\n  {\n    let request = try await OpenAIAPI.run(.retrieve(threadID: threadID, runID: runID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n 
 func modifyRun(\n    threadID: String,\n    runID: String,\n    parameters: ModifyRunParameters)\n    async throws -> RunObject\n  {\n    let request = try await OpenAIAPI.run(.modify(threadID: threadID, runID: runID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func listRuns(\n    threadID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil)\n    async throws -> OpenAIResponse<RunObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try await OpenAIAPI.run(.list(threadID: threadID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<RunObject>.self, with: request)\n  }\n\n  func cancelRun(\n    threadID: String,\n    runID: String)\n    async throws -> RunObject\n  {\n    let request = try await OpenAIAPI.run(.cancel(threadID: threadID, runID: runID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      
betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func submitToolOutputsToRun(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> RunObject\n  {\n    let request = try await OpenAIAPI.run(.submitToolOutput(threadID: threadID, runID: runID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func createThreadAndRun(\n    parameters: CreateThreadAndRunParameter)\n    async throws -> RunObject\n  {\n    let request = try await OpenAIAPI.run(.createThreadAndRun).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  // MARK: Run Step [BETA]\n\n  func retrieveRunstep(\n    threadID: String,\n    runID: String,\n    stepID: String)\n    async throws -> RunStepObject\n  {\n    let request = try await OpenAIAPI.runStep(.retrieve(threadID: threadID, runID: runID, stepID: stepID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: RunStepObject.self, with: request)\n  }\n\n  func listRunSteps(\n    threadID: String,\n    runID: String,\n    limit: Int? = nil,\n    order: String? 
= nil,\n    after: String? = nil,\n    before: String? = nil)\n    async throws -> OpenAIResponse<RunStepObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try await OpenAIAPI.runStep(.list(threadID: threadID, runID: runID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<RunStepObject>.self, with: request)\n  }\n\n  func createRunStream(\n    threadID: String,\n    parameters: RunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runParameters = parameters\n    runParameters.stream = true\n    let request = try await OpenAIAPI.run(.create(threadID: threadID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: runParameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  func createThreadAndRunStream(\n    parameters: CreateThreadAndRunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runParameters = parameters\n    runParameters.stream = true\n    let request = try await OpenAIAPI.run(.createThreadAndRun).request(\n      aiproxyPartialKey: partialKey,\n      clientID: 
clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: runParameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  func submitToolOutputsToRunStream(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runToolsOutputParameter = parameters\n    runToolsOutputParameter.stream = true\n    let request = try await OpenAIAPI.run(.submitToolOutput(threadID: threadID, runID: runID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: runToolsOutputParameter,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  // MARK: Batch\n\n  func createBatch(\n    parameters: BatchParameter)\n    async throws -> BatchObject\n  {\n    let request = try await OpenAIAPI.batch(.create).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: BatchObject.self, with: request)\n  }\n\n  func retrieveBatch(\n    id: String)\n    async throws -> BatchObject\n  {\n    let request = try await OpenAIAPI.batch(.retrieve(batchID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get)\n    return try await fetch(debugEnabled: debugEnabled, type: BatchObject.self, with: request)\n  }\n\n  func cancelBatch(\n    id: String)\n    async throws -> 
BatchObject\n  {\n    let request = try await OpenAIAPI.batch(.cancel(batchID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post)\n    return try await fetch(debugEnabled: debugEnabled, type: BatchObject.self, with: request)\n  }\n\n  func listBatch(\n    after: String? = nil,\n    limit: Int? = nil)\n    async throws -> OpenAIResponse<BatchObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    let request = try await OpenAIAPI.batch(.list).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<BatchObject>.self, with: request)\n  }\n\n  // MARK: Vector Store\n\n  func createVectorStore(\n    parameters: VectorStoreParameter)\n    async throws -> VectorStoreObject\n  {\n    let request = try await OpenAIAPI.vectorStore(.create).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  func listVectorStores(\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? 
= nil)\n    async throws -> OpenAIResponse<VectorStoreObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try await OpenAIAPI.vectorStore(.list).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<VectorStoreObject>.self, with: request)\n  }\n\n  func retrieveVectorStore(\n    id: String)\n    async throws -> VectorStoreObject\n  {\n    let request = try await OpenAIAPI.vectorStore(.retrieve(vectorStoreID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  func modifyVectorStore(\n    parameters: VectorStoreParameter,\n    id: String)\n    async throws -> VectorStoreObject\n  {\n    let request = try await OpenAIAPI.vectorStore(.modify(vectorStoreID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  func 
deleteVectorStore(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try await OpenAIAPI.vectorStore(.delete(vectorStoreID: id)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Vector Store Files\n\n  func createVectorStoreFile(\n    vectorStoreID: String,\n    parameters: VectorStoreFileParameter)\n    async throws -> VectorStoreFileObject\n  {\n    let request = try await OpenAIAPI.vectorStoreFile(.create(vectorStoreID: vectorStoreID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileObject.self, with: request)\n  }\n\n  func listVectorStoreFiles(\n    vectorStoreID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil,\n    filter: String? 
= nil)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    if let filter {\n      queryItems.append(.init(name: \"filter\", value: filter))\n    }\n    let request = try await OpenAIAPI.vectorStoreFile(.list(vectorStoreID: vectorStoreID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<VectorStoreFileObject>.self, with: request)\n  }\n\n  func retrieveVectorStoreFile(\n    vectorStoreID: String,\n    fileID: String)\n    async throws -> VectorStoreFileObject\n  {\n    let request = try await OpenAIAPI.vectorStoreFile(.retrieve(vectorStoreID: vectorStoreID, fileID: fileID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileObject.self, with: request)\n  }\n\n  func deleteVectorStoreFile(\n    vectorStoreID: String,\n    fileID: String)\n    async throws -> DeletionStatus\n  {\n    let request = try await OpenAIAPI.vectorStoreFile(.delete(vectorStoreID: vectorStoreID, fileID: fileID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      
openAIEnvironment: openAIEnvironment,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Vector Store File Batch\n\n  func createVectorStoreFileBatch(\n    vectorStoreID: String,\n    parameters: VectorStoreFileBatchParameter)\n    async throws -> VectorStoreFileBatchObject\n  {\n    let request = try await OpenAIAPI.vectorStoreFileBatch(.create(vectorStoreID: vectorStoreID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileBatchObject.self, with: request)\n  }\n\n  func retrieveVectorStoreFileBatch(\n    vectorStoreID: String,\n    batchID: String)\n    async throws -> VectorStoreFileBatchObject\n  {\n    let request = try await OpenAIAPI.vectorStoreFileBatch(.retrieve(vectorStoreID: vectorStoreID, batchID: batchID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileBatchObject.self, with: request)\n  }\n\n  func cancelVectorStoreFileBatch(\n    vectorStoreID: String,\n    batchID: String)\n    async throws -> VectorStoreFileBatchObject\n  {\n    let request = try await OpenAIAPI.vectorStoreFileBatch(.cancel(vectorStoreID: vectorStoreID, batchID: batchID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await 
fetch(debugEnabled: debugEnabled, type: VectorStoreFileBatchObject.self, with: request)\n  }\n\n  func listVectorStoreFilesInABatch(\n    vectorStoreID: String,\n    batchID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil,\n    filter: String? = nil)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    if let filter {\n      queryItems.append(.init(name: \"filter\", value: filter))\n    }\n    let request = try await OpenAIAPI.vectorStoreFileBatch(.list(vectorStoreID: vectorStoreID, batchID: batchID)).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<VectorStoreFileObject>.self, with: request)\n  }\n\n  // MARK: Response\n\n  func responseCreate(\n    _ parameters: ModelResponseParameter)\n    async throws -> ResponseModel\n  {\n    var responseParameters = parameters\n    responseParameters.stream = false\n    let request = try await OpenAIAPI.response(.create).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: responseParameters)\n    return try await fetch(debugEnabled: debugEnabled, type: ResponseModel.self, with: request)\n  }\n\n  func responseModel(\n    id _: 
String,\n    parameters _: GetResponseParameter?)\n    async throws -> ResponseModel\n  {\n    let request = try await OpenAIAPI.chat.request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post)\n    return try await fetch(debugEnabled: debugEnabled, type: ResponseModel.self, with: request)\n  }\n\n  func responseModelStream(\n    id _: String,\n    parameters _: GetResponseParameter?)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    fatalError(\"responseModelStream not implemented for AIProxy Service\")\n  }\n\n  func responseCreateStream(\n    _ parameters: ModelResponseParameter)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    var responseParameters = parameters\n    responseParameters.stream = true\n    let request = try await OpenAIAPI.response(.create).request(\n      aiproxyPartialKey: partialKey,\n      clientID: clientID,\n      organizationID: organizationID,\n      openAIEnvironment: openAIEnvironment,\n      method: .post,\n      params: responseParameters)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ResponseStreamEvent.self, with: request)\n  }\n\n  func responseDelete(\n    id _: String)\n    async throws -> DeletionStatus\n  {\n    fatalError(\"responseDelete not implemented for AIProxy Service\")\n  }\n\n  func responseCancel(\n    id _: String)\n    async throws -> ResponseModel\n  {\n    fatalError(\"responseCancel not implemented for AIProxy Service\")\n  }\n\n  func responseInputItems(\n    id _: String,\n    parameters _: GetInputItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\"responseInputItems not implemented for AIProxy Service\")\n  }\n\n  // MARK: - Conversations\n\n  func conversationCreate(\n    parameters _: CreateConversationParameter?)\n    async throws -> ConversationModel\n  {\n    
fatalError(\"conversationCreate not implemented for AIProxy Service\")\n  }\n\n  func getConversation(\n    id _: String)\n    async throws -> ConversationModel\n  {\n    fatalError(\"getConversation not implemented for AIProxy Service\")\n  }\n\n  func updateConversation(\n    id _: String,\n    parameters _: UpdateConversationParameter)\n    async throws -> ConversationModel\n  {\n    fatalError(\"updateConversation not implemented for AIProxy Service\")\n  }\n\n  func deleteConversation(\n    id _: String)\n    async throws -> DeletionStatus\n  {\n    fatalError(\"deleteConversation not implemented for AIProxy Service\")\n  }\n\n  func getConversationItems(\n    id _: String,\n    parameters _: GetConversationItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\"getConversationItems not implemented for AIProxy Service\")\n  }\n\n  func createConversationItems(\n    id _: String,\n    parameters _: CreateConversationItemsParameter)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\"createConversationItems not implemented for AIProxy Service\")\n  }\n\n  func getConversationItem(\n    conversationID _: String,\n    itemID _: String,\n    parameters _: GetConversationItemParameter?)\n    async throws -> InputItem\n  {\n    fatalError(\"getConversationItem not implemented for AIProxy Service\")\n  }\n\n  func deleteConversationItem(\n    conversationID _: String,\n    itemID _: String)\n    async throws -> ConversationModel\n  {\n    fatalError(\"deleteConversationItem not implemented for AIProxy Service\")\n  }\n\n  private static let assistantsBetaV2 = \"assistants=v2\"\n\n  /// Your partial key is provided during the integration process at dashboard.aiproxy.pro\n  /// Please see the [integration guide](https://www.aiproxy.pro/docs/integration-guide.html) for acquiring your partial key\n  private let partialKey: String\n  /// Optionally supply your own client IDs to annotate requests with in the AIProxy developer 
dashboard.\n  /// It is safe to leave this blank (most people do). If you leave it blank, AIProxy generates client IDs for you.\n  private let clientID: String?\n  /// Set this flag to TRUE if you need to print request events in DEBUG builds.\n  private let debugEnabled: Bool\n\n  /// [organization](https://platform.openai.com/docs/api-reference/organization-optional)\n  private let organizationID: String?\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/AIProxy/Endpoint+AIProxy.swift",
    "content": "//\n//  Endpoint+AIProxy.swift\n//\n//\n//  Created by Lou Zell on 3/26/24.\n//\n#if !os(Linux)\nimport DeviceCheck\nimport Foundation\nimport OSLog\n#if canImport(UIKit)\nimport UIKit\n#endif\n#if canImport(IOKit)\nimport IOKit\n#endif\n#if os(watchOS)\nimport WatchKit\n#endif\n\nprivate let aiproxyLogger = Logger(\n  subsystem: Bundle.main.bundleIdentifier ?? \"UnknownApp\",\n  category: \"SwiftOpenAI+AIProxy\")\n\nprivate let deviceCheckWarning = \"\"\"\n  AIProxy warning: DeviceCheck is not available on this device.\n\n  To use AIProxy on an iOS simulator, set an AIPROXY_DEVICE_CHECK_BYPASS environment variable.\n\n  See the AIProxy section of the README at https://github.com/jamesrochabrun/SwiftOpenAI for instructions.\n  \"\"\"\n\n// MARK: Endpoint+AIProxy\n\nextension Endpoint {\n  func request(\n    aiproxyPartialKey: String,\n    clientID: String?,\n    organizationID: String?,\n    openAIEnvironment: OpenAIEnvironment,\n    method: HTTPMethod,\n    params: Encodable? = nil,\n    queryItems: [URLQueryItem] = [],\n    betaHeaderField: String? = nil)\n    async throws -> URLRequest\n  {\n    let finalPath = path(in: openAIEnvironment)\n    var request = URLRequest(url: urlComponents(serviceURL: openAIEnvironment.baseURL, path: finalPath, queryItems: queryItems)\n      .url!)\n    request.addValue(\"application/json\", forHTTPHeaderField: \"Content-Type\")\n    request.addValue(aiproxyPartialKey, forHTTPHeaderField: \"aiproxy-partial-key\")\n    if let organizationID {\n      request.addValue(organizationID, forHTTPHeaderField: \"OpenAI-Organization\")\n    }\n    if let betaHeaderField {\n      request.addValue(betaHeaderField, forHTTPHeaderField: \"OpenAI-Beta\")\n    }\n    if let clientID = clientID ?? 
getClientID() {\n      request.addValue(clientID, forHTTPHeaderField: \"aiproxy-client-id\")\n    }\n    if let deviceCheckToken = await getDeviceCheckToken() {\n      request.addValue(deviceCheckToken, forHTTPHeaderField: \"aiproxy-devicecheck\")\n    }\n    #if DEBUG && targetEnvironment(simulator)\n    if let deviceCheckBypass = ProcessInfo.processInfo.environment[\"AIPROXY_DEVICE_CHECK_BYPASS\"] {\n      request.addValue(deviceCheckBypass, forHTTPHeaderField: \"aiproxy-devicecheck-bypass\")\n    }\n    #endif\n    request.httpMethod = method.rawValue\n    if let params {\n      request.httpBody = try JSONEncoder().encode(params)\n    }\n    return request\n  }\n\n  func multiPartRequest(\n    aiproxyPartialKey: String,\n    openAIEnvironment: OpenAIEnvironment,\n    clientID: String?,\n    organizationID: String?,\n    method: HTTPMethod,\n    params: MultipartFormDataParameters,\n    queryItems: [URLQueryItem] = [])\n    async throws -> URLRequest\n  {\n    let finalPath = path(in: openAIEnvironment)\n    var request = URLRequest(url: urlComponents(serviceURL: openAIEnvironment.baseURL, path: finalPath, queryItems: queryItems)\n      .url!)\n    request.httpMethod = method.rawValue\n    request.addValue(aiproxyPartialKey, forHTTPHeaderField: \"aiproxy-partial-key\")\n    if let organizationID {\n      request.addValue(organizationID, forHTTPHeaderField: \"OpenAI-Organization\")\n    }\n    if let clientID = clientID ?? 
getClientID() {\n      request.addValue(clientID, forHTTPHeaderField: \"aiproxy-client-id\")\n    }\n    if let deviceCheckToken = await getDeviceCheckToken() {\n      request.addValue(deviceCheckToken, forHTTPHeaderField: \"aiproxy-devicecheck\")\n    }\n    #if DEBUG && targetEnvironment(simulator)\n    if let deviceCheckBypass = ProcessInfo.processInfo.environment[\"AIPROXY_DEVICE_CHECK_BYPASS\"] {\n      request.addValue(deviceCheckBypass, forHTTPHeaderField: \"aiproxy-devicecheck-bypass\")\n    }\n    #endif\n    let boundary = UUID().uuidString\n    request.addValue(\"multipart/form-data; boundary=\\(boundary)\", forHTTPHeaderField: \"Content-Type\")\n    request.httpBody = params.encode(boundary: boundary)\n    return request\n  }\n\n  private func urlComponents(\n    serviceURL: String,\n    path: String,\n    queryItems: [URLQueryItem])\n    -> URLComponents\n  {\n    var components = URLComponents(string: serviceURL)!\n    components.path = components.path.appending(path)\n    if !queryItems.isEmpty {\n      components.queryItems = queryItems\n    }\n    return components\n  }\n}\n\n// MARK: Private Helpers\n\n/// Gets a device check token for use in your calls to aiproxy.\n/// The device token may be nil when targeting the iOS simulator.\nprivate func getDeviceCheckToken() async -> String? {\n  guard DCDevice.current.isSupported else {\n    if ProcessInfo.processInfo.environment[\"AIPROXY_DEVICE_CHECK_BYPASS\"] == nil {\n      aiproxyLogger.warning(\"\\(deviceCheckWarning, privacy: .public)\")\n    }\n    return nil\n  }\n\n  do {\n    let data = try await DCDevice.current.generateToken()\n    return data.base64EncodedString()\n  } catch {\n    aiproxyLogger.error(\"Could not create DeviceCheck token. Are you using an explicit bundle identifier?\")\n    return nil\n  }\n}\n\n/// Get a unique ID for this client\nprivate func getClientID() -> String? 
{\n  #if os(watchOS)\n  return WKInterfaceDevice.current().identifierForVendor?.uuidString\n  #elseif canImport(UIKit)\n  return UIDevice.current.identifierForVendor?.uuidString\n  #elseif canImport(IOKit)\n  return getIdentifierFromIOKit()\n  #else\n  return nil\n  #endif\n}\n\n    // MARK: IOKit conditional dependency\n\n/// These functions are used on macOS for creating a client identifier.\n/// Unfortunately, macOS does not have a straightforward helper like UIKit's `identifierForVendor`\n#if canImport(IOKit)\nprivate func getIdentifierFromIOKit() -> String? {\n  guard let macBytes = copy_mac_address() as? Data else {\n    return nil\n  }\n  let macHex = macBytes.map { String(format: \"%02X\", $0) }\n  return macHex.joined(separator: \":\")\n}\n\n/// This function is taken from the Apple sample code at:\n/// https://developer.apple.com/documentation/appstorereceipts/validating_receipts_on_the_device#3744656\nprivate func io_service(named name: String, wantBuiltIn: Bool) -> io_service_t? {\n  let default_port = kIOMainPortDefault\n  var iterator = io_iterator_t()\n  defer {\n    if iterator != IO_OBJECT_NULL {\n      IOObjectRelease(iterator)\n    }\n  }\n\n  guard\n    let matchingDict = IOBSDNameMatching(default_port, 0, name),\n    IOServiceGetMatchingServices(\n      default_port,\n      matchingDict as CFDictionary,\n      &iterator) == KERN_SUCCESS,\n    iterator != IO_OBJECT_NULL\n  else {\n    return nil\n  }\n\n  var candidate = IOIteratorNext(iterator)\n  while candidate != IO_OBJECT_NULL {\n    if\n      let cftype = IORegistryEntryCreateCFProperty(\n        candidate,\n        \"IOBuiltin\" as CFString,\n        kCFAllocatorDefault,\n        0)\n    {\n      let isBuiltIn = cftype.takeRetainedValue() as! 
CFBoolean\n      if wantBuiltIn == CFBooleanGetValue(isBuiltIn) {\n        return candidate\n      }\n    }\n\n    IOObjectRelease(candidate)\n    candidate = IOIteratorNext(iterator)\n  }\n\n  return nil\n}\n\n/// This function is taken from the Apple sample code at:\n/// https://developer.apple.com/documentation/appstorereceipts/validating_receipts_on_the_device#3744656\nprivate func copy_mac_address() -> CFData? {\n  // Prefer built-in network interfaces.\n  // For example, an external Ethernet adaptor can displace\n  // the built-in Wi-Fi as en0.\n  guard\n    let service = io_service(named: \"en0\", wantBuiltIn: true)\n    ?? io_service(named: \"en1\", wantBuiltIn: true)\n    ?? io_service(named: \"en0\", wantBuiltIn: false)\n  else { return nil }\n  defer { IOObjectRelease(service) }\n\n  if\n    let cftype = IORegistryEntrySearchCFProperty(\n      service,\n      kIOServicePlane,\n      \"IOMACAddress\" as CFString,\n      kCFAllocatorDefault,\n      IOOptionBits(kIORegistryIterateRecursively | kIORegistryIterateParents))\n  {\n    return (cftype as! CFData)\n  }\n\n  return nil\n}\n#endif\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Azure/AzureOpenAIAPI.swift",
    "content": "//\n//  AzureOpenAIAPI.swift\n//\n//\n//  Created by James Rochabrun on 1/23/24.\n//\n\nimport Foundation\n\n// MARK: - AzureOpenAIAPI\n\nenum AzureOpenAIAPI {\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/assistants-reference?tabs=python\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/assistant\n  case assistant(AssistantCategory)\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions\n  case chat(deploymentID: String)\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/assistants-reference-messages?tabs=python\n  case message(MessageCategory)\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/assistants-reference-runs?tabs=python\n  case run(RunCategory)\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/assistants-reference-runs?tabs=python#list-run-steps\n  case runStep(RunStepCategory)\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/assistants-reference-threads?tabs=python#create-a-thread\n  case thread(ThreadCategory)\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/file-search?tabs=python#vector-stores\n  case vectorStore(VectorStoreCategory)\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/file-search?tabs=python#vector-stores\n  case vectorStoreFile(VectorStoreFileCategory)\n\n  /// OpenAI's most advanced interface for generating model responses. Supports text and image inputs, and text outputs. Create stateful interactions with the model, using the output of previous responses as input. Extend the model's capabilities with built-in tools for file search, web search, computer use, and more. 
Allow the model access to external systems and data using function calling.\n  case response(ResponseCategory) // https://platform.openai.com/docs/api-reference/responses\n\n  enum AssistantCategory {\n    case create\n    case list\n    case retrieve(assistantID: String)\n    case modify(assistantID: String)\n    case delete(assistantID: String)\n  }\n\n  enum MessageCategory {\n    case create(threadID: String)\n    case retrieve(threadID: String, messageID: String)\n    case modify(threadID: String, messageID: String)\n    case delete(threadID: String, messageID: String)\n    case list(threadID: String)\n  }\n\n  enum RunCategory {\n    case create(threadID: String)\n    case retrieve(threadID: String, runID: String)\n    case modify(threadID: String, runID: String)\n    case list(threadID: String)\n    case cancel(threadID: String, runID: String)\n    case submitToolOutput(threadID: String, runID: String)\n    case createThreadAndRun\n  }\n\n  enum RunStepCategory {\n    case retrieve(threadID: String, runID: String, stepID: String)\n    case list(threadID: String, runID: String)\n  }\n\n  enum ThreadCategory {\n    case create\n    case retrieve(threadID: String)\n    case modify(threadID: String)\n    case delete(threadID: String)\n  }\n\n  enum VectorStoreCategory {\n    case create\n    case list\n    case retrieve(vectorStoreID: String)\n    case modify(vectorStoreID: String)\n    case delete(vectorStoreID: String)\n  }\n\n  /// https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/file-search?tabs=python#file-search-support\n  enum VectorStoreFileCategory {\n    case create(vectorStoreID: String)\n    case list(vectorStoreID: String)\n    case retrieve(vectorStoreID: String, fileID: String)\n    case delete(vectorStoreID: String, fileID: String)\n  }\n\n  enum ResponseCategory {\n    case create(deploymentID: String)\n    case retrieve(responseID: String)\n  }\n}\n\n// MARK: Endpoint\n\nextension AzureOpenAIAPI: Endpoint {\n  func path(in _: 
OpenAIEnvironment) -> String {\n    switch self {\n    case .chat(let deploymentID): \"/openai/deployments/\\(deploymentID)/chat/completions\"\n\n    case .assistant(let category):\n      switch category {\n      case .create, .list: \"/openai/assistants\"\n      case .retrieve(let assistantID), .modify(let assistantID), .delete(let assistantID): \"/openai/assistants/\\(assistantID)\"\n      }\n\n    case .message(let category):\n      switch category {\n      case .create(let threadID), .list(let threadID): \"/openai/threads/\\(threadID)/messages\"\n      case .retrieve(let threadID, let messageID), .modify(let threadID, let messageID),\n           .delete(let threadID, let messageID): \"/openai/threads/\\(threadID)/messages/\\(messageID)\"\n      }\n\n    case .run(let category):\n      switch category {\n      case .create(let threadID), .list(let threadID): \"/openai/threads/\\(threadID)/runs\"\n      case .retrieve(let threadID, let runID), .modify(let threadID, let runID): \"/openai/threads/\\(threadID)/runs/\\(runID)\"\n      case .cancel(let threadID, let runID): \"/openai/threads/\\(threadID)/runs/\\(runID)/cancel\"\n      case .submitToolOutput(let threadID, let runID): \"/openai/threads/\\(threadID)/runs/\\(runID)/submit_tool_outputs\"\n      case .createThreadAndRun: \"/openai/threads/runs\"\n      }\n\n    case .runStep(let category):\n      switch category {\n      case .retrieve(let threadID, let runID, let stepID): \"/openai/threads/\\(threadID)/runs/\\(runID)/steps/\\(stepID)\"\n      case .list(let threadID, let runID): \"/openai/threads/\\(threadID)/runs/\\(runID)/steps\"\n      }\n\n    case .thread(let category):\n      switch category {\n      case .create: \"/openai/threads\"\n      case .retrieve(let threadID), .modify(let threadID), .delete(let threadID): \"/openai/threads/\\(threadID)\"\n      }\n\n    case .vectorStore(let category):\n      switch category {\n      case .create, .list: \"/openai/vector_stores\"\n      case .retrieve(let 
vectorStoreID), .modify(let vectorStoreID),\n           .delete(let vectorStoreID): \"/openai/vector_stores/\\(vectorStoreID)\"\n      }\n\n    case .vectorStoreFile(let category):\n      switch category {\n      case .create(let vectorStoreID), .list(let vectorStoreID): \"/openai/vector_stores/\\(vectorStoreID)/files\"\n      case .retrieve(let vectorStoreID, let fileID),\n           .delete(let vectorStoreID, let fileID): \"/openai/vector_stores/\\(vectorStoreID)/files/\\(fileID)\"\n      }\n\n    case .response(let category):\n      switch category {\n      case .create(let deploymentID): \"/openai/deployments/\\(deploymentID)/responses\"\n      case .retrieve(let responseID): \"/openai/responses/\\(responseID)\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Azure/AzureOpenAIConfiguration.swift",
    "content": "//\n//  AzureOpenAIConfiguration.swift\n//\n//\n//  Created by James Rochabrun on 1/23/24.\n//\n\nimport Foundation\n\n/// [Reference](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference)\npublic struct AzureOpenAIConfiguration {\n  public init(\n    resourceName: String,\n    openAIAPIKey: Authorization,\n    apiVersion: String,\n    extraHeaders: [String: String]? = nil)\n  {\n    self.resourceName = resourceName\n    self.openAIAPIKey = openAIAPIKey\n    self.apiVersion = apiVersion\n    self.extraHeaders = extraHeaders\n  }\n\n  /// The name of your Azure OpenAI Resource.\n  let resourceName: String\n\n  /// The OpenAI API Key\n  let openAIAPIKey: Authorization\n\n  /// The API version to use for this operation. This follows the YYYY-MM-DD format.\n  let apiVersion: String\n\n  /// Azure configuration extra headers for a request.\n  let extraHeaders: [String: String]?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Azure/DefaultOpenAIAzureService.swift",
    "content": "//\n//  DefaultOpenAIAzureService.swift\n//\n//\n//  Created by James Rochabrun on 1/23/24.\n//\n\nimport Foundation\n#if os(Linux)\nimport FoundationNetworking\n#endif\n\n// MARK: - DefaultOpenAIAzureService\n\npublic final class DefaultOpenAIAzureService: OpenAIService {\n  public init(\n    azureConfiguration: AzureOpenAIConfiguration,\n    httpClient: HTTPClient,\n    decoder: JSONDecoder = .init(),\n    debugEnabled: Bool)\n  {\n    self.httpClient = httpClient\n    self.decoder = decoder\n    openAIEnvironment = OpenAIEnvironment(\n      baseURL: \"https://\\(azureConfiguration.resourceName).openai.azure.com\",\n      proxyPath: nil,\n      version: nil)\n    apiKey = azureConfiguration.openAIAPIKey\n    extraHeaders = azureConfiguration.extraHeaders\n    initialQueryItems = [.init(name: \"api-version\", value: azureConfiguration.apiVersion)]\n    self.debugEnabled = debugEnabled\n  }\n\n  public let httpClient: HTTPClient\n  public let decoder: JSONDecoder\n  public let openAIEnvironment: OpenAIEnvironment\n\n  public func createTranscription(parameters _: AudioTranscriptionParameters) async throws -> AudioObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createTranslation(parameters _: AudioTranslationParameters) async throws -> AudioObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createSpeech(parameters _: AudioSpeechParameters) async throws -> AudioSpeechObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  #if canImport(AVFoundation)\n  public func realtimeSession(\n    model _: String,\n    configuration _: OpenAIRealtimeSessionConfiguration)\n    async throws -> OpenAIRealtimeSession\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n  #endif\n\n  public func startChat(parameters: ChatCompletionParameters) async throws -> ChatCompletionObject {\n    var chatParameters = parameters\n    chatParameters.stream = false\n    let request = try AzureOpenAIAPI.chat(deploymentID: parameters.model).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: chatParameters,\n      queryItems: initialQueryItems)\n    return try await fetch(debugEnabled: debugEnabled, type: ChatCompletionObject.self, with: request)\n  }\n\n  public func startStreamedChat(parameters: ChatCompletionParameters) async throws\n    -> AsyncThrowingStream<ChatCompletionChunkObject, Error>\n  {\n    var chatParameters = parameters\n    chatParameters.stream = true\n    let request = try AzureOpenAIAPI.chat(deploymentID: parameters.model).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: chatParameters,\n      queryItems: initialQueryItems)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ChatCompletionChunkObject.self, with: request)\n  }\n\n  public func createEmbeddings(parameters _: EmbeddingParameter) async throws -> OpenAIResponse<EmbeddingObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createFineTuningJob(parameters _: FineTuningJobParameters) async throws -> FineTuningJobObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func listFineTuningJobs(after _: String?, limit _: Int?) async throws -> OpenAIResponse<FineTuningJobObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func retrieveFineTuningJob(id _: String) async throws -> FineTuningJobObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func cancelFineTuningJobWith(id _: String) async throws -> FineTuningJobObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func listFineTuningEventsForJobWith(\n    id _: String,\n    after _: String?,\n    limit _: Int?)\n    async throws -> OpenAIResponse<FineTuningJobEventObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func listFiles() async throws -> OpenAIResponse<FileObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func uploadFile(parameters _: FileParameters) async throws -> FileObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func deleteFileWith(id _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func retrieveFileWith(id _: String) async throws -> FileObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func retrieveContentForFileWith(id _: String) async throws -> [[String: Any]] {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func legacyCreateImages(parameters _: ImageCreateParameters) async throws -> OpenAIResponse<ImageObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func legacyEditImage(parameters _: ImageEditParameters) async throws -> OpenAIResponse<ImageObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func legacyCreateImageVariations(parameters _: ImageVariationParameters) async throws -> OpenAIResponse<ImageObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createImages(\n    parameters _: CreateImageParameters)\n    async throws -> CreateImageResponse\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func editImage(\n    parameters _: CreateImageEditParameters)\n    async throws -> CreateImageResponse\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createImageVariations(\n    parameters _: CreateImageVariationParameters)\n    async throws -> CreateImageResponse\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func listModels() async throws -> OpenAIResponse<ModelObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func retrieveModelWith(id _: String) async throws -> ModelObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func deleteFineTuneModelWith(id _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createModerationFromText(parameters _: ModerationParameter<String>) async throws -> ModerationObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createModerationFromTexts(parameters _: ModerationParameter<[String]>) async throws -> ModerationObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func createAssistant(parameters: AssistantParameters) async throws -> AssistantObject {\n    let request = try AzureOpenAIAPI.assistant(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  public func retrieveAssistant(id: String) async throws -> AssistantObject {\n    let request = try AzureOpenAIAPI.assistant(.retrieve(assistantID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  public func modifyAssistant(id: String, parameters: AssistantParameters) async throws -> AssistantObject {\n    let request = try AzureOpenAIAPI.assistant(.modify(assistantID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  public func deleteAssistant(id: String) async throws -> DeletionStatus {\n    let request = try AzureOpenAIAPI.assistant(.delete(assistantID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .delete,\n      
queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  public func listAssistants(\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<AssistantObject>\n  {\n    var queryItems: [URLQueryItem] = initialQueryItems\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try AzureOpenAIAPI.assistant(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<AssistantObject>.self, with: request)\n  }\n\n  public func createThread(parameters: CreateThreadParameters) async throws -> ThreadObject {\n    let request = try AzureOpenAIAPI.thread(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  public func retrieveThread(id: String) async throws -> ThreadObject {\n    let request = try AzureOpenAIAPI.thread(.retrieve(threadID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      
organizationID: nil,\n      method: .get,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  public func modifyThread(id: String, parameters: ModifyThreadParameters) async throws -> ThreadObject {\n    let request = try AzureOpenAIAPI.thread(.modify(threadID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  public func deleteThread(id: String) async throws -> DeletionStatus {\n    let request = try AzureOpenAIAPI.thread(.delete(threadID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .delete,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  public func createMessage(threadID: String, parameters: MessageParameter) async throws -> MessageObject {\n    let request = try AzureOpenAIAPI.message(.create(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  public func retrieveMessage(threadID: String, messageID: String) async throws -> MessageObject {\n    let request = try 
AzureOpenAIAPI.message(.retrieve(threadID: threadID, messageID: messageID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  public func modifyMessage(\n    threadID: String,\n    messageID: String,\n    parameters: ModifyMessageParameters)\n    async throws -> MessageObject\n  {\n    let request = try AzureOpenAIAPI.message(.modify(threadID: threadID, messageID: messageID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  public func deleteMessage(\n    threadID: String,\n    messageID: String)\n    async throws -> DeletionStatus\n  {\n    let request = try AzureOpenAIAPI.message(.delete(threadID: threadID, messageID: messageID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .delete,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  public func listMessages(\n    threadID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?,\n    runID: String?)\n    async throws -> OpenAIResponse<MessageObject>\n  {\n    var queryItems: [URLQueryItem] = initialQueryItems\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    
if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    if let runID {\n      queryItems.append(.init(name: \"run_id\", value: runID))\n    }\n    let request = try AzureOpenAIAPI.message(.list(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<MessageObject>.self, with: request)\n  }\n\n  public func createRun(threadID: String, parameters: RunParameter) async throws -> RunObject {\n    let request = try AzureOpenAIAPI.run(.create(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  public func retrieveRun(threadID: String, runID: String) async throws -> RunObject {\n    let request = try AzureOpenAIAPI.run(.retrieve(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  public func modifyRun(threadID: String, runID: String, parameters: ModifyRunParameters) async throws -> RunObject {\n    let request = try 
AzureOpenAIAPI.run(.modify(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  public func listRuns(\n    threadID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<RunObject>\n  {\n    var queryItems: [URLQueryItem] = initialQueryItems\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try AzureOpenAIAPI.run(.list(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<RunObject>.self, with: request)\n  }\n\n  public func cancelRun(threadID: String, runID: String) async throws -> RunObject {\n    let request = try AzureOpenAIAPI.run(.cancel(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  public 
func submitToolOutputsToRun(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> RunObject\n  {\n    let request = try AzureOpenAIAPI.run(.submitToolOutput(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  public func createThreadAndRun(parameters: CreateThreadAndRunParameter) async throws -> RunObject {\n    let request = try AzureOpenAIAPI.run(.createThreadAndRun).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  public func retrieveRunstep(threadID: String, runID: String, stepID: String) async throws -> RunStepObject {\n    let request = try AzureOpenAIAPI.runStep(.retrieve(threadID: threadID, runID: runID, stepID: stepID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunStepObject.self, with: request)\n  }\n\n  public func listRunSteps(\n    threadID: String,\n    runID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<RunStepObject>\n  {\n    var queryItems: [URLQueryItem] = initialQueryItems\n    if 
let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try AzureOpenAIAPI.runStep(.list(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<RunStepObject>.self, with: request)\n  }\n\n  public func createThreadAndRunStream(\n    parameters: CreateThreadAndRunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runParameters = parameters\n    runParameters.stream = true\n    let request = try AzureOpenAIAPI.run(.createThreadAndRun).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: runParameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  public func createRunStream(\n    threadID: String,\n    parameters: RunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runParameters = parameters\n    runParameters.stream = true\n    let request = try AzureOpenAIAPI.run(.create(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: runParameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: 
Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  public func submitToolOutputsToRunStream(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runToolsOutputParameter = parameters\n    runToolsOutputParameter.stream = true\n    let request = try AzureOpenAIAPI.run(.submitToolOutput(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: runToolsOutputParameter,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  // MARK: Batch\n\n  public func createBatch(\n    parameters _: BatchParameter)\n    async throws -> BatchObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func retrieveBatch(\n    id _: String)\n    async throws -> BatchObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func cancelBatch(\n    id _: String)\n    async throws -> BatchObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func listBatch(\n    after _: String?,\n    limit _: Int?)\n    async throws -> OpenAIResponse<BatchObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  // MARK: Vector Store\n\n  public func createVectorStore(\n    parameters: VectorStoreParameter)\n    async throws -> VectorStoreObject\n  {\n    let request = try AzureOpenAIAPI.vectorStore(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  public func listVectorStores(\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<VectorStoreObject>\n  {\n    var queryItems: [URLQueryItem] = initialQueryItems\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try AzureOpenAIAPI.vectorStore(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: 
debugEnabled, type: OpenAIResponse<VectorStoreObject>.self, with: request)\n  }\n\n  public func retrieveVectorStore(\n    id: String)\n    async throws -> VectorStoreObject\n  {\n    let request = try AzureOpenAIAPI.vectorStore(.retrieve(vectorStoreID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  public func modifyVectorStore(\n    parameters: VectorStoreParameter,\n    id: String)\n    async throws -> VectorStoreObject\n  {\n    let request = try AzureOpenAIAPI.vectorStore(.modify(vectorStoreID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  public func deleteVectorStore(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try AzureOpenAIAPI.vectorStore(.delete(vectorStoreID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .delete,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Vector Store Files\n\n  public func createVectorStoreFile(\n    vectorStoreID: String,\n    parameters: VectorStoreFileParameter)\n    async throws -> VectorStoreFileObject\n  {\n    let request = try AzureOpenAIAPI.vectorStoreFile(.create(vectorStoreID: 
vectorStoreID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: parameters,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileObject.self, with: request)\n  }\n\n  public func listVectorStoreFiles(\n    vectorStoreID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?,\n    filter: String?)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    var queryItems: [URLQueryItem] = initialQueryItems\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    if let filter {\n      queryItems.append(.init(name: \"filter\", value: filter))\n    }\n    let request = try AzureOpenAIAPI.vectorStoreFile(.list(vectorStoreID: vectorStoreID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<VectorStoreFileObject>.self, with: request)\n  }\n\n  public func retrieveVectorStoreFile(vectorStoreID: String, fileID: String) async throws -> VectorStoreFileObject {\n    let request = try AzureOpenAIAPI.vectorStoreFile(.retrieve(vectorStoreID: vectorStoreID, fileID: fileID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: 
initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileObject.self, with: request)\n  }\n\n  public func deleteVectorStoreFile(vectorStoreID: String, fileID: String) async throws -> DeletionStatus {\n    let request = try AzureOpenAIAPI.vectorStoreFile(.delete(vectorStoreID: vectorStoreID, fileID: fileID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .delete,\n      queryItems: initialQueryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  public func createVectorStoreFileBatch(\n    vectorStoreID _: String,\n    parameters _: VectorStoreFileBatchParameter)\n    async throws -> VectorStoreFileBatchObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func retrieveVectorStoreFileBatch(\n    vectorStoreID _: String,\n    batchID _: String)\n    async throws -> VectorStoreFileBatchObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func cancelVectorStoreFileBatch(vectorStoreID _: String, batchID _: String) async throws -> VectorStoreFileBatchObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  public func listVectorStoreFilesInABatch(\n    vectorStoreID _: String,\n    batchID _: String,\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?,\n    filter _: String?)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  // MARK: Response\n\n  public func responseCreate(\n    _ parameters: ModelResponseParameter)\n    async throws -> ResponseModel\n  {\n    var responseParameters = parameters\n    responseParameters.stream = false\n    let request = try AzureOpenAIAPI.response(.create(deploymentID: parameters.model)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: responseParameters,\n      queryItems: initialQueryItems)\n    return try await fetch(debugEnabled: debugEnabled, type: ResponseModel.self, with: request)\n  }\n\n  public func responseModel(\n    id: String,\n    parameters _: GetResponseParameter?)\n    async throws -> ResponseModel\n  {\n    let request = try AzureOpenAIAPI.response(.retrieve(responseID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .get,\n      queryItems: initialQueryItems)\n    return try await fetch(debugEnabled: debugEnabled, type: ResponseModel.self, with: request)\n  }\n\n  public func responseModelStream(\n    id _: String,\n    parameters _: GetResponseParameter?)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    fatalError(\"responseModelStream not implemented for Azure OpenAI Service\")\n  }\n\n  public func responseCreateStream(\n   
 _ parameters: ModelResponseParameter)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    var responseParameters = parameters\n    responseParameters.stream = true\n    let request = try AzureOpenAIAPI.response(.create(deploymentID: parameters.model)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: responseParameters,\n      queryItems: initialQueryItems)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ResponseStreamEvent.self, with: request)\n  }\n\n  public func responseDelete(\n    id _: String)\n    async throws -> DeletionStatus\n  {\n    fatalError(\"responseDelete not implemented for Azure OpenAI Service\")\n  }\n\n  public func responseCancel(\n    id _: String)\n    async throws -> ResponseModel\n  {\n    fatalError(\"responseCancel not implemented for Azure OpenAI Service\")\n  }\n\n  public func responseInputItems(\n    id _: String,\n    parameters _: GetInputItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\"responseInputItems not implemented for Azure OpenAI Service\")\n  }\n\n  // MARK: - Conversations\n\n  public func conversationCreate(\n    parameters _: CreateConversationParameter?)\n    async throws -> ConversationModel\n  {\n    fatalError(\"conversationCreate not implemented for Azure OpenAI Service\")\n  }\n\n  public func getConversation(\n    id _: String)\n    async throws -> ConversationModel\n  {\n    fatalError(\"getConversation not implemented for Azure OpenAI Service\")\n  }\n\n  public func updateConversation(\n    id _: String,\n    parameters _: UpdateConversationParameter)\n    async throws -> ConversationModel\n  {\n    fatalError(\"updateConversation not implemented for Azure OpenAI Service\")\n  }\n\n  public func deleteConversation(\n    id _: String)\n    async throws -> DeletionStatus\n  {\n    fatalError(\"deleteConversation not implemented for 
Azure OpenAI Service\")\n  }\n\n  public func getConversationItems(\n    id _: String,\n    parameters _: GetConversationItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\"getConversationItems not implemented for Azure OpenAI Service\")\n  }\n\n  public func createConversationItems(\n    id _: String,\n    parameters _: CreateConversationItemsParameter)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\"createConversationItems not implemented for Azure OpenAI Service\")\n  }\n\n  public func getConversationItem(\n    conversationID _: String,\n    itemID _: String,\n    parameters _: GetConversationItemParameter?)\n    async throws -> InputItem\n  {\n    fatalError(\"getConversationItem not implemented for Azure OpenAI Service\")\n  }\n\n  public func deleteConversationItem(\n    conversationID _: String,\n    itemID _: String)\n    async throws -> ConversationModel\n  {\n    fatalError(\"deleteConversationItem not implemented for Azure OpenAI Service\")\n  }\n\n  private static let assistantsBetaV2 = \"assistants=v2\"\n\n  private let apiKey: Authorization\n  private let initialQueryItems: [URLQueryItem]\n  /// Set this flag to TRUE if you need to print request events in DEBUG builds.\n  private let debugEnabled: Bool\n\n  /// Assistants API\n  private let extraHeaders: [String: String]?\n}\n"
  },
  {
    "path": "Sources/OpenAI/LocalModelService/LocalModelAPI.swift",
    "content": "//\n//  LocalModelAPI.swift\n//\n//\n//  Created by James Rochabrun on 6/30/24.\n//\n\nimport Foundation\n\n// MARK: - LocalModelAPI\n\nenum LocalModelAPI {\n  case chat\n}\n\n// MARK: Endpoint\n\nextension LocalModelAPI: Endpoint {\n  /// Builds the final path that includes:\n  ///   - optional proxy path (e.g. \"/my-proxy\")\n  ///   - version if non-nil (e.g. \"/v1\")\n  ///   - then the specific endpoint path (e.g. \"/assistants\")\n  func path(in openAIEnvironment: OpenAIEnvironment) -> String {\n    // 1) Potentially prepend proxy path if `proxyPath` is non-empty\n    let proxyPart =\n      if let envProxyPart = openAIEnvironment.proxyPath, !envProxyPart.isEmpty {\n        \"/\\(envProxyPart)\"\n      } else {\n        \"\"\n      }\n    let mainPart = openAIPath(in: openAIEnvironment)\n\n    return proxyPart + mainPart // e.g. \"/my-proxy/v1/assistants\"\n  }\n\n  func openAIPath(in openAIEnvironment: OpenAIEnvironment) -> String {\n    let version =\n      if let envOverrideVersion = openAIEnvironment.version, !envOverrideVersion.isEmpty {\n        \"/\\(envOverrideVersion)\"\n      } else {\n        \"\"\n      }\n    switch self {\n    case .chat: return \"\\(version)/chat/completions\"\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/LocalModelService/LocalModelService.swift",
    "content": "//\n//  LocalModelService.swift\n//\n//\n//  Created by James Rochabrun on 6/30/24.\n//\n\nimport Foundation\n\nstruct LocalModelService: OpenAIService {\n  init(\n    apiKey: Authorization = .apiKey(\"\"),\n    baseURL: String,\n    proxyPath: String? = nil,\n    overrideVersion: String? = nil,\n    httpClient: HTTPClient,\n    decoder: JSONDecoder = .init(),\n    debugEnabled: Bool)\n  {\n    self.httpClient = httpClient\n    self.decoder = decoder\n    self.apiKey = apiKey\n    openAIEnvironment = OpenAIEnvironment(baseURL: baseURL, proxyPath: proxyPath, version: overrideVersion ?? \"v1\")\n    self.debugEnabled = debugEnabled\n  }\n\n  let httpClient: HTTPClient\n  let decoder: JSONDecoder\n  let openAIEnvironment: OpenAIEnvironment\n\n  func createImages(\n    parameters _: CreateImageParameters)\n    async throws -> CreateImageResponse\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func editImage(\n    parameters _: CreateImageEditParameters)\n    async throws -> CreateImageResponse\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createImageVariations(\n    parameters _: CreateImageVariationParameters)\n    async throws -> CreateImageResponse\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createTranscription(parameters _: AudioTranscriptionParameters) async throws -> AudioObject {\n    fatalError(\n      \"Currently, this API is not supported. 
We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createTranslation(parameters _: AudioTranslationParameters) async throws -> AudioObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createSpeech(parameters _: AudioSpeechParameters) async throws -> AudioSpeechObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  #if canImport(AVFoundation)\n  func realtimeSession(\n    model _: String,\n    configuration _: OpenAIRealtimeSessionConfiguration)\n    async throws -> OpenAIRealtimeSession\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n  #endif\n\n  func startChat(\n    parameters: ChatCompletionParameters)\n    async throws -> ChatCompletionObject\n  {\n    var chatParameters = parameters\n    chatParameters.stream = false\n    let request = try LocalModelAPI.chat.request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: chatParameters)\n    return try await fetch(debugEnabled: debugEnabled, type: ChatCompletionObject.self, with: request)\n  }\n\n  func startStreamedChat(\n    parameters: ChatCompletionParameters)\n    async throws -> AsyncThrowingStream<ChatCompletionChunkObject, Error>\n  {\n    var chatParameters = parameters\n    chatParameters.stream = true\n    chatParameters.streamOptions = .init(includeUsage: true)\n    let request = try LocalModelAPI.chat.request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: nil,\n      method: .post,\n      params: chatParameters)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ChatCompletionChunkObject.self, with: request)\n  }\n\n  func createEmbeddings(parameters _: EmbeddingParameter) async throws -> OpenAIResponse<EmbeddingObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createFineTuningJob(parameters _: FineTuningJobParameters) async throws -> FineTuningJobObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listFineTuningJobs(after _: String?, limit _: Int?) 
async throws -> OpenAIResponse<FineTuningJobObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveFineTuningJob(id _: String) async throws -> FineTuningJobObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func cancelFineTuningJobWith(id _: String) async throws -> FineTuningJobObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listFineTuningEventsForJobWith(\n    id _: String,\n    after _: String?,\n    limit _: Int?)\n    async throws -> OpenAIResponse<FineTuningJobEventObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listFiles() async throws -> OpenAIResponse<FileObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func uploadFile(parameters _: FileParameters) async throws -> FileObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteFileWith(id _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveFileWith(id _: String) async throws -> FileObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveContentForFileWith(id _: String) async throws -> [[String: Any]] {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func legacyCreateImages(parameters _: ImageCreateParameters) async throws -> OpenAIResponse<ImageObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func legacyEditImage(parameters _: ImageEditParameters) async throws -> OpenAIResponse<ImageObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func legacyCreateImageVariations(parameters _: ImageVariationParameters) async throws -> OpenAIResponse<ImageObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listModels() async throws -> OpenAIResponse<ModelObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveModelWith(id _: String) async throws -> ModelObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteFineTuneModelWith(id _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createModerationFromText(parameters _: ModerationParameter<String>) async throws -> ModerationObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createModerationFromTexts(parameters _: ModerationParameter<[String]>) async throws -> ModerationObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createAssistant(parameters _: AssistantParameters) async throws -> AssistantObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveAssistant(id _: String) async throws -> AssistantObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func modifyAssistant(id _: String, parameters _: AssistantParameters) async throws -> AssistantObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteAssistant(id _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listAssistants(\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?)\n    async throws -> OpenAIResponse<AssistantObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createThread(parameters _: CreateThreadParameters) async throws -> ThreadObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveThread(id _: String) async throws -> ThreadObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func modifyThread(id _: String, parameters _: ModifyThreadParameters) async throws -> ThreadObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteThread(id _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createMessage(threadID _: String, parameters _: MessageParameter) async throws -> MessageObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveMessage(threadID _: String, messageID _: String) async throws -> MessageObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func modifyMessage(\n    threadID _: String,\n    messageID _: String,\n    parameters _: ModifyMessageParameters)\n    async throws -> MessageObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteMessage(threadID _: String, messageID _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. 
We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listMessages(\n    threadID _: String,\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?,\n    runID _: String?)\n    async throws -> OpenAIResponse<MessageObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createRun(threadID _: String, parameters _: RunParameter) async throws -> RunObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveRun(threadID _: String, runID _: String) async throws -> RunObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func modifyRun(threadID _: String, runID _: String, parameters _: ModifyRunParameters) async throws -> RunObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listRuns(\n    threadID _: String,\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?)\n    async throws -> OpenAIResponse<RunObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func cancelRun(threadID _: String, runID _: String) async throws -> RunObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func submitToolOutputsToRun(\n    threadID _: String,\n    runID _: String,\n    parameters _: RunToolsOutputParameter)\n    async throws -> RunObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createThreadAndRun(parameters _: CreateThreadAndRunParameter) async throws -> RunObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveRunstep(threadID _: String, runID _: String, stepID _: String) async throws -> RunStepObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listRunSteps(\n    threadID _: String,\n    runID _: String,\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?)\n    async throws -> OpenAIResponse<RunStepObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createThreadAndRunStream(parameters _: CreateThreadAndRunParameter) async throws\n    -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createRunStream(\n    threadID _: String,\n    parameters _: RunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func submitToolOutputsToRunStream(\n    threadID _: String,\n    runID _: String,\n    parameters _: RunToolsOutputParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createBatch(parameters _: BatchParameter) async throws -> BatchObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveBatch(id _: String) async throws -> BatchObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func cancelBatch(id _: String) async throws -> BatchObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listBatch(after _: String?, limit _: Int?) async throws -> OpenAIResponse<BatchObject> {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createVectorStore(parameters _: VectorStoreParameter) async throws -> VectorStoreObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listVectorStores(\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?)\n    async throws -> OpenAIResponse<VectorStoreObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveVectorStore(id _: String) async throws -> VectorStoreObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func modifyVectorStore(parameters _: VectorStoreParameter, id _: String) async throws -> VectorStoreObject {\n    fatalError(\n      \"Currently, this API is not supported. 
We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteVectorStore(id _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createVectorStoreFile(\n    vectorStoreID _: String,\n    parameters _: VectorStoreFileParameter)\n    async throws -> VectorStoreFileObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listVectorStoreFiles(\n    vectorStoreID _: String,\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?,\n    filter _: String?)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveVectorStoreFile(vectorStoreID _: String, fileID _: String) async throws -> VectorStoreFileObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteVectorStoreFile(vectorStoreID _: String, fileID _: String) async throws -> DeletionStatus {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createVectorStoreFileBatch(\n    vectorStoreID _: String,\n    parameters _: VectorStoreFileBatchParameter)\n    async throws -> VectorStoreFileBatchObject\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func retrieveVectorStoreFileBatch(vectorStoreID _: String, batchID _: String) async throws -> VectorStoreFileBatchObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func cancelVectorStoreFileBatch(vectorStoreID _: String, batchID _: String) async throws -> VectorStoreFileBatchObject {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func listVectorStoreFilesInABatch(\n    vectorStoreID _: String,\n    batchID _: String,\n    limit _: Int?,\n    order _: String?,\n    after _: String?,\n    before _: String?,\n    filter _: String?)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  // MARK: Response\n\n  func responseCreate(\n    _: ModelResponseParameter)\n    async throws -> ResponseModel\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func responseModel(\n    id _: String,\n    parameters _: GetResponseParameter?)\n    async throws -> ResponseModel\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func responseModelStream(\n    id _: String,\n    parameters _: GetResponseParameter?)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func responseCreateStream(\n    _: ModelResponseParameter)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func responseDelete(\n    id _: String)\n    async throws -> DeletionStatus\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func responseCancel(\n    id _: String)\n    async throws -> ResponseModel\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func responseInputItems(\n    id _: String,\n    parameters _: GetInputItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  // MARK: - Conversations\n\n  func conversationCreate(\n    parameters _: CreateConversationParameter?)\n    async throws -> ConversationModel\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func getConversation(\n    id _: String)\n    async throws -> ConversationModel\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func updateConversation(\n    id _: String,\n    parameters _: UpdateConversationParameter)\n    async throws -> ConversationModel\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteConversation(\n    id _: String)\n    async throws -> DeletionStatus\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. 
Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func getConversationItems(\n    id _: String,\n    parameters _: GetConversationItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func createConversationItems(\n    id _: String,\n    parameters _: CreateConversationItemsParameter)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func getConversationItem(\n    conversationID _: String,\n    itemID _: String,\n    parameters _: GetConversationItemParameter?)\n    async throws -> InputItem\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  func deleteConversationItem(\n    conversationID _: String,\n    itemID _: String)\n    async throws -> ConversationModel\n  {\n    fatalError(\n      \"Currently, this API is not supported. We welcome and encourage contributions to our open-source project. Please consider opening an issue or submitting a pull request to add support for this feature.\")\n  }\n\n  /// [authentication](https://platform.openai.com/docs/api-reference/authentication)\n  private let apiKey: Authorization\n  /// Set this flag to TRUE if you need to print request events in DEBUG builds.\n  private let debugEnabled: Bool\n}\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/AudioPCMPlayer.swift",
    "content": "//\n//  AudioPCMPlayer.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\nimport AVFoundation\nimport Foundation\nimport OSLog\n\nprivate let logger = Logger(subsystem: \"com.swiftopenai\", category: \"Audio\")\n\n// MARK: - AudioPCMPlayer\n\n/// # Warning\n/// The order that you initialize `AudioPCMPlayer()` and `MicrophonePCMSampleVendor()` matters, unfortunately.\n///\n/// The voice processing audio unit on iOS has a volume bug that is not present on macOS.\n/// The volume of playback depends on the initialization order of AVAudioEngine and the `kAudioUnitSubType_VoiceProcessingIO` Audio Unit.\n/// We use AudioEngine for playback in this file, and the voice processing audio unit in MicrophonePCMSampleVendor.\n///\n/// I find the best result to be initializing `AudioPCMPlayer()` first. Otherwise, the playback volume is too quiet on iOS.\n@RealtimeActor\nfinal class AudioPCMPlayer {\n\n  init(audioEngine: AVAudioEngine) async throws {\n    self.audioEngine = audioEngine\n    guard\n      let inputFormat = AVAudioFormat(\n        commonFormat: .pcmFormatInt16,\n        sampleRate: 24000,\n        channels: 1,\n        interleaved: true)\n    else {\n      throw AudioPCMPlayerError.couldNotConfigureAudioEngine(\n        \"Could not create input format for AudioPCMPlayer\")\n    }\n\n    guard\n      let playableFormat = AVAudioFormat(\n        commonFormat: .pcmFormatFloat32,\n        sampleRate: 24000,\n        channels: 1,\n        interleaved: true)\n    else {\n      throw AudioPCMPlayerError.couldNotConfigureAudioEngine(\n        \"Could not create playback format for AudioPCMPlayer\")\n    }\n\n    let node = AVAudioPlayerNode()\n\n    audioEngine.attach(node)\n    audioEngine.connect(node, to: audioEngine.outputNode, format: playableFormat)\n\n    playerNode = node\n    self.inputFormat = inputFormat\n    self.playableFormat = playableFormat\n  
}\n\n  deinit {\n    logger.debug(\"AudioPCMPlayer is being freed\")\n  }\n\n  public func playPCM16Audio(from base64String: String) {\n    guard let audioData = Data(base64Encoded: base64String) else {\n      logger.error(\"Could not decode base64 string for audio playback\")\n      return\n    }\n\n    var bufferList = AudioBufferList(\n      mNumberBuffers: 1,\n      mBuffers:\n      AudioBuffer(\n        mNumberChannels: 1,\n        mDataByteSize: UInt32(audioData.count),\n        mData: UnsafeMutableRawPointer(mutating: (audioData as NSData).bytes)))\n\n    guard\n      let inPCMBuf = AVAudioPCMBuffer(\n        pcmFormat: inputFormat,\n        bufferListNoCopy: &bufferList)\n    else {\n      logger.error(\"Could not create input buffer for audio playback\")\n      return\n    }\n\n    guard\n      let outPCMBuf = AVAudioPCMBuffer(\n        pcmFormat: playableFormat,\n        frameCapacity: AVAudioFrameCount(UInt32(audioData.count) * 2))\n    else {\n      logger.error(\"Could not create output buffer for audio playback\")\n      return\n    }\n\n    guard let converter = AVAudioConverter(from: inputFormat, to: playableFormat) else {\n      logger.error(\"Could not create audio converter needed to map from pcm16int to pcm32float\")\n      return\n    }\n\n    do {\n      try converter.convert(to: outPCMBuf, from: inPCMBuf)\n    } catch {\n      logger.error(\"Could not map from pcm16int to pcm32float: \\(error.localizedDescription)\")\n      return\n    }\n\n    if audioEngine.isRunning {\n      playerNode.scheduleBuffer(outPCMBuf, at: nil, options: [], completionHandler: { })\n      playerNode.play()\n    }\n  }\n\n  public func interruptPlayback() {\n    logger.debug(\"Interrupting playback\")\n    playerNode.stop()\n  }\n\n  let audioEngine: AVAudioEngine\n\n  private let inputFormat: AVAudioFormat\n  private let playableFormat: AVAudioFormat\n  private let playerNode: AVAudioPlayerNode\n\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/AudioPCMPlayerError.swift",
    "content": "//\n//  AudioPCMPlayerError.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\npublic enum AudioPCMPlayerError: LocalizedError, Sendable {\n  case couldNotConfigureAudioEngine(String)\n\n  public var errorDescription: String? {\n    switch self {\n    case .couldNotConfigureAudioEngine(let message):\n      message\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/AudioUtils.swift",
    "content": "//\n//  AudioUtils.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\nimport AVFoundation\nimport Foundation\nimport OSLog\n\n#if canImport(AudioToolbox)\nimport AudioToolbox\n#endif\n\nprivate let logger = Logger(subsystem: \"com.swiftopenai\", category: \"Audio\")\n\n// MARK: - AudioUtils\n\npublic enum AudioUtils {\n\n  nonisolated public static var headphonesConnected: Bool {\n    #if os(macOS)\n    return audioToolboxHeadphonesConnected()\n    #else\n    return audioSessionHeadphonesConnected()\n    #endif\n  }\n\n  /// Encodes an AVAudioPCMBuffer to base64 string for transmission to OpenAI\n  nonisolated public static func base64EncodeAudioPCMBuffer(from buffer: AVAudioPCMBuffer) -> String? {\n    guard buffer.format.channelCount == 1 else {\n      logger.error(\"This encoding routine assumes a single channel\")\n      return nil\n    }\n\n    guard let audioBufferPtr = buffer.audioBufferList.pointee.mBuffers.mData else {\n      logger.error(\"No audio buffer list available to encode\")\n      return nil\n    }\n\n    let audioBufferLength = Int(buffer.audioBufferList.pointee.mBuffers.mDataByteSize)\n    return Data(bytes: audioBufferPtr, count: audioBufferLength).base64EncodedString()\n  }\n\n  #if os(macOS)\n  nonisolated static func getDefaultAudioInputDevice() -> AudioDeviceID? 
{\n    var deviceID = AudioDeviceID()\n    var propSize = UInt32(MemoryLayout<AudioDeviceID>.size)\n    var address = AudioObjectPropertyAddress(\n      mSelector: kAudioHardwarePropertyDefaultInputDevice,\n      mScope: kAudioObjectPropertyScopeGlobal,\n      mElement: kAudioObjectPropertyElementMain)\n    let err = AudioObjectGetPropertyData(\n      AudioObjectID(kAudioObjectSystemObject),\n      &address,\n      0,\n      nil,\n      &propSize,\n      &deviceID)\n    guard err == noErr else {\n      logger.error(\"Could not query for default audio input device\")\n      return nil\n    }\n    return deviceID\n  }\n\n  nonisolated static func getAllAudioInputDevices() -> [AudioDeviceID] {\n    var propSize: UInt32 = 0\n    var address = AudioObjectPropertyAddress(\n      mSelector: kAudioHardwarePropertyDevices,\n      mScope: kAudioObjectPropertyScopeGlobal,\n      mElement: kAudioObjectPropertyElementMain)\n    var err = AudioObjectGetPropertyDataSize(\n      AudioObjectID(kAudioObjectSystemObject),\n      &address,\n      0,\n      nil,\n      &propSize)\n    guard err == noErr else {\n      logger.error(\"Could not set propSize, needed for querying all audio devices\")\n      return []\n    }\n\n    var devices = [AudioDeviceID](\n      repeating: 0,\n      count: Int(propSize / UInt32(MemoryLayout<AudioDeviceID>.size)))\n    err = AudioObjectGetPropertyData(\n      AudioObjectID(kAudioObjectSystemObject),\n      &address,\n      0,\n      nil,\n      &propSize,\n      &devices)\n    guard err == noErr else {\n      logger.error(\"Could not query for all audio devices\")\n      return []\n    }\n    return devices\n  }\n  #endif\n\n}\n\n#if !os(macOS)\nnonisolated private func audioSessionHeadphonesConnected() -> Bool {\n  let session = AVAudioSession.sharedInstance()\n  let outputs = session.currentRoute.outputs\n\n  for output in outputs {\n    if\n      output.portType == .headphones ||\n      output.portType == .bluetoothA2DP ||\n      output.portType == 
.bluetoothLE ||\n      output.portType == .bluetoothHFP\n    {\n      return true\n    }\n  }\n  return false\n}\n#endif\n\n#if os(macOS)\nnonisolated private func audioToolboxHeadphonesConnected() -> Bool {\n  for deviceID in AudioUtils.getAllAudioInputDevices() {\n    if isHeadphoneDevice(deviceID: deviceID), isDeviceAlive(deviceID: deviceID) {\n      return true\n    }\n  }\n  return false\n}\n\nnonisolated private func isHeadphoneDevice(deviceID: AudioDeviceID) -> Bool {\n  guard hasOutputStreams(deviceID: deviceID) else {\n    return false\n  }\n\n  let transportType = getTransportType(deviceID: deviceID)\n\n  if\n    [\n      kAudioDeviceTransportTypeBluetooth,\n      kAudioDeviceTransportTypeBluetoothLE,\n      kAudioDeviceTransportTypeUSB,\n    ].contains(transportType)\n  {\n    return true\n  }\n\n  // For built-in devices, we need to check the device name or UID\n  if transportType == kAudioDeviceTransportTypeBuiltIn {\n    return isBuiltInHeadphonePort(deviceID: deviceID)\n  }\n\n  return false\n}\n\nnonisolated private func getTransportType(deviceID: AudioDeviceID) -> UInt32 {\n  var transportType = UInt32(0)\n  var propSize = UInt32(MemoryLayout<UInt32>.size)\n  var address = AudioObjectPropertyAddress(\n    mSelector: kAudioDevicePropertyTransportType,\n    mScope: kAudioObjectPropertyScopeGlobal,\n    mElement: kAudioObjectPropertyElementMain)\n  let err = AudioObjectGetPropertyData(\n    deviceID,\n    &address,\n    0,\n    nil,\n    &propSize,\n    &transportType)\n  guard err == noErr else {\n    logger.error(\"Could not get transport type for audio device\")\n    return 0\n  }\n  return transportType\n}\n\nnonisolated private func isBuiltInHeadphonePort(deviceID: AudioDeviceID) -> Bool {\n  var deviceUID: CFString? 
= nil\n  var propSize = UInt32(MemoryLayout<CFString>.size)\n  var address = AudioObjectPropertyAddress(\n    mSelector: kAudioDevicePropertyDeviceUID,\n    mScope: kAudioObjectPropertyScopeGlobal,\n    mElement: kAudioObjectPropertyElementMain)\n\n  let err = withUnsafeMutablePointer(to: &deviceUID) { ptr -> OSStatus in\n    return AudioObjectGetPropertyData(\n      deviceID,\n      &address,\n      0,\n      nil,\n      &propSize,\n      ptr)\n  }\n\n  guard err == noErr, let uidString = deviceUID as? String else {\n    logger.error(\"Could not get mic's uidString from CFString\")\n    return false\n  }\n\n  return [\"headphone\", \"lineout\"].contains { uidString.lowercased().contains($0) }\n}\n\nnonisolated private func hasOutputStreams(deviceID: AudioDeviceID) -> Bool {\n  var address = AudioObjectPropertyAddress(\n    mSelector: kAudioDevicePropertyStreams,\n    mScope: kAudioObjectPropertyScopeOutput,\n    mElement: kAudioObjectPropertyElementMain)\n  var propSize: UInt32 = 0\n  let err = AudioObjectGetPropertyDataSize(\n    deviceID,\n    &address,\n    0,\n    nil,\n    &propSize)\n  guard err == noErr else {\n    logger.error(\"Could not check for output streams on audio device\")\n    return false\n  }\n  return propSize > 0\n}\n\nnonisolated private func isDeviceAlive(deviceID: AudioDeviceID) -> Bool {\n  var isAlive: UInt32 = 0\n  var propSize = UInt32(MemoryLayout<UInt32>.size)\n  var address = AudioObjectPropertyAddress(\n    mSelector: kAudioDevicePropertyDeviceIsAlive,\n    mScope: kAudioObjectPropertyScopeGlobal,\n    mElement: kAudioObjectPropertyElementMain)\n  let err = AudioObjectGetPropertyData(\n    deviceID,\n    &address,\n    0,\n    nil,\n    &propSize,\n    &isAlive)\n  guard err == noErr else {\n    logger.error(\"Could not check if the audio input is alive\")\n    return false\n  }\n  return isAlive != 0\n}\n#endif\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/MicrophonePCMSampleVendor.swift",
    "content": "//\n//  MicrophonePCMSampleVendor.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\nimport AVFoundation\n\n@RealtimeActor\nprotocol MicrophonePCMSampleVendor: AnyObject {\n  func start() throws -> AsyncStream<AVAudioPCMBuffer>\n  func stop()\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/MicrophonePCMSampleVendorAE.swift",
    "content": "//\n//  MicrophonePCMSampleVendorAE.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\n@preconcurrency import AVFoundation\nimport Foundation\nimport OSLog\n\nprivate let logger = Logger(subsystem: \"com.swiftopenai\", category: \"Audio\")\n\n// MARK: - MicrophonePCMSampleVendorAE\n\n/// This is an AVAudioEngine-based implementation that vends PCM16 microphone samples.\n///\n/// ## Requirements\n///\n/// - Assumes an `NSMicrophoneUsageDescription` description has been added to Target > Info\n/// - Assumes that microphone permissions have already been granted\n///\n/// ## Usage\n///\n///     ```\n///     let microphoneVendor = try MicrophonePCMSampleVendorAE(audioEngine: AVAudioEngine())\n///     let micStream = try microphoneVendor.start()\n///     Task {\n///         for await buffer in micStream {\n///             // Use buffer\n///         }\n///     }\n///     // ... some time later ...\n///     microphoneVendor.stop()\n///     ```\n///\n/// References:\n/// Apple sample code: https://developer.apple.com/documentation/avfaudio/using-voice-processing\n/// Apple technical note: https://developer.apple.com/documentation/technotes/tn3136-avaudioconverter-performing-sample-rate-conversions\n/// My apple forum question: https://developer.apple.com/forums/thread/771530\n@RealtimeActor\nclass MicrophonePCMSampleVendorAE: MicrophonePCMSampleVendor {\n  init(audioEngine: AVAudioEngine) throws {\n    self.audioEngine = audioEngine\n    inputNode = self.audioEngine.inputNode\n\n    if !AudioUtils.headphonesConnected {\n      try inputNode.setVoiceProcessingEnabled(true)\n    }\n\n    let debugText = \"\"\"\n      Using AudioEngine based PCM sample vendor.\n      The input node's input format is: \\(inputNode.inputFormat(forBus: 0))\n      The input node's output format is: \\(inputNode.outputFormat(forBus: 0))\n      \"\"\"\n    logger.debug(\"\\(debugText)\")\n  }\n\n  deinit {\n    
logger.debug(\"MicrophonePCMSampleVendorAE is being freed\")\n  }\n\n  func start() throws -> AsyncStream<AVAudioPCMBuffer> {\n    guard\n      let desiredTapFormat = AVAudioFormat(\n        commonFormat: .pcmFormatInt16,\n        sampleRate: inputNode.outputFormat(forBus: 0).sampleRate,\n        channels: 1,\n        interleaved: false)\n    else {\n      throw OpenAIError.audioConfigurationError(\"Could not create the desired tap format for realtime\")\n    }\n\n    // The buffer size argument specifies the target number of audio frames.\n    // For a single channel, a single audio frame has a single audio sample.\n    //\n    // Try to get 50ms updates.\n    // 50ms is half the granularity of our target accumulator (we accumulate into 100ms payloads that we send up to OpenAI)\n    //\n    // There is a note on the installTap documentation that says AudioEngine may\n    // adjust the bufferSize internally.\n    let targetBufferSize = UInt32(desiredTapFormat.sampleRate / 20) // 50ms buffers\n    logger.info(\"PCMSampleVendorAE target buffer size is: \\(targetBufferSize)\")\n\n    return AsyncStream<AVAudioPCMBuffer> { [weak self] continuation in\n      guard let this = self else { return }\n      this.continuation = continuation\n      this.installTapNonIsolated(\n        inputNode: this.inputNode,\n        bufferSize: targetBufferSize,\n        format: desiredTapFormat)\n    }\n  }\n\n  func stop() {\n    continuation?.finish()\n    continuation = nil\n    inputNode.removeTap(onBus: 0)\n    try? 
inputNode.setVoiceProcessingEnabled(false)\n    microphonePCMSampleVendorCommon.audioConverter = nil\n  }\n\n  private let audioEngine: AVAudioEngine\n  private let inputNode: AVAudioInputNode\n  private let microphonePCMSampleVendorCommon = MicrophonePCMSampleVendorCommon()\n  private var continuation: AsyncStream<AVAudioPCMBuffer>.Continuation?\n\n  private nonisolated func installTapNonIsolated(\n    inputNode: AVAudioInputNode,\n    bufferSize: AVAudioFrameCount,\n    format: AVAudioFormat)\n  {\n    inputNode.installTap(onBus: 0, bufferSize: bufferSize, format: format) { [weak self] sampleBuffer, _ in\n      guard let self else { return }\n      Task { await self.processBuffer(sampleBuffer) }\n    }\n  }\n\n  private func processBuffer(_ buffer: AVAudioPCMBuffer) {\n    if let accumulatedBuffer = microphonePCMSampleVendorCommon.resampleAndAccumulate(buffer) {\n      continuation?.yield(accumulatedBuffer)\n    }\n  }\n\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/MicrophonePCMSampleVendorAT.swift",
    "content": "//\n//  MicrophonePCMSampleVendorAT.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\n#if os(macOS) || os(iOS)\n\nimport AudioToolbox\n@preconcurrency import AVFoundation\nimport Foundation\nimport OSLog\n\nprivate let logger = Logger(subsystem: \"com.swiftopenai\", category: \"Audio\")\n\nnonisolated private let kVoiceProcessingInputSampleRate: Double = 44100\n\n/// This is an AudioToolbox-based implementation that vends PCM16 microphone samples at a\n/// sample rate that OpenAI's realtime models expect.\n///\n/// ## Requirements\n///\n/// - Assumes an `NSMicrophoneUsageDescription` description has been added to Target > Info\n/// - Assumes that microphone permissions have already been granted\n///\n/// ## Usage\n///\n///     ```\n///     let microphoneVendor = MicrophonePCMSampleVendorAT()\n///     let micStream = try microphoneVendor.start()\n///     Task {\n///         for await buffer in micStream {\n///             // Use buffer\n///         }\n///     }\n///     // ... 
some time later ...\n///     microphoneVendor.stop()\n///     ```\n@RealtimeActor\nclass MicrophonePCMSampleVendorAT: MicrophonePCMSampleVendor {\n\n  public init() { }\n\n  deinit {\n    logger.debug(\"MicrophonePCMSampleVendorAT is being freed\")\n  }\n\n  public func start() throws -> AsyncStream<AVAudioPCMBuffer> {\n    var desc = AudioComponentDescription(\n      componentType: kAudioUnitType_Output,\n      componentSubType: kAudioUnitSubType_VoiceProcessingIO,\n      componentManufacturer: kAudioUnitManufacturer_Apple,\n      componentFlags: 0,\n      componentFlagsMask: 0)\n\n    guard let component = AudioComponentFindNext(nil, &desc) else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not find an audio component with VoiceProcessingIO\")\n    }\n\n    AudioComponentInstanceNew(component, &audioUnit)\n    guard let audioUnit else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not instantiate an audio component with VoiceProcessingIO\")\n    }\n\n    var one: UInt32 = 1\n    var err = AudioUnitSetProperty(\n      audioUnit,\n      kAudioOutputUnitProperty_EnableIO,\n      kAudioUnitScope_Input,\n      1,\n      &one,\n      UInt32(MemoryLayout.size(ofValue: one)))\n\n    guard err == noErr else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not enable the input scope of the microphone bus\")\n    }\n\n    var zero: UInt32 = 0\n    err = AudioUnitSetProperty(\n      audioUnit,\n      kAudioOutputUnitProperty_EnableIO,\n      kAudioUnitScope_Output,\n      0,\n      &zero,\n      UInt32(MemoryLayout.size(ofValue: one)))\n\n    guard err == noErr else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not disable the output scope of the speaker bus\")\n    }\n\n    var hardwareASBD = AudioStreamBasicDescription()\n    var size = UInt32(MemoryLayout<AudioStreamBasicDescription>.size)\n    let 
_ = AudioUnitGetProperty(\n      audioUnit,\n      kAudioUnitProperty_StreamFormat,\n      kAudioUnitScope_Input,\n      1,\n      &hardwareASBD,\n      &size)\n    logger.debug(\"Hardware mic is natively at \\(hardwareASBD.mSampleRate) sample rate\")\n\n    var ioFormat = AudioStreamBasicDescription(\n      mSampleRate: kVoiceProcessingInputSampleRate,\n      mFormatID: kAudioFormatLinearPCM,\n      mFormatFlags: kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,\n      mBytesPerPacket: 2,\n      mFramesPerPacket: 1,\n      mBytesPerFrame: 2,\n      mChannelsPerFrame: 1,\n      mBitsPerChannel: 16,\n      mReserved: 0)\n\n    err = AudioUnitSetProperty(\n      audioUnit,\n      kAudioUnitProperty_StreamFormat,\n      kAudioUnitScope_Output,\n      1,\n      &ioFormat,\n      UInt32(MemoryLayout<AudioStreamBasicDescription>.size))\n    guard err == noErr else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not set ASBD on the output scope of the mic bus\")\n    }\n\n    #if os(macOS)\n    if let deviceID = AudioUtils.getDefaultAudioInputDevice() {\n      // Try to get 50ms updates.\n      // 50ms is half the granularity of our target accumulator (we accumulate into 100ms payloads that we send up to OpenAI)\n      var bufferSize = UInt32(kVoiceProcessingInputSampleRate / 20)\n      var propertyAddress = AudioObjectPropertyAddress(\n        mSelector: kAudioDevicePropertyBufferFrameSize,\n        mScope: kAudioDevicePropertyScopeInput,\n        mElement: kAudioObjectPropertyElementMain)\n\n      let size = UInt32(MemoryLayout.size(ofValue: bufferSize))\n      let status = AudioObjectSetPropertyData(\n        deviceID,\n        &propertyAddress,\n        0,\n        nil,\n        size,\n        &bufferSize)\n\n      if status != noErr {\n        logger.debug(\"Could not set desired buffer size\")\n      }\n    }\n    #endif\n\n    var inputCallbackStruct = AURenderCallbackStruct(\n      inputProc: 
audioRenderCallback,\n      inputProcRefCon: Unmanaged.passUnretained(self).toOpaque())\n    err = AudioUnitSetProperty(\n      audioUnit,\n      kAudioOutputUnitProperty_SetInputCallback,\n      kAudioUnitScope_Global,\n      1,\n      &inputCallbackStruct,\n      UInt32(MemoryLayout<AURenderCallbackStruct>.size))\n\n    guard err == noErr else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not set the render callback on the voice processing audio unit\")\n    }\n\n    guard err == noErr else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not configure auto gain control\")\n    }\n\n    err = AudioUnitInitialize(audioUnit)\n    guard err == noErr else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not initialize the audio unit\")\n    }\n\n    err = AudioOutputUnitStart(audioUnit)\n    guard err == noErr else {\n      throw MicrophonePCMSampleVendorError.couldNotConfigureAudioUnit(\n        \"Could not start the audio unit\")\n    }\n\n    return AsyncStream<AVAudioPCMBuffer> { [weak self] continuation in\n      self?.continuation = continuation\n    }\n  }\n\n  public func stop() {\n    continuation?.finish()\n    continuation = nil\n\n    // Capture the audio unit reference\n    if let au = audioUnit {\n      // Clear the property immediately on RealtimeActor\n      audioUnit = nil\n\n      // Dispatch the actual AudioToolbox cleanup to a background queue\n      // to avoid priority inversion with the audio render thread.\n      // Note: AudioUnit is a C pointer type (non-Sendable), but the AudioToolbox\n      // cleanup APIs are thread-safe and we've already cleared our reference.\n      DispatchQueue.global(qos: .utility).async {\n        AudioOutputUnitStop(au)\n        AudioUnitUninitialize(au)\n        AudioComponentInstanceDispose(au)\n      }\n    }\n\n    microphonePCMSampleVendorCommon.audioConverter = nil\n  }\n\n  
fileprivate func didReceiveRenderCallback(\n    _ ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,\n    _ inTimeStamp: UnsafePointer<AudioTimeStamp>,\n    _ inBusNumber: UInt32,\n    _ inNumberFrames: UInt32)\n  {\n    guard let audioUnit else {\n      logger.error(\"There is no audioUnit attached to the sample vendor. Render callback should not be called\")\n      return\n    }\n    var bufferList = AudioBufferList(\n      mNumberBuffers: 1,\n      mBuffers: AudioBuffer(\n        mNumberChannels: 1,\n        mDataByteSize: inNumberFrames * 2,\n        mData: UnsafeMutableRawPointer.allocate(\n          byteCount: Int(inNumberFrames) * 2,\n          alignment: MemoryLayout<Int16>.alignment)))\n\n    let status = AudioUnitRender(\n      audioUnit,\n      ioActionFlags,\n      inTimeStamp,\n      inBusNumber,\n      inNumberFrames,\n      &bufferList)\n\n    guard status == noErr else {\n      logger.error(\"Could not render voice processed audio data to bufferList\")\n      return\n    }\n\n    guard\n      let audioFormat = AVAudioFormat(\n        commonFormat: .pcmFormatInt16,\n        sampleRate: kVoiceProcessingInputSampleRate,\n        channels: 1,\n        interleaved: true)\n    else {\n      logger.error(\"Could not create audio format inside render callback.\")\n      return\n    }\n\n    if\n      let sampleBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, bufferListNoCopy: &bufferList),\n      let accumulatedBuffer = microphonePCMSampleVendorCommon.resampleAndAccumulate(sampleBuffer)\n    {\n      // If the buffer has accumulated to a sufficient level, give it back to the caller\n      Task { @RealtimeActor in\n        self.continuation?.yield(accumulatedBuffer)\n      }\n    }\n  }\n\n  private var audioUnit: AudioUnit?\n  private let microphonePCMSampleVendorCommon = MicrophonePCMSampleVendorCommon()\n  private var continuation: AsyncStream<AVAudioPCMBuffer>.Continuation?\n\n}\n\n/// This @RealtimeActor annotation is a 
lie.\n@RealtimeActor private let audioRenderCallback: AURenderCallback = {\n  inRefCon,\n  ioActionFlags,\n  inTimeStamp,\n  inBusNumber,\n  inNumberFrames,\n  _ in\n  let microphonePCMSampleVendor = Unmanaged<MicrophonePCMSampleVendorAT>\n    .fromOpaque(inRefCon)\n    .takeUnretainedValue()\n  microphonePCMSampleVendor.didReceiveRenderCallback(\n    ioActionFlags,\n    inTimeStamp,\n    inBusNumber,\n    inNumberFrames)\n  return noErr\n}\n#endif\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/MicrophonePCMSampleVendorCommon.swift",
    "content": "//\n//  MicrophonePCMSampleVendorCommon.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\n@preconcurrency import AVFoundation\nimport OSLog\n\nprivate let logger = Logger(subsystem: \"com.swiftopenai\", category: \"Audio\")\n\n// MARK: - MicrophonePCMSampleVendorCommon\n\n/// This class holds the resample-and-accumulate logic shared by the microphone sample vendor implementations.\n/// Please see MicrophonePCMSampleVendor.swift for the protocol that defines the user interface.\nnonisolated final class MicrophonePCMSampleVendorCommon {\n  var bufferAccumulator: AVAudioPCMBuffer?\n  var audioConverter: AVAudioConverter?\n\n  func resampleAndAccumulate(_ pcm16Buffer: AVAudioPCMBuffer) -> AVAudioPCMBuffer? {\n    if\n      let resampledBuffer = convertPCM16BufferToExpectedSampleRate(pcm16Buffer),\n      let accumulatedBuffer = accummulateAndVendIfFull(resampledBuffer)\n    {\n      return accumulatedBuffer\n    }\n    return nil\n  }\n\n  private func convertPCM16BufferToExpectedSampleRate(_ pcm16Buffer: AVAudioPCMBuffer) -> AVAudioPCMBuffer? 
{\n    guard\n      let audioFormat = AVAudioFormat(\n        commonFormat: .pcmFormatInt16,\n        sampleRate: 24000.0,\n        channels: 1,\n        interleaved: false)\n    else {\n      logger.error(\"Could not create target audio format\")\n      return nil\n    }\n\n    if audioConverter == nil {\n      audioConverter = AVAudioConverter(from: pcm16Buffer.format, to: audioFormat)\n    }\n\n    guard let converter = audioConverter else {\n      logger.error(\"There is no audio converter to use for PCM16 resampling\")\n      return nil\n    }\n\n    guard\n      let outputBuffer = AVAudioPCMBuffer(\n        pcmFormat: audioFormat,\n        frameCapacity: AVAudioFrameCount(audioFormat.sampleRate * 2.0))\n    else {\n      logger.error(\"Could not create output buffer for PCM16 resampling\")\n      return nil\n    }\n\n    // See the docstring on AVAudioConverterInputBlock in AVAudioConverter.h\n    //\n    // The block will keep getting invoked until either the frame capacity is\n    // reached or outStatus.pointee is set to `.noDataNow` or `.endStream`.\n    var error: NSError?\n    nonisolated(unsafe) var ptr: UInt32 = 0\n    let targetFrameLength = pcm16Buffer.frameLength\n    let _ = converter.convert(to: outputBuffer, error: &error) { numberOfFrames, outStatus in\n      guard\n        ptr < targetFrameLength,\n        let workingCopy = advancedPCMBuffer_noCopy(pcm16Buffer, offset: ptr)\n      else {\n        outStatus.pointee = .noDataNow\n        return nil\n      }\n      let amountToFill = min(numberOfFrames, targetFrameLength - ptr)\n      outStatus.pointee = .haveData\n      ptr += amountToFill\n      workingCopy.frameLength = amountToFill\n      return workingCopy\n    }\n\n    if let error {\n      logger.error(\"Error converting to expected sample rate: \\(error.localizedDescription)\")\n      return nil\n    }\n\n    return outputBuffer\n  }\n\n  /// The incoming buffer here must be guaranteed at 24kHz in PCM16Int format.\n  private func 
accummulateAndVendIfFull(_ buf: AVAudioPCMBuffer) -> AVAudioPCMBuffer? {\n    var returnBuffer: AVAudioPCMBuffer? = nil\n    let targetAccumulatorLength = 2400\n    if bufferAccumulator == nil {\n      bufferAccumulator = AVAudioPCMBuffer(pcmFormat: buf.format, frameCapacity: AVAudioFrameCount(targetAccumulatorLength * 2))\n    }\n    guard let accumulator = bufferAccumulator else { return nil }\n\n    let copyFrames = min(buf.frameLength, accumulator.frameCapacity - accumulator.frameLength)\n    let dst = accumulator.int16ChannelData![0].advanced(by: Int(accumulator.frameLength))\n    let src = buf.int16ChannelData![0]\n\n    dst.update(from: src, count: Int(copyFrames))\n    accumulator.frameLength += copyFrames\n    if accumulator.frameLength >= targetAccumulatorLength {\n      returnBuffer = accumulator\n      bufferAccumulator = nil\n    }\n    return returnBuffer\n  }\n}\n\nnonisolated private func advancedPCMBuffer_noCopy(_ originalBuffer: AVAudioPCMBuffer, offset: UInt32) -> AVAudioPCMBuffer? {\n  let audioBufferList = originalBuffer.mutableAudioBufferList\n  guard\n    audioBufferList.pointee.mNumberBuffers == 1,\n    audioBufferList.pointee.mBuffers.mNumberChannels == 1\n  else {\n    logger.error(\"Broken programmer assumption. Audio conversion depends on single channel PCM16 as input\")\n    return nil\n  }\n  guard let audioBufferData = audioBufferList.pointee.mBuffers.mData else {\n    logger.error(\"Could not get audio buffer data from the original PCM16 buffer\")\n    return nil\n  }\n  // advanced(by:) is O(1)\n  audioBufferList.pointee.mBuffers.mData = audioBufferData.advanced(\n    by: Int(offset) * MemoryLayout<UInt16>.size)\n  return AVAudioPCMBuffer(\n    pcmFormat: originalBuffer.format,\n    bufferListNoCopy: audioBufferList)\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Audio/MicrophonePCMSampleVendorError.swift",
    "content": "//\n//  MicrophonePCMSampleVendorError.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\npublic enum MicrophonePCMSampleVendorError: LocalizedError, Sendable {\n  case couldNotConfigureAudioUnit(String)\n\n  public var errorDescription: String? {\n    switch self {\n    case .couldNotConfigureAudioUnit(let message):\n      message\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Private/Networking/AsyncHTTPClientAdapter.swift",
    "content": "//\n//  AsyncHTTPClientAdapter.swift\n//  SwiftOpenAI\n//\n//  Created by Joe Fabisevich on 5/18/25.\n//\n\n#if os(Linux)\nimport AsyncHTTPClient\nimport Foundation\nimport NIOCore\nimport NIOFoundationCompat\nimport NIOHTTP1\n\n/// Adapter that implements HTTPClient protocol using AsyncHTTPClient\npublic class AsyncHTTPClientAdapter: HTTPClient {\n  /// Initializes a new AsyncHTTPClientAdapter with the provided AsyncHTTPClient\n  /// - Parameter client: The AsyncHTTPClient instance to use\n  public init(client: AsyncHTTPClient.HTTPClient) {\n    self.client = client\n  }\n\n  deinit {\n    shutdown()\n  }\n\n  /// Creates a new AsyncHTTPClientAdapter with a default configuration\n  /// - Returns: A new AsyncHTTPClientAdapter instance\n  public static func createDefault() -> AsyncHTTPClientAdapter {\n    let httpClient = AsyncHTTPClient.HTTPClient(\n      eventLoopGroupProvider: .singleton,\n      configuration: AsyncHTTPClient.HTTPClient.Configuration(\n        certificateVerification: .fullVerification,\n        timeout: .init(\n          connect: .seconds(30),\n          read: .seconds(30)),\n        backgroundActivityLogger: nil))\n    return AsyncHTTPClientAdapter(client: httpClient)\n  }\n\n  /// Fetches data for a given HTTP request\n  /// - Parameter request: The HTTP request to perform\n  /// - Returns: A tuple containing the data and HTTP response\n  public func data(for request: HTTPRequest) async throws -> (Data, HTTPResponse) {\n    let asyncHTTPClientRequest = try createAsyncHTTPClientRequest(from: request)\n\n    let response = try await client.execute(asyncHTTPClientRequest, deadline: .now() + .seconds(60))\n    let body = try await response.body.collect(upTo: 100 * 1024 * 1024) // 100 MB max\n\n    let data = Data(buffer: body)\n    let httpResponse = HTTPResponse(\n      statusCode: Int(response.status.code),\n      headers: convertHeaders(response.headers))\n\n    return (data, httpResponse)\n  }\n\n  /// Fetches a byte stream for 
a given HTTP request\n  /// - Parameter request: The HTTP request to perform\n  /// - Returns: A tuple containing the byte stream and HTTP response\n  public func bytes(for request: HTTPRequest) async throws -> (HTTPByteStream, HTTPResponse) {\n    let asyncHTTPClientRequest = try createAsyncHTTPClientRequest(from: request)\n\n    let response = try await client.execute(asyncHTTPClientRequest, deadline: .now() + .seconds(60))\n    let httpResponse = HTTPResponse(\n      statusCode: Int(response.status.code),\n      headers: convertHeaders(response.headers))\n\n    let stream = AsyncThrowingStream<String, Error> { continuation in\n      Task {\n        do {\n          for try await byteBuffer in response.body {\n            if let string = byteBuffer.getString(at: 0, length: byteBuffer.readableBytes) {\n              let lines = string.split(separator: \"\\n\", omittingEmptySubsequences: false)\n              for line in lines {\n                continuation.yield(String(line))\n              }\n            }\n          }\n          continuation.finish()\n        } catch {\n          continuation.finish(throwing: error)\n        }\n      }\n    }\n\n    return (.lines(stream), httpResponse)\n  }\n\n  /// Properly shutdown the HTTP client\n  public func shutdown() {\n    try? 
client.shutdown().wait()\n  }\n\n  /// The underlying AsyncHTTPClient instance\n  private let client: AsyncHTTPClient.HTTPClient\n\n  /// Converts our HTTPRequest to AsyncHTTPClient's Request\n  /// - Parameter request: Our HTTPRequest\n  /// - Returns: AsyncHTTPClient Request\n  private func createAsyncHTTPClientRequest(from request: HTTPRequest) throws -> HTTPClientRequest {\n    var asyncHTTPClientRequest = HTTPClientRequest(url: request.url.absoluteString)\n    asyncHTTPClientRequest.method = NIOHTTP1.HTTPMethod(rawValue: request.method.rawValue)\n\n    // Add headers\n    for (key, value) in request.headers {\n      asyncHTTPClientRequest.headers.add(name: key, value: value)\n    }\n\n    // Add body if present\n    if let body = request.body {\n      asyncHTTPClientRequest.body = .bytes(body)\n    }\n\n    return asyncHTTPClientRequest\n  }\n\n  /// Converts NIOHTTP1 headers to a dictionary\n  /// - Parameter headers: NIOHTTP1 HTTPHeaders\n  /// - Returns: Dictionary of header name-value pairs\n  private func convertHeaders(_ headers: HTTPHeaders) -> [String: String] {\n    var result = [String: String]()\n    for header in headers {\n      result[header.name] = header.value\n    }\n    return result\n  }\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Networking/Endpoint.swift",
    "content": "//\n//  Endpoint.swift\n//\n//\n//  Created by James Rochabrun on 10/11/23.\n//\n\nimport Foundation\n#if os(Linux)\nimport FoundationNetworking\n#endif\n\n// MARK: - HTTPMethod\n\npublic enum HTTPMethod: String {\n  case post = \"POST\"\n  case get = \"GET\"\n  case delete = \"DELETE\"\n}\n\n// MARK: - Endpoint\n\nprotocol Endpoint {\n  func path(\n    in openAIEnvironment: OpenAIEnvironment)\n    -> String\n}\n\n// MARK: Endpoint+Requests\n\nextension Endpoint {\n  func request(\n    apiKey: Authorization,\n    openAIEnvironment: OpenAIEnvironment,\n    organizationID: String?,\n    method: HTTPMethod,\n    params: Encodable? = nil,\n    queryItems: [URLQueryItem] = [],\n    betaHeaderField: String? = nil,\n    extraHeaders: [String: String]? = nil)\n    throws -> URLRequest\n  {\n    let finalPath = path(in: openAIEnvironment)\n    let components = urlComponents(base: openAIEnvironment.baseURL, path: finalPath, queryItems: queryItems)\n    guard let url = components.url else {\n      throw URLError(.badURL)\n    }\n    var request = URLRequest(url: url)\n    request.addValue(\"application/json\", forHTTPHeaderField: \"Content-Type\")\n    request.addValue(apiKey.value, forHTTPHeaderField: apiKey.headerField)\n    if let organizationID {\n      request.addValue(organizationID, forHTTPHeaderField: \"OpenAI-Organization\")\n    }\n    if let betaHeaderField {\n      request.addValue(betaHeaderField, forHTTPHeaderField: \"OpenAI-Beta\")\n    }\n    if let extraHeaders {\n      for header in extraHeaders {\n        request.addValue(header.value, forHTTPHeaderField: header.key)\n      }\n    }\n    request.httpMethod = method.rawValue\n    if let params {\n      request.httpBody = try JSONEncoder().encode(params)\n    }\n    return request\n  }\n\n  func multiPartRequest(\n    apiKey: Authorization,\n    openAIEnvironment: OpenAIEnvironment,\n    organizationID: String?,\n    method: HTTPMethod,\n    params: MultipartFormDataParameters,\n    
queryItems: [URLQueryItem] = [])\n    throws -> URLRequest\n  {\n    let finalPath = path(in: openAIEnvironment)\n    let components = urlComponents(base: openAIEnvironment.baseURL, path: finalPath, queryItems: queryItems)\n    guard let url = components.url else {\n      throw URLError(.badURL)\n    }\n    var request = URLRequest(url: url)\n    request.httpMethod = method.rawValue\n    let boundary = UUID().uuidString\n    request.addValue(apiKey.value, forHTTPHeaderField: apiKey.headerField)\n    if let organizationID {\n      request.addValue(organizationID, forHTTPHeaderField: \"OpenAI-Organization\")\n    }\n    request.addValue(\"multipart/form-data; boundary=\\(boundary)\", forHTTPHeaderField: \"Content-Type\")\n    request.httpBody = params.encode(boundary: boundary)\n    return request\n  }\n\n  private func urlComponents(\n    base: String,\n    path: String,\n    queryItems: [URLQueryItem])\n    -> URLComponents\n  {\n    guard var components = URLComponents(string: base) else {\n      fatalError(\"Invalid base URL: \\(base)\")\n    }\n    components.path = path\n    if !queryItems.isEmpty {\n      components.queryItems = queryItems\n    }\n    return components\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Private/Networking/HTTPClient.swift",
    "content": "import Foundation\n\n#if canImport(FoundationNetworking)\nimport FoundationNetworking\n#endif\n\n// MARK: - HTTPClient\n\n/// Protocol that abstracts HTTP client functionality\npublic protocol HTTPClient {\n  /// Fetches data for a given HTTP request\n  /// - Parameter request: The HTTP request to perform\n  /// - Returns: A tuple containing the data and HTTP response\n  func data(for request: HTTPRequest) async throws -> (Data, HTTPResponse)\n\n  /// Fetches a byte stream for a given HTTP request\n  /// - Parameter request: The HTTP request to perform\n  /// - Returns: A tuple containing the byte stream and HTTP response\n  func bytes(for request: HTTPRequest) async throws -> (HTTPByteStream, HTTPResponse)\n}\n\n// MARK: - HTTPRequest\n\n/// Represents an HTTP request with platform-agnostic properties\npublic struct HTTPRequest {\n  public init(url: URL, method: HTTPMethod, headers: [String: String], body: Data? = nil) {\n    self.url = url\n    self.method = method\n    self.headers = headers\n    self.body = body\n  }\n\n  /// Initializes an HTTPRequest from a URLRequest\n  /// - Parameter urlRequest: The URLRequest to convert\n  public init(from urlRequest: URLRequest) throws {\n    guard let url = urlRequest.url else {\n      throw URLError(.badURL)\n    }\n\n    guard\n      let httpMethodString = urlRequest.httpMethod,\n      let httpMethod = HTTPMethod(rawValue: httpMethodString)\n    else {\n      throw URLError(.unsupportedURL)\n    }\n\n    var headers = [String: String]()\n    if let allHTTPHeaderFields = urlRequest.allHTTPHeaderFields {\n      headers = allHTTPHeaderFields\n    }\n\n    self.url = url\n    method = httpMethod\n    self.headers = headers\n    body = urlRequest.httpBody\n  }\n\n  /// The URL for the request\n  var url: URL\n  /// The HTTP method for the request\n  var method: HTTPMethod\n  /// The HTTP headers for the request\n  var headers: [String: String]\n  /// The body of the request, if any\n  var body: 
Data?\n}\n\n// MARK: - HTTPResponse\n\n/// Represents an HTTP response with platform-agnostic properties\npublic struct HTTPResponse {\n  /// The HTTP status code of the response\n  var statusCode: Int\n  /// The HTTP headers in the response\n  var headers: [String: String]\n\n  public init(statusCode: Int, headers: [String: String]) {\n    self.statusCode = statusCode\n    self.headers = headers\n  }\n}\n\n// MARK: - HTTPByteStream\n\n/// Represents a stream of bytes or lines from an HTTP response\npublic enum HTTPByteStream {\n  /// A stream of bytes\n  case bytes(AsyncThrowingStream<UInt8, Error>)\n  /// A stream of lines (strings)\n  case lines(AsyncThrowingStream<String, Error>)\n}\n\n// MARK: - HTTPClientFactory\n\npublic enum HTTPClientFactory {\n  /// Creates a default HTTPClient implementation appropriate for the current platform\n  /// - Returns: URLSessionHTTPClientAdapter on Apple platforms, AsyncHTTPClientAdapter on Linux\n  public static func createDefault() -> HTTPClient {\n    #if os(Linux)\n    return AsyncHTTPClientAdapter.createDefault()\n    #else\n    return URLSessionHTTPClientAdapter()\n    #endif\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Private/Networking/MultipartFormDataBuilder.swift",
    "content": "//\n//  MultipartFormDataBuilder.swift\n//\n//\n//  Created by James Rochabrun on 10/11/23.\n//\n\nimport Foundation\n\n// MARK: - MultipartFormDataBuilder\n\nstruct MultipartFormDataBuilder {\n  let boundary: String\n  let entries: [MultipartFormDataEntry]\n\n  init(\n    boundary: String,\n    entries: [MultipartFormDataEntry])\n  {\n    self.boundary = boundary\n    self.entries = entries\n  }\n\n  func build() -> Data {\n    var httpData = entries\n      .map { $0.makeData(boundary: boundary) }\n      .reduce(Data(), +)\n    httpData.append(\"--\\(boundary)--\\r\\n\")\n    return httpData\n  }\n}\n\n// MARK: - MultipartFormDataEntry\n\nenum MultipartFormDataEntry {\n  case file(paramName: String, fileName: String?, fileData: Data, contentType: String)\n  case string(paramName: String, value: Any?)\n}\n\n// MARK: MultipartFormDataEntry+Data\n\nextension MultipartFormDataEntry {\n  func makeData(boundary: String) -> Data {\n    var body = Data()\n    switch self {\n    case .file(let paramName, let fileName, let fileData, let contentType):\n      body.append(\"--\\(boundary)\\r\\n\")\n      if let fileName {\n        body.append(\"Content-Disposition: form-data; name=\\\"\\(paramName)\\\"; filename=\\\"\\(fileName)\\\"\\r\\n\")\n      } else {\n        body.append(\"Content-Disposition: form-data; name=\\\"\\(paramName)\\\"\\r\\n\")\n      }\n      body.append(\"Content-Type: \\(contentType)\\r\\n\\r\\n\")\n      body.append(fileData)\n      body.append(\"\\r\\n\")\n\n    case .string(let paramName, let value):\n      if let value {\n        body.append(\"--\\(boundary)\\r\\n\")\n        body.append(\"Content-Disposition: form-data; name=\\\"\\(paramName)\\\"\\r\\n\\r\\n\")\n        body.append(\"\\(value)\\r\\n\")\n      }\n    }\n    return body\n  }\n}\n\nextension Data {\n  fileprivate mutating func append(_ string: String) {\n    let data = string.data(\n      using: String.Encoding.utf8,\n      allowLossyConversion: true)\n    
append(data!)\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Private/Networking/OpenAIAPI.swift",
    "content": "//\n//  OpenAIAPI.swift\n//\n//\n//  Created by James Rochabrun on 10/10/23.\n//\n\nimport Foundation\n\n// MARK: - OpenAIAPI\n\nenum OpenAIAPI {\n  case assistant(AssistantCategory) // https://platform.openai.com/docs/api-reference/assistants\n  case audio(AudioCategory) // https://platform.openai.com/docs/api-reference/audio\n  case chat /// https://platform.openai.com/docs/api-reference/chat\n  case embeddings // https://platform.openai.com/docs/api-reference/embeddings\n  case file(FileCategory) // https://platform.openai.com/docs/api-reference/files\n  case fineTuning(FineTuningCategory) // https://platform.openai.com/docs/api-reference/fine-tuning\n  case images(ImageCategory) // https://platform.openai.com/docs/api-reference/images\n  case message(MessageCategory) // https://platform.openai.com/docs/api-reference/messages\n  case model(ModelCategory) // https://platform.openai.com/docs/api-reference/models\n  case moderations // https://platform.openai.com/docs/api-reference/moderations\n  case run(RunCategory) // https://platform.openai.com/docs/api-reference/runs\n  case runStep(RunStepCategory) // https://platform.openai.com/docs/api-reference/runs/step-object\n  case thread(ThreadCategory) // https://platform.openai.com/docs/api-reference/threads\n  case batch(BatchCategory) // https://platform.openai.com/docs/api-reference/batch\n  case vectorStore(VectorStoreCategory) // https://platform.openai.com/docs/api-reference/vector-stores\n  case vectorStoreFile(VectorStoreFileCategory) // https://platform.openai.com/docs/api-reference/vector-stores-files\n  case vectorStoreFileBatch(VectorStoreFileBatch) // https://platform.openai.com/docs/api-reference/vector-stores-file-batches\n\n  /// OpenAI's most advanced interface for generating model responses. Supports text and image inputs, and text outputs. Create stateful interactions with the model, using the output of previous responses as input. 
Extend the model's capabilities with built-in tools for file search, web search, computer use, and more. Allow the model access to external systems and data using function calling.\n  case response(ResponseCategory) // https://platform.openai.com/docs/api-reference/responses\n\n  /// Conversations\n  /// Create and manage conversations to store and retrieve conversation state across Response API calls.\n  case conversantions(ConversationCategory) // https://platform.openai.com/docs/api-reference/conversations\n\n  enum AssistantCategory {\n    case create\n    case list\n    case retrieve(assistantID: String)\n    case modify(assistantID: String)\n    case delete(assistantID: String)\n  }\n\n  enum AudioCategory: String {\n    case transcriptions\n    case translations\n    case speech\n  }\n\n  enum FileCategory {\n    case list\n    case upload\n    case delete(fileID: String)\n    case retrieve(fileID: String)\n    case retrieveFileContent(fileID: String)\n  }\n\n  enum FineTuningCategory {\n    case create\n    case list\n    case retrieve(jobID: String)\n    case cancel(jobID: String)\n    case events(jobID: String)\n  }\n\n  enum ImageCategory: String {\n    case generations\n    case edits\n    case variations\n  }\n\n  enum MessageCategory {\n    case create(threadID: String)\n    case retrieve(threadID: String, messageID: String)\n    case modify(threadID: String, messageID: String)\n    case delete(threadID: String, messageID: String)\n    case list(threadID: String)\n  }\n\n  enum ModelCategory {\n    case list\n    case retrieve(modelID: String)\n    case deleteFineTuneModel(modelID: String)\n  }\n\n  enum RunCategory {\n    case create(threadID: String)\n    case retrieve(threadID: String, runID: String)\n    case modify(threadID: String, runID: String)\n    case list(threadID: String)\n    case cancel(threadID: String, runID: String)\n    case submitToolOutput(threadID: String, runID: String)\n    case createThreadAndRun\n  }\n\n  enum RunStepCategory 
{\n    case retrieve(threadID: String, runID: String, stepID: String)\n    case list(threadID: String, runID: String)\n  }\n\n  enum ThreadCategory {\n    case create\n    case retrieve(threadID: String)\n    case modify(threadID: String)\n    case delete(threadID: String)\n  }\n\n  enum BatchCategory {\n    case create\n    case retrieve(batchID: String)\n    case cancel(batchID: String)\n    case list\n  }\n\n  enum VectorStoreCategory {\n    case create\n    case list\n    case retrieve(vectorStoreID: String)\n    case modify(vectorStoreID: String)\n    case delete(vectorStoreID: String)\n  }\n\n  enum VectorStoreFileCategory {\n    case create(vectorStoreID: String)\n    case list(vectorStoreID: String)\n    case retrieve(vectorStoreID: String, fileID: String)\n    case delete(vectorStoreID: String, fileID: String)\n  }\n\n  enum VectorStoreFileBatch {\n    case create(vectorStoreID: String)\n    case retrieve(vectorStoreID: String, batchID: String)\n    case cancel(vectorStoreID: String, batchID: String)\n    case list(vectorStoreID: String, batchID: String)\n  }\n\n  enum ResponseCategory {\n    case create\n    case get(responseID: String)\n    case delete(responseID: String)\n    case cancel(responseID: String)\n    case inputItems(responseID: String)\n  }\n\n  enum ConversationCategory {\n    case create\n    case get(conversationID: String)\n    case update(conversationID: String)\n    case delete(conversationID: String)\n    case items(conversationID: String)\n    case createItems(conversationID: String)\n    case item(conversationID: String, itemID: String)\n    case deleteItem(conversationID: String, itemID: String)\n  }\n}\n\n// MARK: Endpoint\n\nextension OpenAIAPI: Endpoint {\n  /// Builds the final path that includes:\n  ///\n  ///   - optional proxy path (e.g. \"/my-proxy\")\n  ///   - version if non-nil (e.g. \"/v1\")\n  ///   - then the specific endpoint path (e.g. 
\"/assistants\")\n  func path(in openAIEnvironment: OpenAIEnvironment) -> String {\n    // 1) Potentially prepend proxy path if `proxyPath` is non-empty\n    let proxyPart =\n      if let envProxyPart = openAIEnvironment.proxyPath, !envProxyPart.isEmpty {\n        \"/\\(envProxyPart)\"\n      } else {\n        \"\"\n      }\n    let mainPart = openAIPath(in: openAIEnvironment)\n\n    return proxyPart + mainPart // e.g. \"/my-proxy/v1/assistants\"\n  }\n\n  func openAIPath(in openAIEnvironment: OpenAIEnvironment) -> String {\n    let version =\n      if let envOverrideVersion = openAIEnvironment.version, !envOverrideVersion.isEmpty {\n        \"/\\(envOverrideVersion)\"\n      } else {\n        \"\"\n      }\n\n    switch self {\n    case .assistant(let category):\n      switch category {\n      case .create, .list: return \"\\(version)/assistants\"\n      case .retrieve(let assistantID), .modify(let assistantID),\n           .delete(let assistantID): return \"\\(version)/assistants/\\(assistantID)\"\n      }\n\n    case .audio(let category): return \"\\(version)/audio/\\(category.rawValue)\"\n\n    case .batch(let category):\n      switch category {\n      case .create, .list: return \"\\(version)/batches\"\n      case .retrieve(let batchID): return \"\\(version)/batches/\\(batchID)\"\n      case .cancel(let batchID): return \"\\(version)/batches/\\(batchID)/cancel\"\n      }\n\n    case .chat: return \"\\(version)/chat/completions\"\n\n    case .embeddings: return \"\\(version)/embeddings\"\n\n    case .file(let category):\n      switch category {\n      case .list, .upload: return \"\\(version)/files\"\n      case .delete(let fileID), .retrieve(let fileID): return \"\\(version)/files/\\(fileID)\"\n      case .retrieveFileContent(let fileID): return \"\\(version)/files/\\(fileID)/content\"\n      }\n\n    case .fineTuning(let category):\n      switch category {\n      case .create, .list: return \"\\(version)/fine_tuning/jobs\"\n      case .retrieve(let jobID): 
return \"\\(version)/fine_tuning/jobs/\\(jobID)\"\n      case .cancel(let jobID): return \"\\(version)/fine_tuning/jobs/\\(jobID)/cancel\"\n      case .events(let jobID): return \"\\(version)/fine_tuning/jobs/\\(jobID)/events\"\n      }\n\n    case .images(let category): return \"\\(version)/images/\\(category.rawValue)\"\n\n    case .message(let category):\n      switch category {\n      case .create(let threadID), .list(let threadID): return \"\\(version)/threads/\\(threadID)/messages\"\n      case .retrieve(let threadID, let messageID), .modify(let threadID, let messageID),\n           .delete(let threadID, let messageID): return \"\\(version)/threads/\\(threadID)/messages/\\(messageID)\"\n      }\n\n    case .model(let category):\n      switch category {\n      case .list: return \"\\(version)/models\"\n      case .retrieve(let modelID), .deleteFineTuneModel(let modelID): return \"\\(version)/models/\\(modelID)\"\n      }\n\n    case .moderations: return \"\\(version)/moderations\"\n\n    case .run(let category):\n      switch category {\n      case .create(let threadID), .list(let threadID): return \"\\(version)/threads/\\(threadID)/runs\"\n      case .retrieve(let threadID, let runID),\n           .modify(let threadID, let runID): return \"\\(version)/threads/\\(threadID)/runs/\\(runID)\"\n      case .cancel(let threadID, let runID): return \"\\(version)/threads/\\(threadID)/runs/\\(runID)/cancel\"\n      case .submitToolOutput(let threadID, let runID): return \"\\(version)/threads/\\(threadID)/runs/\\(runID)/submit_tool_outputs\"\n      case .createThreadAndRun: return \"\\(version)/threads/runs\"\n      }\n\n    case .runStep(let category):\n      switch category {\n      case .retrieve(let threadID, let runID, let stepID): return \"\\(version)/threads/\\(threadID)/runs/\\(runID)/steps/\\(stepID)\"\n      case .list(let threadID, let runID): return \"\\(version)/threads/\\(threadID)/runs/\\(runID)/steps\"\n      }\n\n    case .thread(let category):\n      
switch category {\n      case .create: return \"\\(version)/threads\"\n      case .retrieve(let threadID), .modify(let threadID), .delete(let threadID): return \"\\(version)/threads/\\(threadID)\"\n      }\n\n    case .vectorStore(let category):\n      switch category {\n      case .create, .list: return \"\\(version)/vector_stores\"\n      case .retrieve(let vectorStoreID), .modify(let vectorStoreID),\n           .delete(let vectorStoreID): return \"\\(version)/vector_stores/\\(vectorStoreID)\"\n      }\n\n    case .vectorStoreFile(let category):\n      switch category {\n      case .create(let vectorStoreID), .list(let vectorStoreID): return \"\\(version)/vector_stores/\\(vectorStoreID)/files\"\n      case .retrieve(let vectorStoreID, let fileID),\n           .delete(let vectorStoreID, let fileID): return \"\\(version)/vector_stores/\\(vectorStoreID)/files/\\(fileID)\"\n      }\n\n    case .vectorStoreFileBatch(let category):\n      switch category {\n      case .create(let vectorStoreID): return \"\\(version)/vector_stores/\\(vectorStoreID)/file_batches\"\n      case .retrieve(let vectorStoreID, let batchID): return \"\\(version)/vector_stores/\\(vectorStoreID)/file_batches/\\(batchID)\"\n      case .cancel(\n        let vectorStoreID,\n        let batchID): return \"\\(version)/vector_stores/\\(vectorStoreID)/file_batches/\\(batchID)/cancel\"\n      case .list(let vectorStoreID, let batchID): return \"\\(version)/vector_stores/\\(vectorStoreID)/file_batches/\\(batchID)/files\"\n      }\n\n    case .response(let category):\n      switch category {\n      case .create: return \"\\(version)/responses\"\n      case .get(let responseID): return \"\\(version)/responses/\\(responseID)\"\n      case .delete(let responseID): return \"\\(version)/responses/\\(responseID)\"\n      case .cancel(let responseID): return \"\\(version)/responses/\\(responseID)/cancel\"\n      case .inputItems(let responseID): return \"\\(version)/responses/\\(responseID)/input_items\"\n      
}\n\n    case .conversantions(let category):\n      switch category {\n      case .create: return \"\\(version)/conversations\"\n      case .get(let conversationID): return \"\\(version)/conversations/\\(conversationID)\"\n      case .update(let conversationID): return \"\\(version)/conversations/\\(conversationID)\"\n      case .delete(let conversationID): return \"\\(version)/conversations/\\(conversationID)\"\n      case .items(let conversationID): return \"\\(version)/conversations/\\(conversationID)/items\"\n      case .createItems(let conversationID): return \"\\(version)/conversations/\\(conversationID)/items\"\n      case .item(let conversationID, let itemID): return \"\\(version)/conversations/\\(conversationID)/items/\\(itemID)\"\n      case .deleteItem(let conversationID, let itemID): return \"\\(version)/conversations/\\(conversationID)/items/\\(itemID)\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Private/Networking/URLSessionHTTPClientAdapter.swift",
    "content": "import Foundation\n\n#if canImport(FoundationNetworking)\nimport FoundationNetworking\n#endif\n\n#if !os(Linux)\n/// Adapter that implements HTTPClient protocol using URLSession\npublic class URLSessionHTTPClientAdapter: HTTPClient {\n  /// Initializes a new URLSessionHTTPClientAdapter with the provided URLSession\n  /// - Parameter urlSession: The URLSession instance to use. Defaults to `URLSession.shared`.\n  public init(urlSession: URLSession = .shared) {\n    self.urlSession = urlSession\n  }\n\n  /// Fetches data for a given HTTP request\n  /// - Parameter request: The HTTP request to perform\n  /// - Returns: A tuple containing the data and HTTP response\n  public func data(for request: HTTPRequest) async throws -> (Data, HTTPResponse) {\n    let urlRequest = try createURLRequest(from: request)\n\n    let (data, urlResponse) = try await urlSession.data(for: urlRequest)\n\n    guard let httpURLResponse = urlResponse as? HTTPURLResponse else {\n      throw URLError(.badServerResponse) // Or a custom error\n    }\n\n    let response = HTTPResponse(\n      statusCode: httpURLResponse.statusCode,\n      headers: convertHeaders(httpURLResponse.allHeaderFields))\n\n    return (data, response)\n  }\n\n  /// Fetches a byte stream for a given HTTP request\n  /// - Parameter request: The HTTP request to perform\n  /// - Returns: A tuple containing the byte stream and HTTP response\n  public func bytes(for request: HTTPRequest) async throws -> (HTTPByteStream, HTTPResponse) {\n    let urlRequest = try createURLRequest(from: request)\n\n    let (asyncBytes, urlResponse) = try await urlSession.bytes(for: urlRequest)\n\n    guard let httpURLResponse = urlResponse as? 
HTTPURLResponse else {\n      throw URLError(.badServerResponse) // Or a custom error\n    }\n\n    let response = HTTPResponse(\n      statusCode: httpURLResponse.statusCode,\n      headers: convertHeaders(httpURLResponse.allHeaderFields))\n\n    let stream = AsyncThrowingStream<String, Error> { continuation in\n      Task {\n        do {\n          for try await line in asyncBytes.lines {\n            continuation.yield(line)\n          }\n          continuation.finish()\n        } catch {\n          continuation.finish(throwing: error)\n        }\n      }\n    }\n\n    return (.lines(stream), response)\n  }\n\n  private let urlSession: URLSession\n\n  /// Converts our HTTPRequest to URLRequest\n  /// - Parameter request: Our HTTPRequest\n  /// - Returns: URLRequest\n  private func createURLRequest(from request: HTTPRequest) throws -> URLRequest {\n    var urlRequest = URLRequest(url: request.url)\n    urlRequest.httpMethod = request.method.rawValue\n\n    for (key, value) in request.headers {\n      urlRequest.setValue(value, forHTTPHeaderField: key)\n    }\n\n    urlRequest.httpBody = request.body\n\n    return urlRequest\n  }\n\n  /// Converts HTTPURLResponse headers to a dictionary [String: String]\n  /// - Parameter headers: The headers from HTTPURLResponse (i.e. `allHeaderFields`)\n  /// - Returns: Dictionary of header name-value pairs\n  private func convertHeaders(_ headers: [AnyHashable: Any]) -> [String: String] {\n    var result = [String: String]()\n    for (key, value) in headers {\n      if let keyString = key as? String, let valueString = value as? String {\n        result[keyString] = valueString\n      }\n    }\n    return result\n  }\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Realtime/OpenAIRealtimeSession.swift",
    "content": "//\n//  OpenAIRealtimeSession.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\nimport AVFoundation\nimport Foundation\nimport OSLog\n\nnonisolated private let kWebsocketDisconnectedErrorCode = 57\nnonisolated private let kWebsocketDisconnectedEarlyThreshold: TimeInterval = 3\n\n// MARK: - OpenAIRealtimeSession\n\n@RealtimeActor\nopen class OpenAIRealtimeSession {\n  nonisolated init(\n    webSocketTask: URLSessionWebSocketTask,\n    sessionConfiguration: OpenAIRealtimeSessionConfiguration)\n  {\n    self.webSocketTask = webSocketTask\n    self.sessionConfiguration = sessionConfiguration\n\n    Task { @RealtimeActor in\n      await self.sendMessage(OpenAIRealtimeSessionUpdate(session: self.sessionConfiguration))\n    }\n    self.webSocketTask.resume()\n    receiveMessage()\n  }\n\n  deinit {\n    logger.debug(\"OpenAIRealtimeSession is being freed\")\n  }\n\n  /// Messages sent from OpenAI are published on this receiver as they arrive\n  public var receiver: AsyncStream<OpenAIRealtimeMessage> {\n    AsyncStream { continuation in\n      self.continuation = continuation\n    }\n  }\n\n  /// Sends a message through the websocket connection\n  public func sendMessage(_ encodable: Encodable) async {\n    guard !isTearingDown else {\n      logger.debug(\"Ignoring ws sendMessage. 
The RT session is tearing down.\")\n      return\n    }\n    do {\n      let encoder = JSONEncoder()\n      encoder.outputFormatting = [.sortedKeys]\n      let data = try encoder.encode(encodable)\n      guard let str = String(data: data, encoding: .utf8) else {\n        logger.error(\"Could not get utf8 string representation of data\")\n        return\n      }\n      let wsMessage = URLSessionWebSocketTask.Message.string(str)\n      try await webSocketTask.send(wsMessage)\n    } catch {\n      logger.error(\"Could not send message to OpenAI: \\(error.localizedDescription)\")\n    }\n  }\n\n  /// Close the websocket connection\n  public func disconnect() {\n    isTearingDown = true\n    continuation?.finish()\n    continuation = nil\n    webSocketTask.cancel()\n  }\n\n  let sessionConfiguration: OpenAIRealtimeSessionConfiguration\n\n  private var isTearingDown = false\n  private let webSocketTask: URLSessionWebSocketTask\n  private var continuation: AsyncStream<OpenAIRealtimeMessage>.Continuation?\n  private let setupTime = Date()\n  private let logger = Logger(subsystem: \"com.swiftopenai\", category: \"Realtime\")\n\n  /// Tells the websocket task to receive a new message\n  nonisolated private func receiveMessage() {\n    webSocketTask.receive { result in\n      switch result {\n      case .failure(let error as NSError):\n        Task { @RealtimeActor in\n          await self.didReceiveWebSocketError(error)\n        }\n\n      case .success(let message):\n        Task { @RealtimeActor in\n          await self.didReceiveWebSocketMessage(message)\n        }\n      }\n    }\n  }\n\n  /// Handles socket errors. 
We disconnect on all errors.\n  private func didReceiveWebSocketError(_ error: NSError) {\n    guard !isTearingDown else {\n      return\n    }\n\n    switch error.code {\n    case kWebsocketDisconnectedErrorCode:\n      let disconnectedEarly = Date().timeIntervalSince(setupTime) <= kWebsocketDisconnectedEarlyThreshold\n      if disconnectedEarly {\n        logger.warning(\"Websocket disconnected immediately after connection\")\n      } else {\n        logger.debug(\"Websocket disconnected normally\")\n      }\n\n    default:\n      logger.error(\"Received ws error: \\(error.localizedDescription)\")\n    }\n\n    disconnect()\n  }\n\n  /// Handles received websocket messages\n  private func didReceiveWebSocketMessage(_ message: URLSessionWebSocketTask.Message) {\n    switch message {\n    case .string(let text):\n      if let data = text.data(using: .utf8) {\n        didReceiveWebSocketData(data)\n      }\n\n    case .data(let data):\n      didReceiveWebSocketData(data)\n\n    @unknown default:\n      logger.error(\"Received an unknown websocket message format\")\n      disconnect()\n    }\n  }\n\n  private func didReceiveWebSocketData(_ data: Data) {\n    guard !isTearingDown else {\n      // The caller already initiated disconnect,\n      // don't send any more messages back to the caller\n      return\n    }\n\n    guard\n      let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],\n      let messageType = json[\"type\"] as? String\n    else {\n      logger.error(\"Received websocket data that we don't understand\")\n      disconnect()\n      return\n    }\n    logger.debug(\"Received \\(messageType) from OpenAI\")\n\n    switch messageType {\n    case \"error\":\n      let errorBody = String(describing: json[\"error\"] as? 
[String: Any])\n      logger.error(\"Received error from OpenAI websocket: \\(errorBody)\")\n      continuation?.yield(.error(errorBody))\n\n    case \"session.created\":\n      continuation?.yield(.sessionCreated)\n\n    case \"session.updated\":\n      continuation?.yield(.sessionUpdated)\n\n    case \"response.audio.delta\":\n      if let base64Audio = json[\"delta\"] as? String {\n        continuation?.yield(.responseAudioDelta(base64Audio))\n      }\n\n    case \"response.created\":\n      continuation?.yield(.responseCreated)\n\n    case \"input_audio_buffer.speech_started\":\n      continuation?.yield(.inputAudioBufferSpeechStarted)\n\n    case \"response.function_call_arguments.done\":\n      if\n        let name = json[\"name\"] as? String,\n        let arguments = json[\"arguments\"] as? String,\n        let callId = json[\"call_id\"] as? String\n      {\n        continuation?.yield(.responseFunctionCallArgumentsDone(name, arguments, callId))\n      }\n\n    // New cases for handling transcription messages\n    case \"response.audio_transcript.delta\":\n      if let delta = json[\"delta\"] as? String {\n        continuation?.yield(.responseTranscriptDelta(delta))\n      }\n\n    case \"response.audio_transcript.done\":\n      if let transcript = json[\"transcript\"] as? String {\n        continuation?.yield(.responseTranscriptDone(transcript))\n      }\n\n    case \"input_audio_buffer.transcript\":\n      if let transcript = json[\"transcript\"] as? String {\n        continuation?.yield(.inputAudioBufferTranscript(transcript))\n      }\n\n    case \"conversation.item.input_audio_transcription.delta\":\n      if let delta = json[\"delta\"] as? String {\n        continuation?.yield(.inputAudioTranscriptionDelta(delta))\n      }\n\n    case \"conversation.item.input_audio_transcription.completed\":\n      if let transcript = json[\"transcript\"] as? 
String {\n        continuation?.yield(.inputAudioTranscriptionCompleted(transcript))\n      }\n\n    // MCP (Model Context Protocol) message types\n    case \"mcp_list_tools.in_progress\":\n      logger.debug(\"MCP: Tool discovery in progress\")\n      continuation?.yield(.mcpListToolsInProgress)\n\n    case \"mcp_list_tools.completed\":\n      logger.debug(\"MCP: Tool discovery completed\")\n      if let tools = json[\"tools\"] as? [String: Any] {\n        continuation?.yield(.mcpListToolsCompleted(tools))\n      } else {\n        continuation?.yield(.mcpListToolsCompleted([:]))\n      }\n\n    case \"mcp_list_tools.failed\":\n      logger.error(\"MCP: Tool discovery failed\")\n      logger.error(\"Full JSON payload: \\(String(describing: json))\")\n\n      let errorDetails = json[\"error\"] as? [String: Any]\n      let errorMessage = errorDetails?[\"message\"] as? String\n      let errorCode = errorDetails?[\"code\"] as? String\n\n      // Also check for top-level error fields\n      let topLevelMessage = json[\"message\"] as? String\n      let topLevelCode = json[\"code\"] as? String\n      let topLevelReason = json[\"reason\"] as? String\n\n      let finalMessage = errorMessage ?? topLevelMessage ?? topLevelReason ?? \"Unknown MCP error\"\n      let finalCode = errorCode ?? topLevelCode\n      let fullError = finalCode != nil ? \"[\\(finalCode!)] \\(finalMessage)\" : finalMessage\n\n      logger.error(\"MCP Error: \\(fullError)\")\n      logger.error(\"Error details: \\(String(describing: errorDetails))\")\n      logger\n        .error(\n          \"Top-level fields: message=\\(String(describing: topLevelMessage)), code=\\(String(describing: topLevelCode)), reason=\\(String(describing: topLevelReason))\")\n\n      continuation?.yield(.mcpListToolsFailed(fullError))\n\n    case \"response.mcp_call.completed\":\n      let eventId = json[\"event_id\"] as? String\n      let itemId = json[\"item_id\"] as? String\n      let outputIndex = json[\"output_index\"] as? 
Int\n      continuation?.yield(.responseMcpCallCompleted(eventId: eventId, itemId: itemId, outputIndex: outputIndex))\n\n    case \"response.mcp_call.in_progress\":\n      continuation?.yield(.responseMcpCallInProgress)\n\n    case \"response.done\":\n      // Handle response completion (may contain errors like insufficient_quota)\n      if\n        let response = json[\"response\"] as? [String: Any],\n        let status = response[\"status\"] as? String\n      {\n        logger.debug(\"Response done with status: \\(status)\")\n\n        // Pass the full response object for detailed error handling\n        continuation?.yield(.responseDone(status: status, statusDetails: response))\n\n        // Log errors for debugging\n        if\n          let statusDetails = response[\"status_details\"] as? [String: Any],\n          let error = statusDetails[\"error\"] as? [String: Any]\n        {\n          let code = error[\"code\"] as? String ?? \"unknown\"\n          let message = error[\"message\"] as? String ?? \"Unknown error\"\n          logger.error(\"Response error: [\\(code)] \\(message)\")\n        }\n      } else {\n        logger.warning(\"Received response.done with unexpected format\")\n      }\n\n    case \"response.text.delta\":\n      if let delta = json[\"delta\"] as? String {\n        continuation?.yield(.responseTextDelta(delta))\n      }\n\n    case \"response.text.done\":\n      if let text = json[\"text\"] as? String {\n        continuation?.yield(.responseTextDone(text))\n      }\n\n    case \"response.output_item.added\":\n      if\n        let item = json[\"item\"] as? [String: Any],\n        let itemId = item[\"id\"] as? String,\n        let type = item[\"type\"] as? String\n      {\n        continuation?.yield(.responseOutputItemAdded(itemId: itemId, type: type))\n      }\n\n    case \"response.output_item.done\":\n      if\n        let item = json[\"item\"] as? [String: Any],\n        let itemId = item[\"id\"] as? 
String,\n        let type = item[\"type\"] as? String\n      {\n        let content = item[\"content\"] as? [[String: Any]]\n        continuation?.yield(.responseOutputItemDone(itemId: itemId, type: type, content: content))\n      }\n\n    case \"response.content_part.added\":\n      if\n        let part = json[\"part\"] as? [String: Any],\n        let type = part[\"type\"] as? String\n      {\n        continuation?.yield(.responseContentPartAdded(type: type))\n      }\n\n    case \"response.content_part.done\":\n      if\n        let part = json[\"part\"] as? [String: Any],\n        let type = part[\"type\"] as? String\n      {\n        let text = part[\"text\"] as? String\n        continuation?.yield(.responseContentPartDone(type: type, text: text))\n      }\n\n    case \"conversation.item.created\":\n      if\n        let item = json[\"item\"] as? [String: Any],\n        let itemId = item[\"id\"] as? String,\n        let type = item[\"type\"] as? String\n      {\n        let role = item[\"role\"] as? String\n        continuation?.yield(.conversationItemCreated(itemId: itemId, type: type, role: role))\n      }\n\n    default:\n      // Log unhandled message types with more detail for debugging\n      logger.warning(\"⚠️ Unhandled message type: \\(messageType)\")\n      logger.debug(\"Full JSON: \\(String(describing: json))\")\n      break\n    }\n\n    if messageType != \"error\", !isTearingDown {\n      receiveMessage()\n    }\n  }\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Private/Realtime/RealtimeActor.swift",
    "content": "//\n//  RealtimeActor.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\n@globalActor\npublic actor RealtimeActor {\n  public static let shared = RealtimeActor()\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Assistant/AssistantParameters.swift",
    "content": "//\n//  AssistantParameters.swift\n//\n//\n//  Created by James Rochabrun on 11/15/23.\n//\n\nimport Foundation\n\n/// Create an [assistant](https://platform.openai.com/docs/api-reference/assistants/createAssistant) with a model and instructions.\n/// Modifies an [assistant](https://platform.openai.com/docs/api-reference/assistants/modifyAssistant).\npublic struct AssistantParameters: Encodable {\n  public init(\n    action: Action? = nil,\n    name: String? = nil,\n    description: String? = nil,\n    instructions: String? = nil,\n    tools: [AssistantObject.Tool] = [],\n    toolResources: ToolResources? = nil,\n    metadata: [String: String]? = nil,\n    temperature: Double? = nil,\n    topP: Double? = nil,\n    responseFormat: ResponseFormat? = nil)\n  {\n    model = action?.model\n    self.name = name\n    self.description = description\n    self.instructions = instructions\n    self.tools = tools\n    self.toolResources = toolResources\n    self.metadata = metadata\n    self.temperature = temperature\n    self.topP = topP\n    self.responseFormat = responseFormat\n  }\n\n  public enum Action {\n    case create(model: String) // model is required on creation of assistant.\n    case modify(model: String?) // model is optional on modification of assistant.\n\n    var model: String? {\n      switch self {\n      case .create(let model): model\n      case .modify(let model): model\n      }\n    }\n  }\n\n  /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.\n  public var model: String?\n  /// The name of the assistant. The maximum length is 256 characters.\n  public var name: String?\n  /// The description of the assistant. 
The maximum length is 512 characters.\n  public var description: String?\n  /// The system instructions that the assistant uses. The maximum length is 32768 characters.\n  public var instructions: String?\n  /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types code_interpreter, retrieval, or function. Defaults to []\n  public var tools = [AssistantObject.Tool]()\n  /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n  public var toolResources: ToolResources?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n  public var metadata: [String: String]?\n  /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n  /// Defaults to 1\n  public var temperature: Double?\n  /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or temperature but not both.\n  /// Defaults to 1\n  public var topP: Double?\n  /// Specifies the format that the model must output. 
Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.\n  /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n  /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n  /// Defaults to `auto`\n  public var responseFormat: ResponseFormat?\n\n  /// Encoding only no nil or non empty parameters, this will avoid sending nil values when using this parameter in the \"modifyAssistant\" request.\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n    if let model {\n      try container.encode(model, forKey: .model)\n    }\n    if let name {\n      try container.encode(name, forKey: .name)\n    }\n    if let description {\n      try container.encode(description, forKey: .description)\n    }\n    if let instructions {\n      try container.encode(instructions, forKey: .instructions)\n    }\n    if !tools.isEmpty {\n      try container.encode(tools, forKey: .tools)\n    }\n    if let toolResources {\n      try container.encode(toolResources, forKey: .toolResources)\n    }\n    if let metadata {\n      try container.encode(metadata, forKey: .metadata)\n    }\n    if let temperature {\n      try container.encode(temperature, forKey: .temperature)\n    }\n    if let topP {\n      try container.encode(topP, forKey: .topP)\n    }\n    if let responseFormat {\n      try container.encode(responseFormat, forKey: .responseFormat)\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n   
 case model\n    case name\n    case description\n    case instructions\n    case tools\n    case metadata\n    case temperature\n    case topP = \"top_p\"\n    case responseFormat = \"response_format\"\n    case toolResources = \"tool_resources\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Audio/AudioSpeechParameters.swift",
    "content": "//\n//  AudioSpeechParameters.swift\n//\n//\n//  Created by James Rochabrun on 11/14/23.\n//\n\nimport Foundation\n\n/// [Generates audio from the input text.](https://platform.openai.com/docs/api-reference/audio/createSpeech)\npublic struct AudioSpeechParameters: Encodable {\n  public init(\n    model: TTSModel,\n    input: String,\n    voice: Voice,\n    responseFormat: ResponseFormat? = nil,\n    speed: Double? = nil)\n  {\n    self.model = model.rawValue\n    self.input = input\n    self.voice = voice.rawValue\n    self.responseFormat = responseFormat?.rawValue\n    self.speed = speed\n  }\n\n  public enum TTSModel {\n    case tts1\n    case tts1HD\n    case custom(model: String)\n\n    var rawValue: String {\n      switch self {\n      case .tts1:\n        \"tts-1\"\n      case .tts1HD:\n        \"tts-1-hd\"\n      case .custom(let model):\n        model\n      }\n    }\n  }\n\n  public enum Voice: String {\n    case alloy\n    case echo\n    case fable\n    case onyx\n    case nova\n    case shimmer\n    case ash\n    case coral\n    case sage\n  }\n\n  public enum ResponseFormat: String {\n    case mp3\n    case opus\n    case aac\n    case flac\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case model\n    case input\n    case voice\n    case responseFormat = \"response_format\"\n    case speed\n  }\n\n  /// One of the available [TTS models](https://platform.openai.com/docs/models/tts): tts-1 or tts-1-hd\n  let model: String\n  /// The text to generate audio for. The maximum length is 4096 characters.\n  let input: String\n  /// The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the [Text to speech guide.](https://platform.openai.com/docs/guides/text-to-speech/voice-options)\n  let voice: String\n  /// Defaults to mp3, The format to audio in. 
Supported formats are mp3, opus, aac, and flac.\n  let responseFormat: String?\n  /// Defaults to 1,  The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.\n  let speed: Double?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Audio/AudioTranscriptionParameters.swift",
    "content": "//\n//  AudioTranscriptionParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/10/23.\n//\n\nimport Foundation\n\n// MARK: - AudioTranscriptionParameters\n\n/// [Transcribes audio into the input language.](https://platform.openai.com/docs/api-reference/audio/createTranscription)\npublic struct AudioTranscriptionParameters: Encodable {\n  public init(\n    fileName: String,\n    file: Data,\n    model: Model = .whisperOne,\n    prompt: String? = nil,\n    responseFormat: String? = nil,\n    temperature: Double? = nil,\n    language: String? = nil,\n    timestampGranularities: [String]? = nil)\n  {\n    self.fileName = fileName\n    self.file = file\n    self.model = model.value\n    self.prompt = prompt\n    self.responseFormat = responseFormat\n    self.temperature = temperature\n    self.language = language\n    self.timestampGranularities = timestampGranularities\n  }\n\n  public enum Model {\n    case whisperOne\n    case custom(model: String)\n    var value: String {\n      switch self {\n      case .whisperOne:\n        \"whisper-1\"\n      case .custom(let model):\n        model\n      }\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case file\n    case model\n    case prompt\n    case responseFormat = \"response_format\"\n    case temperature\n    case language\n    case timestampGranularities = \"timestamp_granularities[]\"\n  }\n\n  /// The name of the file asset is not documented in OpenAI's official documentation; however, it is essential for constructing the multipart request.\n  let fileName: String\n  /// The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n  let file: Data\n  /// ID of the model to use. Only whisper-1 is currently available.\n  let model: String\n  /// The language of the input audio. 
Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.\n  let language: String?\n  /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language.\n  let prompt: String?\n  /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. Defaults to json\n  let responseFormat: String?\n  /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. Defaults to 0\n  let temperature: Double?\n  /// Defaults to segment\n  /// The timestamp granularities to populate for this transcription. response_format must be set verbose_json to use timestamp granularities. Either or both of these options are supported: word, or segment. 
Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.\n  let timestampGranularities: [String]?\n}\n\n// MARK: MultipartFormDataParameters\n\nextension AudioTranscriptionParameters: MultipartFormDataParameters {\n  public func encode(boundary: String) -> Data {\n    MultipartFormDataBuilder(boundary: boundary, entries: [\n      .file(paramName: Self.CodingKeys.file.rawValue, fileName: fileName, fileData: file, contentType: \"audio/mpeg\"),\n      .string(paramName: Self.CodingKeys.model.rawValue, value: model),\n      .string(paramName: Self.CodingKeys.language.rawValue, value: language),\n      .string(paramName: Self.CodingKeys.prompt.rawValue, value: prompt),\n      .string(paramName: Self.CodingKeys.responseFormat.rawValue, value: responseFormat),\n      .string(paramName: Self.CodingKeys.temperature.rawValue, value: temperature),\n      .string(paramName: Self.CodingKeys.timestampGranularities.rawValue, value: timestampGranularities),\n    ]).build()\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Audio/AudioTranslationParameters.swift",
    "content": "//\n//  AudioTranslationParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/10/23.\n//\n\nimport Foundation\n\n// MARK: - AudioTranslationParameters\n\n/// Translates audio into English. [Create translation](https://platform.openai.com/docs/api-reference/audio/createTranslation).\npublic struct AudioTranslationParameters: Encodable {\n  public init(\n    fileName: String,\n    file: Data,\n    model: Model = .whisperOne,\n    prompt: String? = nil,\n    responseFormat: String? = nil,\n    temperature: Double? = nil)\n  {\n    self.fileName = fileName\n    self.file = file\n    self.model = model.value\n    self.prompt = prompt\n    self.responseFormat = responseFormat\n    self.temperature = temperature\n  }\n\n  public enum Model {\n    case whisperOne\n    case custom(model: String)\n\n    var value: String {\n      switch self {\n      case .whisperOne:\n        \"whisper-1\"\n      case .custom(let model):\n        model\n      }\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case file\n    case model\n    case prompt\n    case responseFormat = \"response_format\"\n    case temperature\n  }\n\n  /// The name of the file asset is not documented in OpenAI's official documentation; however, it is essential for constructing the multipart request.\n  let fileName: String\n  /// The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n  let file: Data\n  /// ID of the model to use. Only whisper-1 is currently available.\n  let model: String\n  /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language.\n  let prompt: String?\n  /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 
Defaults to json\n  let responseFormat: String?\n  /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. Defaults to 0\n  let temperature: Double?\n}\n\n// MARK: MultipartFormDataParameters\n\nextension AudioTranslationParameters: MultipartFormDataParameters {\n  public func encode(boundary: String) -> Data {\n    MultipartFormDataBuilder(boundary: boundary, entries: [\n      .file(paramName: Self.CodingKeys.file.rawValue, fileName: fileName, fileData: file, contentType: \"audio/mpeg\"),\n      .string(paramName: Self.CodingKeys.model.rawValue, value: model),\n      .string(paramName: Self.CodingKeys.prompt.rawValue, value: prompt),\n      .string(paramName: Self.CodingKeys.responseFormat.rawValue, value: responseFormat),\n      .string(paramName: Self.CodingKeys.temperature.rawValue, value: temperature),\n    ]).build()\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Batch/BatchParameter.swift",
    "content": "//\n//  BatchParameter.swift\n//\n//\n//  Created by James Rochabrun on 4/27/24.\n//\n\nimport Foundation\n\n/// [Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount.](https://platform.openai.com/docs/api-reference/batch/create)\npublic struct BatchParameter: Encodable {\n  /// The ID of an uploaded file that contains requests for the new batch.\n  /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file.\n  /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), and must be uploaded with the purpose batch.\n  let inputFileID: String\n  /// The endpoint to be used for all requests in the batch. Currently only /v1/chat/completions is supported.\n  let endpoint: String\n  /// The time frame within which the batch should be processed. Currently only 24h is supported.\n  let completionWindow: String\n  /// Optional custom metadata for the batch.\n  let metadata: [String: String]?\n\n  enum CodingKeys: String, CodingKey {\n    case inputFileID = \"input_file_id\"\n    case endpoint\n    case completionWindow = \"completion_window\"\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Chat/ChatCompletionParameters.swift",
    "content": "//\n//  ChatCompletionParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/10/23.\n//\n\nimport Foundation\n\n/// [Create chat completion](https://platform.openai.com/docs/api-reference/chat/create)\n/// For Azure available parameters make sure to visit [Azure API reeference](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference)\npublic struct ChatCompletionParameters: Encodable {\n  public init(\n    messages: [Message],\n    model: Model,\n    store: Bool? = nil,\n    reasoningEffort: ReasoningEffort? = nil,\n    verbosity: Verbosity? = nil,\n    metadata: [String: String]? = nil,\n    frequencyPenalty: Double? = nil,\n    functionCall: FunctionCall? = nil,\n    toolChoice: ToolChoice? = nil,\n    functions: [ChatFunction]? = nil,\n    tools: [Tool]? = nil,\n    parallelToolCalls: Bool? = nil,\n    logitBias: [Int: Double]? = nil,\n    logProbs: Bool? = nil,\n    topLogprobs: Int? = nil,\n    maxTokens: Int? = nil,\n    n: Int? = nil,\n    modalities: [String]? = nil,\n    prediction: Prediction? = nil,\n    audio: Audio? = nil,\n    responseFormat: ResponseFormat? = nil,\n    presencePenalty: Double? = nil,\n    serviceTier: ServiceTier? = nil,\n    seed: Int? = nil,\n    stop: [String]? = nil,\n    temperature: Double? = nil,\n    topProbability: Double? = nil,\n    user: String? = nil,\n    streamOptions: StreamOptions? 
= nil)\n  {\n    self.messages = messages\n    self.model = model.value\n    self.store = store\n    self.reasoningEffort = reasoningEffort?.rawValue\n    self.verbosity = verbosity?.rawValue\n    self.metadata = metadata\n    self.frequencyPenalty = frequencyPenalty\n    self.functionCall = functionCall\n    self.toolChoice = toolChoice\n    self.functions = functions\n    self.tools = tools\n    self.parallelToolCalls = parallelToolCalls\n    self.logitBias = logitBias\n    logprobs = logProbs\n    self.topLogprobs = topLogprobs\n    self.maxTokens = maxTokens\n    self.n = n\n    self.modalities = modalities\n    self.prediction = prediction\n    self.audio = audio\n    self.responseFormat = responseFormat\n    self.presencePenalty = presencePenalty\n    self.serviceTier = serviceTier?.rawValue\n    self.seed = seed\n    self.stop = stop\n    self.temperature = temperature\n    topP = topProbability\n    self.user = user\n    self.streamOptions = streamOptions\n  }\n\n  public struct Message: Encodable {\n    public init(\n      role: Role,\n      content: ContentType,\n      refusal: String? = nil,\n      name: String? = nil,\n      audio: Audio? = nil,\n      functionCall: FunctionCall? = nil,\n      toolCalls: [ToolCall]? = nil,\n      toolCallID: String? 
= nil)\n    {\n      self.role = role.rawValue\n      self.content = content\n      self.refusal = refusal\n      self.name = name\n      self.audio = audio\n      self.functionCall = functionCall\n      self.toolCalls = toolCalls\n      self.toolCallID = toolCallID\n    }\n\n    public enum ContentType: Encodable {\n      case text(String)\n      case contentArray([MessageContent])\n\n      public enum MessageContent: Encodable, Equatable, Hashable {\n        case text(String)\n        case imageUrl(ImageDetail)\n        case inputAudio(AudioDetail)\n\n        public struct ImageDetail: Encodable, Equatable, Hashable {\n          public let url: URL\n          public let detail: String?\n\n          enum CodingKeys: String, CodingKey {\n            case url\n            case detail\n          }\n\n          public func encode(to encoder: Encoder) throws {\n            var container = encoder.container(keyedBy: CodingKeys.self)\n            try container.encode(url, forKey: .url)\n            try container.encodeIfPresent(detail, forKey: .detail)\n          }\n\n          public init(url: URL, detail: String? 
= nil) {\n            self.url = url\n            self.detail = detail\n          }\n        }\n\n        public struct AudioDetail: Encodable, Equatable, Hashable {\n          public let data: String\n          public let format: String\n\n          enum CodingKeys: String, CodingKey {\n            case data\n            case format\n          }\n\n          public func encode(to encoder: Encoder) throws {\n            var container = encoder.container(keyedBy: CodingKeys.self)\n            try container.encode(data, forKey: .data)\n            try container.encode(format, forKey: .format)\n          }\n\n          public init(data: String, format: String) {\n            self.data = data\n            self.format = format\n          }\n        }\n\n        public static func ==(lhs: MessageContent, rhs: MessageContent) -> Bool {\n          switch (lhs, rhs) {\n          case (.text(let a), .text(let b)):\n            a == b\n          case (.imageUrl(let a), .imageUrl(let b)):\n            a == b\n          case (.inputAudio(let a), .inputAudio(let b)):\n            a == b\n          default:\n            false\n          }\n        }\n\n        public func encode(to encoder: Encoder) throws {\n          var container = encoder.container(keyedBy: CodingKeys.self)\n          switch self {\n          case .text(let text):\n            try container.encode(\"text\", forKey: .type)\n            try container.encode(text, forKey: .text)\n\n          case .imageUrl(let imageDetail):\n            try container.encode(\"image_url\", forKey: .type)\n            try container.encode(imageDetail, forKey: .imageUrl)\n\n          case .inputAudio(let audioDetail):\n            try container.encode(\"input_audio\", forKey: .type)\n            try container.encode(audioDetail, forKey: .inputAudio)\n          }\n        }\n\n        public func hash(into hasher: inout Hasher) {\n          switch self {\n          case .text(let string):\n            hasher.combine(string)\n        
  case .imageUrl(let imageDetail):\n            hasher.combine(imageDetail)\n          case .inputAudio(let audioDetail):\n            hasher.combine(audioDetail)\n          }\n        }\n\n        enum CodingKeys: String, CodingKey {\n          case type\n          case text\n          case imageUrl = \"image_url\"\n          case inputAudio = \"input_audio\"\n        }\n      }\n\n      public func encode(to encoder: Encoder) throws {\n        var container = encoder.singleValueContainer()\n        switch self {\n        case .text(let text):\n          try container.encode(text)\n        case .contentArray(let contentArray):\n          try container.encode(contentArray)\n        }\n      }\n    }\n\n    public enum Role: String {\n      case system // content, role\n      case user // content, role\n      case assistant // content, role, tool_calls\n      case tool // content, role, tool_call_id\n    }\n\n    public struct Audio: Encodable {\n      /// Unique identifier for a previous audio response from the model.\n      public let id: String\n\n      public init(id: String) {\n        self.id = id\n      }\n    }\n\n    /// The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n    public let content: ContentType\n    /// The refusal message by the assistant.\n    public let refusal: String?\n    /// The role of the messages author. One of system, user, assistant, or tool message.\n    public let role: String\n    /// The name of the author of this message. name is required if role is function, and it should be the name of the function whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n    public let name: String?\n    /// Data about a previous audio response from the model. 
[Learn more.](https://platform.openai.com/docs/guides/audio)\n    public let audio: Audio?\n\n    enum CodingKeys: String, CodingKey {\n      case role\n      case content\n      case refusal\n      case name\n      case audio\n      case functionCall = \"function_call\"\n      case toolCalls = \"tool_calls\"\n      case toolCallID = \"tool_call_id\"\n    }\n\n    /// The name and arguments of a function that should be called, as generated by the model.\n    @available(*, deprecated, message: \"Deprecated and replaced by `tool_calls`\")\n    let functionCall: FunctionCall?\n    /// The tool calls generated by the model, such as function calls.\n    let toolCalls: [ToolCall]?\n    /// Tool call that this message is responding to.\n    let toolCallID: String?\n  }\n\n  @available(*, deprecated, message: \"Deprecated in favor of ToolChoice.\")\n  public enum FunctionCall: Encodable, Equatable {\n    case none\n    case auto\n    case function(String)\n\n    public func encode(to encoder: Encoder) throws {\n      switch self {\n      case .none:\n        var container = encoder.singleValueContainer()\n        try container.encode(CodingKeys.none.rawValue)\n\n      case .auto:\n        var container = encoder.singleValueContainer()\n        try container.encode(CodingKeys.auto.rawValue)\n\n      case .function(let name):\n        var container = encoder.container(keyedBy: CodingKeys.self)\n        try container.encode(name, forKey: .function)\n      }\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case none\n      case auto\n      case function = \"name\"\n    }\n  }\n\n  /// [Documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)\n  public struct Tool: Encodable {\n    /// The type of the tool. 
Currently, only `function` is supported.\n    public let type: String\n    /// object\n    public let function: ChatFunction\n\n    public init(\n      type: String = \"function\",\n      function: ChatFunction)\n    {\n      self.type = type\n      self.function = function\n    }\n  }\n\n  public struct ChatFunction: Codable, Equatable {\n    public init(\n      name: String,\n      strict: Bool?,\n      description: String?,\n      parameters: JSONSchema?)\n    {\n      self.name = name\n      self.strict = strict\n      self.description = description\n      self.parameters = parameters\n    }\n\n    /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n    public let name: String\n    /// A description of what the function does, used by the model to choose when and how to call the function.\n    public let description: String?\n    /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format.\n    /// Omitting parameters defines a function with an empty parameter list.\n    public let parameters: JSONSchema?\n    /// Defaults to false, Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the [function calling guide].(https://platform.openai.com/docs/api-reference/chat/docs/guides/function-calling)\n    public let strict: Bool?\n  }\n\n  public enum ServiceTier: String, Encodable {\n    /// Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers subscribed to the scale tier service:\n    /// If set to 'auto', the system will utilize scale tier credits until they are exhausted.\n    /// If set to 'default', the request will be processed in the shared cluster.\n    /// When this parameter is set, the response body will include the service_tier utilized.\n    case auto\n    case `default`\n  }\n\n  public struct StreamOptions: Encodable {\n    /// If set, an additional chunk will be streamed before the data: [DONE] message.\n    /// The usage field on this chunk shows the token usage statistics for the entire request,\n    /// and the choices field will always be an empty array. All other chunks will also include\n    /// a usage field, but with a null value.\n    let includeUsage: Bool\n\n    enum CodingKeys: String, CodingKey {\n      case includeUsage = \"include_usage\"\n    }\n\n    public init(includeUsage: Bool) {\n      self.includeUsage = includeUsage\n    }\n  }\n\n  /// Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]\n  /// [Learn more.](https://platform.openai.com/docs/guides/audio)\n  public struct Audio: Encodable {\n    /// Specifies the voice type. Supported voices are alloy, echo, fable, onyx, nova, and shimmer.\n    public let voice: String\n    /// Specifies the output audio format. 
Must be one of wav, mp3, flac, opus, or pcm16.\n    public let format: String\n\n    public init(\n      voice: String,\n      format: String)\n    {\n      self.voice = voice\n      self.format = format\n    }\n  }\n\n  public struct Prediction: Encodable {\n    public init(content: PredictionContent, type: String = \"content\") {\n      self.type = type\n      self.content = content\n    }\n\n    public enum PredictionContent: Encodable {\n      case text(String)\n      case contentArray([ContentPart])\n\n      public func encode(to encoder: Encoder) throws {\n        var container = encoder.singleValueContainer()\n        switch self {\n        case .text(let text):\n          try container.encode(text)\n        case .contentArray(let parts):\n          try container.encode(parts)\n        }\n      }\n    }\n\n    public struct ContentPart: Encodable {\n      public let type: String\n      public let text: String\n\n      public init(type: String, text: String) {\n        self.type = type\n        self.text = text\n      }\n    }\n\n    public let type: String\n    public let content: PredictionContent\n  }\n\n  public enum ReasoningEffort: String, Encodable {\n    case low\n    case medium\n    case high\n    /// The new minimal setting produces very few reasoning tokens for cases where you need the fastest possible time-to-first-token. We often see better performance when the model can produce a few tokens when needed versus none. The default is medium.\n    ///\n    /// The minimal setting performs especially well in coding and instruction following scenarios, adhering closely to given directions. However, it may require prompting to act more proactively. To improve the model's reasoning quality, even at minimal effort, encourage it to “think” or outline its steps before answering.\n    case minimal\n  }\n\n  /// Verbosity determines how many output tokens are generated. Lowering the number of tokens reduces overall latency. 
While the model's reasoning approach stays mostly the same, the model finds ways to answer more concisely—which can either improve or diminish answer quality, depending on your use case. Here are some scenarios for both ends of the verbosity spectrum:\n  /// High verbosity: Use when you need the model to provide thorough explanations of documents or perform extensive code refactoring.\n  /// Low verbosity: Best for situations where you want concise answers or simple code generation, such as SQL queries.\n  /// Models before GPT-5 have used medium verbosity by default. With GPT-5, we make this option configurable as one of high, medium, or low.\n  /// When generating code, medium and high verbosity levels yield longer, more structured code with inline explanations, while low verbosity produces shorter, more concise code with minimal commentary.\n  public enum Verbosity: String, Encodable {\n    case high\n    case medium\n    case low\n  }\n\n  /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models)\n  public var messages: [Message]\n  /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/how-we-use-your-data) table for details on which models work with the Chat API.\n  public var model: String\n  /// Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products.\n  /// Defaults to false\n  public var store: Bool?\n  /// Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are minimal, low, medium, and high. 
Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n  /// Defaults to medium. Applies to reasoning models (e.g. o1) only.\n  public var reasoningEffort: String?\n  /// Verbosity determines how many output tokens are generated. Lowering the number of tokens reduces overall latency.\n  public var verbosity: String?\n  /// Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions).\n  public var metadata: [String: String]?\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. Defaults to 0\n  /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)\n  public var frequencyPenalty: Double?\n  /// Controls how the model responds to function calls. none means the model does not call a function, and responds to the end-user. auto means the model can pick between an end-user or calling a function. Specifying a particular function via {\"name\": \"my_function\"} forces the model to call that function. none is the default when no functions are present. auto is the default if functions are present.\n  @available(*, deprecated, message: \"Deprecated in favor of tool_choice.\")\n  public var functionCall: FunctionCall?\n  /// Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message.\n  /// auto means the model can pick between generating a message or calling a function. Specifying a particular function via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that function.\n  /// `none` is the default when no functions are present. 
auto is the default if functions are present.\n  public var toolChoice: ToolChoice?\n  /// A list of functions the model may generate JSON inputs for.\n  @available(*, deprecated, message: \"Deprecated in favor of tools.\")\n  public var functions: [ChatFunction]?\n  /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n  public var tools: [Tool]?\n  /// Whether to enable parallel function calling during tool use. Defaults to true.\n  public var parallelToolCalls: Bool?\n  /// Modify the likelihood of specified tokens appearing in the completion.\n  /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. Defaults to null.\n  public var logitBias: [Int: Double]?\n  /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. This option is currently not available on the gpt-4-vision-preview model. Defaults to false.\n  public var logprobs: Bool?\n  /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.\n  public var topLogprobs: Int?\n  /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. 
This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\n  /// This value is now deprecated in favor of max_completion_tokens, and is not compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning)\n  public var maxTokens: Int?\n  /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning)\n  public var maCompletionTokens: Int?\n  /// How many chat completion choices to generate for each input message. Defaults to 1.\n  public var n: Int?\n  /// Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default:\n  /// [\"text\"]\n  /// The gpt-4o-audio-preview model can also be used to [generate audio](https://platform.openai.com/docs/guides/audio). To request that this model generate both text and audio responses, you can use:\n  /// [\"text\", \"audio\"]\n  public var modalities: [String]?\n  /// Configuration for a [Predicted Output](https://platform.openai.com/docs/guides/predicted-outputs), which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content.\n  public var prediction: Prediction?\n  /// Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. [Learn more.](https://platform.openai.com/docs/guides/audio)\n  public var audio: Audio?\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
Defaults to 0\n  /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)\n  public var presencePenalty: Double?\n  /// An object specifying the format that the model must output. Used to enable JSON mode.\n  /// Setting to `{ type: \"json_object\" }` enables `JSON` mode, which guarantees the message the model generates is valid JSON.\n  /// Important: when using `JSON` mode you must still instruct the model to produce `JSON` yourself via some conversation message, for example via your system message. If you don't do this, the model may generate an unending stream of whitespace until the generation reaches the token limit, which may take a lot of time and give the appearance of a \"stuck\" request. Also note that the message content may be partial (i.e. cut off) if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n  public var responseFormat: ResponseFormat?\n  /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n  /// If set to 'auto', the system will utilize scale tier credits until they are exhausted.\n  /// If set to 'default', the request will be processed in the shared cluster.\n  /// When this parameter is set, the response body will include the service_tier utilized.\n  public var serviceTier: String?\n  /// This feature is in `Beta`. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n  /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n  public var seed: Int?\n  /// Up to 4 sequences where the API will stop generating further tokens. 
Defaults to null.\n  public var stop: [String]?\n  /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n  /// We generally recommend altering this or `top_p` but not both. Defaults to 1.\n  public var temperature: Double?\n  /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or `temperature` but not both. Defaults to 1\n  public var topP: Double?\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n  /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).\n  public var user: String?\n\n  enum CodingKeys: String, CodingKey {\n    case messages\n    case model\n    case store\n    case reasoningEffort = \"reasoning_effort\"\n    case verbosity\n    case metadata\n    case frequencyPenalty = \"frequency_penalty\"\n    case toolChoice = \"tool_choice\"\n    case functionCall = \"function_call\"\n    case tools\n    case parallelToolCalls = \"parallel_tool_calls\"\n    case functions\n    case logitBias = \"logit_bias\"\n    case logprobs\n    case topLogprobs = \"top_logprobs\"\n    case maxTokens = \"max_tokens\"\n    case maCompletionTokens = \"max_completion_tokens\"\n    case n\n    case modalities\n    case prediction\n    case audio\n    case responseFormat = \"response_format\"\n    case presencePenalty = \"presence_penalty\"\n    case seed\n    case serviceTier = \"service_tier\"\n    case stop\n    case stream\n    case streamOptions = \"stream_options\"\n    case temperature\n    case topP = \"top_p\"\n    case user\n  }\n\n  /// If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format) as they become available, with the stream terminated by a data: [DONE] message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n  /// Defaults to false.\n  var stream: Bool?\n  /// Options for streaming response. Only set this when you set stream: true\n  var streamOptions: StreamOptions?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Conversations/CreateConversationItemsParameter.swift",
    "content": "//\n//  CreateConversationItemsParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 10/05/25.\n//\n\nimport Foundation\n\n// MARK: CreateConversationItemsParameter\n\n/// [Create items in a conversation](https://platform.openai.com/docs/api-reference/conversations/create-items)\npublic struct CreateConversationItemsParameter: Codable {\n  /// Initialize a new CreateConversationItemsParameter\n  public init(\n    items: [InputItem],\n    include: [ResponseInclude]? = nil)\n  {\n    self.items = items\n    self.include = include?.map(\\.rawValue)\n  }\n\n  /// The items to add to the conversation. You may add up to 20 items at a time.\n  public var items: [InputItem]\n\n  /// Additional fields to include in the response.\n  /// Note: This becomes a query parameter, not a body parameter\n  public var include: [String]?\n\n  enum CodingKeys: String, CodingKey {\n    case items\n    case include\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Conversations/CreateConversationParameter.swift",
    "content": "//\n//  CreateConversationParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 10/05/25.\n//\n\nimport Foundation\n\n// MARK: CreateConversationParameter\n\n/// [Create a conversation](https://platform.openai.com/docs/api-reference/conversations/create)\npublic struct CreateConversationParameter: Codable {\n  /// Initialize a new CreateConversationParameter\n  public init(\n    items: [InputItem]? = nil,\n    metadata: [String: String]? = nil)\n  {\n    self.items = items\n    self.metadata = metadata\n  }\n\n  /// Initial items to include in the conversation context. You may add up to 20 items at a time.\n  public var items: [InputItem]?\n\n  /// Set of 16 key-value pairs that can be attached to an object.\n  /// Keys: max 64 characters, Values: max 512 characters\n  public var metadata: [String: String]?\n\n  enum CodingKeys: String, CodingKey {\n    case items\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Conversations/GetConversationItemParameter.swift",
    "content": "//\n//  GetConversationItemParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 10/05/25.\n//\n\nimport Foundation\n\n// MARK: GetConversationItemParameter\n\n/// [Retrieve an item from a conversation](https://platform.openai.com/docs/api-reference/conversations/retrieve-item)\npublic struct GetConversationItemParameter: Codable {\n  /// Initialize a new GetConversationItemParameter\n  public init(\n    include: [ResponseInclude]? = nil)\n  {\n    self.include = include?.map(\\.rawValue)\n  }\n\n  /// Additional fields to include in the response.\n  /// Supported values: web_search_call.action.sources, code_interpreter_call.outputs,\n  /// computer_call_output.output.image_url, file_search_call.results,\n  /// message.input_image.image_url, message.output_text.logprobs, reasoning.encrypted_content\n  public var include: [String]?\n\n  enum CodingKeys: String, CodingKey {\n    case include\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Conversations/GetConversationItemsParameter.swift",
    "content": "//\n//  GetConversationItemsParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 10/05/25.\n//\n\nimport Foundation\n\n// MARK: GetConversationItemsParameter\n\n/// [List items for a conversation](https://platform.openai.com/docs/api-reference/conversations/list-items)\npublic struct GetConversationItemsParameter: Codable {\n  /// Initialize a new GetConversationItemsParameter\n  public init(\n    after: String? = nil,\n    include: [ResponseInclude]? = nil,\n    limit: Int? = nil,\n    order: String? = nil)\n  {\n    self.after = after\n    self.include = include?.map(\\.rawValue)\n    self.limit = limit\n    self.order = order\n  }\n\n  /// An item ID to list items after, used in pagination.\n  public var after: String?\n\n  /// Specify additional output data to include in the model response.\n  /// Supported values: web_search_call.action.sources, code_interpreter_call.outputs,\n  /// computer_call_output.output.image_url, file_search_call.results,\n  /// message.input_image.image_url, message.output_text.logprobs, reasoning.encrypted_content\n  public var include: [String]?\n\n  /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  public var limit: Int?\n\n  /// The order to return the items in. Default is desc. One of 'asc' or 'desc'.\n  public var order: String?\n\n  enum CodingKeys: String, CodingKey {\n    case after\n    case include\n    case limit\n    case order\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Conversations/UpdateConversationParameter.swift",
    "content": "//\n//  UpdateConversationParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 10/05/25.\n//\n\nimport Foundation\n\n// MARK: UpdateConversationParameter\n\n/// [Update a conversation](https://platform.openai.com/docs/api-reference/conversations/update)\npublic struct UpdateConversationParameter: Codable {\n  /// Initialize a new UpdateConversationParameter\n  public init(\n    metadata: [String: String])\n  {\n    self.metadata = metadata\n  }\n\n  /// Set of 16 key-value pairs that can be attached to an object.\n  /// Keys: max 64 characters, Values: max 512 characters\n  public var metadata: [String: String]\n\n  enum CodingKeys: String, CodingKey {\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Embedding/EmbeddingParameter.swift",
    "content": "//\n//  EmbeddingParameter.swift\n//\n//\n//  Created by James Rochabrun on 10/12/23.\n//\n\nimport Foundation\n\n/// [Creates](https://platform.openai.com/docs/api-reference/embeddings/create) an embedding vector representing the input text.\npublic struct EmbeddingParameter: Encodable {\n  public init(\n    input: String,\n    model: Model = .textEmbeddingAda002,\n    encodingFormat: String?,\n    dimensions: Int?,\n    user: String? = nil)\n  {\n    self.input = input\n    self.model = model.rawValue\n    self.encodingFormat = encodingFormat\n    self.dimensions = dimensions\n    self.user = user\n  }\n\n  public enum Model: String {\n    case textEmbeddingAda002 = \"text-embedding-ada-002\"\n    case textEmbedding3Large = \"text-embedding-3-large\"\n    case textEmbedding3Small = \"text-embedding-3-small\"\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case input\n    case model\n    case encodingFormat = \"encoding_format\"\n    case dimensions\n    case user\n  }\n\n  /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for text-embedding-ada-002) and cannot be an empty string. [How to Count Tokens with `tiktoken`](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)\n  let input: String\n  /// ID of the model to use. You can use the List models API to see all of your available models, or see our [Model overview ](https://platform.openai.com/docs/models/overview) for descriptions of them.\n  let model: String\n  /// The format to return the embeddings in. Can be either float or [base64](https://pypi.org/project/pybase64/).\n  /// Defaults to \"float\"\n  let encodingFormat: String?\n  /// The number of dimensions the resulting output embeddings should have. 
Only supported in text-embedding-3 and later models.\n  let dimensions: Int?\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more.](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids)\n  let user: String?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/File/FileParameter.swift",
    "content": "//\n//  FileParameter.swift\n//\n//\n//  Created by James Rochabrun on 10/16/23.\n//\n\nimport Foundation\n\n// MARK: - FileParameters\n\n/// [Upload a file](https://platform.openai.com/docs/api-reference/files/create) that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.\npublic struct FileParameters: Encodable {\n  /// The name of the file asset is not documented in OpenAI's official documentation; however, it is essential for constructing the multipart request.\n  public let fileName: String?\n  /// The file object (not file name) to be uploaded.\n  /// If the purpose is set to \"fine-tune\", the file will be used for fine-tuning.\n  public let file: Data\n  /// The intended purpose of the uploaded file.\n  /// Use \"fine-tune\" for [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning.\n  public let purpose: String\n\n  public init(\n    fileName: String?,\n    file: Data,\n    purpose: String)\n  {\n    self.fileName = fileName\n    self.file = file\n    self.purpose = purpose\n  }\n}\n\n// MARK: MultipartFormDataParameters\n\nextension FileParameters: MultipartFormDataParameters {\n  public func encode(boundary: String) -> Data {\n    MultipartFormDataBuilder(boundary: boundary, entries: [\n      .file(paramName: \"file\", fileName: fileName, fileData: file, contentType: \"application/x-ndjson\"),\n      .string(paramName: \"purpose\", value: purpose),\n    ]).build()\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/FineTuning/FineTuningJobParameters.swift",
    "content": "//\n//  FineTuningJobParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/17/23.\n//\n\nimport Foundation\n\n/// [Creates a job](https://platform.openai.com/docs/api-reference/fine-tuning/create) that fine-tunes a specified model from a given dataset.\n/// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\npublic struct FineTuningJobParameters: Encodable {\n  public init(\n    model: Model,\n    trainingFile: String,\n    hyperparameters: HyperParameters? = nil,\n    suffix: String? = nil,\n    validationFile: String? = nil,\n    integrations: [Integration]? = nil,\n    seed: Int? = nil)\n  {\n    self.model = model.rawValue\n    self.trainingFile = trainingFile\n    self.hyperparameters = hyperparameters\n    self.suffix = suffix\n    self.validationFile = validationFile\n    self.integrations = integrations\n    self.seed = seed\n  }\n\n  /// Fine-tuning is [currently available](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned) for the following models:\n  /// gpt-3.5-turbo-0613 (recommended)\n  /// babbage-002\n  /// davinci-002\n  /// OpenAI expects gpt-3.5-turbo to be the right model for most users in terms of results and ease of use, unless you are migrating a legacy fine-tuned model.\n  public enum Model: String {\n    case gpt35 = \"gpt-3.5-turbo-0613\" /// recommended\n    case babbage002 = \"babbage-002\"\n    case davinci002 = \"davinci-002\"\n  }\n\n  public struct HyperParameters: Encodable {\n    /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n    /// Defaults to auto.\n    let nEpochs: Int?\n\n    public init(\n      nEpochs: Int?)\n    {\n      self.nEpochs = nEpochs\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case nEpochs = \"n_epochs\"\n    }\n  }\n\n  public struct Integration: Encodable {\n    /// The type of integration to enable. 
Currently, only \"wandb\" (Weights and Biases) is supported.\n    let type: String\n\n    let wandb: Wandb\n\n    public struct Wandb: Encodable {\n      /// The name of the project that the new run will be created under.\n      let project: String\n      /// A display name to set for the run. If not set, we will use the Job ID as the name.\n      let name: String?\n      /// The entity to use for the run. This allows you to set the team or username of the WandB user that you would like associated with the run. If not set, the default entity for the registered WandB API key is used.\n      let entity: String?\n      /// A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some default tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n      let tags: [String]?\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case model\n    case trainingFile = \"training_file\"\n    case hyperparameters\n    case suffix\n    case validationFile = \"validation_file\"\n    case integrations\n    case seed\n  }\n\n  /// The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/models/overview).\n  let model: String\n  /// The ID of an uploaded file that contains training data.\n  /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file.\n  /// Your dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with the purpose fine-tune.\n  /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.\n  let trainingFile: String\n  /// The hyperparameters used for the fine-tuning job.\n  let hyperparameters: HyperParameters?\n  /// A string of up to 18 characters that will be added to your fine-tuned model name.\n  /// For example, a suffix of \"custom-model-name\" would produce a model name like ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel.\n  /// Defaults to null.\n  let suffix: String?\n  /// The ID of an uploaded file that contains validation data.\n  /// If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files.\n  /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose fine-tune.\n  /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.\n  let validationFile: String?\n  /// A list of integrations to enable for your fine-tuning job.\n  let integrations: [Integration]?\n  /// The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you.\n  let seed: Int?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Image/Dalle.swift",
    "content": "//\n//  Dalle.swift\n//\n//\n//  Created by James Rochabrun on 11/15/23.\n//\n\nimport Foundation\n\n/// [DALL·E](https://platform.openai.com/docs/models/dall-e)\n///\n/// DALL·E is a AI system that can create realistic images and art from a description in natural language. DALL·E 3 currently supports the ability, given a prompt, to create a new image with a specific size. DALL·E 2 also support the ability to edit an existing image, or create variations of a user provided image.\n///\n/// DALL·E 3 is available through our Images API along with DALL·E 2. You can try DALL·E 3 through ChatGPT Plus.\n///\n///\n/// | MODEL     | DESCRIPTION                                                  |\n/// |-----------|--------------------------------------------------------------|\n/// | dall-e-3  | DALL·E 3 New                                                 |\n/// |           | The latest DALL·E model released in Nov 2023. Learn more.    |\n/// | dall-e-2  | The previous DALL·E model released in Nov 2022.              |\n/// |           | The 2nd iteration of DALL·E with more realistic, accurate,   |\n/// |           | and 4x greater resolution images than the original model.    |\npublic enum Dalle {\n  case dalle2(Dalle2ImageSize)\n  case dalle3(Dalle3ImageSize)\n\n  public enum Dalle2ImageSize: String {\n    case small = \"256x256\"\n    case medium = \"512x512\"\n    case large = \"1024x1024\"\n  }\n\n  public enum Dalle3ImageSize: String {\n    case largeSquare = \"1024x1024\"\n    case landscape = \"1792x1024\"\n    case portrait = \"1024x1792\"\n  }\n\n  var model: String {\n    switch self {\n    case .dalle2: Model.dalle2.value\n    case .dalle3: Model.dalle3.value\n    }\n  }\n\n  var size: String {\n    switch self {\n    case .dalle2(let dalle2ImageSize):\n      dalle2ImageSize.rawValue\n    case .dalle3(let dalle3ImageSize):\n      dalle3ImageSize.rawValue\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Image/ImageCreateParameters.swift",
    "content": "//\n//  ImageCreateParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/12/23.\n//\n\nimport Foundation\n\n/// [Creates an image given a prompt.](https://platform.openai.com/docs/api-reference/images/create)\npublic struct ImageCreateParameters: Encodable {\n  public init(\n    prompt: String,\n    model: Dalle,\n    numberOfImages: Int = 1,\n    quality: String? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    style: String? = nil,\n    user: String? = nil)\n  {\n    self.prompt = prompt\n    self.model = model.model\n    n = numberOfImages\n    self.quality = quality\n    self.responseFormat = responseFormat?.rawValue\n    size = model.size\n    self.style = style\n    self.user = user\n  }\n\n  public enum ImageSize: String {\n    case small = \"256x256\"\n    case medium = \"512x512\"\n    case large = \"1024x1024\"\n  }\n\n  public enum ImageResponseFormat: String {\n    case url\n    case b64Json = \"b64_json\"\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case prompt\n    case model\n    case n\n    case quality\n    case responseFormat = \"response_format\"\n    case size\n    case style\n    case user\n  }\n\n  /// A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n  let prompt: String\n  /// The model to use for image generation. Defaults to dall-e-2\n  let model: String?\n  /// The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported.\n  let n: Int?\n  /// The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3. Defaults to standard\n  let quality: String?\n  /// The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url\n  let responseFormat: String?\n  /// The size of the generated images. 
Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. Defaults to 1024x1024\n  let size: String?\n  /// The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3. Defaults to vivid\n  let style: String?\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices)\n  let user: String?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Image/ImageEditParameters.swift",
    "content": "//\n//  ImageEditParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/12/23.\n//\n\nimport Foundation\n#if canImport(UIKit)\nimport UIKit\n\npublic typealias PlatformImage = UIImage\n#elseif canImport(AppKit)\nimport AppKit\n\npublic typealias PlatformImage = NSImage\n#endif\n\n// MARK: - ImageEditParameters\n\n/// [Creates an edited or extended image given an original image and a prompt.](https://platform.openai.com/docs/api-reference/images/createEdit)\npublic struct ImageEditParameters: Encodable {\n  #if canImport(UIKit) || canImport(AppKit)\n  public init(\n    image: PlatformImage,\n    model: Dalle? = nil,\n    mask: PlatformImage? = nil,\n    prompt: String,\n    numberOfImages: Int? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    user: String? = nil)\n  {\n    #if canImport(UIKit)\n    let imageData = image.pngData()\n    let maskData = mask?.pngData()\n    #elseif canImport(AppKit)\n    let imageData = image.tiffRepresentation\n    let maskData = mask?.tiffRepresentation\n    #endif\n\n    guard let imageData else {\n      fatalError(\"Failed to get image data\")\n    }\n\n    if mask != nil, maskData == nil {\n      fatalError(\"Failed to get mask data\")\n    }\n\n    self.init(\n      imageData: imageData,\n      maskData: maskData,\n      model: model,\n      prompt: prompt,\n      numberOfImages: numberOfImages,\n      responseFormat: responseFormat,\n      user: user)\n  }\n  #endif\n\n  /// Creates parameters from raw data (for platforms without UIKit/AppKit support)\n  /// - Parameters:\n  ///   - imageData: Raw image data\n  ///   - maskData: Optional raw mask data\n  ///   - model: The model to use\n  ///   - prompt: A text description of the desired image\n  ///   - numberOfImages: Number of images to generate\n  ///   - responseFormat: Format of the response\n  ///   - user: User identifier\n  public init(\n    imageData: Data,\n    maskData: Data? = nil,\n    model: Dalle? 
= nil,\n    prompt: String,\n    numberOfImages: Int? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    user: String? = nil)\n  {\n    image = imageData\n    self.model = model?.model\n    mask = maskData\n    self.prompt = prompt\n    n = numberOfImages\n    size = model?.size\n    self.responseFormat = responseFormat?.rawValue\n    self.user = user\n  }\n\n  public enum ImageResponseFormat: String {\n    case url\n    case b64Json = \"b64_json\"\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case image\n    case prompt\n    case mask\n    case model\n    case n\n    case size\n    case responseFormat = \"response_format\"\n    case user\n  }\n\n  /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.\n  let image: Data\n  /// A text description of the desired image(s). The maximum length is 1000 characters.\n  let prompt: String\n  /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.\n  let mask: Data?\n  /// The model to use for image generation. Only dall-e-2 is supported at this time. Defaults to dall-e-2\n  let model: String?\n  /// The number of images to generate. Must be between 1 and 10. Defaults to 1\n  let n: Int?\n  /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024\n  let size: String?\n  /// The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url\n  let responseFormat: String?\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices)\n  let user: String?\n}\n\n// MARK: MultipartFormDataParameters\n\nextension ImageEditParameters: MultipartFormDataParameters {\n  public func encode(boundary: String) -> Data {\n    MultipartFormDataBuilder(boundary: boundary, entries: [\n      .file(paramName: Self.CodingKeys.image.rawValue, fileName: \"\", fileData: image, contentType: \"image/png\"),\n      .string(paramName: Self.CodingKeys.prompt.rawValue, value: prompt),\n      .string(paramName: Self.CodingKeys.mask.rawValue, value: mask),\n      .string(paramName: Self.CodingKeys.model.rawValue, value: model),\n      .string(paramName: Self.CodingKeys.n.rawValue, value: n),\n      .string(paramName: Self.CodingKeys.size.rawValue, value: size),\n      .string(paramName: Self.CodingKeys.responseFormat.rawValue, value: responseFormat),\n      .string(paramName: Self.CodingKeys.user.rawValue, value: user),\n    ]).build()\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Image/ImageVariationParameters.swift",
    "content": "//\n//  ImageVariationParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/12/23.\n//\n\nimport Foundation\n#if canImport(UIKit)\nimport UIKit\n#elseif canImport(AppKit)\nimport AppKit\n#endif\n\n// MARK: - ImageVariationParameters\n\n/// [Creates a variation of a given image.](https://platform.openai.com/docs/api-reference/images/createVariation)\npublic struct ImageVariationParameters: Encodable {\n  #if canImport(UIKit) || canImport(AppKit)\n  public init(\n    image: PlatformImage,\n    model: Dalle? = nil,\n    numberOfImages: Int? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    user: String? = nil)\n  {\n    #if canImport(UIKit)\n    let imageData = image.pngData()\n    #elseif canImport(AppKit)\n    let imageData = image.tiffRepresentation\n    #endif\n\n    guard let imageData else {\n      fatalError(\"Failed to load image data from image.\")\n    }\n\n    self.init(\n      imageData: imageData,\n      model: model,\n      numberOfImages: numberOfImages,\n      responseFormat: responseFormat,\n      user: user)\n  }\n  #endif\n\n  /// Creates parameters from raw data (for platforms without UIKit/AppKit support)\n  /// - Parameters:\n  ///   - imageData: Raw image data\n  ///   - model: The model to use\n  ///   - numberOfImages: Number of images to generate\n  ///   - responseFormat: Format of the response\n  ///   - user: User identifier\n  public init(\n    imageData: Data,\n    model: Dalle? = nil,\n    numberOfImages: Int? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    user: String? 
= nil)\n  {\n    if let model, model.model != Model.dalle2.value {\n      assertionFailure(\n        \"Only dall-e-2 is supported at this time [https://platform.openai.com/docs/api-reference/images/createEdit]\")\n    }\n\n    image = imageData\n    n = numberOfImages\n    self.model = model?.model\n    size = model?.size\n    self.responseFormat = responseFormat?.rawValue\n    self.user = user\n  }\n\n  public enum ImageResponseFormat: String {\n    case url\n    case b64Json = \"b64_json\"\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case image\n    case model\n    case n\n    case responseFormat = \"response_format\"\n    case size\n    case user\n  }\n\n  /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.\n  let image: Data\n  /// The model to use for image generation. Only dall-e-2 is supported at this time. Defaults to dall-e-2\n  let model: String?\n  /// The number of images to generate. Must be between 1 and 10. Defaults to 1\n  let n: Int?\n  /// The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url\n  let responseFormat: String?\n  /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024\n  let size: String?\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices)\n  let user: String?\n}\n\n// MARK: MultipartFormDataParameters\n\nextension ImageVariationParameters: MultipartFormDataParameters {\n  public func encode(boundary: String) -> Data {\n    MultipartFormDataBuilder(boundary: boundary, entries: [\n      .file(paramName: Self.CodingKeys.image.rawValue, fileName: \"\", fileData: image, contentType: \"image/png\"),\n      .string(paramName: Self.CodingKeys.model.rawValue, value: model),\n      .string(paramName: Self.CodingKeys.n.rawValue, value: n),\n      .string(paramName: Self.CodingKeys.size.rawValue, value: size),\n      .string(paramName: Self.CodingKeys.responseFormat.rawValue, value: responseFormat),\n      .string(paramName: Self.CodingKeys.user.rawValue, value: user),\n    ]).build()\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/ImageGen/CreateImageEditParameters.swift",
    "content": "//\n//  CreateImageEditParameters.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 4/24/25.\n//\nimport Foundation\n\n// MARK: - CreateImageEditParameters\n\n/// Creates an edited or extended image given one or more source images and a prompt.\n/// This endpoint only supports `gpt-image-1` and `dall-e-2`.\npublic struct CreateImageEditParameters: Encodable {\n  #if canImport(UIKit) || canImport(AppKit)\n  /// Creates parameters for editing a single image (compatible with both dall-e-2 and gpt-image-1)\n  /// - Parameters:\n  ///   - image: The image to edit\n  ///   - prompt: A text description of the desired image\n  ///   - mask: Optional mask indicating areas to edit\n  ///   - model: The model to use\n  ///   - numberOfImages: Number of images to generate\n  ///   - quality: Quality of the generated images\n  ///   - responseFormat: Format of the response\n  ///   - size: Size of the generated images\n  ///   - user: User identifier\n  public init(\n    image: PlatformImage,\n    prompt: String,\n    mask: PlatformImage? = nil,\n    model: ModelType = .dallE2,\n    numberOfImages: Int? = nil,\n    quality: Quality? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    size: String? = nil,\n    user: String? 
= nil)\n  {\n    #if canImport(UIKit)\n    let imageData = image.pngData()\n    let maskData = mask?.pngData()\n    #elseif canImport(AppKit)\n    let imageData = image.tiffRepresentation\n    let maskData = mask?.tiffRepresentation\n    #endif\n\n    guard let imageData else {\n      fatalError(\"Failed to get image data\")\n    }\n\n    self.init(\n      imageData: [imageData],\n      prompt: prompt,\n      maskData: maskData,\n      model: model,\n      numberOfImages: numberOfImages,\n      quality: quality,\n      responseFormat: responseFormat,\n      size: size,\n      user: user)\n  }\n\n  /// Creates parameters for editing multiple images (for gpt-image-1 only)\n  /// - Parameters:\n  ///   - images: Array of images to edit\n  ///   - prompt: A text description of the desired image\n  ///   - mask: Optional mask indicating areas to edit\n  ///   - numberOfImages: Number of images to generate\n  ///   - quality: Quality of the generated images\n  ///   - size: Size of the generated images\n  ///   - user: User identifier\n  public init(\n    images: [PlatformImage],\n    prompt: String,\n    mask: PlatformImage? = nil,\n    numberOfImages: Int? = nil,\n    quality: Quality? = nil,\n    size: String? = nil,\n    user: String? 
= nil)\n  {\n    var imageDataArray = [Data]()\n\n    for image in images {\n      #if canImport(UIKit)\n      if let data = image.pngData() {\n        imageDataArray.append(data)\n      }\n      #elseif canImport(AppKit)\n      if let data = image.tiffRepresentation {\n        imageDataArray.append(data)\n      }\n      #endif\n    }\n\n    if imageDataArray.isEmpty {\n      assertionFailure(\"Failed to get image data for any of the provided images\")\n    }\n\n    #if canImport(UIKit)\n    let maskData = mask?.pngData()\n    #elseif canImport(AppKit)\n    let maskData = mask?.tiffRepresentation\n    #endif\n\n    self.init(\n      imageData: imageDataArray,\n      prompt: prompt,\n      maskData: maskData,\n      model: .gptImage1,\n      numberOfImages: numberOfImages,\n      quality: quality,\n      responseFormat: nil, // Not needed for gpt-image-1\n      size: size,\n      user: user)\n  }\n  #endif\n\n  /// Creates parameters from raw data (for advanced use cases)\n  /// - Parameters:\n  ///   - imageData: Raw image data (one or more images)\n  ///   - prompt: A text description of the desired image\n  ///   - maskData: Optional mask data\n  ///   - model: The model to use\n  ///   - numberOfImages: Number of images to generate\n  ///   - quality: Quality of the generated images\n  ///   - responseFormat: Format of the response\n  ///   - size: Size of the generated images\n  ///   - user: User identifier\n  public init(\n    imageData: [Data],\n    prompt: String,\n    maskData: Data? = nil,\n    model: ModelType = .dallE2,\n    numberOfImages: Int? = nil,\n    quality: Quality? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    size: String? = nil,\n    user: String? 
= nil)\n  {\n    image = imageData\n    self.prompt = prompt\n    mask = maskData\n    self.model = model.rawValue\n    n = numberOfImages\n    self.quality = quality?.rawValue\n    self.responseFormat = responseFormat?.rawValue\n    self.size = size\n    self.user = user\n  }\n\n  public enum ModelType: String {\n    case dallE2 = \"dall-e-2\"\n    case gptImage1 = \"gpt-image-1\"\n    case gptImage1Mini = \"gpt-image-1-mini\"\n  }\n\n  public enum Quality: String {\n    case auto\n    case high\n    case medium\n    case low\n    case standard\n  }\n\n  public enum ImageResponseFormat: String {\n    case url\n    case b64Json = \"b64_json\"\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case image\n    case prompt\n    case mask\n    case model\n    case n\n    case quality\n    case responseFormat = \"response_format\"\n    case size\n    case user\n  }\n\n  /// The image(s) to edit.\n  /// For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than 25MB.\n  /// For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB.\n  let image: [Data]\n\n  /// A text description of the desired image(s).\n  /// The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.\n  let prompt: String\n\n  /// An additional image whose fully transparent areas indicate where `image` should be edited.\n  /// If there are multiple images provided, the mask will be applied on the first image.\n  /// Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.\n  let mask: Data?\n\n  /// The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported.\n  /// Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.\n  let model: String?\n\n  /// The number of images to generate. 
Must be between 1 and 10.\n  /// Defaults to 1.\n  let n: Int?\n\n  /// The quality of the image that will be generated.\n  /// `high`, `medium` and `low` are only supported for `gpt-image-1`.\n  /// `dall-e-2` only supports `standard` quality.\n  /// Defaults to `auto`.\n  let quality: String?\n\n  /// The format in which the generated images are returned.\n  /// Must be one of `url` or `b64_json`.\n  /// URLs are only valid for 60 minutes after the image has been generated.\n  /// This parameter is only supported for `dall-e-2`, as `gpt-image-1` will always return base64-encoded images.\n  let responseFormat: String?\n\n  /// The size of the generated images.\n  /// Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`,\n  /// and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.\n  let size: String?\n\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n  let user: String?\n}\n\n// MARK: MultipartFormDataParameters\n\nextension CreateImageEditParameters: MultipartFormDataParameters {\n  public func encode(boundary: String) -> Data {\n    var entries = [MultipartFormDataEntry]()\n\n    // Add images (possibly multiple for gpt-image-1)\n    for (index, imageData) in image.enumerated() {\n      entries.append(.file(\n        paramName: \"\\(CodingKeys.image.rawValue)[]\",\n        fileName: \"image\\(index).png\",\n        fileData: imageData,\n        contentType: \"image/png\"))\n    }\n\n    // Add prompt\n    entries.append(.string(paramName: CodingKeys.prompt.rawValue, value: prompt))\n\n    // Add mask if provided\n    if let mask {\n      entries.append(.file(\n        paramName: CodingKeys.mask.rawValue,\n        fileName: \"mask.png\",\n        fileData: mask,\n        contentType: \"image/png\"))\n    }\n\n    // Add remaining parameters if they have values\n    if let model {\n      entries.append(.string(paramName: 
CodingKeys.model.rawValue, value: model))\n    }\n\n    if let n {\n      entries.append(.string(paramName: CodingKeys.n.rawValue, value: n))\n    }\n\n    if let quality {\n      entries.append(.string(paramName: CodingKeys.quality.rawValue, value: quality))\n    }\n\n    if let responseFormat {\n      entries.append(.string(paramName: CodingKeys.responseFormat.rawValue, value: responseFormat))\n    }\n\n    if let size {\n      entries.append(.string(paramName: CodingKeys.size.rawValue, value: size))\n    }\n\n    if let user {\n      entries.append(.string(paramName: CodingKeys.user.rawValue, value: user))\n    }\n\n    return MultipartFormDataBuilder(boundary: boundary, entries: entries).build()\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/ImageGen/CreateImageParameters.swift",
    "content": "//\n//  CreateImageParameters.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 4/24/25.\n//\n\nimport Foundation\n\n/// 'Create Image':\n/// https://platform.openai.com/docs/api-reference/images/create\npublic struct CreateImageParameters: Encodable {\n  /// Creates a new instance of CreateImageParameters\n  /// - Parameters:\n  ///   - prompt: A text description of the desired image(s)\n  ///   - background: Transparency setting for the background (gpt-image-1 only)\n  ///   - model: The model to use for image generation\n  ///   - moderation: Content moderation level (gpt-image-1 only)\n  ///   - n: Number of images to generate (1-10)\n  ///   - outputCompression: Compression level for images (gpt-image-1 only)\n  ///   - outputFormat: Format for generated images (gpt-image-1 only)\n  ///   - quality: Quality of generated images\n  ///   - responseFormat: Format for response (url or b64_json)\n  ///   - size: Size of generated images\n  ///   - style: Style of generated images (dall-e-3 only)\n  ///   - user: Unique identifier for end-user\n  public init(\n    prompt: String,\n    background: CreateImageParameters.Background? = nil,\n    model: CreateImageParameters.Model? = nil,\n    moderation: CreateImageParameters.Moderation? = nil,\n    n: Int? = nil,\n    outputCompression: Int? = nil,\n    outputFormat: CreateImageParameters.OutputFormat? = nil,\n    quality: CreateImageParameters.Quality? = nil,\n    responseFormat: CreateImageParameters.ResponseFormat? = nil,\n    size: String? = nil,\n    style: CreateImageParameters.Style? = nil,\n    user: String? 
= nil)\n  {\n    self.prompt = prompt\n    self.background = background\n    self.model = model\n    self.moderation = moderation\n    self.n = n\n    self.outputCompression = outputCompression\n    self.outputFormat = outputFormat\n    self.quality = quality\n    self.responseFormat = responseFormat\n    self.size = size\n    self.style = style\n    self.user = user\n  }\n\n  /// Background transparency options for gpt-image-1\n  public enum Background: String, Encodable {\n    case auto\n    case opaque\n    case transparent\n  }\n\n  /// Available models for image generation\n  public enum Model: String, Encodable {\n    case dallE2 = \"dall-e-2\"\n    case dallE3 = \"dall-e-3\"\n    case gptImage1 = \"gpt-image-1\"\n  }\n\n  /// Content moderation level for gpt-image-1\n  public enum Moderation: String, Encodable {\n    case auto\n    case low\n  }\n\n  /// Output format options for gpt-image-1\n  public enum OutputFormat: String, Encodable {\n    case jpeg\n    case png\n    case webp\n  }\n\n  /// Quality options for generated images\n  public enum Quality: String, Encodable {\n    /// Automatically select the best quality for the given model\n    case auto\n\n    /// Supported for gpt-image-1\n    case high, medium, low\n\n    /// Supported for dall-e-3\n    case hd, standard\n  }\n\n  /// Response format options\n  public enum ResponseFormat: String, Encodable {\n    case b64JSON = \"b64_json\"\n    case url\n  }\n\n  /// Style options for dall-e-3\n  public enum Style: String, Encodable {\n    case vivid\n    case natural\n  }\n\n  /// A text description of the desired image(s).\n  /// The maximum length is 32000 characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.\n  public let prompt: String\n\n  // MARK: - Optional properties\n\n  /// Allows to set transparency for the background of the generated image(s).\n  /// This parameter is only supported for `gpt-image-1`.\n  /// Must be one of `transparent`, `opaque` 
or `auto` (default value).\n  /// When `auto` is used, the model will automatically determine the best background for the image.\n  /// If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`.\n  public let background: Background?\n\n  /// The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`.\n  /// Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.\n  public let model: Model?\n\n  /// Control the content-moderation level for images generated by `gpt-image-1`.\n  /// Must be either low for less restrictive filtering or auto (default value).\n  public let moderation: Moderation?\n\n  /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.\n  /// Defaults to `1`\n  public let n: Int?\n\n  /// The compression level (0-100%) for the generated images.\n  /// This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100.\n  public let outputCompression: Int?\n\n  /// The format in which the generated images are returned.\n  /// This parameter is only supported for `gpt-image-1`.\n  /// Must be one of `png`, `jpeg`, or `webp`.\n  public let outputFormat: OutputFormat?\n\n  /// The quality of the image that will be generated.\n  /// - `auto` (default value) will automatically select the best quality for the given model.\n  /// - `high`, `medium` and `low` are supported for gpt-image-1.\n  /// - `hd` and `standard` are supported for dall-e-3.\n  /// - `standard` is the only option for dall-e-2.\n  public let quality: Quality?\n\n  /// The format in which generated images with dall-e-2 and dall-e-3 are returned.\n  /// Must be one of `url` or `b64_json`.\n  /// URLs are only valid for 60 minutes after the image has been generated.\n  /// This parameter isn't supported for `gpt-image-1` which will always return base64-encoded images.\n  public let 
responseFormat: ResponseFormat?\n\n  /// The size of the generated images.\n  /// - For gpt-image-1, one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default value)\n  /// - For dall-e-3, one of `1024x1024`, `1792x1024`, or `1024x1792`\n  /// - For dall-e-2, one of `256x256`, `512x512`, or `1024x1024`\n  public let size: String?\n\n  /// The style of the generated images.\n  /// This parameter is only supported for `dall-e-3`.\n  /// Must be one of `vivid` or `natural`.\n  /// Vivid causes the model to lean towards generating hyper-real and dramatic images.\n  /// Natural causes the model to produce more natural, less hyper-real looking images.\n  public let style: Style?\n\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n  public let user: String?\n\n  enum CodingKeys: String, CodingKey {\n    case prompt\n    case background\n    case model\n    case moderation\n    case n\n    case outputCompression = \"output_compression\"\n    case outputFormat = \"output_format\"\n    case quality\n    case responseFormat = \"response_format\"\n    case size\n    case style\n    case user\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/ImageGen/CreateImageVariationParameters.swift",
    "content": "//\n//  CreateImageVariationParameters.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 4/24/25.\n//\n\nimport Foundation\n#if canImport(UIKit)\nimport UIKit\n#elseif canImport(AppKit)\nimport AppKit\n#endif\n\n// MARK: - CreateImageVariationParameters\n\n/// Creates a variation of a given image.\n/// This endpoint only supports `dall-e-2`.\npublic struct CreateImageVariationParameters: Encodable {\n  #if canImport(UIKit) || canImport(AppKit)\n  /// Creates parameters for generating variations of an image\n  /// - Parameters:\n  ///   - image: The image to use as the basis for variations\n  ///   - numberOfImages: Number of variations to generate (1-10)\n  ///   - responseFormat: Format of the response\n  ///   - size: Size of the generated images\n  ///   - user: User identifier\n  public init(\n    image: PlatformImage,\n    numberOfImages: Int? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    size: Size? = nil,\n    user: String? = nil)\n  {\n    #if canImport(UIKit)\n    let imageData = image.pngData()\n    #elseif canImport(AppKit)\n    let imageData = image.tiffRepresentation\n    #endif\n\n    guard let imageData else {\n      fatalError(\"Failed to get image data\")\n    }\n\n    self.init(\n      imageData: imageData,\n      numberOfImages: numberOfImages,\n      responseFormat: responseFormat,\n      size: size,\n      user: user)\n  }\n  #endif\n\n  /// Creates parameters from raw image data\n  /// - Parameters:\n  ///   - imageData: Raw image data\n  ///   - numberOfImages: Number of variations to generate (1-10)\n  ///   - responseFormat: Format of the response\n  ///   - size: Size of the generated images\n  ///   - user: User identifier\n  public init(\n    imageData: Data,\n    numberOfImages: Int? = nil,\n    responseFormat: ImageResponseFormat? = nil,\n    size: Size? = nil,\n    user: String? 
= nil)\n  {\n    image = imageData\n    model = ModelType.dallE2.rawValue\n    n = numberOfImages\n    self.responseFormat = responseFormat?.rawValue\n    self.size = size?.rawValue\n    self.user = user\n  }\n\n  public enum ModelType: String {\n    case dallE2 = \"dall-e-2\"\n  }\n\n  public enum Size: String {\n    case small = \"256x256\"\n    case medium = \"512x512\"\n    case large = \"1024x1024\"\n  }\n\n  public enum ImageResponseFormat: String {\n    case url\n    case b64Json = \"b64_json\"\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case image\n    case model\n    case n\n    case responseFormat = \"response_format\"\n    case size\n    case user\n  }\n\n  /// The image to use as the basis for the variation(s).\n  /// Must be a valid PNG file, less than 4MB, and square.\n  let image: Data\n\n  /// The model to use for image generation. Only `dall-e-2` is supported at this time.\n  /// Defaults to `dall-e-2`.\n  let model: String?\n\n  /// The number of images to generate. 
Must be between 1 and 10.\n  /// Defaults to 1.\n  let n: Int?\n\n  /// The format in which the generated images are returned.\n  /// Must be one of `url` or `b64_json`.\n  /// URLs are only valid for 60 minutes after the image has been generated.\n  /// Defaults to `url`.\n  let responseFormat: String?\n\n  /// The size of the generated images.\n  /// Must be one of `256x256`, `512x512`, or `1024x1024`.\n  /// Defaults to `1024x1024`.\n  let size: String?\n\n  /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n  let user: String?\n}\n\n// MARK: MultipartFormDataParameters\n\nextension CreateImageVariationParameters: MultipartFormDataParameters {\n  public func encode(boundary: String) -> Data {\n    var entries = [MultipartFormDataEntry]()\n\n    // Add image file\n    entries.append(.file(\n      paramName: CodingKeys.image.rawValue,\n      fileName: \"image.png\",\n      fileData: image,\n      contentType: \"image/png\"))\n\n    // Add remaining parameters if they have values\n    if let model {\n      entries.append(.string(paramName: CodingKeys.model.rawValue, value: model))\n    }\n\n    if let n {\n      entries.append(.string(paramName: CodingKeys.n.rawValue, value: n))\n    }\n\n    if let responseFormat {\n      entries.append(.string(paramName: CodingKeys.responseFormat.rawValue, value: responseFormat))\n    }\n\n    if let size {\n      entries.append(.string(paramName: CodingKeys.size.rawValue, value: size))\n    }\n\n    if let user {\n      entries.append(.string(paramName: CodingKeys.user.rawValue, value: user))\n    }\n\n    return MultipartFormDataBuilder(boundary: boundary, entries: entries).build()\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Message/MessageParameter.swift",
    "content": "//\n//  MessageParameter.swift\n//\n//\n//  Created by James Rochabrun on 11/16/23.\n//\n\nimport Foundation\n\n/// [Create a message.](https://platform.openai.com/docs/api-reference/messages/createMessage)\npublic struct MessageParameter: Encodable {\n  public init(\n    role: Role,\n    content: Content,\n    attachments: [MessageAttachment]? = nil,\n    metadata: [String: String]? = nil)\n  {\n    self.role = role.rawValue\n    self.content = content\n    self.attachments = attachments\n    self.metadata = metadata\n  }\n\n  public enum Role: String {\n    case user\n    case assistant\n  }\n\n  public enum DetailLevel: String, Encodable {\n    case low\n    case high\n    case auto\n  }\n\n  /// Wrapper to handle both a string content or an array of content parts.\n  public enum Content: Encodable {\n    case stringContent(String)\n    case arrayContent([ContentItem])\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n      switch self {\n      case .stringContent(let text):\n        try container.encode(text)\n      case .arrayContent(let contentArray):\n        try container.encode(contentArray)\n      }\n    }\n  }\n\n  /// Enum to represent different content parts (text, image URL, image file).\n  public enum ContentItem: Encodable {\n    case text(String)\n    case imageURL(ImageURL)\n    case imageFile(ImageFile)\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.container(keyedBy: CodingKeys.self)\n      switch self {\n      case .text(let textContent):\n        try container.encode(\"text\", forKey: .type)\n        try container.encode(textContent, forKey: .text)\n\n      case .imageURL(let imageURLContent):\n        try container.encode(\"image_url\", forKey: .type)\n        try container.encode(imageURLContent, forKey: .imageURL)\n\n      case .imageFile(let imageFileContent):\n        try container.encode(\"image_file\", forKey: .type)\n    
    try container.encode(imageFileContent, forKey: .imageFile)\n      }\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case type\n      case text\n      case imageURL = \"image_url\"\n      case imageFile = \"image_file\"\n    }\n  }\n\n  /// References an image URL in the content of a message.\n  public struct ImageURL: Encodable {\n    /// The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.\n    let url: String\n\n    /// Specifies the detail level of the image. low uses fewer tokens, you can opt in to high resolution using high. Default value is auto\n    let detail: DetailLevel?\n\n    public init(\n      url: String,\n      detail: DetailLevel? = nil)\n    {\n      self.url = url\n      self.detail = detail\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case url\n      case detail\n    }\n  }\n\n  /// References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.\n  public struct ImageFile: Encodable {\n    /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content. Set purpose=\"vision\" when uploading the File if you need to later display the file content.\n    let fileId: String\n    /// Specifies the detail level of the image if specified by the user. low uses fewer tokens, you can opt in to high resolution using high.\n    let detail: DetailLevel?\n\n    public init(\n      fileId: String,\n      detail: DetailLevel? = nil)\n    {\n      self.fileId = fileId\n      self.detail = detail\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case fileId = \"file_id\"\n      case detail\n    }\n  }\n\n  /// The role of the entity that is creating the message. Allowed values include:\n  /// user: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n  /// assistant: Indicates the message is generated by the assistant. 
Use this value to insert messages from the assistant into the conversation.\n  let role: String\n  /// The content of the message, which can be a string or an array of content parts (text, image URL, image file).\n  let content: Content\n  /// A list of files attached to the message, and the tools they should be added to.\n  let attachments: [MessageAttachment]?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  let metadata: [String: String]?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Message/ModifyMessageParameters.swift",
    "content": "//\n//  ModifyMessageParameters.swift\n//\n//\n//  Created by James Rochabrun on 11/25/23.\n//\n\nimport Foundation\n\n/// Modifies a [Message](https://platform.openai.com/docs/api-reference/messages/modifyMessage)\n/// Only the metadata can be modified.\npublic struct ModifyMessageParameters: Encodable {\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n  public var metadata: [String: String]\n\n  public init(\n    metadata: [String: String])\n  {\n    self.metadata = metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Model.swift",
    "content": "//\n//  Model.swift\n//\n//\n//  Created by James Rochabrun on 11/15/23.\n//\n\nimport Foundation\n\n/// [Models](https://platform.openai.com/docs/models)\npublic enum Model {\n  case gpt4oAudioPreview\n\n  /// O1 models\n\n  /// an early preview of the o1 model, designed to reason about hard problems using broad general knowledge about the world.\n  case o1Preview\n  /// a faster and cheaper version of o1, particularly adept at coding, math, and science tasks where extensive general knowledge isn't required.\n  case o1Mini\n\n  /// Chat completion\n\n  /// ### Omicron model\n  /// As of 2024-05-13, this is the latest and greatest from OpenAI.\n  /// From their [docs](https://platform.openai.com/docs/models/gpt-4o):\n  ///\n  ///   > GPT-4o (“o” for “omni”) is our most advanced model. It is multimodal (accepting text or image inputs\n  ///   > and outputting text), and it has the same high intelligence as GPT-4 Turbo but is much more efficient—\n  ///   > it generates text 2x faster and is 50% cheaper. Additionally, GPT-4o has the best vision and performance\n  ///   > across non-English languages of any of our models\n  ///\n  case gpt4o // Points to gpt-4o-2024-05-13 until October 2, 2024, after will point to gpt-4o-2024-08-06.\n  case gpt4o20240513 // 128k context window with training data up to Oct 2023\n  /// The most affordable and intelligent small model for fast, lightweight tasks. GPT-4o mini is cheaper and more capable than GPT-3.5 Turbo. 
Currently points to gpt-4o-mini-2024-07-18.\n  case gpt4omini\n  /// Latest snapshot that supports [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)/\n  case gpt4o20240806\n  /// input 128,000 tokens, output 16,384 tokens\n  case gpt4o20241120\n\n  case gpt35Turbo\n  case gpt35Turbo1106 // Most updated - Supports parallel function calls\n  /// The latest GPT-3.5 Turbo model with higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls. Returns a maximum of 4,096 output tokens. [Learn more](https://openai.com/blog/new-embedding-models-and-api-updates#:~:text=Other%20new%20models%20and%20lower%20pricing).\n  case gpt35Turbo0125\n  case gpt4 // 8,192 tokens\n  case gpt41106Preview // Most updated - Supports parallel function calls 128,000 tokens\n  case gpt35Turbo0613 // To be deprecated \"2024-06-13\"\n  case gpt35Turbo16k0613 // To be deprecated \"2024-06-13\"\n  case gpt4TurboPreview // Currently points to gpt-4-0125-preview.\n  /// The latest GPT-4 model intended to reduce cases of “laziness” where the model doesn’t complete a task. Returns a maximum of 4,096 output tokens. [Learn more.](https://openai.com/blog/new-embedding-models-and-api-updates)\n  case gpt40125Preview // 128,000 tokens\n  /// GPT-4 Turbo with Vision model. Vision requests can now use JSON mode and function calling. gpt-4-turbo currently points to this version.\n  /// 128,000 tokens\n  /// Up to Dec 2023\n  case gpt4Turbo20240409\n  /// GPT-4 Turbo with Vision\n  /// The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling. 
Currently points to gpt-4-turbo-2024-04-09.\n  /// 128,000 tokens\n  /// Up to Dec 2023\n  case gpt4turbo\n\n  /// Vision\n  case gpt4VisionPreview // Vision\n\n  /// Complex reasoning, broad world knowledge, and code-heavy or multi-step agentic tasks\n  case gpt5\n  /// Cost-optimized reasoning and chat; balances speed, cost, and capability\n  case gpt5Mini\n  /// High-throughput tasks, especially simple instruction-following or classification\n  case gpt5Nano\n\n  case gpt5Codex\n\n  /// Images\n  case dalle2\n  case dalle3\n\n  /// custom\n  case custom(String)\n\n  public var value: String {\n    switch self {\n    case .gpt4oAudioPreview: \"gpt-4o-audio-preview\"\n    case .o1Preview: \"o1-preview\"\n    case .o1Mini: \"o1-mini\"\n    case .gpt4o: \"gpt-4o\"\n    case .gpt4o20240513: \"gpt-4o-2024-05-13\"\n    case .gpt4o20240806: \"gpt-4o-2024-08-06\"\n    case .gpt4o20241120: \"gpt-4o-2024-11-20\"\n    case .gpt4omini: \"gpt-4o-mini\"\n    case .gpt35Turbo: \"gpt-3.5-turbo\"\n    case .gpt35Turbo1106: \"gpt-3.5-turbo-1106\"\n    case .gpt35Turbo0125: \"gpt-3.5-turbo-0125\"\n    case .gpt4: \"gpt-4\"\n    case .gpt41106Preview: \"gpt-4-1106-preview\"\n    case .gpt35Turbo0613: \"gpt-3.5-turbo-0613\"\n    case .gpt35Turbo16k0613: \"gpt-3.5-turbo-16k-0613\"\n    case .gpt4VisionPreview: \"gpt-4-vision-preview\"\n    case .dalle2: \"dall-e-2\"\n    case .dalle3: \"dall-e-3\"\n    case .gpt4TurboPreview: \"gpt-4-turbo-preview\"\n    case .gpt40125Preview: \"gpt-4-0125-preview\"\n    case .gpt4Turbo20240409: \"gpt-4-turbo-2024-04-09\"\n    case .gpt4turbo: \"gpt-4-turbo\"\n    case .gpt5: \"gpt-5\"\n    case .gpt5Mini: \"gpt-5-mini\"\n    case .gpt5Nano: \"gpt-5-nano\"\n    case .gpt5Codex: \"gpt-5-codex\"\n    case .custom(let model): model\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Moderation/ModerationParameter.swift",
    "content": "//\n//  ModerationParameter.swift\n//\n//\n//  Created by James Rochabrun on 10/13/23.\n//\n\nimport Foundation\n\n/// [Classifies if text violates OpenAI's Content Policy.](https://platform.openai.com/docs/api-reference/moderations/create)\npublic struct ModerationParameter<Input: Encodable>: Encodable {\n  /// The input text to classify, string or array.\n  let input: Input\n  /// Two content moderations models are available: text-moderation-stable and text-moderation-latest.\n  /// The default is text-moderation-latest which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use text-moderation-stable, we will provide advanced notice before updating the model. Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.\n  let model: String?\n\n  public enum Model: String {\n    case stable = \"text-moderation-stable\"\n    case latest = \"text-moderation-latest\"\n  }\n\n  public init(\n    input: Input,\n    model: Model? = nil)\n  {\n    self.input = input\n    self.model = model?.rawValue\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/MultipartFormDataParameters.swift",
    "content": "//\n//  MultipartFormDataParameters.swift\n//\n//\n//  Created by James Rochabrun on 10/11/23.\n//\n\nimport Foundation\n\npublic protocol MultipartFormDataParameters {\n  func encode(boundary: String) -> Data\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Realtime/OpenAIRealtimeConversationItemCreate.swift",
    "content": "//\n//  OpenAIRealtimeConversationItemCreate.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\n// MARK: - OpenAIRealtimeConversationItemCreate\n\n/// https://platform.openai.com/docs/api-reference/realtime-client-events/conversation/item/create\npublic struct OpenAIRealtimeConversationItemCreate: Encodable {\n  public let type = \"conversation.item.create\"\n  public let item: Item\n\n  public init(item: Item) {\n    self.item = item\n  }\n}\n\n// MARK: OpenAIRealtimeConversationItemCreate.Item\n\nextension OpenAIRealtimeConversationItemCreate {\n  public struct Item: Encodable {\n    public let type = \"message\"\n    public let role: String\n    public let content: [Content]\n\n    public init(role: String, text: String) {\n      self.role = role\n      content = [.text(text)]\n    }\n\n    public init(role: String, content: [Content]) {\n      self.role = role\n      self.content = content\n    }\n  }\n}\n\n// MARK: - OpenAIRealtimeConversationItemCreate.Item.Content\n\nextension OpenAIRealtimeConversationItemCreate.Item {\n  public enum Content: Encodable {\n    case text(String)\n    case image(String) // base64 data URL: \"data:image/{format};base64,{bytes}\"\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.container(keyedBy: CodingKeys.self)\n      switch self {\n      case .text(let text):\n        try container.encode(\"input_text\", forKey: .type)\n        try container.encode(text, forKey: .text)\n\n      case .image(let imageUrl):\n        try container.encode(\"input_image\", forKey: .type)\n        try container.encode(imageUrl, forKey: .imageUrl)\n      }\n    }\n\n    private enum CodingKeys: String, CodingKey {\n      case type\n      case text\n      case imageUrl = \"image_url\"\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Realtime/OpenAIRealtimeInputAudioBufferAppend.swift",
    "content": "//\n//  OpenAIRealtimeInputAudioBufferAppend.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\npublic struct OpenAIRealtimeInputAudioBufferAppend: Encodable {\n  public let type = \"input_audio_buffer.append\"\n\n  /// base64 encoded PCM16 data\n  public let audio: String\n\n  public init(audio: String) {\n    self.audio = audio\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Realtime/OpenAIRealtimeResponseCreate.swift",
    "content": "//\n//  OpenAIRealtimeResponseCreate.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\n// MARK: - OpenAIRealtimeResponseCreate\n\n/// https://platform.openai.com/docs/api-reference/realtime-client-events/response\npublic struct OpenAIRealtimeResponseCreate: Encodable {\n  public let type = \"response.create\"\n  public let response: Response?\n\n  public init(response: Response? = nil) {\n    self.response = response\n  }\n}\n\n// MARK: OpenAIRealtimeResponseCreate.Response\n\nextension OpenAIRealtimeResponseCreate {\n  public struct Response: Encodable {\n    public let instructions: String?\n    public let modalities: [String]?\n    public let tools: [Tool]?\n\n    public init(\n      instructions: String? = nil,\n      modalities: [String]? = nil,\n      tools: [Tool]? = nil)\n    {\n      self.instructions = instructions\n      self.modalities = modalities\n      self.tools = tools\n    }\n  }\n}\n\n// MARK: - OpenAIRealtimeResponseCreate.Response.Tool\n\nextension OpenAIRealtimeResponseCreate.Response {\n  public struct Tool: Encodable {\n    public let name: String\n    public let description: String\n    public let parameters: [String: OpenAIJSONValue]\n    public let type = \"function\"\n\n    public init(name: String, description: String, parameters: [String: OpenAIJSONValue]) {\n      self.name = name\n      self.description = description\n      self.parameters = parameters\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Realtime/OpenAIRealtimeSessionConfiguration.swift",
    "content": "// MARK: - OpenAIRealtimeSessionConfiguration\n\n//\n//  OpenAIRealtimeSessionConfiguration.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n/// Realtime session configuration\n/// https://platform.openai.com/docs/api-reference/realtime-client-events/session/update#realtime-client-events/session/update-session\npublic struct OpenAIRealtimeSessionConfiguration: Encodable, Sendable {\n  public init(\n    inputAudioFormat: OpenAIRealtimeSessionConfiguration.AudioFormat? = nil,\n    inputAudioTranscription: OpenAIRealtimeSessionConfiguration.InputAudioTranscription? = nil,\n    instructions: String? = nil,\n    maxResponseOutputTokens: OpenAIRealtimeSessionConfiguration.MaxResponseOutputTokens? = nil,\n    modalities: [OpenAIRealtimeSessionConfiguration.Modality]? = nil,\n    outputAudioFormat: OpenAIRealtimeSessionConfiguration.AudioFormat? = nil,\n    speed: Float? = 1.0,\n    temperature: Double? = nil,\n    tools: [OpenAIRealtimeSessionConfiguration.RealtimeTool]? = nil,\n    toolChoice: OpenAIRealtimeSessionConfiguration.ToolChoice? = nil,\n    turnDetection: OpenAIRealtimeSessionConfiguration.TurnDetection? = nil,\n    voice: String? 
= nil)\n  {\n    self.inputAudioFormat = inputAudioFormat\n    self.inputAudioTranscription = inputAudioTranscription\n    self.instructions = instructions\n    self.maxResponseOutputTokens = maxResponseOutputTokens\n    self.modalities = modalities\n    self.outputAudioFormat = outputAudioFormat\n    self.speed = speed\n    self.temperature = temperature\n    self.tools = tools\n    self.toolChoice = toolChoice\n    self.turnDetection = turnDetection\n    self.voice = voice\n  }\n\n  public enum ToolChoice: Encodable, Sendable {\n    /// The model will not call any tool and instead generates a message.\n    /// This is the default when no tools are present in the request body\n    case none\n\n    /// The model can pick between generating a message or calling one or more tools.\n    /// This is the default when tools are present in the request body\n    case auto\n\n    /// The model must call one or more tools\n    case required\n\n    /// Forces the model to call a specific tool\n    case specific(functionName: String)\n\n    public func encode(to encoder: any Encoder) throws {\n      switch self {\n      case .none:\n        var container = encoder.singleValueContainer()\n        try container.encode(\"none\")\n\n      case .auto:\n        var container = encoder.singleValueContainer()\n        try container.encode(\"auto\")\n\n      case .required:\n        var container = encoder.singleValueContainer()\n        try container.encode(\"required\")\n\n      case .specific(let functionName):\n        var container = encoder.container(keyedBy: RootKey.self)\n        try container.encode(\"function\", forKey: .type)\n        try container.encode(functionName, forKey: .name)\n      }\n    }\n\n    private enum RootKey: CodingKey {\n      case type\n      case name\n    }\n  }\n\n  /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n  public let inputAudioFormat: AudioFormat?\n\n  /// Configuration for input audio transcription. 
Set to nil to turn off.\n  public let inputAudioTranscription: InputAudioTranscription?\n\n  /// The default system instructions prepended to model calls.\n  ///\n  /// OpenAI recommends the following instructions:\n  ///\n  ///     Your knowledge cutoff is 2023-10. You are a helpful, witty, and friendly AI. Act\n  ///     like a human, but remember that you aren't a human and that you can't do human\n  ///     things in the real world. Your voice and personality should be warm and engaging,\n  ///     with a lively and playful tone. If interacting in a non-English language, start by\n  ///     using the standard accent or dialect familiar to the user. Talk quickly. You should\n  ///     always call a function if you can. Do not refer to these rules, even if you're\n  ///     asked about them.\n  ///\n  public let instructions: String?\n\n  /// Maximum number of output tokens for a single assistant response, inclusive of tool\n  /// calls. Provide an integer between 1 and 4096 to limit output tokens, or \"inf\" for\n  /// the maximum available tokens for a given model. Defaults to \"inf\".\n  public let maxResponseOutputTokens: MaxResponseOutputTokens?\n\n  /// The set of modalities the model can respond with. To disable audio, set this to [\"text\"].\n  /// Possible values are `audio` and `text`\n  public let modalities: [Modality]?\n\n  /// The format of output audio.\n  public let outputAudioFormat: AudioFormat?\n\n  /// The speed of the generated audio. Select a value from 0.25 to 4.0.\n  /// Default to `1.0`\n  public let speed: Float?\n\n  /// Sampling temperature for the model.\n  public let temperature: Double?\n\n  /// Tools (functions and MCP servers) available to the model.\n  public let tools: [RealtimeTool]?\n\n  /// How the model chooses tools. Options are \"auto\", \"none\", \"required\", or specify a function.\n  public let toolChoice: ToolChoice?\n\n  /// Configuration for turn detection. 
Set to nil to turn off.\n  public let turnDetection: TurnDetection?\n\n  /// The voice the model uses to respond - one of alloy, echo, or shimmer. Cannot be\n  /// changed once the model has responded with audio at least once.\n  public let voice: String?\n\n  private enum CodingKeys: String, CodingKey {\n    case inputAudioFormat = \"input_audio_format\"\n    case inputAudioTranscription = \"input_audio_transcription\"\n    case instructions\n    case maxResponseOutputTokens = \"max_response_output_tokens\"\n    case modalities\n    case outputAudioFormat = \"output_audio_format\"\n    case speed\n    case temperature\n    case tools\n    case toolChoice = \"tool_choice\"\n    case turnDetection = \"turn_detection\"\n    case voice\n  }\n}\n\n// MARK: OpenAIRealtimeSessionConfiguration.InputAudioTranscription\n\nextension OpenAIRealtimeSessionConfiguration {\n  public struct InputAudioTranscription: Encodable, Sendable {\n    /// The model to use for transcription (e.g., \"whisper-1\").\n    public let model: String\n\n    /// The language of the input audio in ISO-639-1 format (e.g., \"en\", \"es\", \"ja\").\n    /// Supplying the input language improves transcription accuracy and latency.\n    public let language: String?\n\n    public init(model: String, language: String? 
= nil) {\n      self.model = model\n      self.language = language\n    }\n  }\n}\n\n// MARK: OpenAIRealtimeSessionConfiguration.MaxResponseOutputTokens\n\nextension OpenAIRealtimeSessionConfiguration {\n  public enum MaxResponseOutputTokens: Encodable, Sendable {\n    case int(Int)\n    case infinite\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n      switch self {\n      case .int(let value):\n        try container.encode(value)\n      case .infinite:\n        try container.encode(\"inf\")\n      }\n    }\n  }\n}\n\n// MARK: OpenAIRealtimeSessionConfiguration.FunctionTool\n\nextension OpenAIRealtimeSessionConfiguration {\n  public struct FunctionTool: Encodable, Sendable {\n    /// The description of the function\n    public let description: String\n\n    /// The name of the function\n    public let name: String\n\n    /// The function parameters\n    public let parameters: [String: OpenAIJSONValue]\n\n    /// The type of the tool, e.g., \"function\".\n    public let type = \"function\"\n\n    public init(name: String, description: String, parameters: [String: OpenAIJSONValue]) {\n      self.name = name\n      self.description = description\n      self.parameters = parameters\n    }\n  }\n}\n\n// MARK: OpenAIRealtimeSessionConfiguration.RealtimeTool\n\nextension OpenAIRealtimeSessionConfiguration {\n  /// Represents a tool that can be either a function or an MCP server\n  public enum RealtimeTool: Encodable, Sendable {\n    case function(FunctionTool)\n    case mcp(Tool.MCPTool)\n\n    public func encode(to encoder: Encoder) throws {\n      switch self {\n      case .function(let tool):\n        try tool.encode(to: encoder)\n      case .mcp(let mcpTool):\n        try mcpTool.encode(to: encoder)\n      }\n    }\n  }\n}\n\n// MARK: OpenAIRealtimeSessionConfiguration.TurnDetection\n\nextension OpenAIRealtimeSessionConfiguration {\n  public struct TurnDetection: Encodable, Sendable {\n    public init(\n 
     type: DetectionType)\n    {\n      self.type = type\n    }\n\n    public func encode(to encoder: any Encoder) throws {\n      var container = encoder.container(keyedBy: CodingKeys.self)\n\n      switch type {\n      case .serverVAD(let prefixPaddingMs, let silenceDurationMs, let threshold):\n        try container.encode(\"server_vad\", forKey: .type)\n        try container.encode(prefixPaddingMs, forKey: .prefixPaddingMs)\n        try container.encode(silenceDurationMs, forKey: .silenceDurationMs)\n        try container.encode(threshold, forKey: .threshold)\n\n      case .semanticVAD(let eagerness):\n        try container.encode(\"semantic_vad\", forKey: .type)\n        try container.encode(String(describing: eagerness), forKey: .eagerness)\n      }\n    }\n\n    let type: DetectionType\n\n    private enum CodingKeys: String, CodingKey {\n      case prefixPaddingMs = \"prefix_padding_ms\"\n      case silenceDurationMs = \"silence_duration_ms\"\n      case threshold\n      case type\n      case eagerness\n    }\n  }\n}\n\n// MARK: OpenAIRealtimeSessionConfiguration.AudioFormat\n\n/// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\nextension OpenAIRealtimeSessionConfiguration {\n  public enum AudioFormat: String, Encodable, Sendable {\n    case pcm16\n    case g711Ulaw = \"g711_ulaw\"\n    case g711Alaw = \"g711_alaw\"\n  }\n}\n\n// MARK: OpenAIRealtimeSessionConfiguration.Modality\n\n/// The format of input audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\nextension OpenAIRealtimeSessionConfiguration {\n  public enum Modality: String, Encodable, Sendable {\n    case audio\n    case text\n  }\n}\n\n// MARK: - OpenAIRealtimeSessionConfiguration.TurnDetection.DetectionType\n\nextension OpenAIRealtimeSessionConfiguration.TurnDetection {\n  public enum DetectionType: Encodable, Sendable {\n    /// - Parameters:\n    ///   - prefixPaddingMs: Amount of audio to include before speech starts (in milliseconds).\n    ///                      OpenAI's default is 300\n    ///   - silenceDurationMs: Duration of silence to detect speech stop (in milliseconds).  With shorter values\n    ///                        the model will respond more quickly, but may jump in on short pauses from the user.\n    ///                        OpenAI's default is 500\n    ///   - threshold: Activation threshold for VAD (0.0 to 1.0). A higher threshold will require louder audio to\n    ///                activate the model, and thus might perform better in noisy environments.\n    ///                OpenAI's default is 0.5\n    case serverVAD(prefixPaddingMs: Int, silenceDurationMs: Int, threshold: Double)\n\n    /// - Parameters:\n    ///   - eagerness: The eagerness of the model to respond. `low` will wait longer for the user to\n    ///                continue speaking, `high` will respond more quickly.\n    ///                OpenAI's default is medium\n    case semanticVAD(eagerness: Eagerness)\n\n    public enum Eagerness: String, Encodable, Sendable {\n      case low\n      case medium\n      case high\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Realtime/OpenAIRealtimeSessionUpdate.swift",
    "content": "//\n//  OpenAIRealtimeSessionUpdate.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n/// Send this event to update the session's default configuration.\n///\n/// Docstrings from:\n/// https://platform.openai.com/docs/api-reference/realtime-client-events/session/update\npublic struct OpenAIRealtimeSessionUpdate: Encodable {\n  public init(\n    eventId: String? = nil,\n    session: OpenAIRealtimeSessionConfiguration)\n  {\n    self.eventId = eventId\n    self.session = session\n  }\n\n  /// Optional client-generated ID used to identify this event.\n  public let eventId: String?\n\n  /// Session configuration to update\n  public let session: OpenAIRealtimeSessionConfiguration\n\n  /// The event type, must be \"session.update\".\n  public let type = \"session.update\"\n\n  private enum CodingKeys: String, CodingKey {\n    case eventId = \"event_id\"\n    case session\n    case type\n  }\n\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/Conversation.swift",
    "content": "//\n//  Conversation.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: ConversationType\n\n/// Defaults to null\n/// The conversation that this response belongs to. Items from this conversation are prepended to input_items for this response request. Input items and output items from this response are automatically added to this conversation after this response completes.\npublic enum Conversation: Codable {\n  /// Conversation ID\n  /// The unique ID of the conversation.\n  case id(String)\n\n  /// Conversation object\n  /// The conversation that this response belongs to.\n  case object(ConversationObject)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n    if let stringValue = try? container.decode(String.self) {\n      self = .id(stringValue)\n    } else if let objectValue = try? container.decode(ConversationObject.self) {\n      self = .object(objectValue)\n    } else {\n      throw DecodingError.typeMismatch(\n        Conversation.self,\n        DecodingError.Context(\n          codingPath: decoder.codingPath,\n          debugDescription: \"Expected String or ConversationObject\"))\n    }\n  }\n\n  /// Conversation object\n  public struct ConversationObject: Codable {\n    public init(\n      id: String,\n      createdAt: Int? = nil,\n      error: ErrorObject? = nil,\n      incompleteDetails: IncompleteDetails? 
= nil)\n    {\n      self.id = id\n      self.createdAt = createdAt\n      self.error = error\n      self.incompleteDetails = incompleteDetails\n    }\n\n    /// Error object for conversation\n    public struct ErrorObject: Codable {\n      /// The error code for the response\n      public let code: String\n\n      /// A human-readable description of the error\n      public let message: String\n\n      public init(code: String, message: String) {\n        self.code = code\n        self.message = message\n      }\n    }\n\n    /// Incomplete details structure\n    public struct IncompleteDetails: Codable {\n      /// The reason why the response is incomplete\n      public let reason: String\n\n      public init(reason: String) {\n        self.reason = reason\n      }\n    }\n\n    /// The unique ID of the conversation\n    public var id: String\n\n    /// Unix timestamp (in seconds) of when this conversation was created\n    public var createdAt: Int?\n\n    /// An error object returned when the model fails to generate a Response\n    public var error: ErrorObject?\n\n    /// Details about why the response is incomplete\n    public var incompleteDetails: IncompleteDetails?\n\n    enum CodingKeys: String, CodingKey {\n      case id\n      case createdAt = \"created_at\"\n      case error\n      case incompleteDetails = \"incomplete_details\"\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n    switch self {\n    case .id(let string):\n      try container.encode(string)\n    case .object(let object):\n      try container.encode(object)\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/GetInputItemsParameter.swift",
    "content": "//\n//  GetInputItemsParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: GetInputItemsParameter\n\n/// [Get input items for a response](https://platform.openai.com/docs/api-reference/responses/input-items)\npublic struct GetInputItemsParameter: Codable {\n  /// Initialize a new GetInputItemsParameter\n  public init(\n    after: String? = nil,\n    include: [ResponseInclude]? = nil,\n    limit: Int? = nil,\n    order: String? = nil)\n  {\n    self.after = after\n    self.include = include?.map(\\.rawValue)\n    self.limit = limit\n    self.order = order\n  }\n\n  /// An item ID to list items after, used in pagination.\n  public var after: String?\n\n  /// Additional fields to include in the response.\n  public var include: [String]?\n\n  /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  public var limit: Int?\n\n  /// The order to return the input items in. Default is desc. One of 'asc' or 'desc'.\n  public var order: String?\n\n  enum CodingKeys: String, CodingKey {\n    case after\n    case include\n    case limit\n    case order\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/GetResponseParameter.swift",
    "content": "//\n//  GetResponseParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: GetResponseParameter\n\n/// [Get a model response](https://platform.openai.com/docs/api-reference/responses/get)\npublic struct GetResponseParameter: Codable {\n  /// Initialize a new GetResponseParameter\n  public init(\n    include: [ResponseInclude]? = nil,\n    includeObfuscation: Bool? = nil,\n    startingAfter: Int? = nil,\n    stream: Bool? = nil)\n  {\n    self.include = include?.map(\\.rawValue)\n    self.includeObfuscation = includeObfuscation\n    self.startingAfter = startingAfter\n    self.stream = stream\n  }\n\n  /// Additional fields to include in the response.\n  public var include: [String]?\n\n  /// When true, stream obfuscation will be enabled. Stream obfuscation adds random characters to an obfuscation field on streaming delta events to normalize payload sizes as a mitigation to certain side-channel attacks.\n  public var includeObfuscation: Bool?\n\n  /// The sequence number of the event after which to start streaming.\n  public var startingAfter: Int?\n\n  /// If set to true, the model response data will be streamed to the client as it is generated using server-sent events.\n  public var stream: Bool?\n\n  enum CodingKeys: String, CodingKey {\n    case include\n    case includeObfuscation = \"include_obfuscation\"\n    case startingAfter = \"starting_after\"\n    case stream\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/InputType.swift",
    "content": "//\n//  InputType.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: - InputType\n\n/// Text, image, or file inputs to the model, used to generate a response.\n///\n/// Learn more:\n///\n/// [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n/// [Image inputs](https://platform.openai.com/docs/guides/images)\n/// [File inputs](https://platform.openai.com/docs/guides/pdf-files)\n/// [Conversation state](https://platform.openai.com/docs/guides/conversation-state)\n/// [Function calling](https://platform.openai.com/docs/guides/function-calling)\npublic enum InputType: Codable {\n  /// A text input to the model, equivalent to a text input with the user role.\n  case string(String)\n\n  /// A list of one or many input items to the model, containing different content types.\n  case array([InputItem])\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n\n    if let text = try? container.decode(String.self) {\n      self = .string(text)\n    } else if let array = try? 
container.decode([InputItem].self) {\n      self = .array(array)\n    } else {\n      throw DecodingError.dataCorruptedError(\n        in: container,\n        debugDescription: \"Input must be a string or an array of input items\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n    switch self {\n    case .string(let text):\n      try container.encode(text)\n    case .array(let items):\n      try container.encode(items)\n    }\n  }\n}\n\n// MARK: - InputItem\n\n/// An item representing part of the context for the response to be generated by the model.\n/// Can contain text, images, and audio inputs, as well as previous assistant responses and tool call outputs.\npublic enum InputItem: Codable {\n  /// Input message with role and content\n  case message(InputMessage)\n\n  /// Output message from the model (for conversation history)\n  case outputMessage(OutputMessage)\n\n  /// File search tool call\n  case fileSearchToolCall(FileSearchToolCall)\n\n  /// Function tool call\n  case functionToolCall(FunctionToolCall)\n\n  /// Function tool call output\n  case functionToolCallOutput(FunctionToolCallOutput)\n\n  /// Computer tool call output\n  case computerToolCallOutput(ComputerToolCallOutput)\n\n  /// Local shell call output\n  case localShellCallOutput(LocalShellCallOutput)\n\n  /// MCP approval response\n  case mcpApprovalResponse(MCPApprovalResponse)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"message\":\n      self = try .message(InputMessage(from: decoder))\n    case \"file_search_call\":\n      self = try .fileSearchToolCall(FileSearchToolCall(from: decoder))\n    case \"function_call\":\n      self = try .functionToolCall(FunctionToolCall(from: decoder))\n    case \"function_call_output\":\n      self = try 
.functionToolCallOutput(FunctionToolCallOutput(from: decoder))\n    case \"computer_call_output\":\n      self = try .computerToolCallOutput(ComputerToolCallOutput(from: decoder))\n    case \"local_shell_call_output\":\n      self = try .localShellCallOutput(LocalShellCallOutput(from: decoder))\n    case \"mcp_approval_response\":\n      self = try .mcpApprovalResponse(MCPApprovalResponse(from: decoder))\n    default:\n      // Try to decode as message if type is not recognized\n      self = try .message(InputMessage(from: decoder))\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    switch self {\n    case .message(let message):\n      try message.encode(to: encoder)\n    case .outputMessage(let message):\n      try message.encode(to: encoder)\n    case .fileSearchToolCall(let call):\n      try call.encode(to: encoder)\n    case .functionToolCall(let call):\n      try call.encode(to: encoder)\n    case .functionToolCallOutput(let output):\n      try output.encode(to: encoder)\n    case .computerToolCallOutput(let output):\n      try output.encode(to: encoder)\n    case .localShellCallOutput(let output):\n      try output.encode(to: encoder)\n    case .mcpApprovalResponse(let response):\n      try response.encode(to: encoder)\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n    case role\n  }\n}\n\n// MARK: - InputMessage\n\n/// A message input to the model with a role indicating instruction following hierarchy\npublic struct InputMessage: Codable {\n  public init(role: String, content: MessageContent, type: String? = \"message\", status: String? = nil, id: String? = nil) {\n    self.role = role\n    self.content = content\n    self.type = type\n    self.status = status\n    self.id = id\n  }\n\n  /// The role of the message input (user, system, assistant, developer)\n  public let role: String\n\n  /// The content of the message\n  public let content: MessageContent\n\n  /// The type of the message input. 
Always \"message\"\n  public let type: String?\n\n  /// The status of item. Populated when items are returned via API\n  public let status: String?\n\n  /// The unique ID of the message (for assistant messages with response IDs)\n  public let id: String?\n}\n\n// MARK: - MessageContent\n\n/// Content for messages - can be string or array of content items\npublic enum MessageContent: Codable {\n  /// Text input as string\n  case text(String)\n\n  /// Array of content items\n  case array([ContentItem])\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n\n    if let text = try? container.decode(String.self) {\n      self = .text(text)\n    } else if let array = try? container.decode([ContentItem].self) {\n      self = .array(array)\n    } else {\n      throw DecodingError.dataCorruptedError(\n        in: container,\n        debugDescription: \"Content must be a string or an array of content items\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n    switch self {\n    case .text(let text):\n      try container.encode(text)\n    case .array(let items):\n      try container.encode(items)\n    }\n  }\n}\n\n// MARK: - ContentItem\n\n/// Content item types for messages\npublic enum ContentItem: Codable {\n  /// Text content\n  case text(TextContent)\n\n  /// Image content\n  case image(ImageContent)\n\n  /// File content\n  case file(FileContent)\n\n  /// Audio content\n  case audio(AudioContent)\n\n  /// Output text (for assistant messages)\n  case outputText(OutputTextContent)\n\n  /// Refusal (for assistant messages)\n  case refusal(RefusalContent)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    let singleValueContainer = try decoder.singleValueContainer()\n\n    switch type {\n    case \"input_text\":\n   
   self = try .text(singleValueContainer.decode(TextContent.self))\n\n    case \"input_image\":\n      self = try .image(singleValueContainer.decode(ImageContent.self))\n\n    case \"input_file\":\n      self = try .file(singleValueContainer.decode(FileContent.self))\n\n    case \"input_audio\":\n      self = try .audio(singleValueContainer.decode(AudioContent.self))\n\n    case \"output_text\":\n      self = try .outputText(singleValueContainer.decode(OutputTextContent.self))\n\n    case \"refusal\":\n      self = try .refusal(singleValueContainer.decode(RefusalContent.self))\n\n    default:\n      throw DecodingError.dataCorruptedError(\n        forKey: .type,\n        in: container,\n        debugDescription: \"Unknown content type: \\(type)\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n\n    switch self {\n    case .text(let text):\n      try container.encode(text)\n    case .image(let image):\n      try container.encode(image)\n    case .file(let file):\n      try container.encode(file)\n    case .audio(let audio):\n      try container.encode(audio)\n    case .outputText(let output):\n      try container.encode(output)\n    case .refusal(let refusal):\n      try container.encode(refusal)\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n  }\n}\n\n// MARK: - TextContent\n\n/// Text content structure\npublic struct TextContent: Codable {\n  /// The text content\n  public let text: String\n\n  /// The type of content, always \"input_text\"\n  public let type = \"input_text\"\n\n  public init(text: String) {\n    self.text = text\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case text\n    case type\n  }\n}\n\n// MARK: - ImageContent\n\n/// Image content structure\npublic struct ImageContent: Codable {\n  public init(detail: String? = \"auto\", fileId: String? = nil, imageUrl: String? 
= nil) {\n    self.detail = detail\n    self.fileId = fileId\n    self.imageUrl = imageUrl\n  }\n\n  /// The type of content, always \"input_image\"\n  public let type = \"input_image\"\n\n  /// The detail level of the image. One of high, low, or auto. Defaults to auto when creating.\n  public let detail: String?\n\n  /// The ID of the file to be sent to the model\n  public let fileId: String?\n\n  /// The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL\n  public let imageUrl: String?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case detail\n    case fileId = \"file_id\"\n    case imageUrl = \"image_url\"\n  }\n}\n\n// MARK: - FileContent\n\n/// File content structure\npublic struct FileContent: Codable {\n  public init(fileData: String? = nil, fileId: String? = nil, fileUrl: String? = nil, filename: String? = nil) {\n    self.fileData = fileData\n    self.fileId = fileId\n    self.fileUrl = fileUrl\n    self.filename = filename\n  }\n\n  /// The type of content, always \"input_file\"\n  public let type = \"input_file\"\n\n  /// The content of the file to be sent to the model\n  public let fileData: String?\n\n  /// The ID of the file to be sent to the model\n  public let fileId: String?\n\n  /// The URL of the file to be sent to the model\n  public let fileUrl: String?\n\n  /// The name of the file to be sent to the model\n  public let filename: String?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case fileData = \"file_data\"\n    case fileId = \"file_id\"\n    case fileUrl = \"file_url\"\n    case filename\n  }\n}\n\n// MARK: - AudioContent\n\n/// Audio content structure\npublic struct AudioContent: Codable {\n  /// The audio data\n  public let data: String\n\n  /// The format of the audio\n  public let format: String\n\n  /// The type of content, always \"input_audio\"\n  public let type = \"input_audio\"\n\n  public init(data: String, format: String) {\n    self.data = 
data\n    self.format = format\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case data\n    case format\n    case type\n  }\n}\n\n// MARK: - OutputTextContent\n\n/// A text output from the model\npublic struct OutputTextContent: Codable {\n  public init(text: String, annotations: [Any]? = nil) {\n    self.text = text\n    self.annotations = annotations\n  }\n\n  /// Custom encoding/decoding to handle annotations\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    text = try container.decode(String.self, forKey: .text)\n    annotations = nil // Skip decoding annotations for now\n  }\n\n  /// The text content\n  public let text: String\n\n  /// Annotations in the text, if any\n  public let annotations: [Any]? // For now, using Any. Can be made more specific later\n\n  /// The type of the content. Always \"output_text\"\n  public let type = \"output_text\"\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n    try container.encode(text, forKey: .text)\n    try container.encode(type, forKey: .type)\n    // Skip encoding annotations for now\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case text\n    case annotations\n    case type\n  }\n}\n\n// MARK: - RefusalContent\n\n/// A refusal from the model\npublic struct RefusalContent: Codable {\n  /// The refusal explanation from the model\n  public let refusal: String\n\n  /// The type of the refusal. Always \"refusal\"\n  public let type = \"refusal\"\n\n  public init(refusal: String) {\n    self.refusal = refusal\n  }\n}\n\n// MARK: - OutputMessage\n\n/// An output message from the model (used in conversation history)\npublic struct OutputMessage: Codable {\n  public init(content: [ContentItem], id: String, role: String = \"assistant\", status: String? 
= nil, type: String = \"message\") {\n    self.content = content\n    self.id = id\n    self.role = role\n    self.status = status\n    self.type = type\n  }\n\n  /// The content of the output message\n  public let content: [ContentItem]\n\n  /// The unique ID of the output message\n  public let id: String\n\n  /// The role of the output message. Always \"assistant\"\n  public let role: String\n\n  /// The status of the message. One of in_progress, completed, or incomplete. Populated when items are returned via API.\n  public let status: String?\n\n  /// The type of the output message. Always \"message\"\n  public let type: String\n}\n\n// MARK: - FileSearchToolCall\n\n/// File search tool call\npublic struct FileSearchToolCall: Codable {\n  public let id: String\n  public let queries: [String]\n  public let status: String\n  public let type = \"file_search_call\"\n  public let results: [FileSearchResult]?\n\n  public struct FileSearchResult: Codable {\n    public let attributes: [String: String]?\n    public let fileId: String?\n    public let filename: String?\n    public let score: Double?\n    public let text: String?\n\n    enum CodingKeys: String, CodingKey {\n      case attributes\n      case fileId = \"file_id\"\n      case filename, score, text\n    }\n  }\n}\n\n// MARK: - FunctionToolCall\n\n/// Function tool call\npublic struct FunctionToolCall: Codable {\n  public init(arguments: String, callId: String, name: String, id: String? = nil, status: String? 
= nil) {\n    self.arguments = arguments\n    self.callId = callId\n    self.name = name\n    self.id = id\n    self.status = status\n  }\n\n  public let arguments: String\n  public let callId: String\n  public let name: String\n  public let type = \"function_call\"\n  public let id: String?\n  public let status: String?\n\n  enum CodingKeys: String, CodingKey {\n    case arguments\n    case callId = \"call_id\"\n    case name, type, id, status\n  }\n}\n\n// MARK: - FunctionToolCallOutput\n\n/// Function tool call output\npublic struct FunctionToolCallOutput: Codable {\n  public let callId: String\n  public let output: String\n  public let type = \"function_call_output\"\n  public let id: String?\n  public let status: String?\n\n  public init(callId: String, output: String, id: String? = nil, status: String? = nil) {\n    self.callId = callId\n    self.output = output\n    self.id = id\n    self.status = status\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case callId = \"call_id\"\n    case output, type, id, status\n  }\n}\n\n// MARK: - ComputerToolCallOutput\n\n/// The output of a computer tool call\npublic struct ComputerToolCallOutput: Codable {\n  public init(\n    callId: String,\n    id: String,\n    output: ComputerScreenshot,\n    acknowledgedSafetyChecks: [SafetyCheck]? = nil,\n    status: String? = nil)\n  {\n    self.callId = callId\n    self.id = id\n    self.output = output\n    self.acknowledgedSafetyChecks = acknowledgedSafetyChecks\n    self.status = status\n  }\n\n  /// A computer screenshot\n  public struct ComputerScreenshot: Codable {\n    /// The type of the screenshot. 
Always \"computer_screenshot\"\n    public let type: String\n\n    /// The identifier of an uploaded file that contains the screenshot\n    public let fileId: String?\n\n    /// The URL of the screenshot image\n    public let imageUrl: String?\n\n    enum CodingKeys: String, CodingKey {\n      case type\n      case fileId = \"file_id\"\n      case imageUrl = \"image_url\"\n    }\n  }\n\n  /// Safety check structure\n  public struct SafetyCheck: Codable {\n    /// The type of the pending safety check\n    public let code: String\n\n    /// The ID of the pending safety check\n    public let id: String\n\n    /// Details about the pending safety check\n    public let message: String\n  }\n\n  /// The ID of the computer tool call that produced the output\n  public let callId: String\n\n  /// The unique ID of the computer call tool output\n  public let id: String\n\n  /// A computer screenshot image used with the computer use tool\n  public let output: ComputerScreenshot\n\n  /// The type of the computer tool call output. Always \"computer_call_output\"\n  public let type = \"computer_call_output\"\n\n  /// The safety checks reported by the API that have been acknowledged by the developer\n  public let acknowledgedSafetyChecks: [SafetyCheck]?\n\n  /// The status of the message input. One of in_progress, completed, or incomplete\n  public let status: String?\n\n  enum CodingKeys: String, CodingKey {\n    case callId = \"call_id\"\n    case id\n    case output\n    case type\n    case acknowledgedSafetyChecks = \"acknowledged_safety_checks\"\n    case status\n  }\n}\n\n// MARK: - LocalShellCallOutput\n\n/// The output of a local shell tool call\npublic struct LocalShellCallOutput: Codable {\n  public init(id: String, output: String, status: String? 
= nil) {\n    self.id = id\n    self.output = output\n    self.status = status\n  }\n\n  /// The unique ID of the local shell tool call generated by the model\n  public let id: String\n\n  /// A JSON string of the output of the local shell tool call\n  public let output: String\n\n  /// The type of the local shell tool call output. Always \"local_shell_call_output\"\n  public let type = \"local_shell_call_output\"\n\n  /// The status of the item. One of in_progress, completed, or incomplete\n  public let status: String?\n\n  enum CodingKeys: String, CodingKey {\n    case id, output, type, status\n  }\n}\n\n// MARK: - MCPApprovalResponse\n\n/// A response to an MCP approval request\npublic struct MCPApprovalResponse: Codable {\n  public init(approvalRequestId: String, approve: Bool, id: String, reason: String? = nil) {\n    self.approvalRequestId = approvalRequestId\n    self.approve = approve\n    self.id = id\n    self.reason = reason\n  }\n\n  /// The ID of the approval request being answered\n  public let approvalRequestId: String\n\n  /// Whether the request was approved\n  public let approve: Bool\n\n  /// The unique ID of the approval response\n  public let id: String\n\n  /// The type of the item. Always \"mcp_approval_response\"\n  public let type = \"mcp_approval_response\"\n\n  /// Optional reason for the decision\n  public let reason: String?\n\n  enum CodingKeys: String, CodingKey {\n    case approvalRequestId = \"approval_request_id\"\n    case approve\n    case id\n    case type\n    case reason\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/ModelResponseParameter.swift",
    "content": "//\n//  ModelResponseParameter.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: ModelResponseParameter\n\n/// [Create a model response](https://platform.openai.com/docs/api-reference/responses/create)\npublic struct ModelResponseParameter: Codable {\n  /// Initialize a new ModelResponseParameter\n  public init(\n    input: InputType,\n    model: Model,\n    background: Bool? = nil,\n    conversation: Conversation? = nil,\n    include: [ResponseInclude]? = nil,\n    instructions: String? = nil,\n    maxOutputTokens: Int? = nil,\n    maxToolCalls: Int? = nil,\n    metadata: [String: String]? = nil,\n    parallelToolCalls: Bool? = nil,\n    previousResponseId: String? = nil,\n    prompt: Prompt? = nil,\n    promptCacheKey: String? = nil,\n    safetyIdentifier: String? = nil,\n    reasoning: Reasoning? = nil,\n    serviceTier: String? = nil,\n    store: Bool? = nil,\n    stream: Bool? = nil,\n    streamOptions: StreamOptions? = nil,\n    temperature: Double? = nil,\n    text: TextConfiguration? = nil,\n    toolChoice: ToolChoiceMode? = nil,\n    tools: [Tool]? = nil,\n    topP: Double? = nil,\n    topLogprobs: Int? = nil,\n    truncation: TruncationStrategy? = nil,\n    user: String? 
= nil)\n  {\n    self.background = background\n    self.conversation = conversation\n    self.input = input\n    self.model = model.value\n    self.include = include?.map(\\.rawValue)\n    self.instructions = instructions\n    self.maxOutputTokens = maxOutputTokens\n    self.maxToolCalls = maxToolCalls\n    self.metadata = metadata\n    self.parallelToolCalls = parallelToolCalls\n    self.previousResponseId = previousResponseId\n    self.prompt = prompt\n    self.promptCacheKey = promptCacheKey\n    self.safetyIdentifier = safetyIdentifier\n    self.reasoning = reasoning\n    self.serviceTier = serviceTier\n    self.store = store\n    self.stream = stream\n    self.streamOptions = streamOptions\n    self.temperature = temperature\n    self.text = text\n    self.toolChoice = toolChoice\n    self.tools = tools\n    self.topP = topP\n    self.topLogprobs = topLogprobs\n    self.truncation = truncation?.rawValue\n    self.user = user\n  }\n\n  /// Audio detail structure\n  public struct AudioDetail: Codable {\n    public var data: String\n    public var format: String\n\n    public init(data: String, format: String) {\n      self.data = data\n      self.format = format\n    }\n  }\n\n  /// The truncation strategy to use for the model response\n  public enum TruncationStrategy: String {\n    /// If the input to this Response exceeds the model's context window size, the model will truncate the response to fit the context window by dropping items from the beginning of the conversation\n    case auto\n\n    /// If the input size will exceed the context window size for a model, the request will fail with a 400 error\n    case disabled\n  }\n\n  /// Defaults to false\n  /// Whether to run the model response in the background. Learn more.\n  public var background: Bool?\n\n  /// Defaults to null\n  /// The conversation that this response belongs to. Items from this conversation are prepended to input_items for this response request. 
Input items and output items from this response are automatically added to this conversation after this response completes.\n  public var conversation: Conversation?\n\n  /// Text, image, or file inputs to the model, used to generate a response.\n  /// A text input to the model, equivalent to a text input with the user role.\n  /// A list of one or many input items to the [model](https://platform.openai.com/docs/models), containing different content types.\n  public var input: InputType\n\n  /// Model ID used to generate the response, like gpt-4o or o1. OpenAI offers a wide range of models with\n  /// different capabilities, performance characteristics, and price points.\n  /// Refer to the model guide to browse and compare available models.\n  public var model: String\n\n  /// Specify additional output data to include in the model response. Currently supported values are:\n  /// - web_search_call.action.sources: Include the sources of the web search tool call.\n  /// - code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n  /// - computer_call_output.output.image_url: Include image urls from the computer call output.\n  /// - file_search_call.results: Include the search results of the file search tool call.\n  /// - message.input_image.image_url: Include image urls from the input message.\n  /// - message.output_text.logprobs: Include logprobs with assistant messages.\n  /// - reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n  public var include: [String]?\n\n  /// Inserts a system (or developer) message as the first item in the model's context.\n  /// When using along with previous_response_id, the instructions from a previous response will not be\n  /// carried over to the next response. 
This makes it simple to swap out system (or developer) messages in new responses.\n  public var instructions: String?\n\n  /// An upper bound for the number of tokens that can be generated for a response, including visible output tokens\n  /// and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).\n  public var maxOutputTokens: Int?\n\n  /// The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n  public var maxToolCalls: Int?\n\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information\n  /// about the object in a structured format, and querying for objects via API or the dashboard.\n  /// Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.\n  public var metadata: [String: String]?\n\n  /// Whether to allow the model to run tool calls in parallel.\n  /// Defaults to true\n  public var parallelToolCalls: Bool?\n\n  /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n  /// Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state)\n  public var previousResponseId: String?\n\n  /// Reference to a prompt template and its variables. Learn more.\n  public var prompt: Prompt?\n\n  /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the user field. Learn more.\n  public var promptCacheKey: String?\n\n  /// A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. 
Learn more.\n  public var safetyIdentifier: String?\n\n  /// o-series models only\n  /// Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning)\n  public var reasoning: Reasoning?\n\n  /// Defaults to true\n  /// Whether to store the generated model response for later retrieval via API.\n  public var store: Bool?\n\n  /// Defaults to auto\n  /// Specifies the processing type used for serving the request.\n  ///\n  /// If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.\n  /// If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.\n  /// If set to 'flex' or 'priority', then the request will be processed with the corresponding service tier.\n  /// When not set, the default behavior is 'auto'.\n  /// When the service_tier parameter is set, the response body will include the service_tier value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.\n  public var serviceTier: String?\n\n  /// If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n  /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information.\n  public var stream: Bool?\n\n  /// Defaults to null\n  /// Options for streaming responses. 
Only set this when you set stream: true.\n  public var streamOptions: StreamOptions?\n\n  /// Defaults to 1\n  /// What sampling temperature to use, between 0 and 2.\n  /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n  /// We generally recommend altering this or top_p but not both.\n  public var temperature: Double?\n\n  /// Configuration options for a text response from the model. Can be plain text or structured JSON data. Learn more:\n  /// [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n  /// [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)\n  public var text: TextConfiguration?\n\n  /// How the model should select which tool (or tools) to use when generating a response.\n  /// See the tools parameter to see how to specify which tools the model can call.\n  public var toolChoice: ToolChoiceMode?\n\n  /// An array of tools the model may call while generating a response. You can specify which tool to use by setting the tool_choice parameter.\n  /// The two categories of tools you can provide the model are:\n  /// Built-in tools: Tools that are provided by OpenAI that extend the model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search) or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools).\n  /// Function calls (custom tools): Functions that are defined by you, enabling the model to call your own code. 
Learn more about [function calling.](https://platform.openai.com/docs/guides/function-calling)\n  public var tools: [Tool]?\n\n  /// Defaults to 1\n  /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n  /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or temperature but not both.\n  public var topP: Double?\n\n  /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n  public var topLogprobs: Int?\n\n  /// Defaults to disabled\n  /// The truncation strategy to use for the model response. See TruncationStrategy enum for available options.\n  public var truncation: String?\n\n  /// Deprecated\n  /// This field is being replaced by safety_identifier and prompt_cache_key. Use prompt_cache_key instead to maintain caching optimizations. A stable identifier for your end-users. Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. 
Learn more.\n  @available(*, deprecated, message: \"This field is being replaced by safety_identifier and prompt_cache_key\")\n  public var user: String?\n\n  /// Coding keys for ModelResponseParameter\n  enum CodingKeys: String, CodingKey {\n    case background\n    case conversation\n    case input\n    case model\n    case include\n    case instructions\n    case maxOutputTokens = \"max_output_tokens\"\n    case maxToolCalls = \"max_tool_calls\"\n    case metadata\n    case parallelToolCalls = \"parallel_tool_calls\"\n    case previousResponseId = \"previous_response_id\"\n    case prompt\n    case promptCacheKey = \"prompt_cache_key\"\n    case safetyIdentifier = \"safety_identifier\"\n    case reasoning\n    case serviceTier = \"service_tier\"\n    case store\n    case stream\n    case streamOptions = \"stream_options\"\n    case temperature\n    case text\n    case toolChoice = \"tool_choice\"\n    case tools\n    case topP = \"top_p\"\n    case topLogprobs = \"top_logprobs\"\n    case truncation\n    case user\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/Prompt.swift",
    "content": "//\n//  Prompt.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: - Prompt\n\n/// Reference to a prompt template and its variables. Learn more.\npublic struct Prompt: Codable {\n  public init(id: String, variables: [String: PromptVariableValue]? = nil, version: String? = nil) {\n    self.id = id\n    self.variables = variables\n    self.version = version\n  }\n\n  /// The unique identifier of the prompt template to use.\n  public var id: String\n\n  /// Optional map of values to substitute in for variables in your prompt. The substitution values can either be strings, or other Response input types like images or files.\n  public var variables: [String: PromptVariableValue]?\n\n  /// Optional version of the prompt template.\n  public var version: String?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case variables\n    case version\n  }\n}\n\n// MARK: - PromptVariableValue\n\n/// A value for a prompt variable that can be either a string or an input item (image, file, etc.)\npublic enum PromptVariableValue: Codable {\n  /// String value\n  case string(String)\n\n  /// Input item value (image, file, etc.)\n  case inputItem(InputItem)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n\n    if let stringValue = try? container.decode(String.self) {\n      self = .string(stringValue)\n    } else if let inputItem = try? 
container.decode(InputItem.self) {\n      self = .inputItem(inputItem)\n    } else {\n      throw DecodingError.typeMismatch(\n        PromptVariableValue.self,\n        DecodingError.Context(\n          codingPath: decoder.codingPath,\n          debugDescription: \"Expected String or InputItem\"))\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n    switch self {\n    case .string(let value):\n      try container.encode(value)\n    case .inputItem(let item):\n      try container.encode(item)\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/ResponseInclude.swift",
    "content": "//\n//  ResponseInclude.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: ResponseInclude\n\n/// Specify additional output data to include in the model response.\npublic enum ResponseInclude: String {\n  /// Include the sources of the web search tool call.\n  case webSearchCallActionSources = \"web_search_call.action.sources\"\n\n  /// Includes the outputs of python code execution in code interpreter tool call items.\n  case codeInterpreterCallOutputs = \"code_interpreter_call.outputs\"\n\n  /// Include image urls from the computer call output.\n  case computerCallOutputImageUrl = \"computer_call_output.output.image_url\"\n\n  /// Include the search results of the file search tool call.\n  case fileSearchCallResults = \"file_search_call.results\"\n\n  /// Include image urls from the input message.\n  case messageInputImageImageUrl = \"message.input_image.image_url\"\n\n  /// Include logprobs with assistant messages.\n  case messageOutputTextLogprobs = \"message.output_text.logprobs\"\n\n  /// Includes an encrypted version of reasoning tokens in reasoning item outputs. This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n  case reasoningEncryptedContent = \"reasoning.encrypted_content\"\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Response/StreamOptions.swift",
    "content": "//\n//  StreamOptions.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: StreamOptions\n\n/// Defaults to null\n/// Options for streaming responses. Only set this when you set stream: true.\npublic struct StreamOptions: Codable {\n  /// When true, stream obfuscation will be enabled. Stream obfuscation adds random characters to an obfuscation field on streaming delta events to normalize payload sizes as a mitigation to certain side-channel attacks. These obfuscation fields are included by default, but add a small amount of overhead to the data stream. You can set include_obfuscation to false to optimize for bandwidth if you trust the network links between your application and the OpenAI API.\n  public var includeObfuscation: Bool?\n\n  public init(includeObfuscation: Bool? = nil) {\n    self.includeObfuscation = includeObfuscation\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case includeObfuscation = \"include_obfuscation\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Runs/CreateThreadAndRunParameter.swift",
    "content": "//\n//  CreateThreadAndRunParameter.swift\n//\n//\n//  Created by James Rochabrun on 11/17/23.\n//\n\nimport Foundation\n\n/// [Create a thread and run it in one request.](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun)\npublic struct CreateThreadAndRunParameter: Encodable {\n  public init(\n    assistantId: String,\n    thread: CreateThreadParameters?,\n    model: String?,\n    instructions: String?,\n    tools: [AssistantObject.Tool]?,\n    metadata: [String: String]? = nil,\n    temperature: Double? = nil,\n    topP: Double? = nil,\n    maxPromptTokens: Int? = nil,\n    maxCompletionTokens: Int? = nil,\n    truncationStrategy: TruncationStrategy? = nil,\n    toolChoice: ToolChoice? = nil,\n    responseFormat: ResponseFormat? = nil)\n  {\n    self.assistantId = assistantId\n    self.thread = thread\n    self.model = model\n    self.instructions = instructions\n    self.tools = tools\n    self.metadata = metadata\n    self.temperature = temperature\n    self.topP = topP\n    self.maxPromptTokens = maxPromptTokens\n    self.maxCompletionTokens = maxCompletionTokens\n    self.truncationStrategy = truncationStrategy\n    self.toolChoice = toolChoice\n    self.responseFormat = responseFormat\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case assistantId = \"assistant_id\"\n    case thread\n    case model\n    case instructions\n    case tools\n    case metadata\n    case temperature\n    case topP = \"top_p\"\n    case stream\n    case maxPromptTokens = \"max_prompt_tokens\"\n    case maxCompletionTokens = \"max_completion_tokens\"\n    case truncationStrategy = \"truncation_strategy\"\n    case toolChoice = \"tool_choice\"\n    case responseFormat = \"response_format\"\n  }\n\n  /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run.\n  let assistantId: String\n  /// A thread to create.\n  let thread: CreateThreadParameters?\n  /// The ID of the 
[Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.\n  let model: String?\n  /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.\n  let instructions: String?\n  /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.\n  let tools: [AssistantObject.Tool]?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  let metadata: [String: String]?\n  /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n  /// Defaults to 1\n  let temperature: Double?\n  /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or temperature but not both.\n  let topP: Double?\n  /// If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message.\n  var stream = false\n  /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status incomplete. 
See incomplete_details for more info.\n  let maxPromptTokens: Int?\n  /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status incomplete. See incomplete_details for more info.\n  let maxCompletionTokens: Int?\n  /// Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.\n  let truncationStrategy: TruncationStrategy?\n  /// Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling a tool. Specifying a particular tool like {\"type\": \"file_search\"} or {\"type\": \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that tool.\n  let toolChoice: ToolChoice?\n  /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.\n  /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n  /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n  let responseFormat: ResponseFormat?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Runs/ModifyRunParameters.swift",
    "content": "//\n//  ModifyRunParameters.swift\n//\n//\n//  Created by James Rochabrun on 11/29/23.\n//\n\nimport Foundation\n\n/// Modifies a [Run](https://platform.openai.com/docs/api-reference/runs/modifyRun)\n/// Only the metadata can be modified.\npublic struct ModifyRunParameters: Encodable {\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  public var metadata: [String: String]\n\n  public init(\n    metadata: [String: String])\n  {\n    self.metadata = metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Runs/RunParameter.swift",
    "content": "//\n//  RunParameter.swift\n//\n//\n//  Created by James Rochabrun on 11/16/23.\n//\n\nimport Foundation\n\n/// [Create a run.](https://platform.openai.com/docs/api-reference/runs/createRun)\npublic struct RunParameter: Encodable {\n  public init(\n    assistantID: String,\n    model: String? = nil,\n    instructions: String? = nil,\n    additionalInstructions: String? = nil,\n    additionalMessages: [MessageParameter]? = nil,\n    tools: [AssistantObject.Tool]? = nil,\n    metadata: [String: String]? = nil,\n    temperature: Double? = nil,\n    maxPromptTokens: Int? = nil,\n    maxCompletionTokens: Int? = nil,\n    truncationStrategy: TruncationStrategy? = nil,\n    toolChoice: ToolChoice? = nil,\n    responseFormat: ResponseFormat? = nil)\n  {\n    self.assistantID = assistantID\n    self.model = model\n    self.instructions = instructions\n    self.additionalInstructions = additionalInstructions\n    self.additionalMessages = additionalMessages\n    self.tools = tools\n    self.metadata = metadata\n    self.temperature = temperature\n    self.maxPromptTokens = maxPromptTokens\n    self.maxCompletionTokens = maxCompletionTokens\n    self.truncationStrategy = truncationStrategy\n    self.toolChoice = toolChoice\n    self.responseFormat = responseFormat\n  }\n\n  /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run.\n  public let assistantID: String\n\n  enum CodingKeys: String, CodingKey {\n    case assistantID = \"assistant_id\"\n    case model\n    case instructions\n    case additionalInstructions = \"additional_instructions\"\n    case additionalMessages = \"additional_messages\"\n    case tools\n    case metadata\n    case stream\n    case temperature\n    case maxPromptTokens = \"max_prompt_tokens\"\n    case maxCompletionTokens = \"max_completion_tokens\"\n    case truncationStrategy = \"truncation_strategy\"\n    case toolChoice = \"tool_choice\"\n    case responseFormat = 
\"response_format\"\n  }\n\n  /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.\n  let model: String?\n  /// Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis.\n  let instructions: String?\n  /// Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions.\n  let additionalInstructions: String?\n  /// Adds additional messages to the thread before creating the run.\n  let additionalMessages: [MessageParameter]?\n  /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.\n  let tools: [AssistantObject.Tool]?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  let metadata: [String: String]?\n  /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n  /// Optional Defaults to 1\n  let temperature: Double?\n  /// If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message.\n  var stream = false\n  /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds the number of prompt tokens specified, the run will end with status incomplete. See incomplete_details for more info.\n  let maxPromptTokens: Int?\n  /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status incomplete. See incomplete_details for more info.\n  let maxCompletionTokens: Int?\n  /// Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.\n  let truncationStrategy: TruncationStrategy?\n  /// Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling a tool. Specifying a particular tool like {\"type\": \"file_search\"} or {\"type\": \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that tool.\n  let toolChoice: ToolChoice?\n  /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.\n  /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n  /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n  let responseFormat: ResponseFormat?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Runs/RunToolsOutputParameter.swift",
    "content": "//\n//  RunToolsOutputParameter.swift\n//\n//\n//  Created by James Rochabrun on 11/16/23.\n//\n\nimport Foundation\n\n/// When a run has the status: \"requires_action\" and required_action.type is submit_tool_outputs, this endpoint can be used to submit the [outputs](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) from the tool calls once they're all completed. All outputs must be submitted in a single request.\npublic struct RunToolsOutputParameter: Encodable {\n  public init(\n    toolOutputs: [ToolOutput])\n  {\n    self.toolOutputs = toolOutputs\n  }\n\n  public struct ToolOutput: Encodable {\n    /// The ID of the tool call in the `required_action` object within the run object the output is being submitted for.\n    public let toolCallId: String?\n    /// The output of the tool call to be submitted to continue the run.\n    public let output: String?\n\n    enum CodingKeys: String, CodingKey {\n      case toolCallId = \"tool_call_id\"\n      case output\n    }\n\n    public init(\n      toolCallId: String?,\n      output: String?)\n    {\n      self.toolCallId = toolCallId\n      self.output = output\n    }\n  }\n\n  /// A list of tools for which the outputs are being submitted.\n  public let toolOutputs: [ToolOutput]\n  /// If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message.\n  public var stream = false\n\n  enum CodingKeys: String, CodingKey {\n    case toolOutputs = \"tool_outputs\"\n    case stream\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Threads/CreateThreadParameters.swift",
    "content": "//\n//  CreateThreadParameters.swift\n//\n//\n//  Created by James Rochabrun on 11/16/23.\n//\n\nimport Foundation\n\n/// Create a [Thread](https://platform.openai.com/docs/api-reference/threads/createThread)\npublic struct CreateThreadParameters: Encodable {\n  public init(\n    messages: [MessageObject]? = nil,\n    toolResources: ToolResources? = nil,\n    metadata: [String: String]? = nil)\n  {\n    self.messages = messages\n    self.toolResources = toolResources\n    self.metadata = metadata\n  }\n\n  /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with.\n  public var messages: [MessageObject]?\n  /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n  public var toolResources: ToolResources?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  public var metadata: [String: String]?\n\n  enum CodingKeys: String, CodingKey {\n    case messages\n    case toolResources = \"tool_resources\"\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/Threads/ModifyThreadParameters.swift",
    "content": "//\n//  ModifyThreadParameters.swift\n//\n//\n//  Created by James Rochabrun on 11/25/23.\n//\n\nimport Foundation\n\n/// Modifies a [Thread](https://platform.openai.com/docs/api-reference/threads/modifyThread)\n/// Only the metadata can be modified.\npublic struct ModifyThreadParameters: Encodable {\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  public var metadata: [String: String]\n\n  public init(\n    metadata: [String: String])\n  {\n    self.metadata = metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/VectorStore/VectorStoreParameter.swift",
    "content": "//\n//  VectorStoreParameter.swift\n//\n//\n//  Created by James Rochabrun on 4/27/24.\n//\n\nimport Foundation\n\n/// Vector stores are used to store files for use by the file_search tool.\n///\n/// Related guide: [File Search](https://platform.openai.com/docs/assistants/tools/file-search)\n///\n/// Create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores).\npublic struct VectorStoreParameter: Encodable {\n  public init(\n    fileIDS: [String]? = nil,\n    name: String? = nil,\n    expiresAfter: ExpirationPolicy? = nil,\n    metadata: [String: String]? = nil)\n  {\n    self.fileIDS = fileIDS\n    self.name = name\n    self.expiresAfter = expiresAfter\n    self.metadata = metadata\n  }\n\n  /// Encoding only no nil parameters, this will avoid sending nil values when using this parameter in the \"modifyVectorStore\" request.\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n    if let fileIDS {\n      try container.encode(fileIDS, forKey: .fileIDS)\n    }\n    if let name {\n      try container.encode(name, forKey: .name)\n    }\n    if let expiresAfter {\n      try container.encode(expiresAfter, forKey: .expiresAfter)\n    }\n    if let metadata {\n      try container.encode(metadata, forKey: .metadata)\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case fileIDS = \"file_ids\"\n    case name\n    case expiresAfter = \"expires_after\"\n    case metadata\n  }\n\n  /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like file_search that can access files.\n  let fileIDS: [String]?\n  /// The name of the vector store.\n  let name: String?\n  /// The expiration policy for a vector store.\n  let expiresAfter: ExpirationPolicy?\n  /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  let metadata: [String: String]?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/VectorStoreFileBatch/VectorStoreFileBatchParameter.swift",
    "content": "//\n//  VectorStoreFileBatchParameter.swift\n//\n//\n//  Created by James Rochabrun on 4/29/24.\n//\n\nimport Foundation\n\n/// [Create vector store file batchBeta](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)\npublic struct VectorStoreFileBatchParameter: Encodable {\n  /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like file_search that can access files.\n  public let fileIDS: [String]\n\n  enum CodingKeys: String, CodingKey {\n    case fileIDS = \"file_ids\"\n  }\n\n  public init(fileIDS: [String]) {\n    self.fileIDS = fileIDS\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Parameters/VectorStoreFiles/VectorStoreFileParameter.swift",
    "content": "//\n//  VectorStoreFileParameter.swift\n//\n//\n//  Created by James Rochabrun on 4/28/24.\n//\n\nimport Foundation\n\n/// [Vector Store Files](https://platform.openai.com/docs/api-reference/vector-stores-files)\npublic struct VectorStoreFileParameter: Encodable {\n  /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like file_search that can access files.\n  public let fileID: String\n\n  enum CodingKeys: String, CodingKey {\n    case fileID = \"file_id\"\n  }\n\n  public init(fileID: String) {\n    self.fileID = fileID\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Assistants/AssistantObject.swift",
    "content": "//\n//  AssistantObject.swift\n//\n//\n//  Created by James Rochabrun on 11/15/23.\n//\n\nimport Foundation\n\n/// BETA.\n/// Represents an [assistant](https://platform.openai.com/docs/api-reference/assistants) that can call the model and use tools.\npublic struct AssistantObject: Decodable {\n  public init(\n    id: String,\n    object: String,\n    createdAt: Int,\n    name: String?,\n    description: String?,\n    model: String,\n    instructions: String?,\n    tools: [Tool],\n    toolResources: ToolResources?,\n    metadata: [String: String]?,\n    temperature: Double?,\n    topP: Double?,\n    responseFormat: ResponseFormat?)\n  {\n    self.id = id\n    self.object = object\n    self.createdAt = createdAt\n    self.name = name\n    self.description = description\n    self.model = model\n    self.instructions = instructions\n    self.tools = tools\n    self.toolResources = toolResources\n    self.metadata = metadata\n    self.temperature = temperature\n    self.topP = topP\n    self.responseFormat = responseFormat\n  }\n\n  public struct Tool: Codable {\n    public init(\n      type: ToolType,\n      function: ChatCompletionParameters.ChatFunction? = nil)\n    {\n      self.type = type.rawValue\n      self.function = function\n    }\n\n    public enum ToolType: String, CaseIterable {\n      case codeInterpreter = \"code_interpreter\"\n      case fileSearch = \"file_search\"\n      case function\n    }\n\n    /// The type of tool being defined.\n    public let type: String\n    public let function: ChatCompletionParameters.ChatFunction?\n\n    /// Helper.\n    public var displayToolType: ToolType? { .init(rawValue: type) }\n  }\n\n  /// The identifier, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always \"assistant\".\n  public let object: String\n  /// The Unix timestamp (in seconds) for when the assistant was created.\n  public let createdAt: Int\n  /// The name of the assistant. 
The maximum length is 256 characters.\n  public let name: String?\n  /// The description of the assistant. The maximum length is 512 characters.\n  public let description: String?\n  /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.\n  public let model: String\n  /// The system instructions that the assistant uses. The maximum length is 32768 characters.\n  public let instructions: String?\n  /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types code_interpreter, retrieval, or function.\n  public let tools: [Tool]\n  /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n  public let toolResources: ToolResources?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  public let metadata: [String: String]?\n  /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n  /// Defaults to 1\n  public var temperature: Double?\n  /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or temperature but not both.\n  /// Defaults to 1\n  public var topP: Double?\n  /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.\n  /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n  /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n  /// Defaults to `auto`\n  public var responseFormat: ResponseFormat?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case name\n    case description\n    case model\n    case instructions\n    case tools\n    case toolResources = \"tool_resources\"\n    case metadata\n    case temperature\n    case topP = \"top_p\"\n    case responseFormat = \"response_format\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Assistants/AssistantStreamEvent.swift",
    "content": "//\n//  AssistantStreamEvent.swift\n//\n//\n//  Created by James Rochabrun on 3/22/24.\n//\n\nimport Foundation\n\n/// A model that helps retrieve an object from an event.\npublic enum AssistantStreamEvent {\n  /// Occurs when a new thread is created.\n  /// - data is a thread\n  case threadCreated\n\n  /// Occurs when a new run is created.\n  /// - data is a run\n  case threadRunCreated\n\n  /// Occurs when a run moves to a queued status.\n  /// - data is a run\n  case threadRunQueued(RunObject)\n\n  /// Occurs when a run moves to an in_progress status.\n  /// - data is a run\n  case threadRunInProgress(RunObject)\n\n  /// Occurs when a run moves to a requires_action status.\n  /// - data is a run\n  case threadRunRequiresAction(RunObject)\n\n  /// Occurs when a run is completed.\n  /// - data is a run\n  case threadRunCompleted(RunObject)\n\n  /// Occurs when a run fails.\n  /// - data is a run\n  case threadRunFailed(RunObject)\n\n  /// Occurs when a run moves to a cancelling status.\n  /// - data is a run\n  case threadRunCancelling(RunObject)\n\n  /// Occurs when a run is cancelled.\n  /// - data is a run\n  case threadRunCancelled(RunObject)\n\n  /// Occurs when a run expires.\n  /// - data is a run\n  case threadRunExpired(RunObject)\n\n  /// Occurs when a run step is created.\n  /// - data is a run step\n  case threadRunStepCreated\n\n  /// Occurs when a run step moves to an in_progress state.\n  /// - data is a run step\n  case threadRunStepInProgress\n\n  /// Occurs when parts of a run step are being streamed.\n  /// - data is a run step delta\n  case threadRunStepDelta(RunStepDeltaObject)\n\n  /// Occurs when a run step is completed.\n  /// - data is a run step\n  case threadRunStepCompleted\n\n  /// Occurs when a run step fails.\n  /// - data is a run step\n  case threadRunStepFailed\n\n  /// Occurs when a run step is cancelled.\n  /// - data is a run step\n  case threadRunStepCancelled\n\n  /// Occurs when a run step expires.\n  /// - 
data is a run step\n  case threadRunStepExpired\n\n  /// Occurs when a message is created.\n  /// - data is a message\n  case threadMessageCreated\n\n  /// Occurs when a message moves to an in_progress state.\n  /// - data is a message\n  case threadMessageInProgress\n\n  /// Occurs when parts of a message are being streamed.\n  /// - data is a message delta\n  case threadMessageDelta(MessageDeltaObject)\n\n  /// Occurs when a message is completed.\n  /// - data is a message\n  case threadMessageCompleted\n\n  /// Occurs when a message ends before it is completed.\n  /// - data is a message\n  case threadMessageIncomplete\n\n  /// Occurs when an error occurs. This can happen due to an internal server error or a timeout.\n  /// - data is an error\n  case error\n\n  /// Occurs when a stream ends.\n  /// - data is [DONE]\n  case done\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Assistants/AssistantStreamEventObject.swift",
    "content": "//\n//  AssistantStreamEventObject.swift\n//\n//\n//  Created by James Rochabrun on 3/22/24.\n//\n\nimport Foundation\n\n/// Represents an [event](https://platform.openai.com/docs/api-reference/assistants-streaming/events) emitted when streaming a Run.\n/// Each event in a server-sent events stream has an event and data property:\npublic enum AssistantStreamEventObject: String {\n  /// Occurs when a new thread is created.\n  /// - data is a [thread](https://platform.openai.com/docs/api-reference/threads/object)\n  case threadCreated = \"thread.created\"\n\n  /// Occurs during the life cycle of a run.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRun = \"thread.run\"\n\n  /// Occurs when a new run is created.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunCreated = \"thread.run.created\"\n\n  /// Occurs when a run moves to a queued status.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunQueued = \"thread.run.queued\"\n\n  /// Occurs when a run moves to an in_progress status.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunInProgress = \"thread.run.in_progress\"\n\n  /// Occurs when a run moves to a requires_action status.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunRequiresAction = \"thread.run.requires_action\"\n\n  /// Occurs when a run is completed.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunCompleted = \"thread.run.completed\"\n\n  /// Occurs when a run fails.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunFailed = \"thread.run.failed\"\n\n  /// Occurs when a run moves to a cancelling status.\n  /// - data is a 
[run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunCancelling = \"thread.run.cancelling\"\n\n  /// Occurs when a run is cancelled.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunCancelled = \"thread.run.cancelled\"\n\n  /// Occurs when a run expires.\n  /// - data is a [run](https://platform.openai.com/docs/api-reference/runs/object)\n  case threadRunExpired = \"thread.run.expired\"\n\n  /// Occurs when a run step is created.\n  /// - data is a [run step](https://platform.openai.com/docs/api-reference/runs/step-object)\n  case threadRunStepCreated = \"thread.run.step.created\"\n\n  /// Occurs when a run step moves to an in_progress state.\n  /// - data is a [run step](https://platform.openai.com/docs/api-reference/runs/step-object)\n  case threadRunStepInProgress = \"thread.run.step.in_progress\"\n\n  /// Occurs when parts of a run step are being streamed.\n  /// - data is a [run step delta](https://platform.openai.com/docs/api-reference/assistants-streaming/run-step-delta-object)\n  case threadRunStepDelta = \"thread.run.step.delta\"\n\n  /// Occurs when a run step is completed.\n  /// - data is a [run step](https://platform.openai.com/docs/api-reference/runs/step-object)\n  case threadRunStepCompleted = \"thread.run.step.completed\"\n\n  /// Occurs when a run step fails.\n  /// - data is a [run step](https://platform.openai.com/docs/api-reference/runs/step-object)\n  case threadRunStepFailed = \"thread.run.step.failed\"\n\n  /// Occurs when a run step is cancelled.\n  /// - data is a [run step](https://platform.openai.com/docs/api-reference/runs/step-object)\n  case threadRunStepCancelled = \"thread.run.step.cancelled\"\n\n  /// Occurs when a run step expires.\n  /// - data is a [run step](https://platform.openai.com/docs/api-reference/runs/step-object)\n  case threadRunStepExpired = \"thread.run.step.expired\"\n\n  /// Occurs when a message is created.\n  /// - data is a 
[message](https://platform.openai.com/docs/api-reference/messages/object)\n  case threadMessageCreated = \"thread.message.created\"\n\n  /// Occurs when a message moves to an in_progress state.\n  /// - data is a [message](https://platform.openai.com/docs/api-reference/messages/object)\n  case threadMessageInProgress = \"thread.message.in_progress\"\n\n  /// Not documented\n  case threadMessage = \"thread.message\"\n\n  /// Occurs when parts of a message are being streamed.\n  /// - data is a [message delta](https://platform.openai.com/docs/api-reference/assistants-streaming/message-delta-object)\n  case threadMessageDelta = \"thread.message.delta\"\n\n  /// Occurs when a message is completed.\n  /// - data is a [message](https://platform.openai.com/docs/api-reference/messages/object)\n  case threadMessageCompleted = \"thread.message.completed\"\n\n  /// Occurs when a message ends before it is completed.\n  /// - data is a [message](https://platform.openai.com/docs/api-reference/messages/object)\n  case threadMessageIncomplete = \"thread.message.incomplete\"\n\n  /// Occurs when an error occurs. This can happen due to an internal server error or a timeout.\n  /// - data is an error\n  case error\n\n  /// Occurs when a stream ends.\n  /// - data is [DONE]\n  case done\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Audio/AudioObject.swift",
    "content": "//\n//  AudioObject.swift\n//\n//\n//  Created by James Rochabrun on 10/10/23.\n//\n\nimport Foundation\n\n/// The [audio](https://platform.openai.com/docs/api-reference/audio) response.\npublic struct AudioObject: Decodable {\n  public struct Word: Decodable {\n    /// The text content of the word.\n    public let word: String\n    /// Start time of the word in seconds.\n    public let start: Double\n    /// End time of the word in seconds.\n    public let end: Double\n  }\n\n  public struct Segment: Decodable {\n    /// Unique identifier of the segment.\n    public let id: Int\n    /// Seek offset of the segment.\n    public let seek: Int\n    /// Start time of the segment in seconds.\n    public let start: Double\n    ///  End time of the segment in seconds.\n    public let end: Double\n    /// Text content of the segment.\n    public let text: String\n    /// Array of token IDs for the text content.\n    public let tokens: [Int]\n    /// Temperature parameter used for generating the segment.\n    public let temperature: Double\n    /// Average logprob of the segment. If the value is lower than -1, consider the logprobs failed.\n    public let avgLogprob: Double\n    /// Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed.\n    public let compressionRatio: Double\n    /// Probability of no speech in the segment. 
If the value is higher than 1.0 and the avg_logprob is below -1, consider this segment silent.\n    public let noSpeechProb: Double\n\n    enum CodingKeys: String, CodingKey {\n      case id\n      case seek\n      case start\n      case end\n      case text\n      case tokens\n      case temperature\n      case avgLogprob = \"avg_logprob\"\n      case compressionRatio = \"compression_ratio\"\n      case noSpeechProb = \"no_speech_prob\"\n    }\n  }\n\n  /// The language of the input audio.\n  public let language: String?\n  /// The duration of the input audio.\n  public let duration: String?\n  /// The transcribed text if the request uses the `transcriptions` API, or the translated text if the request uses the `translations` endpoint.\n  public let text: String\n  /// Extracted words and their corresponding timestamps.\n  public let words: [Word]?\n  /// Segments of the transcribed text and their corresponding details.\n  public let segments: [Segment]?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Audio/AudioSpeechObject.swift",
    "content": "//\n//  AudioSpeechObject.swift\n//\n//\n//  Created by James Rochabrun on 11/14/23.\n//\n\nimport Foundation\n\n/// The [audio speech](https://platform.openai.com/docs/api-reference/audio/createSpeech) response.\npublic struct AudioSpeechObject: Decodable {\n  /// The audio file content.\n  public let output: Data\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Batch/BatchObject.swift",
    "content": "//\n//  BatchObject.swift\n//\n//\n//  Created by James Rochabrun on 4/27/24.\n//\n\nimport Foundation\n\npublic struct BatchObject: Decodable {\n  public struct Error: Decodable {\n    let object: String\n    let data: [Data]\n\n    public struct Data: Decodable {\n      /// An error code identifying the error type.\n      let code: String\n      /// A human-readable message providing more details about the error.\n      let message: String\n      /// The name of the parameter that caused the error, if applicable.\n      let param: String?\n      /// The line number of the input file where the error occurred, if applicable.\n      let line: Int?\n    }\n  }\n\n  public struct RequestCount: Decodable {\n    /// Total number of requests in the batch.\n    let total: Int\n    /// Number of requests that have been completed successfully.\n    let completed: Int\n    /// Number of requests that have failed.\n    let failed: Int\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case endpoint\n    case errors\n    case inputFileID = \"input_file_id\"\n    case completionWindow = \"completion_window\"\n    case status\n    case outputFileID = \"output_file_id\"\n    case errorFileID = \"error_file_id\"\n    case createdAt = \"created_at\"\n    case inProgressAt = \"in_progress_at\"\n    case expiresAt = \"expires_at\"\n    case finalizingAt = \"finalizing_at\"\n    case completedAt = \"completed_at\"\n    case failedAt = \"failed_at\"\n    case expiredAt = \"expired_at\"\n    case cancellingAt = \"cancelling_at\"\n    case cancelledAt = \"cancelled_at\"\n    case requestCounts = \"request_counts\"\n    case metadata\n  }\n\n  let id: String\n  /// The object type, which is always batch.\n  let object: String\n  /// The OpenAI API endpoint used by the batch.\n  let endpoint: String\n\n  let errors: Error\n  /// The ID of the input file for the batch.\n  let inputFileID: String\n  /// The time frame within which the batch 
should be processed.\n  let completionWindow: String\n  /// The current status of the batch.\n  let status: String\n  /// The ID of the file containing the outputs of successfully executed requests.\n  let outputFileID: String\n  /// The ID of the file containing the outputs of requests with errors.\n  let errorFileID: String\n  /// The Unix timestamp (in seconds) for when the batch was created.\n  let createdAt: Int\n  /// The Unix timestamp (in seconds) for when the batch started processing.\n  let inProgressAt: Int\n  /// The Unix timestamp (in seconds) for when the batch will expire.\n  let expiresAt: Int\n  /// The Unix timestamp (in seconds) for when the batch started finalizing.\n  let finalizingAt: Int\n  /// The Unix timestamp (in seconds) for when the batch was completed.\n  let completedAt: Int\n  /// The Unix timestamp (in seconds) for when the batch failed.\n  let failedAt: Int\n  /// The Unix timestamp (in seconds) for when the batch expired.\n  let expiredAt: Int\n  /// The Unix timestamp (in seconds) for when the batch started cancelling.\n  let cancellingAt: Int\n  /// The Unix timestamp (in seconds) for when the batch was cancelled.\n  let cancelledAt: Int\n  /// The request counts for different statuses within the batch.\n  let requestCounts: RequestCount\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  let metadata: [String: String]\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Chat/ChatCompletionChunkObject.swift",
    "content": "//\n//  ChatCompletionChunkObject.swift\n//\n//\n//  Created by James Rochabrun on 10/10/23.\n//\n\nimport Foundation\n\n/// Represents a [streamed](https://platform.openai.com/docs/api-reference/chat/streaming) chunk of a chat completion response returned by model, based on the provided input.\npublic struct ChatCompletionChunkObject: Decodable {\n  public struct ChatChoice: Decodable {\n    public struct Delta: Decodable {\n      /// The contents of the chunk message.\n      public let content: String?\n      /// The reasoning content generated by the model, if available.\n      public let reasoningContent: String?\n      /// The tool calls generated by the model, such as function calls.\n      public let toolCalls: [ToolCall]?\n      /// The name and arguments of a function that should be called, as generated by the model.\n      @available(*, deprecated, message: \"Deprecated and replaced by `tool_calls`\")\n      public let functionCall: FunctionCall?\n      /// The role of the author of this message.\n      public let role: String?\n      /// The refusal message generated by the model.\n      public let refusal: String?\n\n      enum CodingKeys: String, CodingKey {\n        case content\n        case reasoningContent = \"reasoning_content\"\n        case toolCalls = \"tool_calls\"\n        case functionCall = \"function_call\"\n        case role\n        case refusal\n      }\n    }\n\n    public struct LogProb: Decodable {\n      /// A list of message content tokens with log probability information.\n      let content: [TokenDetail]?\n    }\n\n    public struct TokenDetail: Decodable {\n      enum CodingKeys: String, CodingKey {\n        case token, logprob, bytes\n        case topLogprobs = \"top_logprobs\"\n      }\n\n      struct TopLogProb: Decodable {\n        /// The token.\n        let token: String?\n        /// The log probability of this token.\n        let logprob: Double?\n        /// A list of integers representing the UTF-8 
bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.\n        let bytes: [Int]?\n      }\n\n      /// The token.\n      let token: String?\n      /// The log probability of this token.\n      let logprob: Double?\n      /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.\n      let bytes: [Int]?\n      /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.\n      let topLogprobs: [TopLogProb]?\n    }\n\n    /// Provided by the Vision API.\n    public struct FinishDetails: Decodable {\n      let type: String?\n    }\n\n    /// A chat completion delta generated by streamed model responses.\n    public let delta: Delta?\n    /// The reason the model stopped generating tokens. 
This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.\n    public let finishReason: IntOrStringValue?\n    /// The index of the choice in the list of choices.\n    public let index: Int?\n    /// Provided by the Vision API.\n    public let finishDetails: FinishDetails?\n    /// Log probability information for the choice.\n    public let logprobs: LogProb?\n\n    enum CodingKeys: String, CodingKey {\n      case delta\n      case finishReason = \"finish_reason\"\n      case index\n      case finishDetails = \"finish_details\"\n      case logprobs\n    }\n  }\n\n  /// A unique identifier for the chat completion chunk.\n  public let id: String?\n  /// A list of chat completion choices. Can be more than one if n is greater than 1.\n  public let choices: [ChatChoice]?\n  /// The Unix timestamp (in seconds) of when the chat completion chunk was created.\n  public let created: Int?\n  /// The model to generate the completion.\n  public let model: String?\n  /// The service tier used for processing the request. This field is only included if the service_tier parameter is specified in the request.\n  public let serviceTier: String?\n  /// This fingerprint represents the backend configuration that the model runs with.\n  /// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n  public let systemFingerprint: String?\n  /// The object type, which is always chat.completion.chunk.\n  public let object: String?\n  /// An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request. 
When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n  public let usage: ChatUsage?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case choices\n    case created\n    case model\n    case serviceTier = \"service_tier\"\n    case systemFingerprint = \"system_fingerprint\"\n    case object\n    case usage\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Chat/ChatCompletionObject.swift",
    "content": "//\n//  ChatCompletionObject.swift\n//\n//\n//  Created by James Rochabrun on 10/10/23.\n//\n\nimport Foundation\n\n/// Represents a chat [completion](https://platform.openai.com/docs/api-reference/chat/object) response returned by model, based on the provided input.\npublic struct ChatCompletionObject: Decodable {\n  public struct ChatChoice: Decodable {\n    public struct ChatMessage: Decodable {\n      /// Provided by the Vision API.\n      public struct FinishDetails: Decodable {\n        let type: String?\n      }\n\n      public struct Audio: Decodable {\n        /// Unique identifier for this audio response.\n        public let id: String?\n        /// The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.\n        public let expiresAt: Int?\n        /// Base64 encoded audio bytes generated by the model, in the format specified in the request.\n        public let data: String?\n        /// Transcript of the audio generated by the model.\n        public let transcript: String?\n\n        enum CodingKeys: String, CodingKey {\n          case id\n          case expiresAt = \"expires_at\"\n          case data\n          case transcript\n        }\n      }\n\n      /// The contents of the message.\n      public let content: String?\n      /// The tool calls generated by the model, such as function calls.\n      public let toolCalls: [ToolCall]?\n      /// The name and arguments of a function that should be called, as generated by the model.\n      @available(*, deprecated, message: \"Deprecated and replaced by `tool_calls`\")\n      public let functionCall: FunctionCall?\n      /// The role of the author of this message.\n      public let role: String?\n      /// The reasoning content generated by the model, if available.\n      public let reasoningContent: String?\n      /// Provided by the Vision API.\n      public let finishDetails: FinishDetails?\n      /// 
The refusal message generated by the model.\n      public let refusal: String?\n      /// If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).\n      public let audio: Audio?\n\n      enum CodingKeys: String, CodingKey {\n        case content\n        case toolCalls = \"tool_calls\"\n        case functionCall = \"function_call\"\n        case role\n        case finishDetails = \"finish_details\"\n        case reasoningContent = \"reasoning_content\"\n        case refusal\n        case audio\n      }\n    }\n\n    public struct LogProb: Decodable {\n      /// A list of message content tokens with log probability information.\n      let content: [TokenDetail]?\n    }\n\n    public struct TokenDetail: Decodable {\n      enum CodingKeys: String, CodingKey {\n        case token, logprob, bytes\n        case topLogprobs = \"top_logprobs\"\n      }\n\n      struct TopLogProb: Decodable {\n        /// The token.\n        let token: String?\n        /// The log probability of this token.\n        let logprob: Double?\n        /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.\n        let bytes: [Int]?\n      }\n\n      /// The token.\n      let token: String?\n      /// The log probability of this token.\n      let logprob: Double?\n      /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. 
Can be null if there is no bytes representation for the token.\n      let bytes: [Int]?\n      /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.\n      let topLogprobs: [TopLogProb]?\n    }\n\n    /// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.\n    public let finishReason: IntOrStringValue?\n    /// The index of the choice in the list of choices.\n    public let index: Int?\n    /// A chat completion message generated by the model.\n    public let message: ChatMessage?\n    /// Log probability information for the choice.\n    public let logprobs: LogProb?\n\n    enum CodingKeys: String, CodingKey {\n      case finishReason = \"finish_reason\"\n      case index\n      case message\n      case logprobs\n    }\n  }\n\n  /// A unique identifier for the chat completion.\n  public let id: String?\n  /// A list of chat completion choices. Can be more than one if n is greater than 1.\n  public let choices: [ChatChoice]?\n  /// The Unix timestamp (in seconds) of when the chat completion was created.\n  public let created: Int?\n  /// The model used for the chat completion.\n  public let model: String?\n  /// The service tier used for processing the request. 
This field is only included if the service_tier parameter is specified in the request.\n  public let serviceTier: String?\n  /// This fingerprint represents the backend configuration that the model runs with.\n  /// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n  public let systemFingerprint: String?\n  /// The object type, which is always chat.completion.\n  public let object: String?\n  /// Usage statistics for the completion request.\n  public let usage: ChatUsage?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case choices\n    case created\n    case model\n    case serviceTier = \"service_tier\"\n    case systemFingerprint = \"system_fingerprint\"\n    case object\n    case usage\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Chat/ChatUsage.swift",
    "content": "//\n//  ChatUsage.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 1/3/25.\n//\n\nimport Foundation\n\n// MARK: - ChatUsage\n\npublic struct ChatUsage: Decodable {\n  /// Number of tokens in the prompt\n  public let promptTokens: Int?\n  /// Number of tokens in the generated completion\n  public let completionTokens: Int?\n  /// Total number of tokens used in the request (prompt + completion)\n  public let totalTokens: Int?\n  /// Detailed breakdown of prompt tokens\n  public let promptTokensDetails: PromptTokenDetails?\n  /// Detailed breakdown of completion tokens\n  public let completionTokensDetails: CompletionTokenDetails?\n\n  enum CodingKeys: String, CodingKey {\n    case promptTokens = \"prompt_tokens\"\n    case completionTokens = \"completion_tokens\"\n    case totalTokens = \"total_tokens\"\n    case promptTokensDetails = \"prompt_tokens_details\"\n    case completionTokensDetails = \"completion_tokens_details\"\n  }\n}\n\n// MARK: - PromptTokenDetails\n\npublic struct PromptTokenDetails: Decodable {\n  /// Number of tokens retrieved from cache\n  public let cachedTokens: Int?\n  /// Number of tokens used for audio processing\n  public let audioTokens: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case cachedTokens = \"cached_tokens\"\n    case audioTokens = \"audio_tokens\"\n  }\n}\n\n// MARK: - CompletionTokenDetails\n\npublic struct CompletionTokenDetails: Decodable {\n  /// Number of tokens used for reasoning\n  public let reasoningTokens: Int?\n  /// Number of tokens used for audio processing\n  public let audioTokens: Int?\n  /// Number of tokens in accepted predictions\n  public let acceptedPredictionTokens: Int?\n  /// Number of tokens in rejected predictions\n  public let rejectedPredictionTokens: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case reasoningTokens = \"reasoning_tokens\"\n    case audioTokens = \"audio_tokens\"\n    case acceptedPredictionTokens = \"accepted_prediction_tokens\"\n    case 
rejectedPredictionTokens = \"rejected_prediction_tokens\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Conversations/ConversationModel.swift",
    "content": "//\n//  ConversationModel.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 10/5/25.\n//\n\nimport Foundation\n\n/// A conversation object returned from the Conversations API\npublic struct ConversationModel: Decodable {\n  /// The unique ID of the conversation\n  public let id: String\n\n  /// The object type, which is always \"conversation\"\n  public let object: String\n\n  /// The time at which the conversation was created, measured in seconds since the Unix epoch\n  public let createdAt: Int\n\n  /// Set of 16 key-value pairs that can be attached to an object\n  /// Keys: max 64 characters, Values: max 512 characters\n  public let metadata: [String: String]\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Delta.swift",
    "content": "//\n//  Delta.swift\n//\n//\n//  Created by James Rochabrun on 3/22/24.\n//\n\nimport Foundation\n\n/// Protocol for Assistant Stream Delta.\n/// Defines a set of requirements for objects that can be included in an assistant event stream, such as `RunStepDeltaObject` or `MessageDeltaObject`.\npublic protocol Delta: Decodable {\n  associatedtype T\n  var id: String { get }\n  var object: String { get }\n  var delta: T { get }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Embedding/EmbeddingObject.swift",
    "content": "//\n//  EmbeddingObject.swift\n//\n//\n//  Created by James Rochabrun on 10/12/23.\n//\n\nimport Foundation\n\n/// [Represents an embedding vector returned by embedding endpoint.](https://platform.openai.com/docs/api-reference/embeddings/object)\npublic struct EmbeddingObject: Decodable {\n  /// The object type, which is always \"embedding\".\n  public let object: String\n  /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings).\n  public let embedding: [Float]\n  /// The index of the embedding in the list of embeddings.\n  public let index: Int\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/File/FileObject.swift",
    "content": "//\n//  FileObject.swift\n//\n//\n//  Created by James Rochabrun on 10/16/23.\n//\n\nimport Foundation\n\n/// The [File object](https://platform.openai.com/docs/api-reference/files/object) represents a document that has been uploaded to OpenAI.\npublic struct FileObject: Decodable {\n  public enum Status: String {\n    case uploaded\n    case processed\n    case pending\n    case error\n    case deleting\n    case deleted\n  }\n\n  /// The file identifier, which can be referenced in the API endpoints.\n  public let id: String\n  /// The size of the file in bytes.\n  public let bytes: Int?\n  /// The Unix timestamp (in seconds) for when the file was created.\n  public let createdAt: Int\n  /// The name of the file.\n  public let filename: String\n  /// The object type, which is always \"file\".\n  public let object: String\n  /// The intended purpose of the file. Currently, only \"fine-tune\" is supported.\n  public let purpose: String\n  /// Deprecated. The current status of the file, which can be either uploaded, processed, or error.\n  @available(*, deprecated, message: \"Deprecated\")\n  public let status: String?\n  /// Additional details about the status of the file. If the file is in the error state, this will include a message describing the error.\n  @available(\n    *,\n    deprecated,\n    message: \"Deprecated. For details on why a fine-tuning training file failed validation, see the error field on fine_tuning.job\")\n  public let statusDetails: String?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case bytes\n    case createdAt = \"created_at\"\n    case filename\n    case object\n    case purpose\n    case status\n    case statusDetails = \"status_details\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/FineTuning/FineTuningJobEventObject.swift",
    "content": "//\n//  FineTuningJobEventObject.swift\n//\n//\n//  Created by James Rochabrun on 10/17/23.\n//\n\nimport Foundation\n\n/// [Fine-tuning job event object](https://platform.openai.com/docs/api-reference/fine-tuning/event-object)\npublic struct FineTuningJobEventObject: Decodable {\n  public struct Data: Decodable {\n    public let step: Int\n    public let trainLoss: Double\n    public let trainMeanTokenAccuracy: Double\n\n    enum CodingKeys: String, CodingKey {\n      case step\n      case trainLoss = \"train_loss\"\n      case trainMeanTokenAccuracy = \"train_mean_token_accuracy\"\n    }\n  }\n\n  public let id: String\n\n  public let createdAt: Int\n\n  public let level: String\n\n  public let message: String\n\n  public let object: String\n\n  public let type: String?\n\n  public let data: Data?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case createdAt = \"created_at\"\n    case level\n    case message\n    case object\n    case type\n    case data\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/FineTuning/FineTuningJobObject.swift",
    "content": "//\n//  FineTuningJobObject.swift\n//\n//\n//  Created by James Rochabrun on 10/17/23.\n//\n\nimport Foundation\n\n// MARK: - FineTuningJobObject\n\n/// The fine_tuning.job object represents a [fine-tuning job](https://platform.openai.com/docs/api-reference/fine-tuning/object) that has been created through the API.\npublic struct FineTuningJobObject: Decodable {\n  public enum Status: String {\n    case validatingFiles = \"validating_files\"\n    case queued\n    case running\n    case succeeded\n    case failed\n    case cancelled\n  }\n\n  public struct HyperParameters: Decodable {\n    /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. \"auto\" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs.\n    public let nEpochs: IntOrStringValue\n\n    enum CodingKeys: String, CodingKey {\n      case nEpochs = \"n_epochs\"\n    }\n  }\n\n  /// The object identifier, which can be referenced in the API endpoints.\n  public let id: String\n  /// The Unix timestamp (in seconds) for when the fine-tuning job was created.\n  public let createdAt: Int\n  /// For fine-tuning jobs that have failed, this will contain more information on the cause of the failure.\n  public let error: OpenAIErrorResponse.Error?\n  /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running.\n  public let fineTunedModel: String?\n  /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running.\n  public let finishedAt: Int?\n  /// The hyperparameters used for the fine-tuning job. 
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)  for more details.\n  public let hyperparameters: HyperParameters\n  /// The base model that is being fine-tuned.\n  public let model: String\n  /// The object type, which is always \"fine_tuning.job\".\n  public let object: String\n  /// The organization that owns the fine-tuning job.\n  public let organizationId: String\n  /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n  public let resultFiles: [String]\n  /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.\n  public let status: String\n  /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running.\n  public let trainedTokens: Int?\n\n  /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n  public let trainingFile: String\n  /// The file ID used for validation. 
You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n  public let validationFile: String?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case createdAt = \"created_at\"\n    case error\n    case fineTunedModel = \"fine_tuned_model\"\n    case finishedAt = \"finished_at\"\n    case hyperparameters\n    case model\n    case object\n    case organizationId = \"organization_id\"\n    case resultFiles = \"result_files\"\n    case status\n    case trainedTokens = \"trained_tokens\"\n    case trainingFile = \"training_file\"\n    case validationFile = \"validation_file\"\n  }\n}\n\n// MARK: - IntOrStringValue\n\npublic enum IntOrStringValue: Decodable {\n  case int(Int)\n  case string(String)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n    if let intValue = try? container.decode(Int.self) {\n      self = .int(intValue)\n      return\n    }\n    if let stringValue = try? container.decode(String.self) {\n      self = .string(stringValue)\n      return\n    }\n    throw DecodingError.dataCorruptedError(in: container, debugDescription: \"Invalid value for IntOrStringValue\")\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Image/ImageObject.swift",
    "content": "//\n//  ImageObject.swift\n//\n//\n//  Created by James Rochabrun on 10/12/23.\n//\n\nimport Foundation\n\n/// [Represents the url or the content of an image generated by the OpenAI API.](https://platform.openai.com/docs/api-reference/images/object)\npublic struct ImageObject: Decodable {\n  /// The URL of the generated image, if response_format is url (default).\n  public let url: URL?\n  /// The base64-encoded JSON of the generated image, if response_format is b64_json.\n  public let b64Json: String?\n  /// The prompt that was used to generate the image, if there was any revision to the prompt.\n  public let revisedPrompt: String?\n\n  enum CodingKeys: String, CodingKey {\n    case url\n    case b64Json = \"b64_json\"\n    case revisedPrompt = \"revised_prompt\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/ImageGen/CreateImageResponse.swift",
    "content": "//\n//  CreateImageResponse.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 4/24/25.\n//\n\nimport Foundation\n\n/// Response from the 'Create Image' endpoint:\n/// https://platform.openai.com/docs/api-reference/images/create\nimport Foundation\n\npublic struct CreateImageResponse: Decodable {\n  public struct ImageData: Decodable, Equatable {\n    /// Base64-encoded image data (only present for gpt-image-1 or if `response_format = b64_json`)\n    public let b64JSON: String?\n\n    /// The URL of the generated image (default for DALL·E 2 and 3, absent for gpt-image-1)\n    public let url: String?\n\n    /// The revised prompt used (DALL·E 3 only)\n    public let revisedPrompt: String?\n\n    enum CodingKeys: String, CodingKey {\n      case b64JSON = \"b64_json\"\n      case url\n      case revisedPrompt = \"revised_prompt\"\n    }\n  }\n\n  public struct Usage: Decodable {\n    public struct InputTokensDetails: Decodable {\n      public let textTokens: Int\n      public let imageTokens: Int\n\n      enum CodingKeys: String, CodingKey {\n        case textTokens = \"text_tokens\"\n        case imageTokens = \"image_tokens\"\n      }\n    }\n\n    /// The number of input tokens (text + image)\n    public let inputTokens: Int\n\n    /// The number of output tokens (image)\n    public let outputTokens: Int\n\n    /// Total token usage\n    public let totalTokens: Int\n\n    /// Input token details (optional)\n    public let inputTokensDetails: InputTokensDetails?\n\n    enum CodingKeys: String, CodingKey {\n      case inputTokens = \"input_tokens\"\n      case outputTokens = \"output_tokens\"\n      case totalTokens = \"total_tokens\"\n      case inputTokensDetails = \"input_tokens_details\"\n    }\n  }\n\n  /// The Unix timestamp (in seconds) of when the image was created\n  public let created: TimeInterval?\n\n  /// The list of generated images\n  public let data: [ImageData]?\n\n  /// Token usage info (only for gpt-image-1)\n  public let 
usage: Usage?\n\n  enum CodingKeys: String, CodingKey {\n    case created\n    case data\n    case usage\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Messages/MessageContent.swift",
    "content": "//\n//  MessageContent.swift\n//\n//\n//  Created by James Rochabrun on 3/17/24.\n//\n\nimport Foundation\n\n// MARK: - AssistantMessageContent\n\n///  The [content](https://platform.openai.com/docs/api-reference/messages/object#messages/object-content) of the message in array of text and/or images.\npublic enum AssistantMessageContent: Codable {\n  case imageFile(ImageFile)\n  case imageUrl(ImageURL)\n  case text(Text)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: ContentTypeKey.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"image_file\":\n      let imageFile = try ImageFile(from: decoder)\n      self = .imageFile(imageFile)\n\n    case \"image_url\":\n      let imageUrl = try ImageURL(from: decoder)\n      self = .imageUrl(imageUrl)\n\n    case \"text\":\n      let text = try Text(from: decoder)\n      self = .text(text)\n\n    default:\n      throw DecodingError.dataCorruptedError(forKey: .type, in: container, debugDescription: \"Invalid type for content\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n    switch self {\n    case .imageFile(let imageFile):\n      try container.encode(\"image_file\", forKey: .type)\n      try container.encode(imageFile, forKey: .imageFile)\n\n    case .imageUrl(let imageUrl):\n      try container.encode(\"image_url\", forKey: .type)\n      try container.encode(imageUrl, forKey: .imageUrl)\n\n    case .text(let text):\n      try container.encode(\"text\", forKey: .type)\n      try container.encode(text, forKey: .text)\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case imageFile = \"image_file\"\n    case imageUrl = \"image_url\"\n    case text\n  }\n\n  enum ContentTypeKey: CodingKey {\n    case type\n  }\n}\n\n// MARK: - ImageFile\n\npublic struct ImageFile: Codable {\n  public struct 
ImageFileContent: Codable {\n    /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content.\n    public let fileID: String\n\n    enum CodingKeys: String, CodingKey {\n      case fileID = \"file_id\"\n    }\n  }\n\n  /// Always image_file.\n  public let type: String\n\n  /// References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.\n  public let imageFile: ImageFileContent\n\n  enum CodingKeys: String, CodingKey {\n    case imageFile = \"image_file\"\n    case type\n  }\n}\n\n// MARK: - ImageURL\n\npublic struct ImageURL: Codable {\n  public struct ImageUrlContent: Codable {\n    /// The [File](https://platform.openai.com/docs/api-reference/files) URL  of the image in the message content.\n    public let url: String\n\n    enum CodingKeys: String, CodingKey {\n      case url\n    }\n  }\n\n  /// Always image_url.\n  public let type: String\n\n  /// References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.\n  public let imageUrl: ImageUrlContent\n\n  enum CodingKeys: String, CodingKey {\n    case imageUrl = \"image_url\"\n    case type\n  }\n}\n\n// MARK: - Text\n\npublic struct Text: Codable {\n  /// Always text.\n  public let type: String\n  /// The text content that is part of a message.\n  public let text: TextContent\n\n  public struct TextContent: Codable {\n    /// The data that makes up the text.\n    public let value: String\n\n    public let annotations: [Annotation]?\n  }\n}\n\n// MARK: - Annotation\n\npublic enum Annotation: Codable {\n  case fileCitation(FileCitation)\n  case filePath(FilePath)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: AnnotationTypeKey.self)\n    let type = try container.decode(String.self, forKey: .type)\n    switch type {\n    case \"file_citation\":\n      let fileCitationContainer = try decoder.container(keyedBy: 
CodingKeys.self)\n      let fileCitation = try fileCitationContainer.decode(FileCitation.self, forKey: .fileCitation)\n      self = .fileCitation(fileCitation)\n\n    case \"file_path\":\n      let filePathContainer = try decoder.container(keyedBy: CodingKeys.self)\n      let filePath = try filePathContainer.decode(FilePath.self, forKey: .filePath)\n      self = .filePath(filePath)\n\n    default:\n      throw DecodingError.dataCorruptedError(forKey: .type, in: container, debugDescription: \"Invalid type for annotation\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n    switch self {\n    case .fileCitation(let fileCitation):\n      try container.encode(\"file_citation\", forKey: .type)\n      try container.encode(fileCitation, forKey: .fileCitation)\n\n    case .filePath(let filePath):\n      try container.encode(\"file_path\", forKey: .type)\n      try container.encode(filePath, forKey: .filePath)\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case text\n    case fileCitation = \"file_citation\"\n    case filePath = \"file_path\"\n    case startIndex = \"start_index\"\n    case endIndex = \"end_index\"\n  }\n\n  enum AnnotationTypeKey: CodingKey {\n    case type\n  }\n}\n\n// MARK: - FileCitation\n\n/// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the \"retrieval\" tool to search files.\npublic struct FileCitation: Codable {\n  public struct FileCitationDetails: Codable {\n    /// The ID of the specific File the citation is from.\n    public let fileID: String\n    /// The specific quote in the file.\n    public let quote: String\n\n    enum CodingKeys: String, CodingKey {\n      case fileID = \"file_id\"\n      case quote\n    }\n  }\n\n  /// Always file_citation, except when using Assistants API Beta, e.g. 
when using file_store search\n  public let type: String?\n  /// The text in the message content that needs to be replaced. Not always present with Assistants API Beta, e.g. when using file_store search\n  public let text: String?\n  public let fileCitation: FileCitationDetails?\n  public let startIndex: Int?\n  public let endIndex: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case text\n    case fileCitation = \"file_citation\"\n    case startIndex = \"start_index\"\n    case endIndex = \"end_index\"\n  }\n}\n\n// MARK: - FilePath\n\n/// A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.\npublic struct FilePath: Codable {\n  public struct FilePathDetails: Codable {\n    /// The ID of the file that was generated.\n    public let fileID: String\n\n    enum CodingKeys: String, CodingKey {\n      case fileID = \"file_id\"\n    }\n  }\n\n  /// Always file_path\n  public let type: String\n  /// The text in the message content that needs to be replaced.\n  public let text: String\n  public let filePath: FilePathDetails\n  public let startIndex: Int\n  public let endIndex: Int\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case text\n    case filePath = \"file_path\"\n    case startIndex = \"start_index\"\n    case endIndex = \"end_index\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Messages/MessageDeltaObject.swift",
    "content": "//\n//  MessageDeltaObject.swift\n//\n//\n//  Created by James Rochabrun on 3/17/24.\n//\n\nimport Foundation\n\n/// [MessageDeltaObject](https://platform.openai.com/docs/api-reference/assistants-streaming/message-delta-object)\n///\n/// Represents a message delta i.e. any changed fields on a message during streaming.\npublic struct MessageDeltaObject: Delta {\n  public struct Delta: Decodable {\n    /// The entity that produced the message. One of user or assistant.\n    public let role: String?\n    /// The content of the message in array of text and/or images.\n    public let content: [AssistantMessageContent]\n\n    enum Role: String {\n      case user\n      case assistant\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case role\n      case content\n    }\n  }\n\n  /// The identifier of the message, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always thread.message.delta.\n  public let object: String\n  /// The delta containing the fields that have changed on the Message.\n  public let delta: Delta\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Messages/MessageObject.swift",
    "content": "//\n//  MessageObject.swift\n//\n//\n//  Created by James Rochabrun on 11/15/23.\n//\n\nimport Foundation\n\n/// BETA.\n/// Represents a [message](https://platform.openai.com/docs/api-reference/messages) within a [thread](https://platform.openai.com/docs/api-reference/threads).\n/// [Message Object](https://platform.openai.com/docs/api-reference/messages/object)\npublic struct MessageObject: Codable {\n  public init(\n    id: String,\n    object: String,\n    createdAt: Int,\n    threadID: String,\n    status: String?,\n    incompleteDetails: IncompleteDetails?,\n    completedAt: Int?,\n    role: String,\n    content: [MessageContent],\n    assistantID: String?,\n    runID: String?,\n    attachments: [MessageAttachment]?,\n    metadata: [String: String]?)\n  {\n    self.id = id\n    self.object = object\n    self.createdAt = createdAt\n    self.threadID = threadID\n    self.status = status\n    self.incompleteDetails = incompleteDetails\n    self.completedAt = completedAt\n    self.role = role\n    self.content = content\n    self.assistantID = assistantID\n    self.runID = runID\n    self.attachments = attachments\n    self.metadata = metadata\n  }\n\n  /// The identifier, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always thread.message.\n  public let object: String\n  /// The Unix timestamp (in seconds) for when the message was created.\n  public let createdAt: Int\n  /// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to.\n  public let threadID: String\n  /// The status of the message, which can be either in_progress, incomplete, or completed.\n  public let status: String?\n  /// On an incomplete message, details about why the message is incomplete.\n  public let incompleteDetails: IncompleteDetails?\n  /// The Unix timestamp (in seconds) for when the message was completed.\n  public let completedAt: Int?\n  /// The entity that produced the 
message. One of user or assistant.\n  public let role: String\n  /// The content of the message in array of text and/or images.\n  public let content: [MessageContent]\n  /// If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) that authored this message.\n  public let assistantID: String?\n  /// If applicable, the ID of the [run](https://platform.openai.com/docs/api-reference/runs) associated with the authoring of this message.\n  public let runID: String?\n  /// A list of files attached to the message, and the tools they were added to.\n  public let attachments: [MessageAttachment]?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  public let metadata: [String: String]?\n\n  enum Role: String {\n    case user\n    case assistant\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case threadID = \"thread_id\"\n    case status\n    case incompleteDetails = \"incomplete_details\"\n    case completedAt = \"completed_at\"\n    case role\n    case content\n    case assistantID = \"assistant_id\"\n    case runID = \"run_id\"\n    case attachments\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Model/ModelObject.swift",
    "content": "//\n//  ModelObject.swift\n//\n//\n//  Created by James Rochabrun on 10/13/23.\n//\n\nimport Foundation\n\n/// Describes an OpenAI [model](https://platform.openai.com/docs/api-reference/models/object) offering that can be used with the API.\npublic struct ModelObject: Decodable {\n  public struct Permission: Decodable {\n    public let id: String?\n    public let object: String?\n    public let created: Int?\n    public let allowCreateEngine: Bool?\n    public let allowSampling: Bool?\n    public let allowLogprobs: Bool?\n    public let allowSearchIndices: Bool?\n    public let allowView: Bool?\n    public let allowFineTuning: Bool?\n    public let organization: String?\n    public let group: String?\n    public let isBlocking: Bool?\n\n    enum CodingKeys: String, CodingKey {\n      case id\n      case object\n      case created\n      case allowCreateEngine = \"allow_create_engine\"\n      case allowSampling = \"allow_sampling\"\n      case allowLogprobs = \"allow_logprobs\"\n      case allowSearchIndices = \"allow_search_indices\"\n      case allowView = \"allow_view\"\n      case allowFineTuning = \"allow_fine_tuning\"\n      case organization\n      case group\n      case isBlocking = \"is_blocking\"\n    }\n  }\n\n  /// The model identifier, which can be referenced in the API endpoints.\n  public let id: String\n  /// The Unix timestamp (in seconds) when the model was created.\n  public let created: Int?\n  /// The object type, which is always \"model\".\n  public let object: String?\n  /// The organization that owns the model.\n  public let ownedBy: String?\n  /// An array representing the current permissions of a model. Each element in the array corresponds to a specific permission setting. 
If there are no permissions or if the data is unavailable, the array may be nil.\n  public let permission: [Permission]?\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case created\n    case object\n    case ownedBy = \"owned_by\"\n    case permission\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Moderation/ModerationObject.swift",
    "content": "//\n//  ModerationObject.swift\n//\n//\n//  Created by James Rochabrun on 10/13/23.\n//\n\nimport Foundation\n\n/// The [moderation object](https://platform.openai.com/docs/api-reference/moderations/object). Represents policy compliance report by OpenAI's content moderation model against a given input.\npublic struct ModerationObject: Decodable {\n  public struct Moderation: Decodable {\n    public struct Category<T: Decodable>: Decodable {\n      /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment.\n      public let hate: T\n      /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.\n      public let hateThreatening: T\n      /// Content that expresses, incites, or promotes harassing language towards any target.\n      public let harassment: T\n      /// Harassment content that also includes violence or serious harm towards any target.\n      public let harassmentThreatening: T\n      /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.\n      public let selfHarm: T\n      /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.\n      public let selfHarmIntent: T\n      /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.\n      public let selfHarmInstructions: T\n      /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).\n      public 
let sexual: T\n      /// Sexual content that includes an individual who is under 18 years old.\n      public let sexualMinors: T\n      /// Content that depicts death, violence, or physical injury.\n      public let violence: T\n      /// Content that depicts death, violence, or physical injury in graphic detail.\n      public let violenceGraphic: T\n\n      enum CodingKeys: String, CodingKey {\n        case hate\n        case hateThreatening = \"hate/threatening\"\n        case harassment\n        case harassmentThreatening = \"harassment/threatening\"\n        case selfHarm = \"self-harm\"\n        case selfHarmIntent = \"self-harm/intent\"\n        case selfHarmInstructions = \"self-harm/instructions\"\n        case sexual\n        case sexualMinors = \"sexual/minors\"\n        case violence\n        case violenceGraphic = \"violence/graphic\"\n      }\n    }\n\n    /// Whether the content violates OpenAI's usage policies.\n    public let flagged: Bool\n    /// A list of the categories, and whether they are flagged or not.\n    public let categories: Category<Bool>\n    /// A list of the categories along with their scores as predicted by model.\n    public let categoryScores: Category<Double>\n\n    enum CodingKeys: String, CodingKey {\n      case categories\n      case categoryScores = \"category_scores\"\n      case flagged\n    }\n  }\n\n  /// The unique identifier for the moderation request.\n  public let id: String\n  /// The model used to generate the moderation results.\n  public let model: String\n  /// A list of moderation objects.\n  public let results: [Moderation]\n\n  public var isFlagged: Bool {\n    results.map(\\.flagged).contains(true)\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/OpenAIErrorResponse.swift",
    "content": "//\n//  OpenAIErrorResponse.swift\n//\n//\n//  Created by James Rochabrun on 11/13/23.\n//\n\nimport Foundation\n\n// {\n//  \"error\": {\n//    \"message\": \"Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\",\n//    \"type\": \"invalid_request_error\",\n//    \"param\": \"messages.[2].role\",\n//    \"code\": null\n//  }\n// }\n\npublic struct OpenAIErrorResponse: Decodable {\n  public let error: Error\n\n  public struct Error: Decodable {\n    public let message: String?\n    public let type: String?\n    public let param: String?\n    public let code: String?\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/OpenAIResponse.swift",
    "content": "//\n//  OpenAIResponse.swift\n//\n//\n//  Created by James Rochabrun on 10/13/23.\n//\n\nimport Foundation\n\n/// A generic structure for OpenAI API responses.\n/// e.g:\n/// ```json\n/// {\n/// \"object\": \"list\",\n/// \"data\": [\n///  {\n///    \"object\": \"embedding\",\n///    \"embedding\": [\n///      0.0023064255,\n///      -0.009327292,\n///      .... (1536 floats total for ada-002)\n///      -0.0028842222,\n///    ],\n///    \"index\": 0\n///  }\n/// ],\n/// \"model\": \"text-embedding-ada-002\",\n/// \"usage\": {\n///  \"prompt_tokens\": 8,\n///  \"total_tokens\": 8\n/// }\n/// }\npublic struct OpenAIResponse<T: Decodable>: Decodable {\n  public struct Usage: Decodable {\n    public let promptTokens: Int\n    public let totalTokens: Int\n\n    enum CodingKeys: String, CodingKey {\n      case promptTokens = \"prompt_tokens\"\n      case totalTokens = \"total_tokens\"\n    }\n  }\n\n  public let object: String?\n  public let data: [T]\n  public let model: String?\n  public let usage: Usage?\n  public let hasMore: Bool?\n  public let created: Int?\n  public let firstID: String?\n  public let lastID: String?\n\n  enum CodingKeys: String, CodingKey {\n    case object\n    case data\n    case model\n    case usage\n    case hasMore = \"has_more\"\n    case created\n    case firstID = \"first_id\"\n    case lastID = \"last_id\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Realtime/OpenAIRealtimeInputAudioBufferSpeechStarted.swift",
    "content": "//\n//  OpenAIRealtimeInputAudioBufferSpeechStarted.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\n/// This is sent from server to client when vad detects that speech started.\npublic struct OpenAIRealtimeInputAudioBufferSpeechStarted: Decodable, Sendable {\n  public let type = \"input_audio_buffer.speech_started\"\n  public let audioStartMs: Int\n\n  public init(audioStartMs: Int) {\n    self.audioStartMs = audioStartMs\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case audioStartMs = \"audio_start_ms\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Realtime/OpenAIRealtimeMessage.swift",
    "content": "//\n//  OpenAIRealtimeMessage.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\npublic enum OpenAIRealtimeMessage: Sendable {\n  case error(String?)\n  case sessionCreated // \"session.created\"\n  case sessionUpdated // \"session.updated\"\n  case responseCreated // \"response.created\"\n  case responseAudioDelta(String) // \"response.audio.delta\"\n  case inputAudioBufferSpeechStarted // \"input_audio_buffer.speech_started\"\n  case responseFunctionCallArgumentsDone(String, String, String) // \"response.function_call_arguments.done\"\n\n  // Add new cases for transcription\n  case responseTranscriptDelta(String) // \"response.audio_transcript.delta\"\n  case responseTranscriptDone(String) // \"response.audio_transcript.done\"\n  case inputAudioBufferTranscript(String) // \"input_audio_buffer.transcript\"\n  case inputAudioTranscriptionDelta(String) // \"conversation.item.input_audio_transcription.delta\"\n  case inputAudioTranscriptionCompleted(String) // \"conversation.item.input_audio_transcription.completed\"\n\n  // MCP (Model Context Protocol) messages\n  case mcpListToolsInProgress // \"mcp_list_tools.in_progress\"\n  case mcpListToolsCompleted([String: Any]) // \"mcp_list_tools.completed\" with tools data\n  case mcpListToolsFailed(String?) // \"mcp_list_tools.failed\" with error details\n  /// Response completion with potential errors\n  case responseDone(status: String, statusDetails: [String: Any]?) // \"response.done\"\n\n  // Text streaming (for text-only responses)\n  case responseTextDelta(String) // \"response.text.delta\"\n  case responseTextDone(String) // \"response.text.done\"\n\n  // Output item lifecycle\n  case responseOutputItemAdded(itemId: String, type: String) // \"response.output_item.added\"\n  case responseOutputItemDone(itemId: String, type: String, content: [[String: Any]]?) 
// \"response.output_item.done\"\n\n  // Content part lifecycle\n  case responseContentPartAdded(type: String) // \"response.content_part.added\"\n  case responseContentPartDone(type: String, text: String?) // \"response.content_part.done\"\n\n  // MCP response\n  case responseMcpCallCompleted(eventId: String?, itemId: String?, outputIndex: Int?)\n  case responseMcpCallInProgress\n\n  /// Conversation item\n  case conversationItemCreated(itemId: String, type: String, role: String?) // \"conversation.item.created\"\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Realtime/OpenAIRealtimeResponseFunctionCallArgumentsDone.swift",
    "content": "//\n//  OpenAIRealtimeResponseFunctionCallArgumentsDone.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\n/// Returned when the model-generated function call arguments are done streaming.\n/// Also emitted when a Response is interrupted, incomplete, or cancelled.\n/// https://platform.openai.com/docs/api-reference/realtime-server-events/response/function_call_arguments/done\npublic struct OpenAIRealtimeResponseFunctionCallArgumentsDone: Encodable {\n  public let type = \"response.function_call_arguments.done\"\n  public let name: String?\n  public let arguments: String?\n\n  public init(name: String, arguments: String) {\n    self.name = name\n    self.arguments = arguments\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Response/OutputItem.swift",
    "content": "//\n//  OutputItem.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n\nimport Foundation\n\n/// An output item from the model response\npublic enum OutputItem: Decodable {\n  /// An output message from the model\n  case message(Message)\n  /// The results of a file search tool call\n  case fileSearchCall(FileSearchToolCall)\n  /// A tool call to run a function\n  case functionCall(FunctionToolCall)\n  /// The results of a web search tool call\n  case webSearchCall(WebSearchToolCall)\n  /// A tool call to a computer use tool\n  case computerCall(ComputerToolCall)\n  /// A description of the chain of thought used by a reasoning model\n  case reasoning(Reasoning)\n  /// An image generation request made by the model\n  case imageGenerationCall(ImageGenerationCall)\n  /// A tool call to run code\n  case codeInterpreterCall(CodeInterpreterCall)\n  /// A tool call to run a command on the local shell\n  case localShellCall(LocalShellCall)\n  /// An invocation of a tool on an MCP server\n  case mcpCall(MCPCall)\n  /// A list of tools available on an MCP server\n  case mcpListTools(MCPListTools)\n  /// A request for human approval of a tool invocation\n  case mcpApprovalRequest(MCPApprovalRequest)\n  /// A custom tool call that returns plain text\n  case customToolCall(CustomToolCall)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"message\":\n      let message = try Message(from: decoder)\n      self = .message(message)\n\n    case \"file_search_call\":\n      let fileSearch = try FileSearchToolCall(from: decoder)\n      self = .fileSearchCall(fileSearch)\n\n    case \"function_call\":\n      let functionCall = try FunctionToolCall(from: decoder)\n      self = .functionCall(functionCall)\n\n    case \"web_search_call\":\n      let webSearch = try WebSearchToolCall(from: 
decoder)\n      self = .webSearchCall(webSearch)\n\n    case \"computer_call\":\n      let computerCall = try ComputerToolCall(from: decoder)\n      self = .computerCall(computerCall)\n\n    case \"reasoning\":\n      let reasoning = try Reasoning(from: decoder)\n      self = .reasoning(reasoning)\n\n    case \"image_generation_call\":\n      let imageGeneration = try ImageGenerationCall(from: decoder)\n      self = .imageGenerationCall(imageGeneration)\n\n    case \"code_interpreter_call\":\n      let codeInterpreter = try CodeInterpreterCall(from: decoder)\n      self = .codeInterpreterCall(codeInterpreter)\n\n    case \"local_shell_call\":\n      let localShell = try LocalShellCall(from: decoder)\n      self = .localShellCall(localShell)\n\n    case \"mcp_call\":\n      let mcpCall = try MCPCall(from: decoder)\n      self = .mcpCall(mcpCall)\n\n    case \"mcp_list_tools\":\n      let mcpListTools = try MCPListTools(from: decoder)\n      self = .mcpListTools(mcpListTools)\n\n    case \"mcp_approval_request\":\n      let mcpApprovalRequest = try MCPApprovalRequest(from: decoder)\n      self = .mcpApprovalRequest(mcpApprovalRequest)\n\n    case \"custom_tool_call\":\n      let customToolCall = try CustomToolCall(from: decoder)\n      self = .customToolCall(customToolCall)\n\n    default:\n      throw DecodingError.dataCorruptedError(\n        forKey: .type,\n        in: container,\n        debugDescription: \"Unknown output item type: \\(type)\")\n    }\n  }\n\n  // MARK: - Output Message\n\n  /// An output message from the model\n  public struct Message: Decodable {\n    /// The content of the output message\n    public let content: [ContentItem]\n    /// The unique ID of the output message\n    public let id: String\n    /// The role of the output message. Always \"assistant\"\n    public let role: String\n    /// The status of the message input. 
One of \"in_progress\", \"completed\", or \"incomplete\"\n    public let status: String?\n    /// The type of the output message. Always \"message\"\n    public let type: String\n\n    enum CodingKeys: String, CodingKey {\n      case content, id, role, status, type\n    }\n  }\n\n  /// Content item in an output message\n  public enum ContentItem: Decodable {\n    /// Text output from the model\n    case outputText(OutputText)\n    /// A refusal from the model\n    case refusal(Refusal)\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.container(keyedBy: CodingKeys.self)\n      let type = try container.decode(String.self, forKey: .type)\n\n      switch type {\n      case \"output_text\":\n        let text = try OutputText(from: decoder)\n        self = .outputText(text)\n\n      case \"refusal\":\n        let refusal = try Refusal(from: decoder)\n        self = .refusal(refusal)\n\n      default:\n        throw DecodingError.dataCorruptedError(\n          forKey: .type,\n          in: container,\n          debugDescription: \"Unknown content item type: \\(type)\")\n      }\n    }\n\n    /// Text output from the model\n    public struct OutputText: Decodable {\n      /// The text content\n      public let text: String\n      /// Annotations in the text, if any\n      public let annotations: [Annotation]\n      /// The type of the content. Always \"output_text\"\n      public let type: String\n\n      enum CodingKeys: String, CodingKey {\n        case text, annotations, type\n      }\n    }\n\n    /// A refusal from the model\n    public struct Refusal: Decodable {\n      /// The refusal explanation from the model\n      public let refusal: String\n      /// The type of the refusal. 
Always \"refusal\"\n      public let type: String\n\n      enum CodingKeys: String, CodingKey {\n        case refusal, type\n      }\n    }\n\n    /// Annotation in text output\n    public struct Annotation: Decodable {\n      // Properties would be defined based on different annotation types\n      // Such as file_citation, etc.\n    }\n\n    /// Other content types could be added here as they are defined\n\n    private enum CodingKeys: String, CodingKey {\n      case type\n    }\n  }\n\n  // MARK: - File Search Tool Call\n\n  /// The results of a file search tool call\n  public struct FileSearchToolCall: Decodable {\n    /// A search result from a file search\n    public struct SearchResult: Decodable {\n      /// Set of key-value pairs attached to the object\n      public let attributes: [String: ComputerToolCall.AnyCodable]?\n      /// The unique ID of the file\n      public let fileId: String?\n      /// The name of the file\n      public let filename: String?\n      /// The relevance score of the file - a value between 0 and 1\n      public let score: Double?\n      /// The text that was retrieved from the file\n      public let text: String?\n\n      enum CodingKeys: String, CodingKey {\n        case attributes\n        case fileId = \"file_id\"\n        case filename\n        case score\n        case text\n      }\n    }\n\n    /// The unique ID of the file search tool call\n    public let id: String\n    /// The queries used to search for files\n    public let queries: [String]\n    /// The status of the file search tool call\n    public let status: String?\n    /// The type of the file search tool call. 
Always \"file_search_call\"\n    public let type: String\n    /// The results of the file search tool call\n    public let results: [SearchResult]?\n\n    enum CodingKeys: String, CodingKey {\n      case id, queries, status, type, results\n    }\n  }\n\n  // MARK: - Function Tool Call\n\n  /// A tool call to run a function\n  public struct FunctionToolCall: Decodable {\n    /// A JSON string of the arguments to pass to the function\n    public let arguments: String\n    /// The unique ID of the function tool call generated by the model\n    public let callId: String\n    /// The name of the function to run\n    public let name: String\n    /// The type of the function tool call. Always \"function_call\"\n    public let type: String\n    /// The unique ID of the function tool call\n    public let id: String\n    /// The status of the item. One of \"in_progress\", \"completed\", or \"incomplete\"\n    public let status: String?\n\n    enum CodingKeys: String, CodingKey {\n      case arguments, callId = \"call_id\", name, type, id, status\n    }\n  }\n\n  // MARK: - Custom Tool Call\n\n  /// A custom tool call that returns plain text instead of JSON\n  public struct CustomToolCall: Decodable {\n    /// The unique ID of the custom tool call\n    public let id: String\n    /// The type of the custom tool call. Always \"custom_tool_call\"\n    public let type: String\n    /// The status of the item. 
One of \"in_progress\", \"completed\", or \"incomplete\"\n    public let status: String?\n    /// The call ID for this custom tool call\n    public let callId: String\n    /// The plain text input to the custom tool\n    public let input: String\n    /// The name of the custom tool\n    public let name: String\n\n    enum CodingKeys: String, CodingKey {\n      case id, type, status\n      case callId = \"call_id\"\n      case input, name\n    }\n  }\n\n  // MARK: - Web Search Tool Call\n\n  /// The results of a web search tool call\n  public struct WebSearchToolCall: Decodable {\n    /// Action taken in web search\n    public struct Action: Decodable {\n      // Action properties will use AnyCodable for flexibility\n      // as different action types (search, open_page, find) have different structures\n    }\n\n    /// An object describing the specific action taken in this web search call\n    public let action: Action?\n    /// The unique ID of the web search tool call\n    public let id: String\n    /// The status of the web search tool call\n    public let status: String?\n    /// The type of the web search tool call. 
Always \"web_search_call\"\n    public let type: String\n\n    enum CodingKeys: String, CodingKey {\n      case action, id, status, type\n    }\n  }\n\n  // MARK: - Computer Tool Call\n\n  /// A tool call to a computer use tool\n  public struct ComputerToolCall: Decodable {\n    /// Computer action to perform\n    public struct ComputerAction: Decodable {\n      /// Initialize with type and parameters\n      public init(type: String, parameters: [String: AnyCodable]) {\n        self.type = type\n        self.parameters = parameters\n      }\n\n      public init(from decoder: Decoder) throws {\n        let container = try decoder.container(keyedBy: CodingKeys.self)\n        type = try container.decode(String.self, forKey: .type)\n\n        // Decode remaining keys as parameters\n        let additionalInfo = try decoder.singleValueContainer()\n        let allData = try additionalInfo.decode([String: AnyCodable].self)\n\n        // Filter out the 'type' key to get just the parameters\n        var params = allData\n        params.removeValue(forKey: \"type\")\n        parameters = params\n      }\n\n      /// The type of computer action to perform\n      public let type: String\n\n      private enum CodingKeys: String, CodingKey {\n        case type\n      }\n\n      /// Additional parameters for the action, varies by action type\n      private let parameters: [String: AnyCodable]\n    }\n\n    /// A type that can hold any decodable value\n    public struct AnyCodable: Codable {\n      public init(_ value: Any) {\n        self.value = value\n      }\n\n      public init(from decoder: Decoder) throws {\n        let container = try decoder.singleValueContainer()\n\n        if container.decodeNil() {\n          value = Any?.none as Any\n        } else if let bool = try? container.decode(Bool.self) {\n          value = bool\n        } else if let int = try? container.decode(Int.self) {\n          value = int\n        } else if let double = try? 
container.decode(Double.self) {\n          value = double\n        } else if let string = try? container.decode(String.self) {\n          value = string\n        } else if let array = try? container.decode([AnyCodable].self) {\n          value = array\n        } else if let dictionary = try? container.decode([String: AnyCodable].self) {\n          value = dictionary\n        } else {\n          throw DecodingError.dataCorruptedError(\n            in: container,\n            debugDescription: \"AnyCodable cannot decode value\")\n        }\n      }\n\n      public func encode(to encoder: Encoder) throws {\n        var container = encoder.singleValueContainer()\n\n        // Handle special cases first\n        if value is NSNull {\n          try container.encodeNil()\n          return\n        }\n\n        // Special handling for nil optional values\n        // This is a safe way to check if the value is a nil optional\n        let mirror = Mirror(reflecting: value)\n        if mirror.displayStyle == .optional, mirror.children.isEmpty {\n          try container.encodeNil()\n          return\n        }\n\n        // Handle other value types\n        switch value {\n        case let bool as Bool:\n          try container.encode(bool)\n        case let int as Int:\n          try container.encode(int)\n        case let double as Double:\n          try container.encode(double)\n        case let string as String:\n          try container.encode(string)\n        case let array as [AnyCodable]:\n          try container.encode(array)\n        case let dictionary as [String: AnyCodable]:\n          try container.encode(dictionary)\n        default:\n          let context = EncodingError.Context(\n            codingPath: container.codingPath,\n            debugDescription: \"AnyCodable cannot encode value \\(value)\")\n          throw EncodingError.invalidValue(value, context)\n        }\n      }\n\n      private let value: Any\n    }\n\n    /// Safety check for computer 
actions\n    public struct SafetyCheck: Decodable {\n      /// The type of the pending safety check\n      public let code: String\n      /// The ID of the pending safety check\n      public let id: String\n      /// Details about the pending safety check\n      public let message: String\n    }\n\n    /// The action to perform with the computer tool\n    public let action: ComputerAction\n    /// An identifier used when responding to the tool call with output\n    public let callId: String\n    /// The unique ID of the computer call\n    public let id: String\n    /// The pending safety checks for the computer call\n    public let pendingSafetyChecks: [SafetyCheck]\n    /// The status of the item\n    public let status: String?\n    /// The type of the computer call. Always \"computer_call\"\n    public let type: String\n\n    enum CodingKeys: String, CodingKey {\n      case action, callId = \"call_id\", id, pendingSafetyChecks = \"pending_safety_checks\", status, type\n    }\n  }\n\n  // MARK: - Reasoning\n\n  /// A description of the chain of thought used by a reasoning model\n  public struct Reasoning: Decodable {\n    /// Summary content in reasoning\n    public struct SummaryItem: Decodable {\n      /// A short summary of the reasoning used by the model\n      public let text: String\n      /// The type of the object. Always \"summary_text\"\n      public let type: String\n    }\n\n    /// Reasoning text content\n    public struct ReasoningContent: Decodable {\n      /// The reasoning text from the model\n      public let text: String\n      /// The type of the reasoning text. Always \"reasoning_text\"\n      public let type: String\n    }\n\n    /// The unique identifier of the reasoning content\n    public let id: String\n    /// Reasoning summary contents\n    public let summary: [SummaryItem]\n    /// The type of the object. 
Always \"reasoning\"\n    public let type: String\n    /// The status of the item\n    public let status: String?\n    /// Reasoning text content\n    public let content: [ReasoningContent]?\n    /// The encrypted content of the reasoning item\n    public let encryptedContent: String?\n\n    enum CodingKeys: String, CodingKey {\n      case id, summary, type, status, content\n      case encryptedContent = \"encrypted_content\"\n    }\n  }\n\n  // MARK: - Image Generation Call\n\n  /// An image generation request made by the model\n  public struct ImageGenerationCall: Decodable {\n    /// The unique ID of the image generation call\n    public let id: String\n    /// The generated image encoded in base64\n    public let result: String?\n    /// The status of the image generation call\n    public let status: String?\n    /// The type of the image generation call. Always \"image_generation_call\"\n    public let type: String\n\n    enum CodingKeys: String, CodingKey {\n      case id, result, status, type\n    }\n  }\n\n  // MARK: - Code Interpreter Call\n\n  /// A tool call to run code\n  public struct CodeInterpreterCall: Decodable {\n    /// Output from code interpreter\n    public enum Output: Decodable {\n      case log(LogOutput)\n      case image(ImageOutput)\n\n      public init(from decoder: Decoder) throws {\n        let container = try decoder.container(keyedBy: CodingKeys.self)\n        let type = try container.decode(String.self, forKey: .type)\n\n        switch type {\n        case \"logs\":\n          let output = try LogOutput(from: decoder)\n          self = .log(output)\n\n        case \"image\":\n          let output = try ImageOutput(from: decoder)\n          self = .image(output)\n\n        default:\n          throw DecodingError.dataCorruptedError(\n            forKey: .type,\n            in: container,\n            debugDescription: \"Unknown output type: \\(type)\")\n        }\n      }\n\n      public struct LogOutput: Decodable {\n        public 
let type: String\n        public let logs: String\n      }\n\n      public struct ImageOutput: Decodable {\n        public let type: String\n        public let image: ImageData\n\n        public struct ImageData: Decodable {\n          public let url: String?\n          public let fileId: String?\n\n          enum CodingKeys: String, CodingKey {\n            case url\n            case fileId = \"file_id\"\n          }\n        }\n      }\n\n      private enum CodingKeys: String, CodingKey {\n        case type\n      }\n    }\n\n    /// The code to run, or null if not available\n    public let code: String?\n    /// The ID of the container used to run the code\n    public let containerId: String\n    /// The unique ID of the code interpreter tool call\n    public let id: String\n    /// The outputs generated by the code interpreter\n    public let outputs: [Output]?\n    /// The status of the code interpreter tool call\n    public let status: String?\n    /// The type of the code interpreter tool call. Always \"code_interpreter_call\"\n    public let type: String\n\n    enum CodingKeys: String, CodingKey {\n      case code\n      case containerId = \"container_id\"\n      case id\n      case outputs\n      case status\n      case type\n    }\n  }\n\n  // MARK: - Local Shell Call\n\n  /// A tool call to run a command on the local shell\n  public struct LocalShellCall: Decodable {\n    /// Execute a shell command on the server\n    public struct Action: Decodable {\n      /// The command to run\n      public let command: [String]\n      /// The type of the local shell action. 
Always \"exec\"\n      public let type: String\n      /// Environment variables to set for the command\n      public let env: [String: String]?\n      /// Optional timeout in milliseconds for the command\n      public let timeoutMs: Int?\n      /// Optional user to run the command as\n      public let user: String?\n      /// Optional working directory to run the command in\n      public let workingDirectory: String?\n\n      enum CodingKeys: String, CodingKey {\n        case command, type, env\n        case timeoutMs = \"timeout_ms\"\n        case user\n        case workingDirectory = \"working_directory\"\n      }\n    }\n\n    /// The action to perform\n    public let action: Action\n    /// The unique ID of the local shell tool call generated by the model\n    public let callId: String\n    /// The unique ID of the local shell call\n    public let id: String\n    /// The status of the local shell call\n    public let status: String?\n    /// The type of the local shell call. Always \"local_shell_call\"\n    public let type: String\n\n    enum CodingKeys: String, CodingKey {\n      case action\n      case callId = \"call_id\"\n      case id\n      case status\n      case type\n    }\n  }\n\n  // MARK: - MCP Call\n\n  /// An invocation of a tool on an MCP server\n  public struct MCPCall: Decodable {\n    /// A JSON string of the arguments passed to the tool\n    public let arguments: String\n    /// The unique ID of the tool call\n    public let id: String\n    /// The name of the tool that was run\n    public let name: String\n    /// The label of the MCP server running the tool\n    public let serverLabel: String\n    /// The type of the item. 
Always \"mcp_call\"\n    public let type: String\n    /// Unique identifier for the MCP tool call approval request\n    public let approvalRequestId: String?\n    /// The error from the tool call, if any\n    public let error: String?\n    /// The output from the tool call\n    public let output: String?\n    /// The status of the tool call. One of in_progress, completed, incomplete, calling, or failed\n    public let status: String?\n\n    enum CodingKeys: String, CodingKey {\n      case arguments\n      case id\n      case name\n      case serverLabel = \"server_label\"\n      case type\n      case approvalRequestId = \"approval_request_id\"\n      case error\n      case output\n      case status\n    }\n  }\n\n  // MARK: - MCP List Tools\n\n  /// A list of tools available on an MCP server\n  public struct MCPListTools: Decodable {\n    /// Tool information\n    public struct Tool: Decodable {\n      /// The name of the tool\n      public let name: String\n      /// Description of the tool\n      public let description: String?\n      /// Input schema for the tool\n      public let inputSchema: [String: ComputerToolCall.AnyCodable]?\n      /// Additional annotations about the tool\n      public let annotations: [String: ComputerToolCall.AnyCodable]?\n\n      enum CodingKeys: String, CodingKey {\n        case name\n        case description\n        case inputSchema = \"input_schema\"\n        case annotations\n      }\n    }\n\n    /// The unique ID of the list\n    public let id: String\n    /// The label of the MCP server\n    public let serverLabel: String\n    /// The tools available on the server\n    public let tools: [Tool]\n    /// The type of the item. 
Always \"mcp_list_tools\"\n    public let type: String\n    /// Error message if the server could not list tools\n    public let error: String?\n\n    enum CodingKeys: String, CodingKey {\n      case id\n      case serverLabel = \"server_label\"\n      case tools\n      case type\n      case error\n    }\n  }\n\n  // MARK: - MCP Approval Request\n\n  /// A request for human approval of a tool invocation\n  public struct MCPApprovalRequest: Decodable {\n    /// A JSON string of arguments for the tool\n    public let arguments: String\n    /// The unique ID of the approval request\n    public let id: String\n    /// The name of the tool to run\n    public let name: String\n    /// The label of the MCP server making the request\n    public let serverLabel: String\n    /// The type of the item. Always \"mcp_approval_request\"\n    public let type: String\n\n    enum CodingKeys: String, CodingKey {\n      case arguments\n      case id\n      case name\n      case serverLabel = \"server_label\"\n      case type\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Response/ResponseModel.swift",
    "content": "//\n//  ResponseModel.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n/// The Response object returned when retrieving a model response\n/// [Get a model response](https://platform.openai.com/docs/api-reference/responses/get)\npublic struct ResponseModel: Decodable {\n  /// The status of the response generation.\n  public enum Status: String, Decodable {\n    case completed\n    case failed\n    case inProgress = \"in_progress\"\n    case cancelled\n    case queued\n    case incomplete\n  }\n\n  public struct ErrorObject: Decodable {\n    /// The error code for the response.\n    public let code: String\n\n    /// A human-readable description of the error.\n    public let message: String\n  }\n\n  /// Incomplete details structure\n  public struct IncompleteDetails: Decodable {\n    /// The reason why the response is incomplete\n    public let reason: String\n  }\n\n  /// Input tokens details\n  public struct InputTokensDetails: Decodable {\n    /// Number of cached tokens\n    public let cachedTokens: Int\n\n    enum CodingKeys: String, CodingKey {\n      case cachedTokens = \"cached_tokens\"\n    }\n  }\n\n  /// Output tokens details\n  public struct OutputTokensDetails: Decodable {\n    /// Number of reasoning tokens\n    public let reasoningTokens: Int\n\n    enum CodingKeys: String, CodingKey {\n      case reasoningTokens = \"reasoning_tokens\"\n    }\n  }\n\n  /// Instructions type - can be a string, an array of strings, or an array of messages (for reusable prompts)\n  public enum InstructionsType: Decodable {\n    case string(String)\n    case array([String])\n    case messages([InputMessage])\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.singleValueContainer()\n\n      if let stringValue = try? container.decode(String.self) {\n        self = .string(stringValue)\n      } else if let arrayValue = try? 
container.decode([String].self) {\n        self = .array(arrayValue)\n      } else if let messagesValue = try? container.decode([InputMessage].self) {\n        self = .messages(messagesValue)\n      } else {\n        throw DecodingError.dataCorruptedError(\n          in: container,\n          debugDescription: \"Expected String, [String], or [InputMessage] for instructions\")\n      }\n    }\n  }\n\n  /// Whether to run the model response in the background. Learn more.\n  public let background: Bool?\n\n  /// The conversation that this response belongs to. Input items and output items from this response are automatically added to this conversation.\n  public let conversation: Conversation?\n\n  /// Unix timestamp (in seconds) of when this Response was created.\n  public let createdAt: Int\n\n  /// An error object returned when the model fails to generate a Response.\n  public let error: ErrorObject?\n\n  /// Unique identifier for this Response.\n  public let id: String\n\n  /// Details about why the response is incomplete.\n  public let incompleteDetails: IncompleteDetails?\n\n  /// A system (or developer) message inserted into the model's context.\n  /// When using along with previous_response_id, the instructions from a previous response will not be carried over to the next response.\n  /// This makes it simple to swap out system (or developer) messages in new responses.\n  public let instructions: InstructionsType?\n\n  /// An upper bound for the number of tokens that can be generated for a response, including visible output tokens\n  /// and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).\n  public let maxOutputTokens: Int?\n\n  /// The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. 
Any further attempts to call a tool by the model will be ignored.\n  public let maxToolCalls: Int?\n\n  /// Set of 16 key-value pairs that can be attached to an object.\n  /// This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.\n  /// Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.\n  public let metadata: [String: String]\n\n  /// Model ID used to generate the response, like gpt-4o or o1.\n  /// OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points.\n  /// Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.\n  public let model: String\n\n  /// The object type of this resource - always set to response.\n  public let object: String\n\n  /// An array of content items generated by the model.\n  public let output: [OutputItem]\n\n  /// Whether to allow the model to run tool calls in parallel.\n  public let parallelToolCalls: Bool\n\n  /// The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n  ///  Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state).\n  public let previousResponseId: String?\n\n  /// Reference to a prompt template and its variables. Learn more.\n  public let prompt: Prompt?\n\n  /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the user field. Learn more.\n  public let promptCacheKey: String?\n\n  /// Configuration options for reasoning models.\n  public let reasoning: Reasoning?\n\n  /// A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user. 
We recommend hashing their username or email address, in order to avoid sending us any identifying information. Learn more.\n  public let safetyIdentifier: String?\n\n  /// Specifies the latency tier to use for processing the request.\n  /// This parameter is relevant for customers subscribed to the scale tier service:\n  /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.\n  /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n  /// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n  /// - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](https://platform.openai.com/docs/guides/flex-processing).\n  /// When not set, the default behavior is 'auto'.\n  /// When this parameter is set, the response body will include the service_tier utilized.\n  public let serviceTier: String?\n\n  /// The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n  public let status: Status?\n\n  /// Whether to store the message. 
Used for model improvement.\n  public let store: Bool?\n\n  /// What sampling temperature to use, between 0 and 2.\n  /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n  /// We generally recommend altering this or top_p but not both.\n  public let temperature: Double?\n\n  /// Configuration options for a text response from the model.\n  public let text: TextConfiguration?\n\n  /// How the model should select which tool (or tools) to use when generating a response.\n  /// See the tools parameter to see how to specify which tools the model can call.\n  public let toolChoice: ToolChoiceMode?\n\n  /// An array of tools the model may call while generating a response. You can specify which tool to use by setting the tool_choice parameter.\n  /// The two categories of tools you can provide the model are:\n  /// Built-in tools: Tools that are provided by OpenAI that extend the model's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search) or [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools).\n  /// Function calls (custom tools): Functions that are defined by you, enabling the model to call your own code. 
Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling).\n  public let tools: [Tool]?\n\n  /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n  /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n  /// We generally recommend altering this or temperature but not both.\n  public let topP: Double?\n\n  /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n  public let topLogprobs: Int?\n\n  /// The truncation strategy to use for the model response.\n  public let truncation: String?\n\n  /// Represents token usage details.\n  public let usage: Usage?\n\n  /// A unique identifier representing your end-user.\n  public let user: String?\n\n  /// Convenience property that aggregates all text output from output_text items in the output array.\n  /// Similar to the outputText property in Python and JavaScript SDKs.\n  public var outputText: String? {\n    let outputTextItems = output.compactMap { outputItem -> String? in\n      switch outputItem {\n      case .message(let message):\n        return message.content.compactMap { contentItem -> String? in\n          switch contentItem {\n          case .outputText(let outputText):\n            return outputText.text\n          case .refusal:\n            return nil\n          }\n        }.joined()\n\n      default:\n        return nil\n      }\n    }\n\n    return outputTextItems.isEmpty ? 
nil : outputTextItems.joined()\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case background\n    case conversation\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case status\n    case error\n    case incompleteDetails = \"incomplete_details\"\n    case instructions\n    case maxOutputTokens = \"max_output_tokens\"\n    case maxToolCalls = \"max_tool_calls\"\n    case model\n    case output\n    case parallelToolCalls = \"parallel_tool_calls\"\n    case previousResponseId = \"previous_response_id\"\n    case prompt\n    case promptCacheKey = \"prompt_cache_key\"\n    case reasoning\n    case safetyIdentifier = \"safety_identifier\"\n    case serviceTier = \"service_tier\"\n    case store\n    case temperature\n    case text\n    case toolChoice = \"tool_choice\"\n    case tools\n    case topP = \"top_p\"\n    case topLogprobs = \"top_logprobs\"\n    case truncation\n    case usage\n    case user\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Response/ResponseStreamEvent.swift",
    "content": "//\n//  ResponseStreamEvent.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 6/7/25.\n//\n\nimport Foundation\n\n// MARK: - ResponseStreamEvent\n\n/// Represents all possible streaming events from the Responses API\npublic enum ResponseStreamEvent: Decodable {\n  /// Emitted when a response is created\n  case responseCreated(ResponseCreatedEvent)\n\n  /// Emitted when the response is in progress\n  case responseInProgress(ResponseInProgressEvent)\n\n  /// Emitted when the model response is complete\n  case responseCompleted(ResponseCompletedEvent)\n\n  /// Emitted when a response fails\n  case responseFailed(ResponseFailedEvent)\n\n  /// Emitted when a response finishes as incomplete\n  case responseIncomplete(ResponseIncompleteEvent)\n\n  /// Emitted when a response is queued\n  case responseQueued(ResponseQueuedEvent)\n\n  /// Emitted when a new output item is added\n  case outputItemAdded(OutputItemAddedEvent)\n\n  /// Emitted when an output item is marked done\n  case outputItemDone(OutputItemDoneEvent)\n\n  /// Emitted when a new content part is added\n  case contentPartAdded(ContentPartAddedEvent)\n\n  /// Emitted when a content part is done\n  case contentPartDone(ContentPartDoneEvent)\n\n  /// Emitted when there is an additional text delta\n  case outputTextDelta(OutputTextDeltaEvent)\n\n  /// Emitted when text content is finalized\n  case outputTextDone(OutputTextDoneEvent)\n\n  /// Emitted when there is a partial refusal text\n  case refusalDelta(RefusalDeltaEvent)\n\n  /// Emitted when refusal text is finalized\n  case refusalDone(RefusalDoneEvent)\n\n  /// Emitted when there is a partial function-call arguments delta\n  case functionCallArgumentsDelta(FunctionCallArgumentsDeltaEvent)\n\n  /// Emitted when function-call arguments are finalized\n  case functionCallArgumentsDone(FunctionCallArgumentsDoneEvent)\n\n  /// Emitted when a file search call is initiated\n  case 
fileSearchCallInProgress(FileSearchCallInProgressEvent)\n\n  /// Emitted when a file search is currently searching\n  case fileSearchCallSearching(FileSearchCallSearchingEvent)\n\n  /// Emitted when a file search call is completed\n  case fileSearchCallCompleted(FileSearchCallCompletedEvent)\n\n  /// Emitted when a web search call is initiated\n  case webSearchCallInProgress(WebSearchCallInProgressEvent)\n\n  /// Emitted when a web search call is executing\n  case webSearchCallSearching(WebSearchCallSearchingEvent)\n\n  /// Emitted when a web search call is completed\n  case webSearchCallCompleted(WebSearchCallCompletedEvent)\n\n  /// Emitted when a new reasoning summary part is added\n  case reasoningSummaryPartAdded(ReasoningSummaryPartAddedEvent)\n\n  /// Emitted when a reasoning summary part is completed\n  case reasoningSummaryPartDone(ReasoningSummaryPartDoneEvent)\n\n  /// Emitted when a delta is added to a reasoning summary text\n  case reasoningSummaryTextDelta(ReasoningSummaryTextDeltaEvent)\n\n  /// Emitted when a reasoning summary text is completed\n  case reasoningSummaryTextDone(ReasoningSummaryTextDoneEvent)\n\n  /// Emitted when a delta is added to reasoning text\n  case reasoningTextDelta(ReasoningTextDeltaEvent)\n\n  /// Emitted when reasoning text is completed\n  case reasoningTextDone(ReasoningTextDoneEvent)\n\n  /// Emitted when an image generation call is in progress\n  case imageGenerationCallInProgress(ImageGenerationCallInProgressEvent)\n\n  /// Emitted when an image generation call is generating\n  case imageGenerationCallGenerating(ImageGenerationCallGeneratingEvent)\n\n  /// Emitted when a partial image is available\n  case imageGenerationCallPartialImage(ImageGenerationCallPartialImageEvent)\n\n  /// Emitted when an image generation call is completed\n  case imageGenerationCallCompleted(ImageGenerationCallCompletedEvent)\n\n  /// Emitted when a code interpreter call is in progress\n  case 
codeInterpreterCallInProgress(CodeInterpreterCallInProgressEvent)\n\n  /// Emitted when the code interpreter is actively interpreting code\n  case codeInterpreterCallInterpreting(CodeInterpreterCallInterpretingEvent)\n\n  /// Emitted when a code interpreter call is completed\n  case codeInterpreterCallCompleted(CodeInterpreterCallCompletedEvent)\n\n  /// Emitted when a partial code snippet is streamed by the code interpreter\n  case codeInterpreterCallCodeDelta(CodeInterpreterCallCodeDeltaEvent)\n\n  /// Emitted when the code snippet is finalized by the code interpreter\n  case codeInterpreterCallCodeDone(CodeInterpreterCallCodeDoneEvent)\n\n  /// Emitted when there is a delta to custom tool call input\n  case customToolCallInputDelta(CustomToolCallInputDeltaEvent)\n\n  /// Emitted when custom tool call input is complete\n  case customToolCallInputDone(CustomToolCallInputDoneEvent)\n\n  /// Emitted when there is a delta to MCP call arguments\n  case mcpCallArgumentsDelta(MCPCallArgumentsDeltaEvent)\n\n  /// Emitted when MCP call arguments are done\n  case mcpCallArgumentsDone(MCPCallArgumentsDoneEvent)\n\n  /// Emitted when an MCP call is in progress\n  case mcpCallInProgress(MCPCallInProgressEvent)\n\n  /// Emitted when an MCP call is completed\n  case mcpCallCompleted(MCPCallCompletedEvent)\n\n  /// Emitted when an MCP call failed\n  case mcpCallFailed(MCPCallFailedEvent)\n\n  /// Emitted when MCP list tools is in progress\n  case mcpListToolsInProgress(MCPListToolsInProgressEvent)\n\n  /// Emitted when MCP list tools is completed\n  case mcpListToolsCompleted(MCPListToolsCompletedEvent)\n\n  /// Emitted when MCP list tools failed\n  case mcpListToolsFailed(MCPListToolsFailedEvent)\n\n  /// Emitted when an annotation is added to output text\n  case outputTextAnnotationAdded(OutputTextAnnotationAddedEvent)\n\n  /// Emitted when there is a delta to reasoning content\n  case reasoningDelta(ReasoningDeltaEvent)\n\n  /// Emitted when reasoning content is done\n  case 
reasoningDone(ReasoningDoneEvent)\n\n  /// Emitted when there is a delta to reasoning summary\n  case reasoningSummaryDelta(ReasoningSummaryDeltaEvent)\n\n  /// Emitted when reasoning summary is done\n  case reasoningSummaryDone(ReasoningSummaryDoneEvent)\n\n  /// Emitted when an error occurs\n  case error(ErrorEvent)\n\n  /// For unknown event type\n  case unknownEventType(String)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"response.created\":\n      self = try .responseCreated(ResponseCreatedEvent(from: decoder))\n    case \"response.in_progress\":\n      self = try .responseInProgress(ResponseInProgressEvent(from: decoder))\n    case \"response.completed\":\n      self = try .responseCompleted(ResponseCompletedEvent(from: decoder))\n    case \"response.failed\":\n      self = try .responseFailed(ResponseFailedEvent(from: decoder))\n    case \"response.incomplete\":\n      self = try .responseIncomplete(ResponseIncompleteEvent(from: decoder))\n    case \"response.queued\":\n      self = try .responseQueued(ResponseQueuedEvent(from: decoder))\n    case \"response.output_item.added\":\n      self = try .outputItemAdded(OutputItemAddedEvent(from: decoder))\n    case \"response.output_item.done\":\n      self = try .outputItemDone(OutputItemDoneEvent(from: decoder))\n    case \"response.content_part.added\":\n      self = try .contentPartAdded(ContentPartAddedEvent(from: decoder))\n    case \"response.content_part.done\":\n      self = try .contentPartDone(ContentPartDoneEvent(from: decoder))\n    case \"response.output_text.delta\":\n      self = try .outputTextDelta(OutputTextDeltaEvent(from: decoder))\n    case \"response.output_text.done\":\n      self = try .outputTextDone(OutputTextDoneEvent(from: decoder))\n    case \"response.refusal.delta\":\n      self = try 
.refusalDelta(RefusalDeltaEvent(from: decoder))\n    case \"response.refusal.done\":\n      self = try .refusalDone(RefusalDoneEvent(from: decoder))\n    case \"response.function_call_arguments.delta\":\n      self = try .functionCallArgumentsDelta(FunctionCallArgumentsDeltaEvent(from: decoder))\n    case \"response.function_call_arguments.done\":\n      self = try .functionCallArgumentsDone(FunctionCallArgumentsDoneEvent(from: decoder))\n    case \"response.file_search_call.in_progress\":\n      self = try .fileSearchCallInProgress(FileSearchCallInProgressEvent(from: decoder))\n    case \"response.file_search_call.searching\":\n      self = try .fileSearchCallSearching(FileSearchCallSearchingEvent(from: decoder))\n    case \"response.file_search_call.completed\":\n      self = try .fileSearchCallCompleted(FileSearchCallCompletedEvent(from: decoder))\n    case \"response.web_search_call.in_progress\":\n      self = try .webSearchCallInProgress(WebSearchCallInProgressEvent(from: decoder))\n    case \"response.web_search_call.searching\":\n      self = try .webSearchCallSearching(WebSearchCallSearchingEvent(from: decoder))\n    case \"response.web_search_call.completed\":\n      self = try .webSearchCallCompleted(WebSearchCallCompletedEvent(from: decoder))\n    case \"response.reasoning_summary_part.added\":\n      self = try .reasoningSummaryPartAdded(ReasoningSummaryPartAddedEvent(from: decoder))\n    case \"response.reasoning_summary_part.done\":\n      self = try .reasoningSummaryPartDone(ReasoningSummaryPartDoneEvent(from: decoder))\n    case \"response.reasoning_summary_text.delta\":\n      self = try .reasoningSummaryTextDelta(ReasoningSummaryTextDeltaEvent(from: decoder))\n    case \"response.reasoning_summary_text.done\":\n      self = try .reasoningSummaryTextDone(ReasoningSummaryTextDoneEvent(from: decoder))\n    case \"response.reasoning_text.delta\":\n      self = try .reasoningTextDelta(ReasoningTextDeltaEvent(from: decoder))\n    case 
\"response.reasoning_text.done\":\n      self = try .reasoningTextDone(ReasoningTextDoneEvent(from: decoder))\n    case \"response.image_generation_call.in_progress\":\n      self = try .imageGenerationCallInProgress(ImageGenerationCallInProgressEvent(from: decoder))\n    case \"response.image_generation_call.generating\":\n      self = try .imageGenerationCallGenerating(ImageGenerationCallGeneratingEvent(from: decoder))\n    case \"response.image_generation_call.partial_image\":\n      self = try .imageGenerationCallPartialImage(ImageGenerationCallPartialImageEvent(from: decoder))\n    case \"response.image_generation_call.completed\":\n      self = try .imageGenerationCallCompleted(ImageGenerationCallCompletedEvent(from: decoder))\n    case \"response.code_interpreter_call.in_progress\":\n      self = try .codeInterpreterCallInProgress(CodeInterpreterCallInProgressEvent(from: decoder))\n    case \"response.code_interpreter_call.interpreting\":\n      self = try .codeInterpreterCallInterpreting(CodeInterpreterCallInterpretingEvent(from: decoder))\n    case \"response.code_interpreter_call.completed\":\n      self = try .codeInterpreterCallCompleted(CodeInterpreterCallCompletedEvent(from: decoder))\n    case \"response.code_interpreter_call_code.delta\":\n      self = try .codeInterpreterCallCodeDelta(CodeInterpreterCallCodeDeltaEvent(from: decoder))\n    case \"response.code_interpreter_call_code.done\":\n      self = try .codeInterpreterCallCodeDone(CodeInterpreterCallCodeDoneEvent(from: decoder))\n    case \"response.custom_tool_call_input.delta\":\n      self = try .customToolCallInputDelta(CustomToolCallInputDeltaEvent(from: decoder))\n    case \"response.custom_tool_call_input.done\":\n      self = try .customToolCallInputDone(CustomToolCallInputDoneEvent(from: decoder))\n    case \"response.mcp_call_arguments.delta\":\n      self = try .mcpCallArgumentsDelta(MCPCallArgumentsDeltaEvent(from: decoder))\n    case \"response.mcp_call_arguments.done\":\n      
self = try .mcpCallArgumentsDone(MCPCallArgumentsDoneEvent(from: decoder))\n    case \"response.mcp_call.in_progress\":\n      self = try .mcpCallInProgress(MCPCallInProgressEvent(from: decoder))\n    case \"response.mcp_call.completed\":\n      self = try .mcpCallCompleted(MCPCallCompletedEvent(from: decoder))\n    case \"response.mcp_call.failed\":\n      self = try .mcpCallFailed(MCPCallFailedEvent(from: decoder))\n    case \"response.mcp_list_tools.in_progress\":\n      self = try .mcpListToolsInProgress(MCPListToolsInProgressEvent(from: decoder))\n    case \"response.mcp_list_tools.completed\":\n      self = try .mcpListToolsCompleted(MCPListToolsCompletedEvent(from: decoder))\n    case \"response.mcp_list_tools.failed\":\n      self = try .mcpListToolsFailed(MCPListToolsFailedEvent(from: decoder))\n    case \"response.output_text.annotation.added\":\n      self = try .outputTextAnnotationAdded(OutputTextAnnotationAddedEvent(from: decoder))\n    case \"response.reasoning.delta\":\n      self = try .reasoningDelta(ReasoningDeltaEvent(from: decoder))\n    case \"response.reasoning.done\":\n      self = try .reasoningDone(ReasoningDoneEvent(from: decoder))\n    case \"response.reasoning_summary.delta\":\n      self = try .reasoningSummaryDelta(ReasoningSummaryDeltaEvent(from: decoder))\n    case \"response.reasoning_summary.done\":\n      self = try .reasoningSummaryDone(ReasoningSummaryDoneEvent(from: decoder))\n    case \"error\":\n      self = try .error(ErrorEvent(from: decoder))\n    default:\n      self = .unknownEventType(type)\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n  }\n}\n\n// MARK: - ResponseCreatedEvent\n\n/// Emitted when a response is created\npublic struct ResponseCreatedEvent: Decodable {\n  public let type: String\n  public let response: ResponseModel\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case response\n    case sequenceNumber = \"sequence_number\"\n  
}\n}\n\n// MARK: - ResponseInProgressEvent\n\n/// Emitted when the response is in progress\npublic struct ResponseInProgressEvent: Decodable {\n  public let type: String\n  public let response: ResponseModel\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case response\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ResponseCompletedEvent\n\n/// Emitted when the model response is complete\npublic struct ResponseCompletedEvent: Decodable {\n  public let type: String\n  public let response: ResponseModel\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case response\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ResponseFailedEvent\n\n/// Emitted when a response fails\npublic struct ResponseFailedEvent: Decodable {\n  public let type: String\n  public let response: ResponseModel\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case response\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ResponseIncompleteEvent\n\n/// Emitted when a response finishes as incomplete\npublic struct ResponseIncompleteEvent: Decodable {\n  public let type: String\n  public let response: ResponseModel\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case response\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ResponseQueuedEvent\n\n/// Emitted when a response is queued\npublic struct ResponseQueuedEvent: Decodable {\n  public let type: String\n  public let response: ResponseModel\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case response\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - OutputItemAddedEvent\n\n/// Emitted when a new output item is added\npublic struct OutputItemAddedEvent: Decodable {\n  public let type: String\n  public let 
outputIndex: Int\n  public let item: StreamOutputItem\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case item\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - OutputItemDoneEvent\n\n/// Emitted when an output item is marked done\npublic struct OutputItemDoneEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let item: StreamOutputItem\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case item\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ContentPartAddedEvent\n\n/// Emitted when a new content part is added\npublic struct ContentPartAddedEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let part: ContentPart\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case part\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ContentPartDoneEvent\n\n/// Emitted when a content part is done\npublic struct ContentPartDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let part: ContentPart\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case part\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - OutputTextDeltaEvent\n\n/// Emitted when there is an additional text delta\npublic struct OutputTextDeltaEvent: Decodable {\n  public let type: String\n  public let itemId: String\n 
 public let outputIndex: Int\n  public let contentIndex: Int\n  public let delta: String\n  public let logprobs: [LogprobContent]?\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case delta\n    case logprobs\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - LogprobContent\n\n/// Log probability information for a token in the delta\npublic struct LogprobContent: Decodable {\n  /// The token string\n  public let token: String\n  /// The log probability of the token\n  public let logprob: Double\n  /// Array of the most likely tokens and their log probabilities\n  public let topLogprobs: [TopLogprob]?\n  /// Byte offsets for the token\n  public let bytes: [Int]?\n\n  enum CodingKeys: String, CodingKey {\n    case token\n    case logprob\n    case topLogprobs = \"top_logprobs\"\n    case bytes\n  }\n}\n\n// MARK: - TopLogprob\n\n/// Information about a top alternative token\npublic struct TopLogprob: Decodable {\n  /// The token string\n  public let token: String\n  /// The log probability of the token\n  public let logprob: Double\n  /// Byte offsets for the token\n  public let bytes: [Int]?\n}\n\n// MARK: - OutputTextDoneEvent\n\n/// Emitted when text content is finalized\npublic struct OutputTextDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let text: String\n  public let logprobs: [LogprobContent]?\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case text\n    case logprobs\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - RefusalDeltaEvent\n\n/// Emitted when there is a partial refusal text\npublic 
struct RefusalDeltaEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let delta: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - RefusalDoneEvent\n\n/// Emitted when refusal text is finalized\npublic struct RefusalDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let refusal: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case refusal\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - FunctionCallArgumentsDeltaEvent\n\n/// Emitted when there is a partial function-call arguments delta\npublic struct FunctionCallArgumentsDeltaEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let delta: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - FunctionCallArgumentsDoneEvent\n\n/// Emitted when function-call arguments are finalized\npublic struct FunctionCallArgumentsDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let name: String?\n  public let arguments: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = 
\"output_index\"\n    case name\n    case arguments\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - FileSearchCallInProgressEvent\n\n/// Emitted when a file search call is initiated\npublic struct FileSearchCallInProgressEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - FileSearchCallSearchingEvent\n\n/// Emitted when a file search is currently searching\npublic struct FileSearchCallSearchingEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - FileSearchCallCompletedEvent\n\n/// Emitted when a file search call is completed\npublic struct FileSearchCallCompletedEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - WebSearchCallInProgressEvent\n\n/// Emitted when a web search call is initiated\npublic struct WebSearchCallInProgressEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - WebSearchCallSearchingEvent\n\n/// 
Emitted when a web search call is executing\npublic struct WebSearchCallSearchingEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - WebSearchCallCompletedEvent\n\n/// Emitted when a web search call is completed\npublic struct WebSearchCallCompletedEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningSummaryPartAddedEvent\n\n/// Emitted when a new reasoning summary part is added\npublic struct ReasoningSummaryPartAddedEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let summaryIndex: Int\n  public let part: SummaryPart\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case summaryIndex = \"summary_index\"\n    case part\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningSummaryPartDoneEvent\n\n/// Emitted when a reasoning summary part is completed\npublic struct ReasoningSummaryPartDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let summaryIndex: Int\n  public let part: SummaryPart\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case summaryIndex = \"summary_index\"\n    case part\n    case 
sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningSummaryTextDeltaEvent\n\n/// Emitted when a delta is added to a reasoning summary text\npublic struct ReasoningSummaryTextDeltaEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let summaryIndex: Int\n  public let delta: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case summaryIndex = \"summary_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningSummaryTextDoneEvent\n\n/// Emitted when a reasoning summary text is completed\npublic struct ReasoningSummaryTextDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let summaryIndex: Int\n  public let text: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case summaryIndex = \"summary_index\"\n    case text\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningTextDeltaEvent\n\n/// Emitted when a delta is added to a reasoning text\npublic struct ReasoningTextDeltaEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let delta: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningTextDoneEvent\n\n/// Emitted when reasoning text is completed\npublic struct ReasoningTextDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let 
outputIndex: Int\n  public let contentIndex: Int\n  public let text: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case text\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ImageGenerationCallInProgressEvent\n\n/// Emitted when an image generation call is in progress\npublic struct ImageGenerationCallInProgressEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ImageGenerationCallGeneratingEvent\n\n/// Emitted when an image generation call is generating\npublic struct ImageGenerationCallGeneratingEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ImageGenerationCallPartialImageEvent\n\n/// Emitted when a partial image is available\npublic struct ImageGenerationCallPartialImageEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n  public let partialImageIndex: Int\n  public let partialImageB64: String\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n    case partialImageIndex = \"partial_image_index\"\n    case partialImageB64 = \"partial_image_b64\"\n  }\n}\n\n// MARK: - 
ImageGenerationCallCompletedEvent\n\n/// Emitted when an image generation call is completed\npublic struct ImageGenerationCallCompletedEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - CodeInterpreterCallInProgressEvent\n\n/// Emitted when a code interpreter call is in progress\npublic struct CodeInterpreterCallInProgressEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - CodeInterpreterCallInterpretingEvent\n\n/// Emitted when the code interpreter is actively interpreting code\npublic struct CodeInterpreterCallInterpretingEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - CodeInterpreterCallCompletedEvent\n\n/// Emitted when a code interpreter call is completed\npublic struct CodeInterpreterCallCompletedEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - CodeInterpreterCallCodeDeltaEvent\n\n/// Emitted when a partial code snippet is 
streamed by the code interpreter\npublic struct CodeInterpreterCallCodeDeltaEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let delta: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - CodeInterpreterCallCodeDoneEvent\n\n/// Emitted when the code snippet is finalized by the code interpreter\npublic struct CodeInterpreterCallCodeDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let code: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case code\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - CustomToolCallInputDeltaEvent\n\n/// Event representing a delta to the input of a custom tool call\npublic struct CustomToolCallInputDeltaEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let delta: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - CustomToolCallInputDoneEvent\n\n/// Event indicating that input for a custom tool call is complete\npublic struct CustomToolCallInputDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let input: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case input\n    case sequenceNumber = 
\"sequence_number\"\n  }\n}\n\n// MARK: - MCPCallArgumentsDeltaEvent\n\n/// Emitted when there is a delta to MCP call arguments\npublic struct MCPCallArgumentsDeltaEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let delta: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - MCPCallArgumentsDoneEvent\n\n/// Emitted when MCP call arguments are done\npublic struct MCPCallArgumentsDoneEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let arguments: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case arguments\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - MCPCallInProgressEvent\n\n/// Emitted when an MCP call is in progress\npublic struct MCPCallInProgressEvent: Decodable {\n  public let type: String\n  public let outputIndex: Int\n  public let itemId: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case outputIndex = \"output_index\"\n    case itemId = \"item_id\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - MCPCallCompletedEvent\n\n/// Emitted when an MCP call is completed\npublic struct MCPCallCompletedEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - MCPCallFailedEvent\n\n/// Emitted when an MCP call failed\npublic struct 
MCPCallFailedEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - MCPListToolsInProgressEvent\n\n/// Emitted when MCP list tools is in progress\npublic struct MCPListToolsInProgressEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - MCPListToolsCompletedEvent\n\n/// Emitted when MCP list tools is completed\npublic struct MCPListToolsCompletedEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - MCPListToolsFailedEvent\n\n/// Emitted when MCP list tools failed\npublic struct MCPListToolsFailedEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - OutputTextAnnotationAddedEvent\n\n/// Emitted when an annotation is added to output text\npublic struct OutputTextAnnotationAddedEvent: Decodable {\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    type = try 
container.decode(String.self, forKey: .type)\n    itemId = try container.decode(String.self, forKey: .itemId)\n    outputIndex = try container.decode(Int.self, forKey: .outputIndex)\n    contentIndex = try container.decode(Int.self, forKey: .contentIndex)\n    annotationIndex = try container.decode(Int.self, forKey: .annotationIndex)\n    sequenceNumber = try container.decodeIfPresent(Int.self, forKey: .sequenceNumber)\n    // For now, decode annotation as empty dictionary\n    annotation = [:]\n  }\n\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let annotationIndex: Int\n  public let annotation: [String: Any]\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case annotationIndex = \"annotation_index\"\n    case annotation\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningDeltaEvent\n\n/// Emitted when there is a delta to reasoning content\npublic struct ReasoningDeltaEvent: Decodable {\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    type = try container.decode(String.self, forKey: .type)\n    itemId = try container.decode(String.self, forKey: .itemId)\n    outputIndex = try container.decode(Int.self, forKey: .outputIndex)\n    contentIndex = try container.decode(Int.self, forKey: .contentIndex)\n    sequenceNumber = try container.decodeIfPresent(Int.self, forKey: .sequenceNumber)\n    // For now, decode delta as empty dictionary\n    delta = [:]\n  }\n\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let delta: [String: Any]\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = 
\"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningDoneEvent\n\n/// Emitted when reasoning content is done\npublic struct ReasoningDoneEvent: Decodable {\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let contentIndex: Int\n  public let text: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case contentIndex = \"content_index\"\n    case text\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningSummaryDeltaEvent\n\n/// Emitted when there is a delta to reasoning summary\npublic struct ReasoningSummaryDeltaEvent: Decodable {\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    type = try container.decode(String.self, forKey: .type)\n    itemId = try container.decode(String.self, forKey: .itemId)\n    outputIndex = try container.decode(Int.self, forKey: .outputIndex)\n    summaryIndex = try container.decode(Int.self, forKey: .summaryIndex)\n    sequenceNumber = try container.decodeIfPresent(Int.self, forKey: .sequenceNumber)\n    // For now, decode delta as empty dictionary\n    delta = [:]\n  }\n\n  public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let summaryIndex: Int\n  public let delta: [String: Any]\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case summaryIndex = \"summary_index\"\n    case delta\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ReasoningSummaryDoneEvent\n\n/// Emitted when reasoning summary is done\npublic struct ReasoningSummaryDoneEvent: Decodable {\n  
public let type: String\n  public let itemId: String\n  public let outputIndex: Int\n  public let summaryIndex: Int\n  public let text: String\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case itemId = \"item_id\"\n    case outputIndex = \"output_index\"\n    case summaryIndex = \"summary_index\"\n    case text\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - ErrorEvent\n\n/// Emitted when an error occurs\npublic struct ErrorEvent: Decodable {\n  public let type: String\n  public let code: String?\n  public let message: String\n  public let param: String?\n  public let sequenceNumber: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case code\n    case message\n    case param\n    case sequenceNumber = \"sequence_number\"\n  }\n}\n\n// MARK: - StreamOutputItem\n\n/// Stream output item that supports all output types during streaming\npublic enum StreamOutputItem: Decodable {\n  /// An output message from the model\n  case message(OutputItem.Message)\n  /// The results of a file search tool call\n  case fileSearchCall(OutputItem.FileSearchToolCall)\n  /// A tool call to run a function\n  case functionCall(OutputItem.FunctionToolCall)\n  /// The results of a web search tool call\n  case webSearchCall(OutputItem.WebSearchToolCall)\n  /// A tool call to a computer use tool\n  case computerCall(OutputItem.ComputerToolCall)\n  /// A description of the chain of thought used by a reasoning model\n  case reasoning(OutputItem.Reasoning)\n  /// An image generation request made by the model\n  case imageGenerationCall(OutputItem.ImageGenerationCall)\n  /// A tool call to run code\n  case codeInterpreterCall(OutputItem.CodeInterpreterCall)\n  /// A tool call to run a command on the local shell\n  case localShellCall(OutputItem.LocalShellCall)\n  /// An invocation of a tool on an MCP server\n  case mcpCall(OutputItem.MCPCall)\n  /// A list of tools available on an MCP server\n  case 
mcpListTools(OutputItem.MCPListTools)\n  /// A request for human approval of a tool invocation\n  case mcpApprovalRequest(OutputItem.MCPApprovalRequest)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"message\":\n      let message = try OutputItem.Message(from: decoder)\n      self = .message(message)\n\n    case \"file_search_call\":\n      let fileSearch = try OutputItem.FileSearchToolCall(from: decoder)\n      self = .fileSearchCall(fileSearch)\n\n    case \"function_call\":\n      let functionCall = try OutputItem.FunctionToolCall(from: decoder)\n      self = .functionCall(functionCall)\n\n    case \"web_search_call\":\n      let webSearch = try OutputItem.WebSearchToolCall(from: decoder)\n      self = .webSearchCall(webSearch)\n\n    case \"computer_call\":\n      let computerCall = try OutputItem.ComputerToolCall(from: decoder)\n      self = .computerCall(computerCall)\n\n    case \"reasoning\":\n      let reasoning = try OutputItem.Reasoning(from: decoder)\n      self = .reasoning(reasoning)\n\n    case \"image_generation_call\":\n      let imageGeneration = try OutputItem.ImageGenerationCall(from: decoder)\n      self = .imageGenerationCall(imageGeneration)\n\n    case \"code_interpreter_call\":\n      let codeInterpreter = try OutputItem.CodeInterpreterCall(from: decoder)\n      self = .codeInterpreterCall(codeInterpreter)\n\n    case \"local_shell_call\":\n      let localShell = try OutputItem.LocalShellCall(from: decoder)\n      self = .localShellCall(localShell)\n\n    case \"mcp_call\":\n      let mcpCall = try OutputItem.MCPCall(from: decoder)\n      self = .mcpCall(mcpCall)\n\n    case \"mcp_list_tools\":\n      let mcpListTools = try OutputItem.MCPListTools(from: decoder)\n      self = .mcpListTools(mcpListTools)\n\n    case \"mcp_approval_request\":\n      let mcpApprovalRequest = 
try OutputItem.MCPApprovalRequest(from: decoder)\n      self = .mcpApprovalRequest(mcpApprovalRequest)\n\n    default:\n      throw DecodingError.dataCorruptedError(\n        forKey: .type,\n        in: container,\n        debugDescription: \"Unknown stream output item type: \\(type)\")\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n  }\n}\n\n// MARK: - ContentPart\n\n/// Content part for streaming\npublic struct ContentPart: Decodable {\n  public let type: String\n  public let text: String?\n  public let annotations: [Any]?\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    type = try container.decode(String.self, forKey: .type)\n    text = try container.decodeIfPresent(String.self, forKey: .text)\n    annotations = nil // Skip decoding annotations for now\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case text\n    case annotations\n  }\n}\n\n// MARK: - SummaryPart\n\n/// Summary part for reasoning\npublic struct SummaryPart: Decodable {\n  public let type: String\n  public let text: String\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Runs/RunObject.swift",
    "content": "//\n//  RunObject.swift\n//\n//\n//  Created by James Rochabrun on 11/15/23.\n//\n\nimport Foundation\n\n/// BETA.\n/// A [run](https://platform.openai.com/docs/api-reference/runs) object, represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads).\n/// Related guide: [Assistants](https://platform.openai.com/docs/assistants/overview)\n/// [Run Object](https://platform.openai.com/docs/api-reference/runs/object)\npublic struct RunObject: Decodable {\n  public enum Status: String {\n    case queued\n    case inProgress = \"in_progress\"\n    case requiresAction = \"requires_action\"\n    case cancelling\n    case cancelled\n    case failed\n    case completed\n    case expired\n  }\n\n  public struct RequiredAction: Decodable {\n    public struct SubmitToolOutput: Decodable {\n      /// A list of the relevant tool calls.\n      /// - Object: ToolCall\n      /// - id: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint.\n      /// - type: The type of tool call the output is required for. 
For now, this is always function.\n      /// - function: The function definition.\n      public let toolCalls: [ToolCall]\n\n      private enum CodingKeys: String, CodingKey {\n        case toolCalls = \"tool_calls\"\n      }\n    }\n\n    /// For now, this is always submit_tool_outputs.\n    public let type: String\n    /// Details on the tool outputs needed for this run to continue.\n    public let submitToolsOutputs: SubmitToolOutput\n\n    private enum CodingKeys: String, CodingKey {\n      case type\n      case submitToolsOutputs = \"submit_tool_outputs\"\n    }\n  }\n\n  /// The identifier, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always thread.run.\n  public let object: String\n  /// The Unix timestamp (in seconds) for when the run was created.\n  public let createdAt: Int?\n  /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run.\n  public let threadID: String\n  /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run.\n  public let assistantID: String\n  /// The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, or expired.\n  public let status: String\n  /// Details on the action required to continue the run. Will be null if no action is required.\n  public let requiredAction: RequiredAction?\n  /// The last error associated with this run. 
Will be null if there are no errors.\n  public let lastError: LastError?\n  /// The Unix timestamp (in seconds) for when the run will expire.\n  public let expiresAt: Int?\n  /// The Unix timestamp (in seconds) for when the run was started.\n  public let startedAt: Int?\n  /// The Unix timestamp (in seconds) for when the run was cancelled.\n  public let cancelledAt: Int?\n  /// The Unix timestamp (in seconds) for when the run failed.\n  public let failedAt: Int?\n  /// The Unix timestamp (in seconds) for when the run was completed.\n  public let completedAt: Int?\n  /// Details on why the run is incomplete. Will be null if the run is not incomplete.\n  public let incompleteDetails: IncompleteDetails?\n  /// The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run.\n  public let model: String\n  /// The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run.\n  public let instructions: String?\n  /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run.\n  public let tools: [AssistantObject.Tool]\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n  public let metadata: [String: String]\n  /// Usage statistics related to the run. This value will be null if the run is not in a terminal state (i.e. in_progress, queued, etc.).\n  public let usage: Usage?\n  /// The sampling temperature used for this run. If not set, defaults to 1.\n  public let temperature: Double?\n  /// The nucleus sampling value used for this run. 
If not set, defaults to 1.\n  public let topP: Double?\n  /// The maximum number of prompt tokens specified to have been used over the course of the run.\n  public let maxPromptTokens: Int?\n  /// The maximum number of completion tokens specified to have been used over the course of the run.\n  public let maxCompletionTokens: Int?\n  /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run.\n  public let truncationStrategy: TruncationStrategy?\n  /// Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling a tool. Specifying a particular tool like {\"type\": \"TOOL_TYPE\"} or {\"type\": \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that tool.\n  public let toolChoice: ToolChoice?\n  /// Specifies the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.\n  /// Setting to { \"type\": \"json_object\" } enables JSON mode, which guarantees the message the model generates is valid JSON.\n  /// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n  public let responseFormat: ResponseFormat?\n\n  public var displayStatus: Status? 
{ .init(rawValue: status) }\n\n  private enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case threadID = \"thread_id\"\n    case assistantID = \"assistant_id\"\n    case status\n    case requiredAction = \"required_action\"\n    case lastError = \"last_error\"\n    case expiresAt = \"expires_at\"\n    case startedAt = \"started_at\"\n    case cancelledAt = \"cancelled_at\"\n    case failedAt = \"failed_at\"\n    case completedAt = \"completed_at\"\n    case incompleteDetails = \"incomplete_details\"\n    case model\n    case instructions\n    case tools\n    case metadata\n    case usage\n    case temperature\n    case topP = \"top_p\"\n    case maxPromptTokens = \"max_prompt_tokens\"\n    case maxCompletionTokens = \"max_completion_tokens\"\n    case truncationStrategy\n    case toolChoice = \"tool_choice\"\n    case responseFormat = \"response_format\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Runs/RunStepDeltaObject.swift",
    "content": "//\n//  RunStepDeltaObject.swift\n//\n//\n//  Created by James Rochabrun on 3/17/24.\n//\n\nimport Foundation\n\n/// Represents a [run step delta](https://platform.openai.com/docs/api-reference/assistants-streaming/run-step-delta-object) i.e. any changed fields on a run step during streaming.\npublic struct RunStepDeltaObject: Delta {\n  /// The identifier of the run step, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always thread.run.step.delta.\n  public let object: String\n  /// The delta containing the fields that have changed on the run step.\n  public let delta: Delta\n\n  public struct Delta: Decodable {\n    /// The details of the run step.\n    public let stepDetails: RunStepDetails\n\n    private enum CodingKeys: String, CodingKey {\n      case stepDetails = \"step_details\"\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Runs/RunStepDetails.swift",
    "content": "//\n//  RunStepDetails.swift\n//\n//\n//  Created by James Rochabrun on 3/17/24.\n//\n\nimport Foundation\n\n// MARK: - RunStepDetails\n\npublic struct RunStepDetails: Codable {\n  public struct MessageCreation: Codable {\n    /// The ID of the message that was created by this run step.\n    public let messageID: String\n\n    enum CodingKeys: String, CodingKey {\n      case messageID = \"message_id\"\n    }\n  }\n\n  public struct ToolCall: Codable {\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.container(keyedBy: CodingKeys.self)\n      index = try container.decodeIfPresent(Int.self, forKey: .index)\n      id = try container.decodeIfPresent(String.self, forKey: .id)\n      type = try container.decode(String.self, forKey: .type)\n\n      // Based on the type, decode the corresponding tool call\n      switch type {\n      case \"code_interpreter\":\n        let codeInterpreter = try container.decode(CodeInterpreterToolCall.self, forKey: .codeInterpreter)\n        toolCall = .codeInterpreterToolCall(codeInterpreter)\n\n      case \"file_search\":\n        let retrieval = try container.decode(FileSearchToolCall.self, forKey: .fileSearch)\n        toolCall = .fileSearchToolCall(retrieval)\n\n      case \"function\":\n        // Assuming you have a function key in your JSON that corresponds to this type\n        let function = try container.decode(FunctionToolCall.self, forKey: .function)\n        toolCall = .functionToolCall(function)\n\n      default:\n        throw DecodingError.dataCorruptedError(forKey: .type, in: container, debugDescription: \"Unrecognized tool call type\")\n      }\n    }\n\n    public let index: Int?\n    public let id: String?\n    public let type: String\n    public let toolCall: RunStepToolCall\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.container(keyedBy: CodingKeys.self)\n      try container.encode(id, forKey: .id)\n      try 
container.encode(type, forKey: .type)\n\n      // Based on the toolCall type, encode the corresponding object\n      switch toolCall {\n      case .codeInterpreterToolCall(let codeInterpreter):\n        try container.encode(codeInterpreter, forKey: .codeInterpreter)\n      case .fileSearchToolCall(let retrieval):\n        // Encode retrieval if it's not nil\n        try container.encode(retrieval, forKey: .fileSearch)\n      case .functionToolCall(let function):\n        // Encode function if it's not nil\n        try container.encode(function, forKey: .function)\n      }\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case index, id, type\n      case codeInterpreter = \"code_interpreter\"\n      case fileSearch = \"file_search\"\n      case function\n    }\n  }\n\n  /// `message_creation` or `tool_calls`\n  public let type: String\n  /// Details of the message creation by the run step.\n  public let messageCreation: MessageCreation?\n  /// An array of tool calls the run step was involved in. 
These can be associated with one of three types of tools: code_interpreter, file_search, or function.\n  public let toolCalls: [ToolCall]?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case messageCreation = \"message_creation\"\n    case toolCalls = \"tool_calls\"\n  }\n}\n\n// MARK: - RunStepToolCall\n\n/// Details of the tool call.\npublic enum RunStepToolCall: Codable {\n  case codeInterpreterToolCall(CodeInterpreterToolCall)\n  case fileSearchToolCall(FileSearchToolCall)\n  case functionToolCall(FunctionToolCall)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n\n    // Decode the `type` property to determine which case to decode\n    let type = try container.decode(TypeEnum.self)\n\n    // Switch to the appropriate case based on the type\n    switch type {\n    case .codeInterpreter:\n      let value = try CodeInterpreterToolCall(from: decoder)\n      self = .codeInterpreterToolCall(value)\n\n    case .fileSearch:\n      let value = try FileSearchToolCall(from: decoder)\n      self = .fileSearchToolCall(value)\n\n    case .function:\n      let value = try FunctionToolCall(from: decoder)\n      self = .functionToolCall(value)\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n\n    switch self {\n    case .codeInterpreterToolCall(let value):\n      try container.encode(value)\n    case .fileSearchToolCall(let value):\n      try container.encode(value)\n    case .functionToolCall(let value):\n      try container.encode(value)\n    }\n  }\n\n  private enum TypeEnum: String, Decodable {\n    case codeInterpreter = \"code_interpreter\"\n    case fileSearch = \"file_search\"\n    case function\n  }\n}\n\n// MARK: - CodeInterpreterToolCall\n\npublic struct CodeInterpreterToolCall: Codable {\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    input = try 
container.decodeIfPresent(String.self, forKey: .input)\n    // This is needed as the input is retrieved as \"\"input\": \"# Calculate the square root of 500900\\nmath.sqrt(500900)\"\n    input = input?.replacingOccurrences(of: \"\\\\n\", with: \"\\n\")\n    outputs = try container.decodeIfPresent([CodeInterpreterOutput].self, forKey: .outputs)\n  }\n\n  public init(input: String?, outputs: [CodeInterpreterOutput]?) {\n    self.input = input\n    self.outputs = outputs\n  }\n\n  public var input: String?\n  public var outputs: [CodeInterpreterOutput]?\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n\n    // Revert the newline characters to their escaped form\n    let encodedInput = input?.replacingOccurrences(of: \"\\n\", with: \"\\\\n\")\n    try container.encode(encodedInput, forKey: .input)\n    try container.encode(outputs, forKey: .outputs)\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case input, outputs\n  }\n}\n\n// MARK: - CodeInterpreterOutput\n\npublic enum CodeInterpreterOutput: Codable {\n  case logs(CodeInterpreterLogOutput)\n  case images(CodeInterpreterImageOutput)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let outputType = try container.decode(OutputType.self, forKey: .type)\n\n    switch outputType {\n    case .logs:\n      let logOutput = try CodeInterpreterLogOutput(from: decoder)\n      self = .logs(logOutput)\n\n    case .images:\n      let imageOutput = try CodeInterpreterImageOutput(from: decoder)\n      self = .images(imageOutput)\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n\n    switch self {\n    case .logs(let logOutput):\n      try container.encode(OutputType.logs.rawValue, forKey: .type)\n      try logOutput.encode(to: encoder)\n\n    case .images(let imageOutput):\n      try 
container.encode(OutputType.images.rawValue, forKey: .type)\n      try imageOutput.encode(to: encoder)\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n  }\n\n  private enum OutputType: String, Decodable {\n    case logs, images\n  }\n}\n\n// MARK: - CodeInterpreterLogOutput\n\n/// Text output from the Code Interpreter tool call as part of a run step.\npublic struct CodeInterpreterLogOutput: Codable {\n  /// Always logs.\n  public var type: String\n  /// The text output from the Code Interpreter tool call.\n  public var logs: String\n\n  public init(type: String, logs: String) {\n    self.type = type\n    self.logs = logs\n  }\n}\n\n// MARK: - CodeInterpreterImageOutput\n\npublic struct CodeInterpreterImageOutput: Codable {\n  public init(type: String, image: Image) {\n    self.type = type\n    self.image = image\n  }\n\n  public struct Image: Codable {\n    /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image.\n    public var fileID: String\n\n    enum CodingKeys: String, CodingKey {\n      case fileID = \"file_id\"\n    }\n\n    public init(fileID: String) {\n      self.fileID = fileID\n    }\n  }\n\n  public var type: String\n  public var image: Image\n}\n\n// MARK: - RunStepFileSearchToolCall\n\npublic struct RunStepFileSearchToolCall: Codable {\n  /// For now, this is always going to be an empty object.\n  public let fileSearch: [String: String]?\n\n  enum CodingKeys: String, CodingKey {\n    case fileSearch = \"file_search\"\n  }\n}\n\n// MARK: - RunStepFunctionToolCall\n\npublic struct RunStepFunctionToolCall: Codable {\n  /// The name of the function.\n  public var name: String?\n  /// The arguments passed to the function.\n  public var arguments: String\n  /// The output of the function. This will be null if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet.\n  public var output: String?\n\n  public init(name: String? 
= nil, arguments: String, output: String? = nil) {\n    self.name = name\n    self.arguments = arguments\n    self.output = output\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Runs/RunStepObject.swift",
    "content": "//\n//  RunStepObject.swift\n//\n//\n//  Created by James Rochabrun on 11/17/23.\n//\n\nimport Foundation\n\n/// Represents a [step](https://platform.openai.com/docs/api-reference/runs/step-object) in execution of a run.\npublic struct RunStepObject: Codable {\n  public init(\n    id: String,\n    object: String,\n    createdAt: Int,\n    assistantId: String,\n    threadId: String,\n    runId: String,\n    type: String,\n    status: Status,\n    stepDetails: RunStepDetails,\n    lastError: LastError?,\n    expiredAt: Int?,\n    cancelledAt: Int?,\n    failedAt: Int?,\n    completedAt: Int?,\n    metadata: [String: String],\n    usage: Usage?)\n  {\n    self.id = id\n    self.object = object\n    self.createdAt = createdAt\n    self.assistantId = assistantId\n    self.threadId = threadId\n    self.runId = runId\n    self.type = type\n    self.status = status.rawValue\n    self.stepDetails = stepDetails\n    self.lastError = lastError\n    self.expiredAt = expiredAt\n    self.cancelledAt = cancelledAt\n    self.failedAt = failedAt\n    self.completedAt = completedAt\n    self.metadata = metadata\n    self.usage = usage\n  }\n\n  public enum Status: String {\n    case inProgress = \"in_progress\"\n    case cancelled\n    case failed\n    case completed\n    case expired\n  }\n\n  /// The identifier of the run step, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always `thread.run.step``.\n  public let object: String\n  /// The Unix timestamp (in seconds) for when the run step was created.\n  public let createdAt: Int\n  /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step.\n  public let assistantId: String\n  /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run.\n  public let threadId: String\n  /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a 
part of.\n  public let runId: String\n  /// The type of run step, which can be either message_creation or tool_calls.\n  public let type: String\n  /// The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.\n  public let status: String\n  /// The details of the run step.\n  public let stepDetails: RunStepDetails\n  /// The last error associated with this run step. Will be null if there are no errors.\n  public let lastError: LastError?\n  /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.\n  public let expiredAt: Int?\n  /// The Unix timestamp (in seconds) for when the run step was cancelled.\n  public let cancelledAt: Int?\n  /// The Unix timestamp (in seconds) for when the run step failed.\n  public let failedAt: Int?\n  /// The Unix timestamp (in seconds) for when the run step completed.\n  public let completedAt: Int?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n  public let metadata: [String: String]?\n  /// Usage statistics related to the run step. 
This value will be null while the run step's status is in_progress.\n  public let usage: Usage?\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n\n    // Encode all properties\n    try container.encode(id, forKey: .id)\n    try container.encode(object, forKey: .object)\n    try container.encode(createdAt, forKey: .createdAt)\n    try container.encode(assistantId, forKey: .assistantId)\n    try container.encode(threadId, forKey: .threadId)\n    try container.encode(runId, forKey: .runId)\n    try container.encode(type, forKey: .type)\n    try container.encode(status, forKey: .status)\n    try container.encode(stepDetails, forKey: .stepDetails)\n\n    // Encode optional properties only if they are not nil\n    try container.encodeIfPresent(lastError, forKey: .lastError)\n    try container.encodeIfPresent(expiredAt, forKey: .expiredAt)\n    try container.encodeIfPresent(cancelledAt, forKey: .cancelledAt)\n    try container.encodeIfPresent(failedAt, forKey: .failedAt)\n    try container.encodeIfPresent(completedAt, forKey: .completedAt)\n    try container.encodeIfPresent(usage, forKey: .usage)\n\n    // For the metadata dictionary, you can encode it directly if it is not nil\n    try container.encodeIfPresent(metadata, forKey: .metadata)\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case assistantId = \"assistant_id\"\n    case threadId = \"thread_id\"\n    case runId = \"run_id\"\n    case type\n    case status\n    case stepDetails = \"step_details\"\n    case lastError = \"last_error\"\n    case expiredAt = \"expired_at\"\n    case cancelledAt = \"cancelled_at\"\n    case failedAt = \"failed_at\"\n    case completedAt = \"completed_at\"\n    case metadata\n    case usage\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/Threads/ThreadObject.swift",
    "content": "//\n//  ThreadObject.swift\n//\n//\n//  Created by James Rochabrun on 11/15/23.\n//\n\nimport Foundation\n\n/// BETA\n/// A [thread object](https://platform.openai.com/docs/api-reference/threads) represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages).\npublic struct ThreadObject: Decodable {\n  public init(\n    id: String,\n    object: String,\n    createdAt: Int,\n    metadata: [String: String])\n  {\n    self.id = id\n    self.object = object\n    self.createdAt = createdAt\n    self.metadata = metadata\n  }\n\n  /// The identifier, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always thread.\n  public let object: String\n  /// The Unix timestamp (in seconds) for when the thread was created.\n  public let createdAt: Int\n  /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n  public var toolResources: ToolResources?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  public let metadata: [String: String]\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case toolResources = \"tool_resources\"\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/VectorStore/VectorStoreObject.swift",
    "content": "//\n//  VectorStoreObject.swift\n//\n//\n//  Created by James Rochabrun on 4/27/24.\n//\n\nimport Foundation\n\npublic struct VectorStoreObject: Decodable {\n  /// The identifier, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always vector_store.\n  public let object: String\n  /// The Unix timestamp (in seconds) for when the vector store was created.\n  public let createdAt: Int\n  /// The name of the vector store.\n  public let name: String\n  /// The total number of bytes used by the files in the vector store.\n  public let usageBytes: Int\n\n  public let fileCounts: FileCount\n  /// The status of the vector store, which can be either expired, in_progress, or completed. A status of completed indicates that the vector store is ready for use.\n  public let status: String\n  /// The expiration policy for a vector store.\n  public let expiresAfter: ExpirationPolicy?\n  /// The Unix timestamp (in seconds) for when the vector store will expire.\n  public let expiresAt: Int?\n  /// The Unix timestamp (in seconds) for when the vector store was last active.\n  public let lastActiveAt: Int?\n  /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n  public let metadata: [String: String]\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case name\n    case usageBytes = \"usage_bytes\"\n    case fileCounts = \"file_counts\"\n    case status\n    case expiresAfter = \"expires_after\"\n    case expiresAt = \"expires_at\"\n    case lastActiveAt = \"last_active_at\"\n    case metadata\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/VectorStoreFile/VectorStoreFileObject.swift",
    "content": "//\n//  VectorStoreFileObject.swift\n//\n//\n//  Created by James Rochabrun on 4/28/24.\n//\n\nimport Foundation\n\n/// [The Vector store file object](https://platform.openai.com/docs/api-reference/vector-stores-files/file-object)\npublic struct VectorStoreFileObject: Decodable {\n  /// The identifier, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always vector_store.file.\n  public let object: String\n  /// The total vector store usage in bytes. Note that this may be different from the original file size.\n  public let usageBytes: Int\n  /// The Unix timestamp (in seconds) for when the vector store file was created.\n  public let createdAt: Int\n  /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to.\n  public let vectorStoreID: String\n  /// The status of the vector store file, which can be either in_progress, completed, cancelled, or failed. The status completed indicates that the vector store file is ready for use.\n  public let status: String\n  /// The last error associated with this vector store file. Will be null if there are no errors.\n  public let lastError: LastError?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/ResponseModels/VectorStoreFileBatch/VectorStoreFileBatchObject.swift",
    "content": "//\n//  VectorStoreFileBatchObject.swift\n//\n//\n//  Created by James Rochabrun on 4/29/24.\n//\n\nimport Foundation\n\n/// [The vector store files batch object (Beta)](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/batch-object)\npublic struct VectorStoreFileBatchObject: Decodable {\n  /// The identifier, which can be referenced in API endpoints.\n  public let id: String\n  /// The object type, which is always vector_store.file_batch.\n  public let object: String\n  /// The Unix timestamp (in seconds) for when the vector store files batch was created.\n  public let createdAt: Int\n  /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to.\n  public let vectorStoreID: String\n  /// The status of the vector store files batch, which can be either in_progress, completed, cancelled or failed.\n  public let status: String\n\n  public let fileCounts: FileCount\n\n  enum CodingKeys: String, CodingKey {\n    case id\n    case object\n    case createdAt = \"created_at\"\n    case vectorStoreID = \"vector_store_id\"\n    case status\n    case fileCounts = \"file_counts\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Service/DefaultOpenAIService.swift",
    "content": "//\n//  DefaultOpenAIService.swift\n//\n//\n//  Created by James Rochabrun on 10/17/23.\n//\n\nimport Foundation\n\nstruct DefaultOpenAIService: OpenAIService {\n  init(\n    apiKey: String,\n    organizationID: String? = nil,\n    baseURL: String? = nil,\n    proxyPath: String? = nil,\n    overrideVersion: String? = nil,\n    extraHeaders: [String: String]? = nil,\n    httpClient: HTTPClient,\n    decoder: JSONDecoder = .init(),\n    debugEnabled: Bool)\n  {\n    self.httpClient = httpClient\n    self.decoder = decoder\n    self.apiKey = .bearer(apiKey)\n    self.organizationID = organizationID\n    self.extraHeaders = extraHeaders\n    openAIEnvironment = OpenAIEnvironment(\n      baseURL: baseURL ?? \"https://api.openai.com\",\n      proxyPath: proxyPath,\n      version: overrideVersion ?? \"v1\")\n    self.debugEnabled = debugEnabled\n  }\n\n  let httpClient: HTTPClient\n  let decoder: JSONDecoder\n  let openAIEnvironment: OpenAIEnvironment\n\n  // MARK: Audio\n\n  func createTranscription(\n    parameters: AudioTranscriptionParameters)\n    async throws -> AudioObject\n  {\n    let request = try OpenAIAPI.audio(.transcriptions).multiPartRequest(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: AudioObject.self, with: request)\n  }\n\n  func createTranslation(\n    parameters: AudioTranslationParameters)\n    async throws -> AudioObject\n  {\n    let request = try OpenAIAPI.audio(.translations).multiPartRequest(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: AudioObject.self, with: request)\n  }\n\n  func createSpeech(\n    parameters: AudioSpeechParameters)\n    async throws -> AudioSpeechObject\n  {\n    let 
request = try OpenAIAPI.audio(.speech).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    let data = try await fetchAudio(with: request)\n    return AudioSpeechObject(output: data)\n  }\n\n  #if canImport(AVFoundation)\n  func realtimeSession(\n    model: String,\n    configuration: OpenAIRealtimeSessionConfiguration)\n    async throws -> OpenAIRealtimeSession\n  {\n    // Build the WebSocket URL\n    let baseURL = openAIEnvironment.baseURL.replacingOccurrences(of: \"https://\", with: \"wss://\")\n    let version = openAIEnvironment.version ?? \"v1\"\n\n    // Check if this is an Azure endpoint (contains \"azure_openai\" in base URL or proxy path)\n    let isAzureEndpoint = openAIEnvironment.baseURL.contains(\"azure_openai\") ||\n      (openAIEnvironment.proxyPath?.contains(\"azure_openai\") ?? false)\n\n    let path: String\n    let urlString: String\n\n    if isAzureEndpoint {\n      // Azure format: path/realtime?api-version=X&deployment=Y\n      // For Airbnb's Azure proxy, deployment is passed as a query parameter\n      path = openAIEnvironment.proxyPath ?? \"\"\n      urlString = \"\\(baseURL)/\\(path)/realtime?api-version=\\(version)&deployment=\\(model)\"\n    } else {\n      // OpenAI format: path/version/realtime?model=Y\n      path = openAIEnvironment.proxyPath.map { \"\\($0)/\\(version)\" } ?? 
version\n      urlString = \"\\(baseURL)/\\(path)/realtime?model=\\(model)\"\n    }\n\n    guard let url = URL(string: urlString) else {\n      throw APIError.requestFailed(description: \"Invalid realtime session URL\")\n    }\n\n    // Create the WebSocket request with auth headers\n    var request = URLRequest(url: url)\n    request.setValue(apiKey.value, forHTTPHeaderField: apiKey.headerField)\n\n    // Only add openai-beta header for non-Azure endpoints\n    if !isAzureEndpoint {\n      request.setValue(\"realtime=v1\", forHTTPHeaderField: \"openai-beta\")\n    }\n\n    if let organizationID {\n      request.setValue(organizationID, forHTTPHeaderField: \"OpenAI-Organization\")\n    }\n\n    // Add any extra headers\n    extraHeaders?.forEach { key, value in\n      request.setValue(value, forHTTPHeaderField: key)\n    }\n\n    // Create the WebSocket task\n    let webSocketTask = URLSession.shared.webSocketTask(with: request)\n\n    // Return the realtime session\n    return OpenAIRealtimeSession(\n      webSocketTask: webSocketTask,\n      sessionConfiguration: configuration)\n  }\n  #endif\n\n  // MARK: Chat\n\n  func startChat(\n    parameters: ChatCompletionParameters)\n    async throws -> ChatCompletionObject\n  {\n    var chatParameters = parameters\n    chatParameters.stream = false\n    let request = try OpenAIAPI.chat.request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: chatParameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ChatCompletionObject.self, with: request)\n  }\n\n  func startStreamedChat(\n    parameters: ChatCompletionParameters)\n    async throws -> AsyncThrowingStream<ChatCompletionChunkObject, Error>\n  {\n    var chatParameters = parameters\n    chatParameters.stream = true\n    let request = try OpenAIAPI.chat.request(\n      apiKey: apiKey,\n      openAIEnvironment: 
openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: chatParameters,\n      extraHeaders: extraHeaders)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ChatCompletionChunkObject.self, with: request)\n  }\n\n  // MARK: Embeddings\n\n  func createEmbeddings(\n    parameters: EmbeddingParameter)\n    async throws -> OpenAIResponse<EmbeddingObject>\n  {\n    let request = try OpenAIAPI.embeddings.request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<EmbeddingObject>.self, with: request)\n  }\n\n  // MARK: Fine-tuning\n\n  func createFineTuningJob(\n    parameters: FineTuningJobParameters)\n    async throws -> FineTuningJobObject\n  {\n    let request = try OpenAIAPI.fineTuning(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: FineTuningJobObject.self, with: request)\n  }\n\n  func listFineTuningJobs(\n    after lastJobID: String? = nil,\n    limit: Int? 
= nil)\n    async throws -> OpenAIResponse<FineTuningJobObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let lastJobID, let limit {\n      queryItems = [.init(name: \"after\", value: lastJobID), .init(name: \"limit\", value: \"\\(limit)\")]\n    } else if let lastJobID {\n      queryItems = [.init(name: \"after\", value: lastJobID)]\n    } else if let limit {\n      queryItems = [.init(name: \"limit\", value: \"\\(limit)\")]\n    }\n\n    let request = try OpenAIAPI.fineTuning(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<FineTuningJobObject>.self, with: request)\n  }\n\n  func retrieveFineTuningJob(\n    id: String)\n    async throws -> FineTuningJobObject\n  {\n    let request = try OpenAIAPI.fineTuning(.retrieve(jobID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: FineTuningJobObject.self, with: request)\n  }\n\n  func cancelFineTuningJobWith(\n    id: String)\n    async throws -> FineTuningJobObject\n  {\n    let request = try OpenAIAPI.fineTuning(.cancel(jobID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: FineTuningJobObject.self, with: request)\n  }\n\n  func listFineTuningEventsForJobWith(\n    id: String,\n    after lastEventId: String? = nil,\n    limit: Int? 
= nil)\n    async throws -> OpenAIResponse<FineTuningJobEventObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let lastEventId, let limit {\n      queryItems = [.init(name: \"after\", value: lastEventId), .init(name: \"limit\", value: \"\\(limit)\")]\n    } else if let lastEventId {\n      queryItems = [.init(name: \"after\", value: lastEventId)]\n    } else if let limit {\n      queryItems = [.init(name: \"limit\", value: \"\\(limit)\")]\n    }\n    let request = try OpenAIAPI.fineTuning(.events(jobID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<FineTuningJobEventObject>.self, with: request)\n  }\n\n  // MARK: Files\n\n  func listFiles()\n    async throws -> OpenAIResponse<FileObject>\n  {\n    let request = try OpenAIAPI.file(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<FileObject>.self, with: request)\n  }\n\n  func uploadFile(\n    parameters: FileParameters)\n    async throws -> FileObject\n  {\n    let request = try OpenAIAPI.file(.upload).multiPartRequest(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: FileObject.self, with: request)\n  }\n\n  func deleteFileWith(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.file(.delete(fileID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      extraHeaders: 
extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func retrieveFileWith(\n    id: String)\n    async throws -> FileObject\n  {\n    let request = try OpenAIAPI.file(.retrieve(fileID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: FileObject.self, with: request)\n  }\n\n  func retrieveContentForFileWith(\n    id: String)\n    async throws -> [[String: Any]]\n  {\n    let request = try OpenAIAPI.file(.retrieveFileContent(fileID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetchContentsOfFile(request: request)\n  }\n\n  // MARK: Images\n\n  func legacyCreateImages(\n    parameters: ImageCreateParameters)\n    async throws -> OpenAIResponse<ImageObject>\n  {\n    let request = try OpenAIAPI.images(.generations).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ImageObject>.self, with: request)\n  }\n\n  func legacyEditImage(\n    parameters: ImageEditParameters)\n    async throws -> OpenAIResponse<ImageObject>\n  {\n    let request = try OpenAIAPI.images(.edits).multiPartRequest(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ImageObject>.self, with: request)\n  }\n\n  func legacyCreateImageVariations(\n    parameters: ImageVariationParameters)\n    
async throws -> OpenAIResponse<ImageObject>\n  {\n    let request = try OpenAIAPI.images(.variations).multiPartRequest(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ImageObject>.self, with: request)\n  }\n\n  func createImages(\n    parameters: CreateImageParameters)\n    async throws -> CreateImageResponse\n  {\n    let request = try OpenAIAPI.images(.generations).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: CreateImageResponse.self, with: request)\n  }\n\n  func editImage(\n    parameters: CreateImageEditParameters)\n    async throws -> CreateImageResponse\n  {\n    let request = try OpenAIAPI.images(.edits).multiPartRequest(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: CreateImageResponse.self, with: request)\n  }\n\n  func createImageVariations(\n    parameters: CreateImageVariationParameters)\n    async throws -> CreateImageResponse\n  {\n    let request = try OpenAIAPI.images(.variations).multiPartRequest(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters)\n    return try await fetch(debugEnabled: debugEnabled, type: CreateImageResponse.self, with: request)\n  }\n\n  // MARK: Models\n\n  func listModels()\n    async throws -> OpenAIResponse<ModelObject>\n  {\n    let request = try OpenAIAPI.model(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      
organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<ModelObject>.self, with: request)\n  }\n\n  func retrieveModelWith(\n    id: String)\n    async throws -> ModelObject\n  {\n    let request = try OpenAIAPI.model(.retrieve(modelID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ModelObject.self, with: request)\n  }\n\n  func deleteFineTuneModelWith(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.model(.deleteFineTuneModel(modelID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Moderations\n\n  func createModerationFromText(\n    parameters: ModerationParameter<String>)\n    async throws -> ModerationObject\n  {\n    let request = try OpenAIAPI.moderations.request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ModerationObject.self, with: request)\n  }\n\n  func createModerationFromTexts(\n    parameters: ModerationParameter<[String]>)\n    async throws -> ModerationObject\n  {\n    let request = try OpenAIAPI.moderations.request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, 
type: ModerationObject.self, with: request)\n  }\n\n  // MARK: Assistants [BETA]\n\n  func createAssistant(\n    parameters: AssistantParameters)\n    async throws -> AssistantObject\n  {\n    let request = try OpenAIAPI.assistant(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  func retrieveAssistant(\n    id: String)\n    async throws -> AssistantObject\n  {\n    let request = try OpenAIAPI.assistant(.retrieve(assistantID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  func modifyAssistant(\n    id: String,\n    parameters: AssistantParameters)\n    async throws -> AssistantObject\n  {\n    let request = try OpenAIAPI.assistant(.modify(assistantID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: AssistantObject.self, with: request)\n  }\n\n  func deleteAssistant(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.assistant(.delete(assistantID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    
return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func listAssistants(\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil)\n    async throws -> OpenAIResponse<AssistantObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try OpenAIAPI.assistant(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<AssistantObject>.self, with: request)\n  }\n\n  // MARK: Thread [BETA]\n\n  func createThread(\n    parameters: CreateThreadParameters)\n    async throws -> ThreadObject\n  {\n    let request = try OpenAIAPI.thread(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  func retrieveThread(id: String)\n    async throws -> ThreadObject\n  {\n    let request = try OpenAIAPI.thread(.retrieve(threadID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: 
extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  func modifyThread(\n    id: String,\n    parameters: ModifyThreadParameters)\n    async throws -> ThreadObject\n  {\n    let request = try OpenAIAPI.thread(.modify(threadID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ThreadObject.self, with: request)\n  }\n\n  func deleteThread(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.thread(.delete(threadID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Message [BETA]\n\n  func createMessage(\n    threadID: String,\n    parameters: MessageParameter)\n    async throws -> MessageObject\n  {\n    let request = try OpenAIAPI.message(.create(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  func retrieveMessage(\n    threadID: String,\n    messageID: String)\n    async throws -> MessageObject\n  {\n    let request = try OpenAIAPI.message(.retrieve(threadID: threadID, messageID: messageID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: 
organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  func modifyMessage(\n    threadID: String,\n    messageID: String,\n    parameters: ModifyMessageParameters)\n    async throws -> MessageObject\n  {\n    let request = try OpenAIAPI.message(.modify(threadID: threadID, messageID: messageID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: MessageObject.self, with: request)\n  }\n\n  func deleteMessage(\n    threadID: String,\n    messageID: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.message(.delete(threadID: threadID, messageID: messageID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func listMessages(\n    threadID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil,\n    runID: String? 
= nil)\n    async throws -> OpenAIResponse<MessageObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    if let runID {\n      queryItems.append(.init(name: \"run_id\", value: runID))\n    }\n    let request = try OpenAIAPI.message(.list(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<MessageObject>.self, with: request)\n  }\n\n  // MARK: Run [BETA]\n\n  func createRun(\n    threadID: String,\n    parameters: RunParameter)\n    async throws -> RunObject\n  {\n    let request = try OpenAIAPI.run(.create(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func retrieveRun(\n    threadID: String,\n    runID: String)\n    async throws -> RunObject\n  {\n    let request = try OpenAIAPI.run(.retrieve(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: 
debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func modifyRun(\n    threadID: String,\n    runID: String,\n    parameters: ModifyRunParameters)\n    async throws -> RunObject\n  {\n    let request = try OpenAIAPI.run(.modify(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func listRuns(\n    threadID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil)\n    async throws -> OpenAIResponse<RunObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try OpenAIAPI.run(.list(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<RunObject>.self, with: request)\n  }\n\n  func cancelRun(\n    threadID: String,\n    runID: String)\n    async throws -> RunObject\n  {\n    let request = try OpenAIAPI.run(.cancel(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      betaHeaderField: 
Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func submitToolOutputsToRun(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> RunObject\n  {\n    let request = try OpenAIAPI.run(.submitToolOutput(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  func createThreadAndRun(\n    parameters: CreateThreadAndRunParameter)\n    async throws -> RunObject\n  {\n    let request = try OpenAIAPI.run(.createThreadAndRun).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunObject.self, with: request)\n  }\n\n  // MARK: Run Step [BETA]\n\n  func retrieveRunstep(\n    threadID: String,\n    runID: String,\n    stepID: String)\n    async throws -> RunStepObject\n  {\n    let request = try OpenAIAPI.runStep(.retrieve(threadID: threadID, runID: runID, stepID: stepID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: RunStepObject.self, with: request)\n  }\n\n  func listRunSteps(\n    threadID: String,\n    runID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? 
= nil,\n    before: String? = nil)\n    async throws -> OpenAIResponse<RunStepObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try OpenAIAPI.runStep(.list(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<RunStepObject>.self, with: request)\n  }\n\n  func createRunStream(\n    threadID: String,\n    parameters: RunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runParameters = parameters\n    runParameters.stream = true\n    let request = try OpenAIAPI.run(.create(threadID: threadID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: runParameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  func createThreadAndRunStream(\n    parameters: CreateThreadAndRunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runParameters = parameters\n    runParameters.stream = true\n    let request = try OpenAIAPI.run(.createThreadAndRun).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      
method: .post,\n      params: runParameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  func submitToolOutputsToRunStream(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    var runToolsOutputParameter = parameters\n    runToolsOutputParameter.stream = true\n    let request = try OpenAIAPI.run(.submitToolOutput(threadID: threadID, runID: runID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: runToolsOutputParameter,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetchAssistantStreamEvents(with: request, debugEnabled: debugEnabled)\n  }\n\n  // MARK: Batch\n\n  func createBatch(\n    parameters: BatchParameter)\n    async throws -> BatchObject\n  {\n    let request = try OpenAIAPI.batch(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: BatchObject.self, with: request)\n  }\n\n  func retrieveBatch(\n    id: String)\n    async throws -> BatchObject\n  {\n    let request = try OpenAIAPI.batch(.retrieve(batchID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: BatchObject.self, with: request)\n  }\n\n  func cancelBatch(\n    id: String)\n    async throws -> BatchObject\n  {\n    let request = try OpenAIAPI.batch(.cancel(batchID: id)).request(\n      apiKey: 
apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: BatchObject.self, with: request)\n  }\n\n  func listBatch(\n    after: String? = nil,\n    limit: Int? = nil)\n    async throws -> OpenAIResponse<BatchObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    let request = try OpenAIAPI.batch(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<BatchObject>.self, with: request)\n  }\n\n  // MARK: Vector Store\n\n  func createVectorStore(\n    parameters: VectorStoreParameter)\n    async throws -> VectorStoreObject\n  {\n    let request = try OpenAIAPI.vectorStore(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  func listVectorStores(\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? 
= nil)\n    async throws -> OpenAIResponse<VectorStoreObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    let request = try OpenAIAPI.vectorStore(.list).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<VectorStoreObject>.self, with: request)\n  }\n\n  func retrieveVectorStore(\n    id: String)\n    async throws -> VectorStoreObject\n  {\n    let request = try OpenAIAPI.vectorStore(.retrieve(vectorStoreID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  func modifyVectorStore(\n    parameters: VectorStoreParameter,\n    id: String)\n    async throws -> VectorStoreObject\n  {\n    let request = try OpenAIAPI.vectorStore(.modify(vectorStoreID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreObject.self, with: request)\n  }\n\n  func deleteVectorStore(\n    id: String)\n    
async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.vectorStore(.delete(vectorStoreID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Vector Store Files\n\n  func createVectorStoreFile(\n    vectorStoreID: String,\n    parameters: VectorStoreFileParameter)\n    async throws -> VectorStoreFileObject\n  {\n    let request = try OpenAIAPI.vectorStoreFile(.create(vectorStoreID: vectorStoreID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileObject.self, with: request)\n  }\n\n  func listVectorStoreFiles(\n    vectorStoreID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil,\n    filter: String? 
= nil)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    if let filter {\n      queryItems.append(.init(name: \"filter\", value: filter))\n    }\n    let request = try OpenAIAPI.vectorStoreFile(.list(vectorStoreID: vectorStoreID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<VectorStoreFileObject>.self, with: request)\n  }\n\n  func retrieveVectorStoreFile(\n    vectorStoreID: String,\n    fileID: String)\n    async throws -> VectorStoreFileObject\n  {\n    let request = try OpenAIAPI.vectorStoreFile(.retrieve(vectorStoreID: vectorStoreID, fileID: fileID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileObject.self, with: request)\n  }\n\n  func deleteVectorStoreFile(\n    vectorStoreID: String,\n    fileID: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.vectorStoreFile(.delete(vectorStoreID: vectorStoreID, fileID: fileID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      betaHeaderField: 
Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  // MARK: Vector Store File Batch\n\n  func createVectorStoreFileBatch(\n    vectorStoreID: String,\n    parameters: VectorStoreFileBatchParameter)\n    async throws -> VectorStoreFileBatchObject\n  {\n    let request = try OpenAIAPI.vectorStoreFileBatch(.create(vectorStoreID: vectorStoreID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileBatchObject.self, with: request)\n  }\n\n  func retrieveVectorStoreFileBatch(\n    vectorStoreID: String,\n    batchID: String)\n    async throws -> VectorStoreFileBatchObject\n  {\n    let request = try OpenAIAPI.vectorStoreFileBatch(.retrieve(vectorStoreID: vectorStoreID, batchID: batchID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileBatchObject.self, with: request)\n  }\n\n  func cancelVectorStoreFileBatch(\n    vectorStoreID: String,\n    batchID: String)\n    async throws -> VectorStoreFileBatchObject\n  {\n    let request = try OpenAIAPI.vectorStoreFileBatch(.cancel(vectorStoreID: vectorStoreID, batchID: batchID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: VectorStoreFileBatchObject.self, with: request)\n  
}\n\n  func listVectorStoreFilesInABatch(\n    vectorStoreID: String,\n    batchID: String,\n    limit: Int? = nil,\n    order: String? = nil,\n    after: String? = nil,\n    before: String? = nil,\n    filter: String? = nil)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n  {\n    var queryItems = [URLQueryItem]()\n    if let limit {\n      queryItems.append(.init(name: \"limit\", value: \"\\(limit)\"))\n    }\n    if let order {\n      queryItems.append(.init(name: \"order\", value: order))\n    }\n    if let after {\n      queryItems.append(.init(name: \"after\", value: after))\n    }\n    if let before {\n      queryItems.append(.init(name: \"before\", value: before))\n    }\n    if let filter {\n      queryItems.append(.init(name: \"filter\", value: filter))\n    }\n    let request = try OpenAIAPI.vectorStoreFileBatch(.list(vectorStoreID: vectorStoreID, batchID: batchID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      betaHeaderField: Self.assistantsBetaV2,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<VectorStoreFileObject>.self, with: request)\n  }\n\n  // MARK: Response\n\n  func responseCreate(\n    _ parameters: ModelResponseParameter)\n    async throws -> ResponseModel\n  {\n    var responseParameters = parameters\n    responseParameters.stream = false\n    let request = try OpenAIAPI.response(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: responseParameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ResponseModel.self, with: request)\n  }\n\n  func responseModel(\n    id: String,\n    parameters: GetResponseParameter? 
= nil)\n    async throws -> ResponseModel\n  {\n    var queryItems = [URLQueryItem]()\n\n    if let parameters {\n      if let include = parameters.include {\n        for item in include {\n          queryItems.append(URLQueryItem(name: \"include\", value: item))\n        }\n      }\n      if let includeObfuscation = parameters.includeObfuscation {\n        queryItems.append(URLQueryItem(name: \"include_obfuscation\", value: String(includeObfuscation)))\n      }\n      if let startingAfter = parameters.startingAfter {\n        queryItems.append(URLQueryItem(name: \"starting_after\", value: String(startingAfter)))\n      }\n      if let stream = parameters.stream {\n        queryItems.append(URLQueryItem(name: \"stream\", value: String(stream)))\n      }\n    }\n\n    let request = try OpenAIAPI.response(.get(responseID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ResponseModel.self, with: request)\n  }\n\n  func responseModelStream(\n    id: String,\n    parameters: GetResponseParameter? = nil)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    var streamParameters = parameters ?? 
GetResponseParameter()\n    streamParameters.stream = true\n\n    var queryItems = [URLQueryItem]()\n\n    if let include = streamParameters.include {\n      for item in include {\n        queryItems.append(URLQueryItem(name: \"include\", value: item))\n      }\n    }\n    if let includeObfuscation = streamParameters.includeObfuscation {\n      queryItems.append(URLQueryItem(name: \"include_obfuscation\", value: String(includeObfuscation)))\n    }\n    if let startingAfter = streamParameters.startingAfter {\n      queryItems.append(URLQueryItem(name: \"starting_after\", value: String(startingAfter)))\n    }\n    if let stream = streamParameters.stream {\n      queryItems.append(URLQueryItem(name: \"stream\", value: String(stream)))\n    }\n\n    let request = try OpenAIAPI.response(.get(responseID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ResponseStreamEvent.self, with: request)\n  }\n\n  func responseCreateStream(\n    _ parameters: ModelResponseParameter)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n  {\n    var responseParameters = parameters\n    responseParameters.stream = true\n    let request = try OpenAIAPI.response(.create).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: responseParameters,\n      extraHeaders: extraHeaders)\n    return try await fetchStream(debugEnabled: debugEnabled, type: ResponseStreamEvent.self, with: request)\n  }\n\n  func responseDelete(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.response(.delete(responseID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: 
organizationID,\n      method: .delete,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func responseCancel(\n    id: String)\n    async throws -> ResponseModel\n  {\n    let request = try OpenAIAPI.response(.cancel(responseID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ResponseModel.self, with: request)\n  }\n\n  func responseInputItems(\n    id: String,\n    parameters: GetInputItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    var queryItems = [URLQueryItem]()\n\n    if let parameters {\n      if let after = parameters.after {\n        queryItems.append(URLQueryItem(name: \"after\", value: after))\n      }\n      if let include = parameters.include {\n        for item in include {\n          queryItems.append(URLQueryItem(name: \"include\", value: item))\n        }\n      }\n      if let limit = parameters.limit {\n        queryItems.append(URLQueryItem(name: \"limit\", value: String(limit)))\n      }\n      if let order = parameters.order {\n        queryItems.append(URLQueryItem(name: \"order\", value: order))\n      }\n    }\n\n    let request = try OpenAIAPI.response(.inputItems(responseID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<InputItem>.self, with: request)\n  }\n\n  // MARK: - Conversations\n\n  func conversationCreate(\n    parameters: CreateConversationParameter?)\n    async throws -> ConversationModel\n  {\n    let request = try OpenAIAPI.conversantions(.create).request(\n      apiKey: apiKey,\n      
openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ConversationModel.self, with: request)\n  }\n\n  func getConversation(\n    id: String)\n    async throws -> ConversationModel\n  {\n    let request = try OpenAIAPI.conversantions(.get(conversationID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ConversationModel.self, with: request)\n  }\n\n  func updateConversation(\n    id: String,\n    parameters: UpdateConversationParameter)\n    async throws -> ConversationModel\n  {\n    let request = try OpenAIAPI.conversantions(.update(conversationID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: parameters,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ConversationModel.self, with: request)\n  }\n\n  func deleteConversation(\n    id: String)\n    async throws -> DeletionStatus\n  {\n    let request = try OpenAIAPI.conversantions(.delete(conversationID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: DeletionStatus.self, with: request)\n  }\n\n  func getConversationItems(\n    id: String,\n    parameters: GetConversationItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    var queryItems = [URLQueryItem]()\n    if let parameters {\n      if let after = parameters.after {\n        queryItems.append(URLQueryItem(name: \"after\", value: after))\n  
    }\n      if let include = parameters.include {\n        for item in include {\n          queryItems.append(URLQueryItem(name: \"include\", value: item))\n        }\n      }\n      if let limit = parameters.limit {\n        queryItems.append(URLQueryItem(name: \"limit\", value: String(limit)))\n      }\n      if let order = parameters.order {\n        queryItems.append(URLQueryItem(name: \"order\", value: order))\n      }\n    }\n    let request = try OpenAIAPI.conversantions(.items(conversationID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<InputItem>.self, with: request)\n  }\n\n  func createConversationItems(\n    id: String,\n    parameters: CreateConversationItemsParameter)\n    async throws -> OpenAIResponse<InputItem>\n  {\n    var queryItems = [URLQueryItem]()\n    if let include = parameters.include {\n      for item in include {\n        queryItems.append(URLQueryItem(name: \"include\", value: item))\n      }\n    }\n\n    // Create a body-only parameter struct for encoding\n    struct BodyParameters: Codable {\n      let items: [InputItem]\n    }\n    let bodyParams = BodyParameters(items: parameters.items)\n\n    let request = try OpenAIAPI.conversantions(.createItems(conversationID: id)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .post,\n      params: bodyParams,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: OpenAIResponse<InputItem>.self, with: request)\n  }\n\n  func getConversationItem(\n    conversationID: String,\n    itemID: String,\n    parameters: GetConversationItemParameter?)\n    async throws -> InputItem\n  {\n    var queryItems = 
[URLQueryItem]()\n    if let parameters, let include = parameters.include {\n      for item in include {\n        queryItems.append(URLQueryItem(name: \"include\", value: item))\n      }\n    }\n    let request = try OpenAIAPI.conversantions(.item(conversationID: conversationID, itemID: itemID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .get,\n      queryItems: queryItems,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: InputItem.self, with: request)\n  }\n\n  func deleteConversationItem(\n    conversationID: String,\n    itemID: String)\n    async throws -> ConversationModel\n  {\n    let request = try OpenAIAPI.conversantions(.deleteItem(conversationID: conversationID, itemID: itemID)).request(\n      apiKey: apiKey,\n      openAIEnvironment: openAIEnvironment,\n      organizationID: organizationID,\n      method: .delete,\n      extraHeaders: extraHeaders)\n    return try await fetch(debugEnabled: debugEnabled, type: ConversationModel.self, with: request)\n  }\n\n  private static let assistantsBetaV2 = \"assistants=v2\"\n\n  /// [authentication](https://platform.openai.com/docs/api-reference/authentication)\n  private let apiKey: Authorization\n  /// [organization](https://platform.openai.com/docs/api-reference/organization-optional)\n  private let organizationID: String?\n  /// Set this flag to TRUE if you need to print request events in DEBUG builds.\n  private let debugEnabled: Bool\n  /// Extra headers for the request.\n  private let extraHeaders: [String: String]?\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Service/OpenAIService.swift",
    "content": "//\n//  OpenAIService.swift\n//\n//\n//  Created by James Rochabrun on 10/17/23.\n//\n\nimport Foundation\n#if os(Linux)\nimport FoundationNetworking\n#endif\n\n// MARK: - APIError\n\npublic enum APIError: Error {\n  case requestFailed(description: String)\n  case responseUnsuccessful(description: String, statusCode: Int)\n  case invalidData\n  case jsonDecodingFailure(description: String)\n  case dataCouldNotBeReadMissingData(description: String)\n  case bothDecodingStrategiesFailed\n  case timeOutError\n\n  public var displayDescription: String {\n    switch self {\n    case .requestFailed(let description): description\n    case .responseUnsuccessful(let description, _): description\n    case .invalidData: \"Invalid data\"\n    case .jsonDecodingFailure(let description): description\n    case .dataCouldNotBeReadMissingData(let description): description\n    case .bothDecodingStrategiesFailed: \"Decoding strategies failed.\"\n    case .timeOutError: \"Time Out Error.\"\n    }\n  }\n}\n\n// MARK: - Authorization\n\npublic enum Authorization {\n  case apiKey(String)\n  case bearer(String)\n\n  var headerField: String {\n    switch self {\n    case .apiKey:\n      \"api-key\"\n    case .bearer:\n      \"Authorization\"\n    }\n  }\n\n  var value: String {\n    switch self {\n    case .apiKey(let value):\n      value\n    case .bearer(let value):\n      \"Bearer \\(value)\"\n    }\n  }\n}\n\n// MARK: - OpenAIEnvironment\n\n/// Represents the configuration for interacting with the OpenAI API.\npublic struct OpenAIEnvironment {\n  /// The base URL for the OpenAI API.\n  /// Example: \"https://api.openai.com\"\n  let baseURL: String\n\n  /// An optional path for proxying requests.\n  /// Example: \"/proxy-path\"\n  let proxyPath: String?\n\n  /// An optional version of the OpenAI API to use.\n  /// Example: \"v1\"\n  let version: String?\n}\n\n// MARK: - OpenAIService\n\n/// A protocol defining the required services for interacting with OpenAI's 
API.\n///\n/// The protocol outlines methods for fetching data and streaming responses,\n/// as well as handling JSON decoding and networking tasks.\npublic protocol OpenAIService {\n  /// The HTTP client responsible for executing all network requests.\n  ///\n  /// This client is used for tasks like sending and receiving data.\n  var httpClient: HTTPClient { get }\n\n  /// The `JSONDecoder` instance used for decoding JSON responses.\n  ///\n  /// This decoder is used to parse the JSON responses returned by the API\n  /// into model objects that conform to the `Decodable` protocol.\n  var decoder: JSONDecoder { get }\n\n  /// A computed property representing the current OpenAI environment configuration.\n  var openAIEnvironment: OpenAIEnvironment { get }\n\n  // MARK: Audio\n\n  /// - Parameter parameters: The audio transcription parameters.\n  /// - Returns: Transcribed text details.\n  /// - Throws: An error if the process fails.\n  ///\n  /// For more information, refer to [OpenAI's Audio Transcription API documentation](https://platform.openai.com/docs/api-reference/audio/createTranscription).\n  func createTranscription(\n    parameters: AudioTranscriptionParameters)\n    async throws -> AudioObject\n\n  /// - Parameter parameters: The audio translation parameters.\n  /// - Returns: Translated text details.\n  /// - Throws: An error if the process fails.\n  ///\n  /// For more information, refer to [OpenAI's Audio Translation API documentation](https://platform.openai.com/docs/api-reference/audio/createTranslation).\n  func createTranslation(\n    parameters: AudioTranslationParameters)\n    async throws -> AudioObject\n\n  /// - Parameter parameters: The audio speech parameters.\n  /// - Returns: The audio file content.\n  /// - Throws: An error if the process fails.\n  ///\n  /// For more information, refer to [OpenAI's Audio Speech API documentation](https://platform.openai.com/docs/api-reference/audio/createSpeech).\n  func createSpeech(\n    parameters: 
AudioSpeechParameters)\n    async throws -> AudioSpeechObject\n\n  /// Creates a realtime audio session for bidirectional streaming conversation with OpenAI.\n  ///\n  /// - Parameters:\n  ///   - model: The model to use for the realtime session (e.g., \"gpt-4o-mini-realtime-preview-2024-12-17\")\n  ///   - configuration: Session configuration including voice, turn detection, and other settings\n  /// - Returns: An `OpenAIRealtimeSession` for managing the WebSocket connection\n  /// - Throws: An error if the session creation fails\n  ///\n  /// For more information, refer to [OpenAI's Realtime API documentation](https://platform.openai.com/docs/api-reference/realtime).\n  #if canImport(AVFoundation)\n  func realtimeSession(\n    model: String,\n    configuration: OpenAIRealtimeSessionConfiguration)\n    async throws -> OpenAIRealtimeSession\n  #endif\n\n  // MARK: Chat\n\n  /// - Parameter parameters: Parameters for the chat completion request.\n  /// - Returns: A [ChatCompletionObject](https://platform.openai.com/docs/api-reference/chat/object).\n  /// - Throws: An error if the chat initiation fails.\n  ///\n  /// For more information, refer to [OpenAI's Chat completion API documentation](https://platform.openai.com/docs/api-reference/chat/create).\n  func startChat(\n    parameters: ChatCompletionParameters)\n    async throws -> ChatCompletionObject\n\n  /// - Parameter parameters: Parameters for the chat completion request.\n  /// - Returns: A streamed sequence of [ChatCompletionChunkObject](https://platform.openai.com/docs/api-reference/chat/streaming) objects.\n  /// - Throws: An error if the chat initiation fails.\n  ///\n  /// For more information, refer to [OpenAI's Chat completion API documentation](https://platform.openai.com/docs/api-reference/chat/create).\n  func startStreamedChat(\n    parameters: ChatCompletionParameters)\n    async throws -> AsyncThrowingStream<ChatCompletionChunkObject, Error>\n\n  // MARK: Embeddings\n\n  /// - Parameter 
parameters: Parameters for the embedding request.\n  /// - Returns: An `OpenAIResponse<EmbeddingObject>` containing the generated [embedding objects](https://platform.openai.com/docs/api-reference/embeddings/object).\n  /// - Throws: An error if the embedding creation fails.\n  ///\n  /// For more information, refer to [OpenAI's Embedding API documentation](https://platform.openai.com/docs/api-reference/embeddings/create).\n  func createEmbeddings(\n    parameters: EmbeddingParameter)\n    async throws -> OpenAIResponse<EmbeddingObject>\n\n  // MARK: Fine-tuning\n\n  /// - Parameter parameters: Parameters for the fine-tuning job request.\n  /// - Returns: A [FineTuningJobObject](https://platform.openai.com/docs/api-reference/fine-tuning/object) containing details of the created job.\n  /// - Throws: An error if the fine-tuning job creation fails.\n  ///\n  /// For more information, refer to [OpenAI's Fine-Tuning API documentation](https://platform.openai.com/docs/api-reference/fine-tuning/create).\n  func createFineTuningJob(\n    parameters: FineTuningJobParameters)\n    async throws -> FineTuningJobObject\n\n  /// Retrieves a paginated list of fine-tuning jobs.\n  ///\n  /// - Parameters:\n  ///   - lastJobID: Identifier for the last job from the previous pagination request. Optional.\n  ///   - limit: Number of fine-tuning jobs to retrieve. 
If `nil`, the API defaults to 20.\n  /// - Returns: An `OpenAIResponse<FineTuningJobObject>` containing a list of paginated [fine-tuning job objects](https://platform.openai.com/docs/api-reference/fine-tuning/object).\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, refer to [OpenAI's Fine-Tuning API documentation](https://platform.openai.com/docs/api-reference/fine-tuning/list).\n  func listFineTuningJobs(\n    after lastJobID: String?,\n    limit: Int?)\n    async throws -> OpenAIResponse<FineTuningJobObject>\n\n  /// Retrieves a specific fine-tuning job by its ID.\n  ///\n  /// - Parameter id: The identifier of the fine-tuning job to retrieve.\n  /// - Returns: A [FineTuningJobObject](https://platform.openai.com/docs/api-reference/fine-tuning/object) containing the details of the fine-tuning job.\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, refer to [OpenAI's Fine-Tuning API documentation](https://platform.openai.com/docs/api-reference/fine-tuning/retrieve).\n  func retrieveFineTuningJob(\n    id: String)\n    async throws -> FineTuningJobObject\n\n  /// Cancels an ongoing fine-tuning job specified by its ID.\n  ///\n  /// - Parameter id: The identifier of the fine-tuning job to cancel.\n  /// - Returns: A [FineTuningJobObject](https://platform.openai.com/docs/api-reference/fine-tuning/object) reflecting the cancelled status.\n  /// - Throws: An error if the cancellation process fails.\n  ///\n  /// For more information, refer to [OpenAI's Fine-Tuning API documentation](https://platform.openai.com/docs/api-reference/fine-tuning/cancel).\n  func cancelFineTuningJobWith(\n    id: String)\n    async throws -> FineTuningJobObject\n\n  /// Retrieves a list of events for a specified fine-tuning job, with optional pagination.\n  ///\n  /// - Parameter id: The identifier of the fine-tuning job for which to fetch events.\n  /// - Parameter after: The ID of the last event 
retrieved in a previous pagination request.\n  /// - Parameter limit: The number of events to retrieve; if `nil`, the API defaults to 20.\n  /// - Returns: An `OpenAIResponse<FineTuningJobEventObject>` containing the list of events in [fine-tuning job](https://platform.openai.com/docs/api-reference/fine-tuning/object).\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, refer to [OpenAI's Fine-Tuning API documentation](https://platform.openai.com/docs/api-reference/fine-tuning/list-events).\n  func listFineTuningEventsForJobWith(\n    id: String,\n    after lastEventId: String?,\n    limit: Int?)\n    async throws -> OpenAIResponse<FineTuningJobEventObject>\n\n  // MARK: Files\n\n  /// Retrieves a list of files that belong to the user's organization.\n  ///\n  /// - Returns: An `OpenAIResponse<FileObject>` containing a list of [file objects](https://platform.openai.com/docs/api-reference/files/object) associated with the user's organization.\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, see [OpenAI's File API documentation](https://platform.openai.com/docs/api-reference/files/list).\n  func listFiles()\n    async throws -> OpenAIResponse<FileObject>\n\n  /// - Parameter parameters: The parameters and data needed for the file upload.\n  /// - Returns: A [FileObject](https://platform.openai.com/docs/api-reference/files/object) representing the uploaded file.\n  /// - Throws: An error if the upload process fails.\n  ///\n  /// For more details, refer to [OpenAI's File Upload API documentation](https://platform.openai.com/docs/api-reference/files/create).\n  func uploadFile(\n    parameters: FileParameters)\n    async throws -> FileObject\n\n  /// Deletes a file with the specified ID and returns its deletion status.\n  ///\n  /// - Parameter id: The identifier of the file to be deleted.\n  /// - Returns: A `DeletionStatus` indicating the outcome of the deletion.\n  /// - Throws: 
An error if the deletion process fails.\n  /// For more details, refer to [OpenAI's File Upload API documentation](https://platform.openai.com/docs/api-reference/files/delete).\n  func deleteFileWith(\n    id: String)\n    async throws -> DeletionStatus\n\n  /// - Parameter id: The ID of the file to retrieve.\n  /// - Returns: A [FileObject](https://platform.openai.com/docs/api-reference/files/object) containing the details of the retrieved file.\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, refer to [OpenAI's File API documentation](https://platform.openai.com/docs/api-reference/files/retrieve).\n  func retrieveFileWith(\n    id: String)\n    async throws -> FileObject\n\n  /// - Parameter id: The ID of the file whose content is to be retrieved.\n  /// - Returns: An array of dictionaries containing the file content.\n  /// - Throws: An error if the content retrieval process fails.\n  ///  For more information, refer to [OpenAI's File API documentation](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n  func retrieveContentForFileWith(\n    id: String)\n    async throws -> [[String: Any]]\n\n  // MARK: Images\n\n  /// - Parameter parameters: Settings for the image creation request.\n  /// - Returns: An `OpenAIResponse` containing a list of [ImageObject](https://platform.openai.com/docs/api-reference/images/object) instances that represent the created images.\n  /// - Throws: An error if the image creation process fails.\n  ///\n  /// For more information, refer to [OpenAI's Image API documentation](https://platform.openai.com/docs/api-reference/images/create).\n  func legacyCreateImages(\n    parameters: ImageCreateParameters)\n    async throws -> OpenAIResponse<ImageObject>\n\n  /// - Parameter parameters: Settings for the image edit request.\n  /// - Returns: An `OpenAIResponse` containing a list of [ImageObject](https://platform.openai.com/docs/api-reference/images/object) instances that 
represent the edited images.\n  /// - Throws: An error if the image editing process fails.\n  ///\n  /// For more information, refer to [OpenAI's Image API documentation](https://platform.openai.com/docs/api-reference/images/createEdit).\n  func legacyEditImage(\n    parameters: ImageEditParameters)\n    async throws -> OpenAIResponse<ImageObject>\n\n  /// - Parameter parameters: Settings for the image variation request.\n  /// - Returns: An `OpenAIResponse` containing a list of [ImageObject](https://platform.openai.com/docs/api-reference/images/object) instances that represent the created image variations.\n  /// - Throws: An error if the image variation creation process fails.\n  ///\n  /// For more information, refer to [OpenAI's Image API documentation](https://platform.openai.com/docs/api-reference/images/createVariation).\n  func legacyCreateImageVariations(\n    parameters: ImageVariationParameters)\n    async throws -> OpenAIResponse<ImageObject>\n\n  /// - Parameter parameters: Settings for the image creation request.\n  /// - Returns: An `OpenAIResponse` containing image generation results.\n  /// - Throws: An error if the image creation process fails.\n  ///\n  /// For more information, refer to [OpenAI's Image API documentation](https://platform.openai.com/docs/api-reference/images/create).\n  func createImages(\n    parameters: CreateImageParameters)\n    async throws -> CreateImageResponse\n\n  /// - Parameter parameters: Settings for the image edit request.\n  /// - Returns: An `OpenAIResponse` containing edited image results.\n  /// - Throws: An error if the image editing process fails.\n  ///\n  /// For more information, refer to [OpenAI's Image API documentation](https://platform.openai.com/docs/api-reference/images/createEdit).\n  func editImage(\n    parameters: CreateImageEditParameters)\n    async throws -> CreateImageResponse\n\n  /// - Parameter parameters: Settings for the image variation request.\n  /// - Returns: An `OpenAIResponse` 
containing image variation results.\n  /// - Throws: An error if the image variation creation process fails.\n  ///\n  /// For more information, refer to [OpenAI's Image API documentation](https://platform.openai.com/docs/api-reference/images/createVariation).\n  func createImageVariations(\n    parameters: CreateImageVariationParameters)\n    async throws -> CreateImageResponse\n\n  // MARK: Models\n\n  /// - Returns: An `OpenAIResponse` containing a list of [ModelObject](https://platform.openai.com/docs/api-reference/models/object) instances, detailing each available model's owner and availability.\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, refer to [OpenAI's Models API documentation](https://platform.openai.com/docs/api-reference/models/list).\n  func listModels()\n    async throws -> OpenAIResponse<ModelObject>\n\n  /// - Parameter id: The identifier of the model to be retrieved.\n  /// - Returns: A [ModelObject](https://platform.openai.com/docs/api-reference/models/object) containing details of the model matching the specified ID.\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, refer to [OpenAI's Models API documentation](https://platform.openai.com/docs/api-reference/models/retrieve).\n  func retrieveModelWith(\n    id: String)\n    async throws -> ModelObject\n\n  /// Deletes a fine-tuned model from OpenAI's service by its ID.\n  ///\n  /// - Parameter id: The identifier of the fine-tuned model to be deleted.\n  /// - Returns: A `DeletionStatus` indicating the outcome of the deletion process.\n  /// - Throws: An error if the deletion process fails.\n  ///\n  /// For more information, refer to [OpenAI's Models API documentation](https://platform.openai.com/docs/api-reference/models/delete).\n  func deleteFineTuneModelWith(\n    id: String)\n    async throws -> DeletionStatus\n\n  // MARK: Moderations\n\n  /// - Parameter parameters: The text to be moderated 
according to the specified settings.\n  /// - Returns: A [ModerationObject](https://platform.openai.com/docs/api-reference/moderations/object) detailing the results of the moderation process.\n  /// - Throws: An error if the moderation process fails.\n  ///\n  /// For more information, refer to [OpenAI's Moderation API documentation](https://platform.openai.com/docs/api-reference/moderations/create).\n  func createModerationFromText(\n    parameters: ModerationParameter<String>)\n    async throws -> ModerationObject\n\n  /// - Parameter parameters: The array of texts to be moderated according to the specified settings.\n  /// - Returns: A [ModerationObject](https://platform.openai.com/docs/api-reference/moderations/object) detailing the results of the moderation process.\n  /// - Throws: An error if the moderation process fails.\n  ///\n  /// For more information, refer to [OpenAI's Moderation API documentation](https://platform.openai.com/docs/api-reference/moderations/create).\n  func createModerationFromTexts(\n    parameters: ModerationParameter<[String]>)\n    async throws -> ModerationObject\n\n  // MARK: Assistants [BETA]\n\n  /// Create an assistant with a model and instructions.\n  ///\n  /// - Parameter parameters: The parameters needed to build an assistant\n  /// - Returns: An [AssistantObject](https://platform.openai.com/docs/api-reference/assistants/object)\n  /// - Throws: An error if the request fails\n  ///\n  /// For more information, refer to [OpenAI's Assistants API documentation](https://platform.openai.com/docs/api-reference/assistants/createAssistant).\n  func createAssistant(\n    parameters: AssistantParameters)\n    async throws -> AssistantObject\n\n  /// Retrieves an assistant object by its ID.\n  ///\n  /// - Parameter id: The ID of the assistant to retrieve.\n  /// - Returns: The [AssistantObject](https://platform.openai.com/docs/api-reference/assistants/object) object matching the specified ID.\n  /// - Throws: An error if the request 
fails.\n  ///\n  /// For more information, refer to [OpenAI's Assistants API documentation](https://platform.openai.com/docs/api-reference/assistants/getAssistant).\n  func retrieveAssistant(\n    id: String)\n    async throws -> AssistantObject\n\n  /// Modifies an assistant.\n  ///\n  /// - Parameter id: The ID of the assistant to modify.\n  /// - Parameter parameters: The parameters needed to modify an assistant\n  /// - Returns: The modified [AssistantObject](https://platform.openai.com/docs/api-reference/assistants/object)\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Assistants documentation](https://platform.openai.com/docs/api-reference/assistants/modifyAssistant).\n  func modifyAssistant(\n    id: String,\n    parameters: AssistantParameters)\n    async throws -> AssistantObject\n\n  /// Delete an assistant.\n  ///\n  /// - Parameter id: The ID of the assistant to delete.\n  /// - Returns: Deletion Status\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Assistants documentation](https://platform.openai.com/docs/api-reference/assistants/deleteAssistant).\n  func deleteAssistant(\n    id: String)\n    async throws -> DeletionStatus\n\n  /// Returns a list of assistants.\n  ///\n  /// - Parameter limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  /// - Parameter order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.  Defaults to desc.\n  /// - Parameter after: A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  /// - Parameter before: A cursor for use in pagination. 
before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n  /// - Returns: An `OpenAIResponse<AssistantObject>` containing the list of [assistants](https://platform.openai.com/docs/api-reference/assistants/object).\n  /// - Throws: An error if the retrieval process fails.\n  ///\n  /// For more information, refer to [OpenAI's Assistants API documentation](https://platform.openai.com/docs/api-reference/assistants/listAssistants).\n  func listAssistants(\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<AssistantObject>\n\n  // MARK: Thread [BETA]\n\n  /// Create a thread.\n  ///\n  /// - Parameter parameters: The parameters needed to build a thread.\n  /// - Returns: A [thread](https://platform.openai.com/docs/api-reference/threads) object.\n  /// - Throws: An error if the request fails\n  ///\n  /// For more information, refer to [OpenAI's Thread API documentation](https://platform.openai.com/docs/api-reference/threads/createThread).\n  func createThread(\n    parameters: CreateThreadParameters)\n    async throws -> ThreadObject\n\n  /// Retrieves a thread.\n  ///\n  /// - Parameter id: The ID of the thread to retrieve.\n  /// - Returns: The [thread](https://platform.openai.com/docs/api-reference/threads/object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Thread API documentation](https://platform.openai.com/docs/api-reference/threads/getThread).\n  func retrieveThread(\n    id: String)\n    async throws -> ThreadObject\n\n  /// Modifies a thread.\n  ///\n  /// - Parameter id: The ID of the thread to modify. Only the metadata can be modified.\n  /// - Parameter parameters: The parameters needed to modify a thread. 
Only the metadata can be modified.\n  /// - Returns: The modified [thread](https://platform.openai.com/docs/api-reference/threads/object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Thread API documentation](https://platform.openai.com/docs/api-reference/threads/modifyThread).\n  func modifyThread(\n    id: String,\n    parameters: ModifyThreadParameters)\n    async throws -> ThreadObject\n\n  /// Delete a thread.\n  ///\n  /// - Parameter id: The ID of the thread to delete.\n  /// - Returns: Deletion status.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Thread API documentation](https://platform.openai.com/docs/api-reference/threads/deleteThread).\n  func deleteThread(\n    id: String)\n    async throws -> DeletionStatus\n\n  // MARK: Message [BETA]\n\n  /// Create a message.\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) to create a message for.\n  /// - Parameter parameters: The parameters needed to build a message.\n  /// - Returns: A [message](https://platform.openai.com/docs/api-reference/messages/object) object.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Message API documentation](https://platform.openai.com/docs/api-reference/messages/createMessage).\n  func createMessage(\n    threadID: String,\n    parameters: MessageParameter)\n    async throws -> MessageObject\n\n  /// Retrieve a message.\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) to which this message belongs.\n  /// - Parameter messageID: The ID of the message to retrieve.\n  /// - Returns: The [message](https://platform.openai.com/docs/api-reference/threads/messages/object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  
///\n  /// For more information, refer to [OpenAI's Message API documentation](https://platform.openai.com/docs/api-reference/messages/getMessage).\n  func retrieveMessage(\n    threadID: String,\n    messageID: String)\n    async throws -> MessageObject\n\n  /// Modifies a message.\n  ///\n  /// - Parameter threadID: The ID of the thread to which this message belongs.\n  /// - Parameter messageID: The ID of the message to modify.\n  /// - Parameter parameters: The parameters needed to modify a message metadata.\n  /// - Returns: The modified [message](https://platform.openai.com/docs/api-reference/threads/messages/object) object.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Message API documentation](https://platform.openai.com/docs/api-reference/messages/modifyMessage).\n  func modifyMessage(\n    threadID: String,\n    messageID: String,\n    parameters: ModifyMessageParameters)\n    async throws -> MessageObject\n\n  /// Deletes a message.\n  ///\n  /// - Parameter threadID: The ID of the thread to which this message belongs.\n  /// - Parameter messageID: The ID of the message to modify.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Message API documentation](https://platform.openai.com/docs/api-reference/messages/deleteMessage).\n  func deleteMessage(\n    threadID: String,\n    messageID: String)\n    async throws -> DeletionStatus\n\n  /// Returns a list of messages for a given thread.\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) the messages belong to.\n  /// - Parameter limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  /// - Parameter order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. 
Defaults to desc\n  /// - Parameter after: A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  /// - Parameter before: A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n  /// - Parameter runID: Filter messages by the run ID that generated them.\n  /// - Returns: A list of [message](https://platform.openai.com/docs/api-reference/messages) objects.\n  /// - Throws: An error if the request fails\n  ///\n  /// For more information, refer to [OpenAI's Message API documentation](https://platform.openai.com/docs/api-reference/messages/listMessages).\n  func listMessages(\n    threadID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?,\n    runID: String?)\n    async throws -> OpenAIResponse<MessageObject>\n\n  // MARK: Run [BETA]\n\n  /// Create a run.\n  ///\n  /// - Parameter threadID: The ID of the thread to run.\n  /// - Parameter parameters: The parameters needed to build a Run.\n  /// - Returns: A [run](https://platform.openai.com/docs/api-reference/runs/object) object.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/createRun).\n  func createRun(\n    threadID: String,\n    parameters: RunParameter)\n    async throws -> RunObject\n\n  /// Retrieves a run.\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run.\n  /// - Parameter runID: The ID of the run to retrieve.\n  /// - Returns: 
The [run](https://platform.openai.com/docs/api-reference/runs/object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/getRun).\n  func retrieveRun(\n    threadID: String,\n    runID: String)\n    async throws -> RunObject\n\n  /// Modifies a run.\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run.\n  /// - Parameter runID: The ID of the run to modify.\n  /// - Parameter parameters: The parameters needed to modify a run metadata.\n  /// - Returns: The modified [run](https://platform.openai.com/docs/api-reference/runs/object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/modifyRun).\n  func modifyRun(\n    threadID: String,\n    runID: String,\n    parameters: ModifyRunParameters)\n    async throws -> RunObject\n\n  ///  Returns a list of runs belonging to a thread.\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) the run belongs to.\n  /// - Parameter limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  /// - Parameter order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. Defaults to desc\n  /// - Parameter after: A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  /// - Parameter before: A cursor for use in pagination. 
before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n  /// - Returns: A list of [run](https://platform.openai.com/docs/api-reference/runs/object) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/listRuns).\n  func listRuns(\n    threadID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<RunObject>\n\n  /// Cancels a run that is in_progress.\n  ///\n  /// - Parameter threadID: The ID of the thread to which this run belongs.\n  /// - Parameter runID: The ID of the run to cancel.\n  /// - Returns: The modified [run](https://platform.openai.com/docs/api-reference/runs/object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/cancelRun).\n  func cancelRun(\n    threadID: String,\n    runID: String)\n    async throws -> RunObject\n\n  /// When a run has the status: \"requires_action\" and required_action.type is submit_tool_outputs, this endpoint can be used to submit the outputs from the tool calls once they're all completed. 
All outputs must be submitted in a single request.\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) to which this run belongs.\n  /// - Parameter runID: The ID of the run that requires the tool output submission.\n  /// - Parameter parameters: The parameters needed for the run tools output.\n  /// - Returns: The modified [run](https://platform.openai.com/docs/api-reference/runs/object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs).\n  func submitToolOutputsToRun(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> RunObject\n\n  /// Create a thread and run it in one request.\n  ///\n  /// - Parameter parameters: The parameters needed to create a thread and run.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun).\n  func createThreadAndRun(\n    parameters: CreateThreadAndRunParameter)\n    async throws -> RunObject\n\n  // MARK: Run Step [BETA]\n\n  /// Retrieves a run step.\n  ///\n  /// - Parameter threadID: The ID of the thread to which the run and run step belongs.\n  /// - Parameter runID: The ID of the run to which the run step belongs.\n  /// - Parameter stepID: The ID of the run step to retrieve.\n  /// - Returns: The [run step](https://platform.openai.com/docs/api-reference/runs/step-object) object matching the specified ID.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run step API documentation](https://platform.openai.com/docs/api-reference/runs/getRunStep).\n  func retrieveRunstep(\n    threadID: String,\n    runID: String,\n    stepID: String)\n    
async throws -> RunStepObject\n\n  /// Returns a list of run steps belonging to a run.\n  ///\n  /// - Parameter threadID: The ID of the thread the run and run steps belong to.\n  /// - Parameter runID: The ID of the run the run steps belong to.\n  /// - Parameter limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  /// - Parameter order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.  Defaults to desc.\n  /// - Parameter after: A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  /// - Parameter before: A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n  /// - Returns: A list of [run step](https://platform.openai.com/docs/api-reference/runs/step-object) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run step API documentation](https://platform.openai.com/docs/api-reference/runs/listRunSteps).\n  func listRunSteps(\n    threadID: String,\n    runID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<RunStepObject>\n\n  /// Creates a thread and run with stream enabled.\n  ///\n  /// - Parameter parameters: The parameters needed to create a thread and run.\n  /// - Returns: An AsyncThrowingStream of [AssistantStreamEvent](https://platform.openai.com/docs/api-reference/assistants-streaming/events) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// 
For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/createThreadAndRun).\n  func createThreadAndRunStream(\n    parameters: CreateThreadAndRunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n\n  /// Create a run with stream enabled.\n  ///\n  /// - Parameter threadID: The ID of the thread to run.\n  /// - Parameter parameters: The parameters needed to build a Run.\n  /// - Returns: An AsyncThrowingStream of [AssistantStreamEvent](https://platform.openai.com/docs/api-reference/assistants-streaming/events) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/createRun).\n  func createRunStream(\n    threadID: String,\n    parameters: RunParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n\n  /// When a run has the status: \"requires_action\" and required_action.type is submit_tool_outputs, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request. 
Stream enabled\n  ///\n  /// - Parameter threadID: The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) to which this run belongs.\n  /// - Parameter runID: The ID of the run that requires the tool output submission.\n  /// - Parameter parameters: The parameters needed for the run tools output.\n  /// - Returns: An AsyncThrowingStream of [AssistantStreamEvent](https://platform.openai.com/docs/api-reference/assistants-streaming/events) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Run API documentation](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs).\n  func submitToolOutputsToRunStream(\n    threadID: String,\n    runID: String,\n    parameters: RunToolsOutputParameter)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n\n  // MARK: Batch\n\n  /// Creates and executes a batch from an uploaded file of requests\n  ///\n  /// - Parameter parameters: The parameters needed to create a batch.\n  /// - Returns: A [batch](https://platform.openai.com/docs/api-reference/batch/object) object.\n  /// - Throws: An error if the request fails\n  ///\n  /// For more information, refer to [OpenAI's Batch API documentation](https://platform.openai.com/docs/api-reference/batch/create).\n  func createBatch(\n    parameters: BatchParameter)\n    async throws -> BatchObject\n\n  /// Retrieves a batch.\n  ///\n  /// - Parameter id: The identifier of the batch to retrieve.\n  /// - Returns: A [BatchObject](https://platform.openai.com/docs/api-reference/batch/object) matching the specified ID..\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Batch documentation](https://platform.openai.com/docs/api-reference/batch/retrieve).\n  func retrieveBatch(\n    id: String)\n    async throws -> BatchObject\n\n  /// Cancels an in-progress batch.\n  ///\n  /// - Parameter id: The identifier of the batch to 
cancel.\n  /// - Returns: A [BatchObject](https://platform.openai.com/docs/api-reference/batch/object) matching the specified ID..\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Batch documentation](https://platform.openai.com/docs/api-reference/batch/cancel)\n  func cancelBatch(\n    id: String)\n    async throws -> BatchObject\n\n  /// List your organization's batches.\n  ///\n  /// - Parameters:\n  ///   - after: A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  ///   - limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  /// - Returns: An `OpenAIResponse<BatchObject>` containing a list of paginated [Batch](https://platform.openai.com/docs/api-reference/batch/object) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Batch API documentation](https://platform.openai.com/docs/api-reference/batch/list).\n  func listBatch(\n    after: String?,\n    limit: Int?)\n    async throws -> OpenAIResponse<BatchObject>\n\n  // MARK: Vector Store\n\n  /// Create a vector store.\n  ///\n  /// - Parameter parameters: The parameters needed to create a vector store.\n  /// - Returns: A [Vector store](https://platform.openai.com/docs/api-reference/vector-stores) object.\n  /// - Throws: An error if the request fails\n  ///\n  /// For more information, refer to [OpenAI's Vector store API documentation](https://platform.openai.com/docs/api-reference/vector-stores/create).\n  func createVectorStore(\n    parameters: VectorStoreParameter)\n    async throws -> VectorStoreObject\n\n  /// Returns a list of vector stores.\n  ///\n  /// - Parameter limit: A limit on the number of objects to be 
returned. Limit can range between 1 and 100, and the default is 20.\n  /// - Parameter order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.\n  /// - Parameter after: A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  /// - Parameter before: A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n  /// - Returns: A list of [VectorStoreObject](https://platform.openai.com/docs/api-reference/vector-stores) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Vector stores API documentation](https://platform.openai.com/docs/api-reference/vector-stores/list).\n  func listVectorStores(\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?)\n    async throws -> OpenAIResponse<VectorStoreObject>\n\n  /// Retrieves a vector store.\n  ///\n  /// - Parameter id: The ID of the vector store to retrieve.\n  /// - Returns: A [Vector Store](https://platform.openai.com/docs/api-reference/vector-stores) matching the specified ID..\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Batch documentation](https://platform.openai.com/docs/api-reference/vector-stores/retrieve).\n  func retrieveVectorStore(\n    id: String)\n    async throws -> VectorStoreObject\n\n  /// Modifies a vector store.\n  ///\n  /// - Parameter id: The ID of the vector store to modify.\n  /// - Parameter parameters: The parameters needed to modify a vector store.\n  /// - Returns: 
A [Vector Store](https://platform.openai.com/docs/api-reference/vector-stores) matching the specified ID..\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Batch documentation](https://platform.openai.com/docs/api-reference/vector-stores/modify).\n  func modifyVectorStore(\n    parameters: VectorStoreParameter,\n    id: String)\n    async throws -> VectorStoreObject\n\n  /// Delete a vector store.\n  ///\n  /// - Parameter id: The ID of the vector store to delete.\n  /// - Returns: A Deletion status.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Batch documentation](https://platform.openai.com/docs/api-reference/vector-stores/modify).\n  func deleteVectorStore(\n    id: String)\n    async throws -> DeletionStatus\n\n  // MARK: Vector Store Files\n\n  /// Create a vector store file by attaching a [File](https://platform.openai.com/docs/api-reference/files) to a vector store.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store for which to create a File.\n  /// - Parameter parameters: The paramaters needed to create a vector store File.\n  /// - Returns: A [VectorStoreFileObject](https://platform.openai.com/docs/api-reference/vector-stores-files/file-object)\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Vectore store file documentation.](https://platform.openai.com/docs/api-reference/vector-stores-files/createFile).\n  func createVectorStoreFile(\n    vectorStoreID: String,\n    parameters: VectorStoreFileParameter)\n    async throws -> VectorStoreFileObject\n\n  /// Returns a list of vector store files.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store that the files belong to.\n  /// - Parameter limit: A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the default is 20.\n  /// - Parameter order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.\n  /// - Parameter after: A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  /// - Parameter before: A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n  /// - Parameter filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.\n  /// - Returns: A list of [VectorStoreFileObject](https://platform.openai.com/docs/api-reference/vector-stores-files/file-object) objects.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Vector stores API documentation](https://platform.openai.com/docs/api-reference/vector-stores-files/listFiles).\n  func listVectorStoreFiles(\n    vectorStoreID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?,\n    filter: String?)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n\n  /// Retrieves a vector store file.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store that the file belongs to.\n  /// - Parameter fileID: The ID of the file being retrieved.\n  /// - Returns: A [vector store file object](https://platform.openai.com/docs/api-reference/vector-stores-files/file-object)\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Vector stores API 
documentation](https://platform.openai.com/docs/api-reference/vector-stores-files/getFile).\n  func retrieveVectorStoreFile(\n    vectorStoreID: String,\n    fileID: String)\n    async throws -> VectorStoreFileObject\n\n  /// Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](https://platform.openai.com/docs/api-reference/files/delete) endpoint.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store that the file belongs to.\n  /// - Parameter fileID: The ID of the file to delete.\n  /// - Returns: A deletion status.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Vector stores API documentation](https://platform.openai.com/docs/api-reference/vector-stores-files/deleteFile).\n  func deleteVectorStoreFile(\n    vectorStoreID: String,\n    fileID: String)\n    async throws -> DeletionStatus\n\n  // MARK: Vector Store File Batch\n\n  /// Create a vector store file batch.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store for which to create a File Batch.\n  /// - Parameter parameters: The paramaters needed to create a vector store File Batch.\n  /// - Returns: A [VectorStoreFileBatchObject](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/batch-object)\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's Vectore store file batch documentation.](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch).\n  func createVectorStoreFileBatch(\n    vectorStoreID: String,\n    parameters: VectorStoreFileBatchParameter)\n    async throws -> VectorStoreFileBatchObject\n\n  /// Retrieves a vector store file batch.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store that the file batch belongs to.\n  /// - Parameter batchID: The ID of the file batch being retrieved.\n  
/// - Returns: A [vector store file batch object](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/batch-object)\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Vector stores file batch API documentation](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/getBatch).\n  func retrieveVectorStoreFileBatch(\n    vectorStoreID: String,\n    batchID: String)\n    async throws -> VectorStoreFileBatchObject\n\n  /// Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store that the file batch belongs to.\n  /// - Parameter batchID: The ID of the file batch to cancel.\n  /// - Returns: The modified [vector store file batch object.](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/batch-object)\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Vector stores file batch API documentation](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/cancelBatch).\n  func cancelVectorStoreFileBatch(\n    vectorStoreID: String,\n    batchID: String)\n    async throws -> VectorStoreFileBatchObject\n\n  /// Returns a list of vector store files in a batch.\n  ///\n  /// - Parameter vectorStoreID: The ID of the vector store that the files belong to.\n  /// - Parameter batchID: The ID of the file batch that the files belong to.\n  /// - Parameter limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n  /// - Parameter order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.\n  /// - Parameter after: A cursor for use in pagination. after is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n  /// - Parameter before: A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n  /// - Parameter filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.\n  /// - Returns: A list of [VectorStoreFileObject](https://platform.openai.com/docs/api-reference/vector-stores-files/file-object) objects in a batch.\n  /// - Throws: An error if the request fails.\n  ///\n  /// For more information, refer to [OpenAI's  Vector stores file batch API documentation](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/listBatchFiles).\n  func listVectorStoreFilesInABatch(\n    vectorStoreID: String,\n    batchID: String,\n    limit: Int?,\n    order: String?,\n    after: String?,\n    before: String?,\n    filter: String?)\n    async throws -> OpenAIResponse<VectorStoreFileObject>\n\n  // MARK: Response\n\n  /// Returns a [Response](https://platform.openai.com/docs/api-reference/responses/object) object.\n  ///\n  /// - Parameter ModelResponseParameter: The response model parameters\n  func responseCreate(\n    _ parameters: ModelResponseParameter)\n    async throws -> ResponseModel\n\n  /// [The Response object matching the specified ID.](https://platform.openai.com/docs/api-reference/responses/get)\n  ///\n  /// - Parameter id: The ID of the ResponseModel\n  /// - Parameter parameters: Optional query parameters for the request\n  func responseModel(\n    id: String,\n    parameters: GetResponseParameter?)\n    async throws -> ResponseModel\n\n  /// Returns a streaming 
[Response](https://platform.openai.com/docs/api-reference/responses/get) object for a specific response ID.\n  ///\n  /// - Parameter id: The ID of the ResponseModel\n  /// - Parameter parameters: Optional query parameters for the request (stream will be set to true)\n  /// - Returns: An AsyncThrowingStream of ResponseStreamEvent objects\n  func responseModelStream(\n    id: String,\n    parameters: GetResponseParameter?)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n\n  /// Returns a streaming [Response](https://platform.openai.com/docs/api-reference/responses/object) object.\n  ///\n  /// - Parameter parameters: The response model parameters with stream set to true\n  /// - Returns: An AsyncThrowingStream of ResponseStreamEvent objects\n  func responseCreateStream(\n    _ parameters: ModelResponseParameter)\n    async throws -> AsyncThrowingStream<ResponseStreamEvent, Error>\n\n  /// [Deletes a model response with the given ID.](https://platform.openai.com/docs/api-reference/responses/delete)\n  ///\n  /// - Parameter id: The ID of the response to delete\n  /// - Returns: A DeletionStatus object confirming the deletion\n  func responseDelete(\n    id: String)\n    async throws -> DeletionStatus\n\n  /// [Cancels a model response with the given ID.](https://platform.openai.com/docs/api-reference/responses/cancel)\n  /// Only responses created with the background parameter set to true can be cancelled.\n  ///\n  /// - Parameter id: The ID of the response to cancel\n  /// - Returns: A Response object\n  func responseCancel(\n    id: String)\n    async throws -> ResponseModel\n\n  /// [Returns a list of input items for a given response.](https://platform.openai.com/docs/api-reference/responses/input-items)\n  ///\n  /// - Parameter id: The ID of the response to retrieve input items for\n  /// - Parameter parameters: Optional query parameters for pagination and filtering\n  /// - Returns: A list of input item objects\n  func 
responseInputItems(\n    id: String,\n    parameters: GetInputItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n\n  // MARK: - Conversations\n\n  /// [Create a conversation.](https://platform.openai.com/docs/api-reference/conversations/create)\n  ///\n  /// - Parameter parameters: The conversation creation parameters\n  /// - Returns: A Conversation object\n  func conversationCreate(\n    parameters: CreateConversationParameter?)\n    async throws -> ConversationModel\n\n  func getConversation(\n    id: String)\n    async throws -> ConversationModel\n\n  func updateConversation(\n    id: String,\n    parameters: UpdateConversationParameter)\n    async throws -> ConversationModel\n\n  func deleteConversation(\n    id: String)\n    async throws -> DeletionStatus\n\n  func getConversationItems(\n    id: String,\n    parameters: GetConversationItemsParameter?)\n    async throws -> OpenAIResponse<InputItem>\n\n  func createConversationItems(\n    id: String,\n    parameters: CreateConversationItemsParameter)\n    async throws -> OpenAIResponse<InputItem>\n\n  func getConversationItem(\n    conversationID: String,\n    itemID: String,\n    parameters: GetConversationItemParameter?)\n    async throws -> InputItem\n\n  func deleteConversationItem(\n    conversationID: String,\n    itemID: String)\n    async throws -> ConversationModel\n}\n\nextension OpenAIService {\n  /// Asynchronously fetches the contents of a file that has been uploaded to OpenAI's service.\n  ///\n  /// This method is used exclusively for retrieving the content of uploaded files.\n  ///\n  /// - Parameter request: The `URLRequest` describing the API request to fetch the file.\n  /// - Throws: An error if the request fails.\n  /// - Returns: A dictionary array representing the file contents.\n  public func fetchContentsOfFile(\n    request: URLRequest)\n    async throws -> [[String: Any]]\n  {\n    printCurlCommand(request)\n\n    // Convert URLRequest to HTTPRequest\n    let httpRequest 
= try HTTPRequest(from: request)\n\n    let (data, response) = try await httpClient.data(for: httpRequest)\n\n    guard response.statusCode == 200 else {\n      var errorMessage = \"status code \\(response.statusCode)\"\n      do {\n        let error = try decoder.decode(OpenAIErrorResponse.self, from: data)\n        errorMessage = error.error.message ?? \"NO ERROR MESSAGE PROVIDED\"\n      } catch {\n        // If decoding fails, keep the original error message with status code\n      }\n      throw APIError.responseUnsuccessful(\n        description: errorMessage,\n        statusCode: response.statusCode)\n    }\n    var content = [[String: Any]]()\n    if let jsonString = String(data: data, encoding: String.Encoding.utf8) {\n      let lines = jsonString.split(separator: \"\\n\")\n      for line in lines {\n        #if DEBUG\n        print(\"DEBUG Received line:\\n\\(line)\")\n        #endif\n        if\n          let lineData = String(line).data(using: String.Encoding.utf8),\n          let jsonObject = try? JSONSerialization.jsonObject(with: lineData, options: .allowFragments) as? 
[String: Any]\n        {\n          content.append(jsonObject)\n        }\n      }\n    }\n    return content\n  }\n\n  /// Asynchronously fetches audio data.\n  ///\n  /// This method is used exclusively for handling audio data responses.\n  ///\n  /// - Parameter request: The `URLRequest` describing the API request to fetch the file.\n  /// - Throws: An error if the request fails.\n  /// - Returns: The audio Data\n  public func fetchAudio(\n    with request: URLRequest)\n    async throws -> Data\n  {\n    printCurlCommand(request)\n\n    // Convert URLRequest to HTTPRequest\n    let httpRequest = try HTTPRequest(from: request)\n\n    let (data, response) = try await httpClient.data(for: httpRequest)\n\n    guard response.statusCode == 200 else {\n      var errorMessage = \"Status code \\(response.statusCode)\"\n      do {\n        let errorResponse = try decoder.decode(OpenAIErrorResponse.self, from: data)\n        errorMessage = errorResponse.error.message ?? \"NO ERROR MESSAGE PROVIDED\"\n      } catch {\n        if let errorString = String(data: data, encoding: .utf8), !errorString.isEmpty {\n          errorMessage += \" - \\(errorString)\"\n        } else {\n          errorMessage += \" - No error message provided\"\n        }\n      }\n      throw APIError.responseUnsuccessful(\n        description: errorMessage,\n        statusCode: response.statusCode)\n    }\n    return data\n  }\n\n  /// Asynchronously fetches a decodable data type from OpenAI's API.\n  ///\n  /// - Parameters:\n  ///   - debugEnabled: If true the service will print events on DEBUG builds.\n  ///   - type: The `Decodable` type that the response should be decoded to.\n  ///   - request: The `URLRequest` describing the API request.\n  /// - Throws: An error if the request fails or if decoding fails.\n  /// - Returns: A value of the specified decodable type.\n  public func fetch<T: Decodable>(\n    debugEnabled: Bool,\n    type: T.Type,\n    with request: URLRequest)\n    async throws -> 
T\n  {\n    if debugEnabled {\n      printCurlCommand(request)\n    }\n\n    // Convert URLRequest to HTTPRequest\n    let httpRequest = try HTTPRequest(from: request)\n\n    let (data, response) = try await httpClient.data(for: httpRequest)\n\n    if debugEnabled {\n      printHTTPResponse(response)\n    }\n\n    guard response.statusCode == 200 else {\n      var errorMessage = \"status code \\(response.statusCode)\"\n      do {\n        let error = try decoder.decode(OpenAIErrorResponse.self, from: data)\n        errorMessage = error.error.message ?? \"NO ERROR MESSAGE PROVIDED\"\n      } catch {\n        // If decoding fails, keep the original error message with status code\n      }\n      throw APIError.responseUnsuccessful(\n        description: errorMessage,\n        statusCode: response.statusCode)\n    }\n    #if DEBUG\n    if debugEnabled {\n      try print(\"DEBUG JSON FETCH API = \\(JSONSerialization.jsonObject(with: data, options: .allowFragments) as? [String: Any])\")\n    }\n    #endif\n    do {\n      return try decoder.decode(type, from: data)\n    } catch DecodingError.keyNotFound(let key, let context) {\n      let debug = \"Key '\\(key.stringValue)' not found: \\(context.debugDescription)\"\n      let codingPath = \"codingPath: \\(context.codingPath)\"\n      let debugMessage = debug + codingPath\n      #if DEBUG\n      if debugEnabled {\n        print(debugMessage)\n      }\n      #endif\n      throw APIError.dataCouldNotBeReadMissingData(description: debugMessage)\n    } catch {\n      #if DEBUG\n      if debugEnabled {\n        print(\"\\(error)\")\n      }\n      #endif\n      throw APIError.jsonDecodingFailure(description: error.localizedDescription)\n    }\n  }\n\n  /// Asynchronously fetches a stream of decodable data types from OpenAI's API for chat completions.\n  ///\n  /// This method is primarily used for streaming chat completions.\n  ///\n  /// - Parameters:\n  ///   - debugEnabled: If true the service will print events on DEBUG 
builds.\n  ///   - type: The `Decodable` type that each streamed response should be decoded to.\n  ///   - request: The `URLRequest` describing the API request.\n  /// - Throws: An error if the request fails or if decoding fails.\n  /// - Returns: An asynchronous throwing stream of the specified decodable type.\n  public func fetchStream<T: Decodable>(\n    debugEnabled: Bool,\n    type _: T.Type,\n    with request: URLRequest)\n    async throws -> AsyncThrowingStream<T, Error>\n  {\n    if debugEnabled {\n      printCurlCommand(request)\n    }\n\n    // Convert URLRequest to HTTPRequest\n    let httpRequest = try HTTPRequest(from: request)\n\n    let (byteStream, response) = try await httpClient.bytes(for: httpRequest)\n\n    if debugEnabled {\n      printHTTPResponse(response)\n    }\n\n    guard response.statusCode == 200 else {\n      var errorMessage = \"status code \\(response.statusCode)\"\n      do {\n        // For error responses, we need to get the raw data instead of using the stream\n        // as error responses are regular JSON, not streaming data\n        let (errorData, _) = try await httpClient.data(for: httpRequest)\n        let error = try decoder.decode(OpenAIErrorResponse.self, from: errorData)\n        errorMessage = error.error.message ?? 
\"NO ERROR MESSAGE PROVIDED\"\n      } catch {\n        // If decoding fails, keep the original error message with status code\n      }\n      throw APIError.responseUnsuccessful(\n        description: errorMessage,\n        statusCode: response.statusCode)\n    }\n\n    // Create a stream from the lines\n    guard case .lines(let lineStream) = byteStream else {\n      throw APIError.requestFailed(description: \"Expected line stream but got byte stream\")\n    }\n\n    return AsyncThrowingStream { continuation in\n      let fetchTask = Task {\n        do {\n          for try await line in lineStream {\n            if\n              line.hasPrefix(\"data:\"), line != \"data: [DONE]\",\n              let data = String(line.dropFirst(5)).data(using: .utf8)\n            {\n              #if DEBUG\n              if debugEnabled {\n                try print(\n                  \"DEBUG JSON STREAM LINE = \\(JSONSerialization.jsonObject(with: data, options: .allowFragments) as? [String: Any])\")\n              }\n              #endif\n              do {\n                let decoded = try self.decoder.decode(T.self, from: data)\n                continuation.yield(decoded)\n              } catch DecodingError.keyNotFound(let key, let context) {\n                let debug = \"Key '\\(key.stringValue)' not found: \\(context.debugDescription)\"\n                let codingPath = \"codingPath: \\(context.codingPath)\"\n                let debugMessage = debug + codingPath\n                #if DEBUG\n                if debugEnabled {\n                  print(debugMessage)\n                }\n                #endif\n                throw APIError.dataCouldNotBeReadMissingData(description: debugMessage)\n              } catch {\n                #if DEBUG\n                if debugEnabled {\n                  debugPrint(\"CONTINUATION ERROR DECODING \\(error.localizedDescription)\")\n                }\n                #endif\n                continuation.finish(throwing: error)\n      
        }\n            }\n          }\n          continuation.finish()\n        } catch DecodingError.keyNotFound(let key, let context) {\n          let debug = \"Key '\\(key.stringValue)' not found: \\(context.debugDescription)\"\n          let codingPath = \"codingPath: \\(context.codingPath)\"\n          let debugMessage = debug + codingPath\n          #if DEBUG\n          if debugEnabled {\n            print(debugMessage)\n          }\n          #endif\n          throw APIError.dataCouldNotBeReadMissingData(description: debugMessage)\n        } catch {\n          #if DEBUG\n          if debugEnabled {\n            print(\"CONTINUATION ERROR DECODING \\(error.localizedDescription)\")\n          }\n          #endif\n          continuation.finish(throwing: error)\n        }\n      }\n      continuation.onTermination = { @Sendable _ in\n        fetchTask.cancel()\n      }\n    }\n  }\n\n  public func fetchAssistantStreamEvents(\n    with request: URLRequest,\n    debugEnabled: Bool)\n    async throws -> AsyncThrowingStream<AssistantStreamEvent, Error>\n  {\n    printCurlCommand(request)\n\n    // Convert URLRequest to HTTPRequest\n    let httpRequest = try HTTPRequest(from: request)\n\n    let (byteStream, response) = try await httpClient.bytes(for: httpRequest)\n\n    printHTTPResponse(response)\n\n    guard response.statusCode == 200 else {\n      var errorMessage = \"status code \\(response.statusCode)\"\n      do {\n        // For error responses, we need to get the raw data instead of using the stream\n        // as error responses are regular JSON, not streaming data\n        let (errorData, _) = try await httpClient.data(for: httpRequest)\n        let error = try decoder.decode(OpenAIErrorResponse.self, from: errorData)\n        errorMessage = error.error.message ?? 
\"NO ERROR MESSAGE PROVIDED\"\n      } catch {\n        // If decoding fails, keep the original error message with status code\n      }\n      throw APIError.responseUnsuccessful(\n        description: errorMessage,\n        statusCode: response.statusCode)\n    }\n\n    // Create a stream from the lines\n    guard case .lines(let lineStream) = byteStream else {\n      throw APIError.requestFailed(description: \"Expected line stream but got byte stream\")\n    }\n\n    return AsyncThrowingStream { continuation in\n      let streamTask = Task {\n        do {\n          for try await line in lineStream {\n            if\n              line.hasPrefix(\"data:\"), line != \"data: [DONE]\",\n              let data = String(line.dropFirst(5)).data(using: .utf8)\n            {\n              do {\n                if\n                  let json = try JSONSerialization.jsonObject(with: data, options: .allowFragments) as? [String: Any],\n                  let object = json[\"object\"] as? String,\n                  let eventObject = AssistantStreamEventObject(rawValue: object)\n                {\n                  switch eventObject {\n                  case .threadMessageDelta:\n                    let decoded = try self.decoder.decode(MessageDeltaObject.self, from: data)\n                    continuation.yield(.threadMessageDelta(decoded))\n\n                  case .threadRunStepDelta:\n                    let decoded = try self.decoder.decode(RunStepDeltaObject.self, from: data)\n                    continuation.yield(.threadRunStepDelta(decoded))\n\n                  case .threadRun:\n                    // We expect a object of type \"thread.run.SOME_STATE\" in the data object\n                    // However what we get is a `thread.run` object but we can check the status\n                    // of the decoded run to determine the stream event.\n                    // If we check the event line instead, this will contain the expected \"event: thread.run.step.completed\" 
for example.\n                    // Therefore the need to stream this event in the following way.\n                    let decoded = try self.decoder.decode(RunObject.self, from: data)\n                    switch RunObject.Status(rawValue: decoded.status) {\n                    case .queued:\n                      continuation.yield(.threadRunQueued(decoded))\n                    case .inProgress:\n                      continuation.yield(.threadRunInProgress(decoded))\n                    case .requiresAction:\n                      continuation.yield(.threadRunRequiresAction(decoded))\n                    case .cancelling:\n                      continuation.yield(.threadRunCancelling(decoded))\n                    case .cancelled:\n                      continuation.yield(.threadRunCancelled(decoded))\n                    case .failed:\n                      continuation.yield(.threadRunFailed(decoded))\n                    case .completed:\n                      continuation.yield(.threadRunCompleted(decoded))\n                    case .expired:\n                      continuation.yield(.threadRunExpired(decoded))\n                    default:\n                      #if DEBUG\n                      if debugEnabled {\n                        try print(\n                          \"DEBUG threadRun status not found = \\(JSONSerialization.jsonObject(with: data, options: .allowFragments) as? [String: Any])\")\n                      }\n                      #endif\n                    }\n\n                  default:\n                    #if DEBUG\n                    if debugEnabled {\n                      try print(\n                        \"DEBUG EVENT \\(eventObject.rawValue) IGNORED = \\(JSONSerialization.jsonObject(with: data, options: .allowFragments) as? 
[String: Any])\")\n                    }\n                    #endif\n                  }\n                } else {\n                  #if DEBUG\n                  if debugEnabled {\n                    try print(\n                      \"DEBUG EVENT DECODE IGNORED = \\(JSONSerialization.jsonObject(with: data, options: .allowFragments) as? [String: Any])\")\n                  }\n                  #endif\n                }\n              } catch DecodingError.keyNotFound(let key, let context) {\n                #if DEBUG\n                if debugEnabled {\n                  try print(\n                    \"DEBUG Decoding Object Failed = \\(JSONSerialization.jsonObject(with: data, options: .allowFragments) as? [String: Any])\")\n                }\n                #endif\n                let debug = \"Key '\\(key.stringValue)' not found: \\(context.debugDescription)\"\n                let codingPath = \"codingPath: \\(context.codingPath)\"\n                let debugMessage = debug + codingPath\n                #if DEBUG\n                if debugEnabled {\n                  print(debugMessage)\n                }\n                #endif\n                throw APIError.dataCouldNotBeReadMissingData(description: debugMessage)\n              } catch {\n                #if DEBUG\n                if debugEnabled {\n                  debugPrint(\"CONTINUATION ERROR DECODING \\(error.localizedDescription)\")\n                }\n                #endif\n                continuation.finish(throwing: error)\n              }\n            }\n          }\n\n          continuation.finish()\n        } catch DecodingError.keyNotFound(let key, let context) {\n          let debug = \"Key '\\(key.stringValue)' not found: \\(context.debugDescription)\"\n          let codingPath = \"codingPath: \\(context.codingPath)\"\n          let debugMessage = debug + codingPath\n          #if DEBUG\n          if debugEnabled {\n            print(debugMessage)\n          }\n          #endif\n          
throw APIError.dataCouldNotBeReadMissingData(description: debugMessage)\n        } catch {\n          #if DEBUG\n          if debugEnabled {\n            print(\"CONTINUATION ERROR DECODING \\(error.localizedDescription)\")\n          }\n          #endif\n          continuation.finish(throwing: error)\n        }\n      }\n\n      continuation.onTermination = { @Sendable _ in\n        streamTask.cancel()\n      }\n    }\n  }\n\n  // MARK: Debug Helpers\n\n  private func prettyPrintJSON(\n    _ data: Data)\n    -> String?\n  {\n    guard\n      let jsonObject = try? JSONSerialization.jsonObject(with: data, options: []),\n      let prettyData = try? JSONSerialization.data(withJSONObject: jsonObject, options: [.prettyPrinted]),\n      let prettyPrintedString = String(data: prettyData, encoding: .utf8)\n    else { return nil }\n    return prettyPrintedString\n  }\n\n  private func printCurlCommand(\n    _ request: URLRequest)\n  {\n    guard let url = request.url, let httpMethod = request.httpMethod else {\n      debugPrint(\"Invalid URL or HTTP method.\")\n      return\n    }\n\n    var baseCommand = \"curl \\(url.absoluteString)\"\n\n    // Add method if not GET\n    if httpMethod != \"GET\" {\n      baseCommand += \" -X \\(httpMethod)\"\n    }\n\n    // Add headers if any, masking the Authorization token\n    if let headers = request.allHTTPHeaderFields {\n      for (header, value) in headers {\n        let maskedValue = header.lowercased() == \"authorization\" ? 
maskAuthorizationToken(value) : value\n        baseCommand += \" \\\\\\n-H \\\"\\(header): \\(maskedValue)\\\"\"\n      }\n    }\n\n    // Add body if present\n    if let httpBody = request.httpBody, let bodyString = prettyPrintJSON(httpBody) {\n      // The body string is already pretty printed and should be enclosed in single quotes\n      baseCommand += \" \\\\\\n-d '\\(bodyString)'\"\n    }\n\n    // Print the final command\n    #if DEBUG\n    print(baseCommand)\n    #endif\n  }\n\n  private func prettyPrintJSON(\n    _ data: Data)\n    -> String\n  {\n    guard\n      let jsonObject = try? JSONSerialization.jsonObject(with: data, options: []),\n      let prettyData = try? JSONSerialization.data(withJSONObject: jsonObject, options: [.prettyPrinted]),\n      let prettyPrintedString = String(data: prettyData, encoding: .utf8)\n    else { return \"Could not print JSON - invalid format\" }\n    return prettyPrintedString\n  }\n\n  private func maskAuthorizationToken(_ token: String) -> String {\n    if token.count > 6 {\n      let prefix = String(token.prefix(3))\n      let suffix = String(token.suffix(3))\n      return \"\\(prefix)................\\(suffix)\"\n    } else {\n      return \"INVALID TOKEN LENGTH\"\n    }\n  }\n\n  /// Print HTTP Response information for debugging\n  /// - Parameter response: The HTTP response to print\n  private func printHTTPResponse(_ response: HTTPResponse) {\n    print(\"STATUS CODE: \\(response.statusCode)\")\n    print(\"HEADERS: \\(response.headers)\")\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Service/OpenAIServiceFactory.swift",
    "content": "//\n//  OpenAIServiceFactory.swift\n//\n//\n//  Created by James Rochabrun on 10/18/23.\n//\n\nimport Foundation\n\npublic class OpenAIServiceFactory {\n  /// Creates and returns an instance of `OpenAIService`.\n  ///\n  /// - Parameters:\n  ///   - apiKey: The API key required for authentication.\n  ///   - organizationID: The optional organization ID for multi-tenancy (default is `nil`).\n  ///   - decoder: The JSON decoder to be used for parsing API responses (default is `JSONDecoder.init()`).\n  ///   - httpClient: The HTTPClient to be used for network calls. Defaults to `HTTPClientFactory.createDefault()`\n  ///   - debugEnabled: If `true` service prints event on DEBUG builds, default to `false`.\n  ///\n  /// - Returns: A fully configured object conforming to `OpenAIService`.\n  public static func service(\n    apiKey: String,\n    organizationID: String? = nil,\n    decoder: JSONDecoder = .init(),\n    httpClient: HTTPClient? = nil,\n    debugEnabled: Bool = false)\n    -> OpenAIService\n  {\n    let client = httpClient ?? HTTPClientFactory.createDefault()\n    return DefaultOpenAIService(\n      apiKey: apiKey,\n      organizationID: organizationID,\n      httpClient: client,\n      decoder: decoder,\n      debugEnabled: debugEnabled)\n  }\n\n  // MARK: Azure\n\n  /// Creates and returns an instance of `OpenAIService`.\n  ///\n  /// - Parameters:\n  ///   - azureConfiguration: The AzureOpenAIConfiguration.\n  ///   - decoder: The JSON decoder to be used for parsing API responses (default is `JSONDecoder.init()`).\n  ///   - httpClient: The HTTPClient to be used for network calls. 
Defaults to `HTTPClientFactory.createDefault()`\n  ///   - debugEnabled: If `true` service prints event on DEBUG builds, default to `false`.\n  ///\n  /// - Returns: A fully configured object conforming to `OpenAIService`.\n  public static func service(\n    azureConfiguration: AzureOpenAIConfiguration,\n    decoder: JSONDecoder = .init(),\n    httpClient: HTTPClient? = nil,\n    debugEnabled: Bool = false)\n    -> OpenAIService\n  {\n    let client = httpClient ?? HTTPClientFactory.createDefault()\n    return DefaultOpenAIAzureService(\n      azureConfiguration: azureConfiguration,\n      httpClient: client,\n      decoder: decoder,\n      debugEnabled: debugEnabled)\n  }\n\n  #if !os(Linux)\n\n  // MARK: AIProxy\n\n  /// Creates and returns an instance of `OpenAIService` for use with aiproxy.pro\n  /// Use this service to protect your OpenAI API key before going to production.\n  ///\n  /// - Parameters:\n  ///   - aiproxyPartialKey: The partial key provided in the 'API Keys' section of the AIProxy dashboard.\n  ///                        Please see the integration guide for acquiring your key, at https://www.aiproxy.pro/docs\n  ///\n  ///   - aiproxyServiceURL: The service URL is displayed in the AIProxy dashboard when you submit your OpenAI key.\n  ///                        This argument is required for keys that you submitted after July 22nd, 2024. If you are an\n  ///                        existing customer that configured your AIProxy project before July 22nd, you may continue\n  ///                        to leave this blank.\n  ///\n  ///   - aiproxyClientID: If your app already has client or user IDs that you want to annotate AIProxy requests\n  ///                      with, you can pass a clientID here. 
If you do not have existing client or user IDs, leave\n  ///                      the `clientID` argument out, and IDs will be generated automatically for you.\n  ///   - debugEnabled: If `true` service prints event on DEBUG builds, default to `false`.\n  ///\n  /// - Returns: A conformer of OpenAIService that proxies all requests through api.aiproxy.pro\n  public static func service(\n    aiproxyPartialKey: String,\n    aiproxyServiceURL: String? = nil,\n    aiproxyClientID: String? = nil,\n    debugEnabled: Bool = false)\n    -> OpenAIService\n  {\n    AIProxyService(\n      partialKey: aiproxyPartialKey,\n      serviceURL: aiproxyServiceURL,\n      clientID: aiproxyClientID,\n      debugEnabled: debugEnabled)\n  }\n  #endif\n\n  // MARK: Custom URL\n\n  /// Creates and returns an instance of `OpenAIService`.\n  ///\n  /// Use this service if you need to provide a custom URL, for example to run local models with OpenAI endpoints compatibility using Ollama.\n  /// Check [Ollama blog post](https://ollama.com/blog/openai-compatibility) for more.\n  ///\n  /// - Parameters:\n  ///   - apiKey: The optional API key required for authentication.\n  ///   - baseURL: The local host URL. defaults to  \"http://localhost:11434\"\n  ///   - httpClient: The HTTPClient to be used for network calls. Defaults to `HTTPClientFactory.createDefault()`\n  ///   - debugEnabled: If `true` service prints event on DEBUG builds, default to `false`.\n  ///\n  /// - Returns: A fully configured object conforming to `OpenAIService`.\n  public static func service(\n    apiKey: Authorization = .apiKey(\"\"),\n    baseURL: String,\n    httpClient: HTTPClient? = nil,\n    debugEnabled: Bool = false)\n    -> OpenAIService\n  {\n    let client = httpClient ?? 
HTTPClientFactory.createDefault()\n    return LocalModelService(\n      apiKey: apiKey,\n      baseURL: baseURL,\n      httpClient: client,\n      debugEnabled: debugEnabled)\n  }\n\n  // MARK: Proxy Path\n\n  /// Creates and returns an instance of `OpenAIService`.\n  ///\n  /// Use this service if you need to provide a custom URL with a proxy path, for example to run Groq.\n  ///\n  /// - Parameters:\n  ///   - apiKey: The optional API key required for authentication.\n  ///   - baseURL: The local host URL.  e.g \"https://api.groq.com\" or \"https://generativelanguage.googleapis.com\"\n  ///   - proxyPath: The proxy path e.g `openai`\n  ///   - overrideVersion: The API version. defaults to `v1`\n  ///   - extraHeaders: Additional headers needed for the request. Do not provide API key in these headers.\n  ///   - httpClient: The HTTPClient to be used for network calls. Defaults to `HTTPClientFactory.createDefault()`\n  ///   - debugEnabled: If `true` service prints event on DEBUG builds, default to `false`.\n  ///\n  /// - Returns: A fully configured object conforming to `OpenAIService`.\n  public static func service(\n    apiKey: String,\n    overrideBaseURL: String,\n    proxyPath: String? = nil,\n    overrideVersion: String? = nil,\n    extraHeaders: [String: String]? = nil,\n    httpClient: HTTPClient? = nil,\n    debugEnabled: Bool = false)\n    -> OpenAIService\n  {\n    let client = httpClient ?? HTTPClientFactory.createDefault()\n    return DefaultOpenAIService(\n      apiKey: apiKey,\n      baseURL: overrideBaseURL,\n      proxyPath: proxyPath,\n      overrideVersion: overrideVersion,\n      extraHeaders: extraHeaders,\n      httpClient: client,\n      debugEnabled: debugEnabled)\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/AudioController.swift",
    "content": "//\n//  AudioController.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\n#if canImport(AVFoundation)\nimport AVFoundation\nimport OSLog\n\nprivate let logger = Logger(subsystem: \"com.swiftopenai\", category: \"Audio\")\n\n// MARK: - AudioController\n\n/// Use this class to control the streaming of mic data and playback of PCM16 data.\n/// Audio played using the `playPCM16Audio` method does not interfere with the mic data streaming out of the `micStream` AsyncStream.\n/// That is, if you use this to control audio in an OpenAI realtime session, the model will not hear itself.\n///\n/// ## Implementor's note\n/// We use either AVAudioEngine or AudioToolbox for mic data, depending on the platform and whether headphones are attached.\n/// The following arrangement provides for the best user experience:\n///\n///     +----------+---------------+------------------+\n///     | Platform | Headphones    | Audio API        |\n///     +----------+---------------+------------------+\n///     | macOS    | Yes           | AudioEngine      |\n///     | macOS    | No            | AudioToolbox     |\n///     | iOS      | Yes           | AudioEngine      |\n///     | iOS      | No            | AudioToolbox     |\n///     | watchOS  | Yes           | AudioEngine      |\n///     | watchOS  | No            | AudioEngine      |\n///     +----------+---------------+------------------+\n///\n@RealtimeActor\npublic final class AudioController {\n  public init(modes: [Mode]) async throws {\n    self.modes = modes\n    #if os(iOS)\n    try? AVAudioSession.sharedInstance().setCategory(\n      .playAndRecord,\n      mode: .voiceChat,\n      options: [.defaultToSpeaker, .allowBluetooth])\n    try? AVAudioSession.sharedInstance().setActive(true, options: [])\n\n    #elseif os(watchOS)\n    try? AVAudioSession.sharedInstance().setCategory(.playAndRecord)\n    try? 
await AVAudioSession.sharedInstance().activate(options: [])\n    #endif\n\n    audioEngine = AVAudioEngine()\n\n    if modes.contains(.record) {\n      #if os(macOS) || os(iOS)\n      microphonePCMSampleVendor = AudioUtils.headphonesConnected\n        ? try MicrophonePCMSampleVendorAE(audioEngine: audioEngine)\n        : MicrophonePCMSampleVendorAT()\n      #else\n      microphonePCMSampleVendor = try MicrophonePCMSampleVendorAE(audioEngine: audioEngine)\n      #endif\n    }\n\n    if modes.contains(.playback) {\n      audioPCMPlayer = try await AudioPCMPlayer(audioEngine: audioEngine)\n    }\n\n    audioEngine.prepare()\n\n    // Nesting `start` in a Task is necessary on watchOS.\n    // There is some sort of race, and letting the runloop tick seems to \"fix\" it.\n    // If I call `prepare` and `start` in serial succession, then there is no playback on watchOS (sometimes).\n    Task {\n      try self.audioEngine.start()\n    }\n  }\n\n  public enum Mode {\n    case record\n    case playback\n  }\n\n  public let modes: [Mode]\n\n  public func micStream() throws -> AsyncStream<AVAudioPCMBuffer> {\n    guard\n      modes.contains(.record),\n      let microphonePCMSampleVendor\n    else {\n      throw OpenAIError.assertion(\"Please pass [.record] to the AudioController initializer\")\n    }\n    return try microphonePCMSampleVendor.start()\n  }\n\n  public func stop() {\n    microphonePCMSampleVendor?.stop()\n    audioPCMPlayer?.interruptPlayback()\n  }\n\n  public func playPCM16Audio(base64String: String) {\n    guard\n      modes.contains(.playback),\n      let audioPCMPlayer\n    else {\n      logger.error(\"Please pass [.playback] to the AudioController initializer\")\n      return\n    }\n    audioPCMPlayer.playPCM16Audio(from: base64String)\n  }\n\n  public func interruptPlayback() {\n    guard\n      modes.contains(.playback),\n      let audioPCMPlayer\n    else {\n      logger.error(\"Please pass [.playback] to the AudioController initializer\")\n      
return\n    }\n    audioPCMPlayer.interruptPlayback()\n  }\n\n  private let audioEngine: AVAudioEngine\n  private var microphonePCMSampleVendor: MicrophonePCMSampleVendor? = nil\n  private var audioPCMPlayer: AudioPCMPlayer? = nil\n\n}\n#endif\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/DeletionStatus.swift",
    "content": "//\n//  DeletionStatus.swift\n//\n//\n//  Created by James Rochabrun on 4/27/24.\n//\n\nimport Foundation\n\npublic struct DeletionStatus: Decodable {\n  public let id: String\n  public let object: String\n  public let deleted: Bool\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/ExpirationPolicy.swift",
    "content": "//\n//  ExpirationPolicy.swift\n//\n//\n//  Created by James Rochabrun on 4/27/24.\n//\n\nimport Foundation\n\npublic struct ExpirationPolicy: Codable {\n  /// Anchor timestamp after which the expiration policy applies. Supported anchors: last_active_at.\n  let anchor: String\n  /// The number of days after the anchor time that the vector store will expire.\n  let days: Int\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/FileCount.swift",
    "content": "//\n//  FileCount.swift\n//\n//\n//  Created by James Rochabrun on 4/29/24.\n//\n\nimport Foundation\n\npublic struct FileCount: Decodable {\n  /// The number of files that are currently being processed.\n  let inProgress: Int\n  /// The number of files that have been successfully processed.\n  let completed: Int\n  /// The number of files that have failed to process.\n  let failed: Int\n  /// The number of files that were cancelled.\n  let cancelled: Int\n  /// The total number of files.\n  let total: Int\n\n  enum CodingKeys: String, CodingKey {\n    case inProgress = \"in_progress\"\n    case completed\n    case failed\n    case cancelled\n    case total\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/IncompleteDetails.swift",
    "content": "//\n//  IncompleteDetails.swift\n//\n//\n//  Created by James Rochabrun on 4/25/24.\n//\n\nimport Foundation\n\n/// Message: On an incomplete message, details about why the message is incomplete.\n/// Run: Details on why the run is incomplete. Will be null if the run is not incomplete.\npublic struct IncompleteDetails: Codable {\n  /// Message: The reason the message is incomplete.\n  /// Run: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.\n  let reason: String\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/JSONSchema.swift",
    "content": "//\n//  JSONSchema.swift\n//\n//\n//  Created by James Rochabrun on 8/10/24.\n//\n\nimport Foundation\n\n// MARK: - JSONSchemaType\n\n/// Supported schemas\n///\n/// Structured Outputs supports a subset of the JSON Schema language.\n///\n/// Supported types\n///\n/// The following types are supported for Structured Outputs:\n///\n/// String\n/// Number\n/// Boolean\n/// Object\n/// Array\n/// Enum\n/// anyOf\npublic enum JSONSchemaType: Codable, Equatable {\n  case string\n  case number\n  case integer\n  case boolean\n  case object\n  case array\n  case null\n  case union([JSONSchemaType])\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n    if let string = try? container.decode(String.self) {\n      switch string {\n      case \"string\": self = .string\n      case \"number\": self = .number\n      case \"integer\": self = .integer\n      case \"boolean\": self = .boolean\n      case \"object\": self = .object\n      case \"array\": self = .array\n      case \"null\": self = .null\n      default: throw DecodingError.dataCorruptedError(in: container, debugDescription: \"Unknown type: \\(string)\")\n      }\n    } else if let array = try? 
container.decode([String].self) {\n      let types = try array.map { typeString -> JSONSchemaType in\n        guard let type = JSONSchemaType(rawValue: typeString) else {\n          throw DecodingError.dataCorruptedError(in: container, debugDescription: \"Unknown type in union: \\(typeString)\")\n        }\n        return type\n      }\n      self = .union(types)\n    } else {\n      throw DecodingError.dataCorruptedError(in: container, debugDescription: \"Expected a string or an array of strings\")\n    }\n  }\n\n  private init?(rawValue: String) {\n    switch rawValue {\n    case \"string\": self = .string\n    case \"number\": self = .number\n    case \"integer\": self = .integer\n    case \"boolean\": self = .boolean\n    case \"object\": self = .object\n    case \"array\": self = .array\n    case \"null\": self = .null\n    default: return nil\n    }\n  }\n\n  public static func optional(_ type: JSONSchemaType) -> JSONSchemaType {\n    .union([type, .null])\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n    switch self {\n    case .string: try container.encode(\"string\")\n    case .number: try container.encode(\"number\")\n    case .integer: try container.encode(\"integer\")\n    case .boolean: try container.encode(\"boolean\")\n    case .object: try container.encode(\"object\")\n    case .array: try container.encode(\"array\")\n    case .null: try container.encode(\"null\")\n    case .union(let types): try container.encode(types.map(\\.rawValue))\n    }\n  }\n\n  private var rawValue: String {\n    switch self {\n    case .string: \"string\"\n    case .number: \"number\"\n    case .integer: \"integer\"\n    case .boolean: \"boolean\"\n    case .object: \"object\"\n    case .array: \"array\"\n    case .null: \"null\"\n    case .union: fatalError(\"Union type doesn't have a single raw value\")\n    }\n  }\n}\n\n// MARK: - JSONSchema\n\npublic class JSONSchema: Codable, Equatable {\n  public 
init(\n    type: JSONSchemaType? = nil,\n    description: String? = nil,\n    properties: [String: JSONSchema]? = nil,\n    items: JSONSchema? = nil,\n    required: [String]? = nil,\n    additionalProperties: Bool = false,\n    enum: [String]? = nil,\n    ref: String? = nil)\n  {\n    self.type = type\n    self.description = description\n    self.properties = properties\n    self.items = items\n    self.required = required\n    self.additionalProperties = additionalProperties\n    self.enum = `enum`\n    self.ref = ref\n  }\n\n  public required init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n\n    if let ref = try? container.decode(String.self, forKey: .ref) {\n      self.ref = ref\n      type = nil\n      description = nil\n      properties = nil\n      items = nil\n      required = nil\n      additionalProperties = false\n      `enum` = nil\n      return\n    }\n\n    type = try container.decodeIfPresent(JSONSchemaType.self, forKey: .type)\n    description = try container.decodeIfPresent(String.self, forKey: .description)\n    properties = try container.decodeIfPresent([String: JSONSchema].self, forKey: .properties)\n    items = try container.decodeIfPresent(JSONSchema.self, forKey: .items)\n    required = try container.decodeIfPresent([String].self, forKey: .required)\n    additionalProperties = try container.decodeIfPresent(Bool.self, forKey: .additionalProperties)\n    `enum` = try container.decodeIfPresent([String].self, forKey: .enum)\n    ref = nil\n  }\n\n  public let type: JSONSchemaType?\n  public let description: String?\n  public var properties: [String: JSONSchema]?\n  public var items: JSONSchema?\n  /// To use Structured Outputs, all fields or function parameters [must be specified as required.](https://platform.openai.com/docs/guides/structured-outputs/all-fields-must-be-required)\n  /// Although all fields must be required (and the model will return a value for each parameter), it is 
possible to emulate an optional parameter by using a union type with null.\n  public let required: [String]?\n  /// Structured Outputs only supports generating specified keys / values, so we require developers to set additionalProperties: false to opt into Structured Outputs.\n  public let additionalProperties: Bool?\n  public let `enum`: [String]?\n  public var ref: String?\n\n  public static func ==(lhs: JSONSchema, rhs: JSONSchema) -> Bool {\n    lhs.type == rhs.type &&\n      lhs.description == rhs.description &&\n      lhs.properties == rhs.properties &&\n      lhs.items == rhs.items &&\n      lhs.required == rhs.required &&\n      lhs.additionalProperties == rhs.additionalProperties &&\n      lhs.enum == rhs.enum &&\n      lhs.ref == rhs.ref\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n\n    if let ref {\n      try container.encode(ref, forKey: .ref)\n      return\n    }\n\n    try container.encodeIfPresent(type, forKey: .type)\n    try container.encodeIfPresent(description, forKey: .description)\n    try container.encodeIfPresent(properties, forKey: .properties)\n    try container.encodeIfPresent(items, forKey: .items)\n    try container.encodeIfPresent(required, forKey: .required)\n    try container.encodeIfPresent(additionalProperties, forKey: .additionalProperties)\n    try container.encodeIfPresent(`enum`, forKey: .enum)\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type, description, properties, items, required, additionalProperties, `enum`, ref = \"$ref\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/LastError.swift",
    "content": "//\n//  LastError.swift\n//\n//\n//  Created by James Rochabrun on 4/28/24.\n//\n\nimport Foundation\n\npublic struct LastError: Codable {\n  /// One of server_error or rate_limit_exceeded.\n  let code: String\n  /// A human-readable description of the error.\n  let message: String\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/MessageAttachment.swift",
    "content": "//\n//  MessageAttachment.swift\n//\n//\n//  Created by James Rochabrun on 4/25/24.\n//\n\nimport Foundation\n\n/// Messages have attachments instead of file_ids. attachments are helpers that add files to the Thread’s tool_resources.\n/// [V2](https://platform.openai.com/docs/assistants/migration/what-has-changed)\npublic struct MessageAttachment: Codable {\n  let fileID: String\n  let tools: [AssistantObject.Tool]\n\n  enum CodingKeys: String, CodingKey {\n    case fileID = \"file_id\"\n    case tools\n  }\n\n  public init(fileID: String, tools: [AssistantObject.Tool]) {\n    self.fileID = fileID\n    self.tools = tools\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/OpenAIError.swift",
    "content": "//\n//  OpenAIError.swift\n//  SwiftOpenAI\n//\n\nimport Foundation\n\npublic enum OpenAIError: LocalizedError {\n  case audioConfigurationError(String)\n  case assertion(String)\n\n  public var errorDescription: String? {\n    switch self {\n    case .audioConfigurationError(let message):\n      \"Audio configuration error: \\(message)\"\n    case .assertion(let message):\n      \"SwiftOpenAI - A library precondition was not met: \\(message)\"\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/OpenAIJSONValue.swift",
    "content": "//\n//  OpenAIJSONValue.swift\n//  SwiftOpenAI\n//\n//  Created from AIProxySwift\n//  Original: https://github.com/lzell/AIProxySwift\n//\n\nimport Foundation\n\n// MARK: - OpenAIJSONValue\n\n/// Use OpenAIJSONValue when an Encodable or Decodable model has JSON members with types that are not known\n/// ahead of time.\n///\n/// For example, AI providers often include 'tool use' functionality, where the request to the provider\n/// contains a JSON schema defining the contract that the tool should conform to.  With OpenAIJSONValue, the\n/// user may supply a schema that makes sense for them, unencumbered by strict codable compiler requirements.\n///\n/// Example usage:\n///\n///     let toolSchema: [String: OpenAIJSONValue] = [\n///         \"properties\": [\n///             \"ticker\": [\n///                 \"type\": \"string\",\n///                 \"description\": \"The stock ticker symbol, e.g. AAPL for Apple Inc.\"\n///             ]\n///         ],\n///         \"required\": [\"ticker\"]\n///     ]\n///\n///     let encoder = JSONEncoder()\n///     try encoder.encode(toolSchema) // Compiler is happy\n///\npublic enum OpenAIJSONValue: Codable, Sendable {\n  case null(NSNull)\n  case bool(Bool)\n  case int(Int)\n  case double(Double)\n  case string(String)\n  case array([OpenAIJSONValue])\n  case object([String: OpenAIJSONValue])\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n    if container.decodeNil() {\n      self = .null(NSNull())\n    } else if let bool = try? container.decode(Bool.self) {\n      self = .bool(bool)\n    } else if let int = try? container.decode(Int.self) {\n      self = .int(int)\n    } else if let double = try? container.decode(Double.self) {\n      self = .double(double)\n    } else if let string = try? container.decode(String.self) {\n      self = .string(string)\n    } else if let array = try? 
container.decode([OpenAIJSONValue].self) {\n      self = .array(array)\n    } else if let object = try? container.decode([String: OpenAIJSONValue].self) {\n      self = .object(object)\n    } else {\n      throw DecodingError.dataCorruptedError(\n        in: container,\n        debugDescription: \"Unexpected JSON value\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n    switch self {\n    case .null:\n      try container.encodeNil()\n    case .bool(let bool):\n      try container.encode(bool)\n    case .int(let int):\n      try container.encode(int)\n    case .double(let double):\n      try container.encode(double)\n    case .string(let string):\n      try container.encode(string)\n    case .array(let array):\n      try container.encode(array)\n    case .object(let object):\n      try container.encode(object)\n    }\n  }\n\n}\n\nextension [String: OpenAIJSONValue] {\n  nonisolated public var untypedDictionary: [String: any Sendable] {\n    convertToUntypedDictionary(self)\n  }\n}\n\n// MARK: - OpenAIJSONValue + ExpressibleByNilLiteral\n\nextension OpenAIJSONValue: ExpressibleByNilLiteral {\n  public init(nilLiteral _: ()) {\n    self = .null(NSNull())\n  }\n}\n\n// MARK: - OpenAIJSONValue + ExpressibleByBooleanLiteral\n\nextension OpenAIJSONValue: ExpressibleByBooleanLiteral {\n  public init(booleanLiteral value: BooleanLiteralType) {\n    self = .bool(value)\n  }\n}\n\n// MARK: - OpenAIJSONValue + ExpressibleByIntegerLiteral\n\nextension OpenAIJSONValue: ExpressibleByIntegerLiteral {\n  public init(integerLiteral value: IntegerLiteralType) {\n    self = .int(value)\n  }\n}\n\n// MARK: - OpenAIJSONValue + ExpressibleByFloatLiteral\n\nextension OpenAIJSONValue: ExpressibleByFloatLiteral {\n  public init(floatLiteral value: FloatLiteralType) {\n    self = .double(value)\n  }\n}\n\n// MARK: - OpenAIJSONValue + ExpressibleByStringLiteral\n\nextension OpenAIJSONValue: ExpressibleByStringLiteral 
{\n  public init(stringLiteral value: StringLiteralType) {\n    self = .string(value)\n  }\n}\n\n// MARK: - OpenAIJSONValue + ExpressibleByArrayLiteral\n\nextension OpenAIJSONValue: ExpressibleByArrayLiteral {\n  public init(arrayLiteral elements: OpenAIJSONValue...) {\n    self = .array(elements)\n  }\n}\n\n// MARK: - OpenAIJSONValue + ExpressibleByDictionaryLiteral\n\nextension OpenAIJSONValue: ExpressibleByDictionaryLiteral {\n  public init(dictionaryLiteral elements: (String, OpenAIJSONValue)...) {\n    self = .object(.init(uniqueKeysWithValues: elements))\n  }\n}\n\nnonisolated private func convertToUntyped(_ input: OpenAIJSONValue) -> any Sendable {\n  switch input {\n  case .null:\n    NSNull()\n  case .bool(let bool):\n    bool\n  case .int(let int):\n    int\n  case .double(let double):\n    double\n  case .string(let string):\n    string\n  case .array(let array):\n    array.map { convertToUntyped($0) }\n  case .object(let dictionary):\n    convertToUntypedDictionary(dictionary)\n  }\n}\n\nnonisolated private func convertToUntypedDictionary(\n  _ input: [String: OpenAIJSONValue])\n  -> [String: any Sendable]\n{\n  input.mapValues { v in\n    switch v {\n    case .null:\n      NSNull()\n    case .bool(let bool):\n      bool\n    case .int(let int):\n      int\n    case .double(let double):\n      double\n    case .string(let string):\n      string\n    case .array(let array):\n      array.map { convertToUntyped($0) }\n    case .object(let dictionary):\n      convertToUntypedDictionary(dictionary)\n    }\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/Reasoning.swift",
    "content": "//\n//  Reasoning.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n/// Reasoning configuration for o-series models\npublic struct Reasoning: Codable {\n  public init(effort: String? = nil, summary: Summary? = nil) {\n    self.effort = effort\n    self.summary = summary?.rawValue\n  }\n\n  public enum Summary: String {\n    case auto\n    case concise\n    case detailed\n  }\n\n  /// Defaults to medium\n  /// Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are low, medium, high and minimal.\n  /// Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n  /// The new minimal setting produces very few reasoning tokens for cases where you need the fastest possible time-to-first-token. We often see better performance when the model can produce a few tokens when needed versus none. The default is medium.\n  ///\n  /// The minimal setting performs especially well in coding and instruction following scenarios, adhering closely to given directions. However, it may require prompting to act more proactively. To improve the model's reasoning quality, even at minimal effort, encourage it to “think” or outline its steps before answering.\n  public var effort: String?\n\n  /// A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n  public var summary: String?\n\n  enum CodingKeys: String, CodingKey {\n    case effort\n    case summary\n  }\n\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/ResponseFormat.swift",
    "content": "//\n//  ResponseFormat.swift\n//\n//\n//  Created by James Rochabrun on 4/13/24.\n//\n\nimport Foundation\n\n// MARK: - ResponseFormat\n\n/// An object specifying the format that the model must output. Compatible with GPT-4o, GPT-4o mini, GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.\n///\n/// Setting to { \"type\": \"json_schema\", \"json_schema\": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide.](https://platform.openai.com/docs/guides/structured-outputs)\n///\n/// Setting to { \"type\": \"json_object\" } enables JSON mode, which ensures the message the model generates is valid JSON.\n///\n/// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n///\n/// [OpenAI announcement](https://openai.com/index/introducing-structured-outputs-in-the-api/)\n///\n/// [Documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format)\npublic enum ResponseFormat: Codable, Equatable {\n  case text // The type of response format being defined: text.\n  case jsonObject // The type of response format being defined: json_object.\n  case jsonSchema(JSONSchemaResponseFormat) // The type of response format being defined: json_schema.\n  case unknown\n\n  public init(from decoder: Decoder) throws {\n    // Attempt to decode the response format as a single string\n    if\n      let singleValueContainer = try? decoder.singleValueContainer(),\n      let typeString = try? 
singleValueContainer.decode(String.self)\n    {\n      switch typeString {\n      case \"text\":\n        self = .text\n      case \"json_object\":\n        self = .jsonObject\n      default:\n        self = .unknown\n      }\n      return\n    }\n\n    // If it’s not a single string, decode it as a dictionary\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"text\":\n      self = .text\n\n    case \"json_object\":\n      self = .jsonObject\n\n    case \"json_schema\":\n      let jsonSchema = try container.decode(JSONSchemaResponseFormat.self, forKey: .jsonSchema)\n      self = .jsonSchema(jsonSchema)\n\n    default:\n      self = .unknown\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n    switch self {\n    case .text:\n      try container.encode(\"text\", forKey: .type)\n\n    case .jsonObject:\n      try container.encode(\"json_object\", forKey: .type)\n\n    case .jsonSchema(let jsonSchema):\n      try container.encode(\"json_schema\", forKey: .type)\n      try container.encode(jsonSchema, forKey: .jsonSchema)\n\n    case .unknown:\n      try container.encode(\"unknown\", forKey: .type)\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n    case jsonSchema = \"json_schema\"\n  }\n}\n\n// MARK: - JSONSchemaResponseFormat\n\n/// [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs/structured-outputs)\n/// Specifically to be used for Response format with structured outputs.\npublic struct JSONSchemaResponseFormat: Codable, Equatable {\n  let name: String\n  let description: String?\n  let strict: Bool\n  let schema: JSONSchema\n\n  public init(name: String, description: String? 
= nil, strict: Bool, schema: JSONSchema) {\n    self.name = name\n    self.description = description\n    self.strict = strict\n    self.schema = schema\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/TextConfiguration.swift",
    "content": "//\n//  TextConfiguration.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: - TextConfiguration\n\n/// Text configuration options\npublic struct TextConfiguration: Codable {\n  /// An object specifying the format that the model must output\n  public var format: FormatType\n\n  /// The verbosity level for the response (e.g., \"low\", \"medium\", \"high\")\n  public var verbosity: String?\n\n  public init(format: FormatType, verbosity: String? = nil) {\n    self.format = format\n    self.verbosity = verbosity\n  }\n}\n\n// MARK: - FormatType\n\n/// Format types for text response\npublic enum FormatType: Codable {\n  case text\n  case jsonSchema(JSONSchema, name: String? = nil)\n  case jsonObject\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"text\":\n      self = .text\n\n    case \"json_schema\":\n      let schema = try container.decode(JSONSchema.self, forKey: .schema)\n      // Preserve the schema name when present so a decode/encode round-trip is\n      // lossless; previously the name was discarded and re-encoding replaced it\n      // with the \"schema_response\" default.\n      let name = try container.decodeIfPresent(String.self, forKey: .name)\n      self = .jsonSchema(schema, name: name)\n\n    case \"json_object\":\n      self = .jsonObject\n\n    default:\n      throw DecodingError.dataCorruptedError(\n        forKey: .type,\n        in: container,\n        debugDescription: \"Unknown format type: \\(type)\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n\n    switch self {\n    case .text:\n      try container.encode(\"text\", forKey: .type)\n\n    case .jsonSchema(let schema, let name):\n      try container.encode(\"json_schema\", forKey: .type)\n      try container.encode(name ?? \"schema_response\", forKey: .name)\n      try container.encode(schema, forKey: .schema)\n\n    case .jsonObject:\n      try container.encode(\"json_object\", forKey: .type)\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case schema\n    case name\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/Tool.swift",
    "content": "//\n//  Tool.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n/// An array of tools the model may call while generating a response\npublic enum Tool: Codable {\n  /// A tool that searches for relevant content from uploaded files\n  case fileSearch(FileSearchTool)\n\n  /// Defines a function in your own code the model can choose to call\n  case function(FunctionTool)\n\n  /// A tool that runs Python code to help generate a response to a prompt\n  case codeInterpreter(CodeInterpreterTool)\n\n  /// A tool that generates images using a model like gpt-image-1\n  case imageGeneration(ImageGenerationTool)\n\n  /// A tool that allows the model to execute shell commands in a local environment\n  case localShell(LocalShellTool)\n\n  /// A tool that controls a virtual computer\n  case computerUse(ComputerUseTool)\n\n  /// This tool searches the web for relevant results to use in a response\n  case webSearch(WebSearchTool)\n\n  /// Give the model access to additional tools via remote Model Context Protocol (MCP) servers\n  case mcp(MCPTool)\n\n  /// A custom tool that returns plain text instead of JSON\n  case custom(CustomTool)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    let singleValueContainer = try decoder.singleValueContainer()\n\n    switch type {\n    case \"file_search\":\n      self = try .fileSearch(singleValueContainer.decode(FileSearchTool.self))\n    case \"function\":\n      self = try .function(singleValueContainer.decode(FunctionTool.self))\n    case \"code_interpreter\":\n      self = try .codeInterpreter(singleValueContainer.decode(CodeInterpreterTool.self))\n    case \"image_generation\":\n      self = try .imageGeneration(singleValueContainer.decode(ImageGenerationTool.self))\n    case \"local_shell\":\n      self = try 
.localShell(singleValueContainer.decode(LocalShellTool.self))\n    case \"computer_use_preview\":\n      self = try .computerUse(singleValueContainer.decode(ComputerUseTool.self))\n    case \"web_search\", \"web_search_2025_08_26\", \"web_search_preview\", \"web_search_preview_2025_03_11\":\n      self = try .webSearch(singleValueContainer.decode(WebSearchTool.self))\n    case \"mcp\":\n      self = try .mcp(singleValueContainer.decode(MCPTool.self))\n    case \"custom\":\n      self = try .custom(singleValueContainer.decode(CustomTool.self))\n    default:\n      throw DecodingError.dataCorruptedError(\n        forKey: .type,\n        in: container,\n        debugDescription: \"Unknown tool type: \\(type)\")\n    }\n  }\n\n  /// Specifies the comparison operator for filters\n  public enum ComparisonOperator: String, Codable {\n    /// Equals\n    case equals = \"eq\"\n\n    /// Not equal\n    case notEqual = \"ne\"\n\n    /// Greater than\n    case greaterThan = \"gt\"\n\n    /// Greater than or equal\n    case greaterThanOrEqual = \"gte\"\n\n    /// Less than\n    case lessThan = \"lt\"\n\n    /// Less than or equal\n    case lessThanOrEqual = \"lte\"\n  }\n\n  /// High level guidance for the amount of context window space to use for the search\n  public enum SearchContextSize: String, Codable {\n    /// Low context window space (fewer tokens)\n    case low\n\n    /// Medium context window space (default)\n    case medium\n\n    /// High context window space (more tokens)\n    case high\n  }\n\n  /// The type of the web search tool\n  public enum WebSearchType: Codable {\n    /// Standard web search\n    case webSearch\n\n    /// Updated web search (2025-08-26 version)\n    case webSearch20250826\n\n    /// Preview web search\n    case webSearchPreview\n\n    /// Preview web search (2025-03-11 version)\n    case webSearchPreview20250311\n\n    /// Custom search type for future compatibility\n    case custom(String)\n\n    public init(from decoder: Decoder) throws 
{\n      let container = try decoder.singleValueContainer()\n      let value = try container.decode(String.self)\n\n      switch value {\n      case \"web_search\":\n        self = .webSearch\n      case \"web_search_2025_08_26\":\n        self = .webSearch20250826\n      case \"web_search_preview\":\n        self = .webSearchPreview\n      case \"web_search_preview_2025_03_11\":\n        self = .webSearchPreview20250311\n      default:\n        self = .custom(value)\n      }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n\n      switch self {\n      case .webSearch:\n        try container.encode(\"web_search\")\n      case .webSearch20250826:\n        try container.encode(\"web_search_2025_08_26\")\n      case .webSearchPreview:\n        try container.encode(\"web_search_preview\")\n      case .webSearchPreview20250311:\n        try container.encode(\"web_search_preview_2025_03_11\")\n      case .custom(let value):\n        try container.encode(value)\n      }\n    }\n  }\n\n  /// A tool that searches for relevant content from uploaded files\n  public struct FileSearchTool: Codable {\n    public init(\n      vectorStoreIds: [String],\n      filters: FileSearchFilter? = nil,\n      maxNumResults: Int? = nil,\n      rankingOptions: RankingOptions? = nil)\n    {\n      self.vectorStoreIds = vectorStoreIds\n      self.filters = filters\n      self.maxNumResults = maxNumResults\n      self.rankingOptions = rankingOptions\n    }\n\n    /// The type of the file search tool. Always file_search.\n    public let type = \"file_search\"\n\n    /// The IDs of the vector stores to search.\n    public let vectorStoreIds: [String]\n\n    /// A filter to apply based on file attributes.\n    public let filters: FileSearchFilter?\n\n    /// The maximum number of results to return. 
This number should be between 1 and 50 inclusive.\n    public let maxNumResults: Int?\n\n    /// Ranking options for search.\n    public let rankingOptions: RankingOptions?\n\n    enum CodingKeys: String, CodingKey {\n      case type\n      case vectorStoreIds = \"vector_store_ids\"\n      case filters\n      case maxNumResults = \"max_num_results\"\n      case rankingOptions = \"ranking_options\"\n    }\n  }\n\n  /// Filter for file search\n  public enum FileSearchFilter: Codable {\n    /// A filter used to compare a specified attribute key to a given value\n    case comparison(ComparisonFilter)\n\n    /// Combine multiple filters using and or or\n    case compound(CompoundFilter)\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.container(keyedBy: CodingKeys.self)\n      if let type = try? container.decode(String.self, forKey: .type) {\n        if type == \"and\" || type == \"or\" {\n          self = try .compound(decoder.singleValueContainer().decode(CompoundFilter.self))\n        } else {\n          self = try .comparison(decoder.singleValueContainer().decode(ComparisonFilter.self))\n        }\n      } else {\n        throw DecodingError.keyNotFound(\n          CodingKeys.type,\n          .init(\n            codingPath: container.codingPath,\n            debugDescription: \"Type key missing for FileSearchFilter\"))\n      }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n\n      switch self {\n      case .comparison(let filter):\n        try container.encode(filter)\n      case .compound(let filter):\n        try container.encode(filter)\n      }\n    }\n\n    private enum CodingKeys: String, CodingKey {\n      case type\n    }\n  }\n\n  /// A filter used to compare a specified attribute key to a given value\n  public struct ComparisonFilter: Codable {\n    public init(key: String, type: ComparisonOperator, value: FilterValue) {\n      self.key = key\n      
self.type = type\n      self.value = value\n    }\n\n    /// The key to compare against the value\n    public let key: String\n\n    /// Specifies the comparison operator\n    public let type: ComparisonOperator\n\n    /// The value to compare against the attribute key; supports string, number, or boolean types\n    public let value: FilterValue\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.container(keyedBy: CodingKeys.self)\n      try container.encode(key, forKey: .key)\n      try container.encode(type.rawValue, forKey: .type)\n      try value.encode(to: encoder)\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case key\n      case type\n      case value\n    }\n  }\n\n  /// Filter value type (string, number, or boolean)\n  public enum FilterValue: Codable {\n    case string(String)\n    case number(Double)\n    case boolean(Bool)\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.singleValueContainer()\n\n      if let stringValue = try? container.decode(String.self) {\n        self = .string(stringValue)\n      } else if let numberValue = try? container.decode(Double.self) {\n        self = .number(numberValue)\n      } else if let boolValue = try? 
container.decode(Bool.self) {\n        self = .boolean(boolValue)\n      } else {\n        throw DecodingError.dataCorruptedError(\n          in: container,\n          debugDescription: \"Expected string, number, or boolean for filter value\")\n      }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n\n      switch self {\n      case .string(let value):\n        try container.encode(value)\n      case .number(let value):\n        try container.encode(value)\n      case .boolean(let value):\n        try container.encode(value)\n      }\n    }\n  }\n\n  /// Combine multiple filters using and or or\n  public struct CompoundFilter: Codable {\n    /// Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.\n    public let filters: [FileSearchFilter]\n\n    /// Type of operation: and or or\n    public let type: String\n\n    public init(filters: [FileSearchFilter], type: String) {\n      self.filters = filters\n      self.type = type\n    }\n  }\n\n  /// The code interpreter container. Can be a container ID or an object that specifies uploaded file IDs to make available to your code.\n  public enum CodeInterpreterContainer: Codable {\n    /// The container ID\n    case id(String)\n\n    /// Configuration for a code interpreter container. Optionally specify the IDs of the files to run the code on.\n    case auto(fileIds: [String]?)\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.singleValueContainer()\n\n      if let containerId = try? container.decode(String.self) {\n        self = .id(containerId)\n      } else if let autoContainer = try? 
container.decode(AutoContainer.self) {\n        self = .auto(fileIds: autoContainer.fileIds)\n      } else {\n        throw DecodingError.dataCorruptedError(\n          in: container,\n          debugDescription: \"Expected string container ID or auto container object\")\n      }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n\n      switch self {\n      case .id(let containerId):\n        try container.encode(containerId)\n      case .auto(let fileIds):\n        try container.encode(AutoContainer(fileIds: fileIds))\n      }\n    }\n\n    /// Configuration for a code interpreter container\n    private struct AutoContainer: Codable {\n      /// Always auto\n      let type = \"auto\"\n\n      /// An optional list of uploaded files to make available to your code\n      let fileIds: [String]?\n\n      init(fileIds: [String]?) {\n        self.fileIds = fileIds\n      }\n\n      enum CodingKeys: String, CodingKey {\n        case type\n        case fileIds = \"file_ids\"\n      }\n    }\n  }\n\n  /// Optional mask for inpainting\n  public struct InputImageMask: Codable {\n    public init(fileId: String? = nil, imageUrl: String? = nil) {\n      self.fileId = fileId\n      self.imageUrl = imageUrl\n    }\n\n    /// File ID for the mask image\n    public let fileId: String?\n\n    /// Base64-encoded mask image\n    public let imageUrl: String?\n\n    enum CodingKeys: String, CodingKey {\n      case fileId = \"file_id\"\n      case imageUrl = \"image_url\"\n    }\n  }\n\n  /// Ranking options for search\n  public struct RankingOptions: Codable {\n    /// The ranker to use for the file search. Defaults to auto\n    public let ranker: String?\n\n    /// The score threshold for the file search, a number between 0 and 1.\n    /// Numbers closer to 1 will attempt to return only the most relevant results,\n    /// but may return fewer results. 
Defaults to 0\n    public let scoreThreshold: Double?\n\n    public init(ranker: String? = nil, scoreThreshold: Double? = nil) {\n      self.ranker = ranker\n      self.scoreThreshold = scoreThreshold\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case ranker\n      case scoreThreshold = \"score_threshold\"\n    }\n  }\n\n  /// Defines a function in your own code the model can choose to call\n  public struct FunctionTool: Codable {\n    public init(\n      name: String,\n      parameters: JSONSchema,\n      strict: Bool? = nil,\n      description: String? = nil)\n    {\n      self.name = name\n      self.parameters = parameters\n      self.strict = strict\n      self.description = description\n    }\n\n    /// The name of the function to call\n    public let name: String\n\n    /// A JSON schema object describing the parameters of the function\n    public let parameters: JSONSchema\n\n    /// Whether to enforce strict parameter validation. Default true\n    public let strict: Bool?\n\n    /// The type of the function tool. Always function\n    public let type = \"function\"\n\n    /// A description of the function. Used by the model to determine whether or not to call the function\n    public let description: String?\n\n    enum CodingKeys: String, CodingKey {\n      case name\n      case parameters\n      case strict\n      case type\n      case description\n    }\n  }\n\n  /// A tool that runs Python code to help generate a response to a prompt\n  public struct CodeInterpreterTool: Codable {\n    public init(container: CodeInterpreterContainer) {\n      self.container = container\n    }\n\n    /// The code interpreter container. Can be a container ID or an object that specifies uploaded file IDs to make available to your code.\n    public let container: CodeInterpreterContainer\n\n    /// The type of the code interpreter tool. 
Always code_interpreter\n    public let type = \"code_interpreter\"\n\n    enum CodingKeys: String, CodingKey {\n      case container\n      case type\n    }\n  }\n\n  /// A tool that generates images using a model like gpt-image-1\n  public struct ImageGenerationTool: Codable {\n    public init(\n      background: String? = nil,\n      inputFidelity: String? = nil,\n      inputImageMask: InputImageMask? = nil,\n      model: String? = nil,\n      moderation: String? = nil,\n      outputCompression: Int? = nil,\n      outputFormat: String? = nil,\n      partialImages: Int? = nil,\n      quality: String? = nil,\n      size: String? = nil)\n    {\n      self.background = background\n      self.inputFidelity = inputFidelity\n      self.inputImageMask = inputImageMask\n      self.model = model\n      self.moderation = moderation\n      self.outputCompression = outputCompression\n      self.outputFormat = outputFormat\n      self.partialImages = partialImages\n      self.quality = quality\n      self.size = size\n    }\n\n    /// The type of the image generation tool. Always image_generation\n    public let type = \"image_generation\"\n\n    /// Defaults to auto\n    /// Background type for the generated image. One of transparent, opaque, or auto. Default: auto.\n    public let background: String?\n\n    /// Defaults to low\n    /// Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for gpt-image-1. Supports high and low. Defaults to low.\n    public let inputFidelity: String?\n\n    /// Optional mask for inpainting. Contains image_url (string, optional) and file_id (string, optional).\n    public let inputImageMask: InputImageMask?\n\n    /// Defaults to gpt-image-1\n    /// The image generation model to use. Default: gpt-image-1.\n    public let model: String?\n\n    /// Defaults to auto\n    /// Moderation level for the generated image. 
Default: auto.\n    public let moderation: String?\n\n    /// Defaults to 100\n    /// Compression level for the output image. Default: 100.\n    public let outputCompression: Int?\n\n    /// Defaults to png\n    /// The output format of the generated image. One of png, webp, or jpeg. Default: png.\n    public let outputFormat: String?\n\n    /// Defaults to 0\n    /// Number of partial images to generate in streaming mode, from 0 (default value) to 3.\n    public let partialImages: Int?\n\n    /// Defaults to auto\n    /// The quality of the generated image. One of low, medium, high, or auto. Default: auto.\n    public let quality: String?\n\n    /// Defaults to auto\n    /// The size of the generated image. One of 1024x1024, 1024x1536, 1536x1024, or auto. Default: auto.\n    public let size: String?\n\n    enum CodingKeys: String, CodingKey {\n      case type\n      case background\n      case inputFidelity = \"input_fidelity\"\n      case inputImageMask = \"input_image_mask\"\n      case model\n      case moderation\n      case outputCompression = \"output_compression\"\n      case outputFormat = \"output_format\"\n      case partialImages = \"partial_images\"\n      case quality\n      case size\n    }\n  }\n\n  /// A tool that allows the model to execute shell commands in a local environment\n  public struct LocalShellTool: Codable {\n    public init() { }\n\n    /// The type of the local shell tool. 
Always local_shell\n    public let type = \"local_shell\"\n\n    enum CodingKeys: String, CodingKey {\n      case type\n    }\n  }\n\n  /// A tool that controls a virtual computer\n  public struct ComputerUseTool: Codable {\n    public init(\n      displayHeight: Int,\n      displayWidth: Int,\n      environment: String)\n    {\n      self.displayHeight = displayHeight\n      self.displayWidth = displayWidth\n      self.environment = environment\n    }\n\n    /// The height of the computer display\n    public let displayHeight: Int\n\n    /// The width of the computer display\n    public let displayWidth: Int\n\n    /// The type of computer environment to control\n    public let environment: String\n\n    /// The type of the computer use tool. Always computer_use_preview\n    public let type = \"computer_use_preview\"\n\n    enum CodingKeys: String, CodingKey {\n      case displayHeight = \"display_height\"\n      case displayWidth = \"display_width\"\n      case environment\n      case type\n    }\n  }\n\n  /// Filters for web search\n  public struct WebSearchFilters: Codable {\n    public init(allowedDomains: [String]? = nil) {\n      self.allowedDomains = allowedDomains\n    }\n\n    /// Defaults to []\n    /// Allowed domains for the search. If not provided, all domains are allowed. Subdomains of the provided domains are allowed as well.\n    /// Example: [\"pubmed.ncbi.nlm.nih.gov\"]\n    public let allowedDomains: [String]?\n\n    enum CodingKeys: String, CodingKey {\n      case allowedDomains = \"allowed_domains\"\n    }\n  }\n\n  /// This tool searches the web for relevant results to use in a response\n  public struct WebSearchTool: Codable {\n    public init(\n      type: WebSearchType,\n      filters: WebSearchFilters? = nil,\n      searchContextSize: SearchContextSize? = nil,\n      userLocation: UserLocation? 
= nil)\n    {\n      self.type = type\n      self.filters = filters\n      self.searchContextSize = searchContextSize\n      self.userLocation = userLocation\n    }\n\n    /// The type of the web search tool\n    public let type: WebSearchType\n\n    /// Filters for the search\n    public let filters: WebSearchFilters?\n\n    /// High level guidance for the amount of context window space to use for the search\n    public let searchContextSize: SearchContextSize?\n\n    /// Approximate location parameters for the search\n    public let userLocation: UserLocation?\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.container(keyedBy: CodingKeys.self)\n\n      // Special handling for type\n      switch type {\n      case .webSearch:\n        try container.encode(\"web_search\", forKey: .type)\n      case .webSearch20250826:\n        try container.encode(\"web_search_2025_08_26\", forKey: .type)\n      case .webSearchPreview:\n        try container.encode(\"web_search_preview\", forKey: .type)\n      case .webSearchPreview20250311:\n        try container.encode(\"web_search_preview_2025_03_11\", forKey: .type)\n      case .custom(let value):\n        try container.encode(value, forKey: .type)\n      }\n\n      try container.encodeIfPresent(filters, forKey: .filters)\n      try container.encodeIfPresent(searchContextSize, forKey: .searchContextSize)\n      try container.encodeIfPresent(userLocation, forKey: .userLocation)\n    }\n\n    enum CodingKeys: String, CodingKey {\n      case type\n      case filters\n      case searchContextSize = \"search_context_size\"\n      case userLocation = \"user_location\"\n    }\n  }\n\n  /// Unconstrained free-form text format\n  public struct TextFormat: Codable {\n    public init() { }\n\n    /// Unconstrained text format. 
Always text\n    public let type = \"text\"\n\n    enum CodingKeys: String, CodingKey {\n      case type\n    }\n  }\n\n  /// A grammar defined by the user\n  public struct GrammarFormat: Codable {\n    public init(definition: String, syntax: String) {\n      self.definition = definition\n      self.syntax = syntax\n    }\n\n    /// The grammar definition\n    public let definition: String\n\n    /// The syntax of the grammar definition. One of lark or regex\n    public let syntax: String\n\n    /// Grammar format. Always grammar\n    public let type = \"grammar\"\n\n    enum CodingKeys: String, CodingKey {\n      case definition\n      case syntax\n      case type\n    }\n  }\n\n  /// The input format for the custom tool\n  public enum ToolFormat: Codable {\n    /// Unconstrained free-form text\n    case text(TextFormat)\n\n    /// A grammar defined by the user\n    case grammar(GrammarFormat)\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.container(keyedBy: CodingKeys.self)\n      let type = try container.decode(String.self, forKey: .type)\n\n      let singleValueContainer = try decoder.singleValueContainer()\n\n      switch type {\n      case \"text\":\n        self = try .text(singleValueContainer.decode(TextFormat.self))\n      case \"grammar\":\n        self = try .grammar(singleValueContainer.decode(GrammarFormat.self))\n      default:\n        throw DecodingError.dataCorruptedError(\n          forKey: .type,\n          in: container,\n          debugDescription: \"Unknown format type: \\(type)\")\n      }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n\n      switch self {\n      case .text(let format):\n        try container.encode(format)\n      case .grammar(let format):\n        try container.encode(format)\n      }\n    }\n\n    private enum CodingKeys: String, CodingKey {\n      case type\n    }\n  }\n\n  /// A custom tool that processes input 
using a specified format\n  public struct CustomTool: Codable {\n    public init(\n      name: String,\n      description: String? = nil,\n      format: ToolFormat? = nil)\n    {\n      self.name = name\n      self.description = description\n      self.format = format\n    }\n\n    /// The name of the custom tool, used to identify it in tool calls\n    public let name: String\n\n    /// Optional description of the custom tool, used to provide more context\n    public let description: String?\n\n    /// The input format for the custom tool. Default is unconstrained text\n    public let format: ToolFormat?\n\n    /// The type of the custom tool. Always custom\n    public let type = \"custom\"\n\n    enum CodingKeys: String, CodingKey {\n      case name\n      case description\n      case format\n      case type\n    }\n  }\n\n  /// Identifier for service connectors\n  public enum ConnectorId: String, Codable {\n    /// Dropbox connector\n    case dropbox = \"connector_dropbox\"\n\n    /// Gmail connector\n    case gmail = \"connector_gmail\"\n\n    /// Google Calendar connector\n    case googleCalendar = \"connector_googlecalendar\"\n\n    /// Google Drive connector\n    case googleDrive = \"connector_googledrive\"\n\n    /// Microsoft Teams connector\n    case microsoftTeams = \"connector_microsoftteams\"\n\n    /// Outlook Calendar connector\n    case outlookCalendar = \"connector_outlookcalendar\"\n\n    /// Outlook Email connector\n    case outlookEmail = \"connector_outlookemail\"\n\n    /// SharePoint connector\n    case sharePoint = \"connector_sharepoint\"\n  }\n\n  /// A filter object to specify which tools are allowed\n  public struct MCPToolFilter: Codable {\n    public init(readOnly: Bool? = nil, toolNames: [String]? = nil) {\n      self.readOnly = readOnly\n      self.toolNames = toolNames\n    }\n\n    /// Indicates whether or not a tool modifies data or is read-only. 
If an MCP server is annotated with readOnlyHint, it will match this filter.\n    public let readOnly: Bool?\n\n    /// List of allowed tool names\n    public let toolNames: [String]?\n\n    enum CodingKeys: String, CodingKey {\n      case readOnly = \"read_only\"\n      case toolNames = \"tool_names\"\n    }\n  }\n\n  /// List of allowed tool names or a filter object\n  public enum AllowedTools: Codable {\n    /// A string array of allowed tool names\n    case toolNames([String])\n\n    /// A filter object to specify which tools are allowed\n    case filter(MCPToolFilter)\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.singleValueContainer()\n\n      if let toolNames = try? container.decode([String].self) {\n        self = .toolNames(toolNames)\n      } else if let filter = try? container.decode(MCPToolFilter.self) {\n        self = .filter(filter)\n      } else {\n        throw DecodingError.dataCorruptedError(\n          in: container,\n          debugDescription: \"Expected array of strings or MCPToolFilter object\")\n      }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n\n      switch self {\n      case .toolNames(let names):\n        try container.encode(names)\n      case .filter(let filter):\n        try container.encode(filter)\n      }\n    }\n  }\n\n  /// Approval filters for MCP tools\n  public struct ApprovalFilters: Codable {\n    public init(always: MCPToolFilter? = nil, never: MCPToolFilter? 
= nil) {\n      self.always = always\n      self.never = never\n    }\n\n    /// A filter object to specify which tools always require approval\n    public let always: MCPToolFilter?\n\n    /// A filter object to specify which tools never require approval\n    public let never: MCPToolFilter?\n  }\n\n  /// Specify which of the MCP server's tools require approval\n  public enum RequireApproval: Codable {\n    /// All tools require approval\n    case always\n\n    /// No tools require approval\n    case never\n\n    /// Specify which tools require approval using filters\n    case filters(ApprovalFilters)\n\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.singleValueContainer()\n\n      if let stringValue = try? container.decode(String.self) {\n        switch stringValue {\n        case \"always\":\n          self = .always\n        case \"never\":\n          self = .never\n        default:\n          throw DecodingError.dataCorruptedError(\n            in: container,\n            debugDescription: \"Unknown require_approval value: \\(stringValue)\")\n        }\n      } else if let filters = try? container.decode(ApprovalFilters.self) {\n        self = .filters(filters)\n      } else {\n        throw DecodingError.dataCorruptedError(\n          in: container,\n          debugDescription: \"Expected 'always', 'never', or ApprovalFilters object\")\n      }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n      var container = encoder.singleValueContainer()\n\n      switch self {\n      case .always:\n        try container.encode(\"always\")\n      case .never:\n        try container.encode(\"never\")\n      case .filters(let filters):\n        try container.encode(filters)\n      }\n    }\n  }\n\n  /// Give the model access to additional tools via remote Model Context Protocol (MCP) servers\n  public struct MCPTool: Codable {\n    public init(\n      serverLabel: String,\n      allowedTools: AllowedTools? 
= nil,\n      authorization: String? = nil,\n      connectorId: ConnectorId? = nil,\n      headers: [String: String]? = nil,\n      requireApproval: RequireApproval? = nil,\n      serverDescription: String? = nil,\n      serverUrl: String? = nil)\n    {\n      self.serverLabel = serverLabel\n      self.allowedTools = allowedTools\n      self.authorization = authorization\n      self.connectorId = connectorId\n      self.headers = headers\n      self.requireApproval = requireApproval\n      self.serverDescription = serverDescription\n      self.serverUrl = serverUrl\n    }\n\n    /// A label for this MCP server, used to identify it in tool calls\n    public let serverLabel: String\n\n    /// The type of the MCP tool. Always mcp\n    public let type = \"mcp\"\n\n    /// List of allowed tool names or a filter object\n    public let allowedTools: AllowedTools?\n\n    /// An OAuth access token that can be used with a remote MCP server, either with a custom MCP server URL or a service connector\n    public let authorization: String?\n\n    /// Identifier for service connectors. One of server_url or connector_id must be provided\n    public let connectorId: ConnectorId?\n\n    /// Optional HTTP headers to send to the MCP server. Use for authentication or other purposes\n    public let headers: [String: String]?\n\n    /// Defaults to always\n    /// Specify which of the MCP server's tools require approval\n    public let requireApproval: RequireApproval?\n\n    /// Optional description of the MCP server, used to provide more context\n    public let serverDescription: String?\n\n    /// The URL for the MCP server. 
One of server_url or connector_id must be provided\n    public let serverUrl: String?\n\n    enum CodingKeys: String, CodingKey {\n      case serverLabel = \"server_label\"\n      case type\n      case allowedTools = \"allowed_tools\"\n      case authorization\n      case connectorId = \"connector_id\"\n      case headers\n      case requireApproval = \"require_approval\"\n      case serverDescription = \"server_description\"\n      case serverUrl = \"server_url\"\n    }\n  }\n\n  /// Approximate location parameters for the search\n  public struct UserLocation: Codable {\n    public init(\n      city: String? = nil,\n      country: String? = nil,\n      region: String? = nil,\n      timezone: String? = nil)\n    {\n      self.city = city\n      self.country = country\n      self.region = region\n      self.timezone = timezone\n    }\n\n    /// Custom decoder implementation to handle the constant \"type\" property\n    public init(from decoder: Decoder) throws {\n      let container = try decoder.container(keyedBy: CodingKeys.self)\n\n      // We can optionally verify the type value matches what we expect\n      let decodedType = try container.decodeIfPresent(String.self, forKey: .type)\n      if let decodedType, decodedType != \"approximate\" {\n        // You can choose to throw an error here or just log a warning\n        print(\"Warning: Expected UserLocation type to be 'approximate', but got '\\(decodedType)'\")\n      }\n\n      // Decode the optional properties\n      city = try container.decodeIfPresent(String.self, forKey: .city)\n      country = try container.decodeIfPresent(String.self, forKey: .country)\n      region = try container.decodeIfPresent(String.self, forKey: .region)\n      timezone = try container.decodeIfPresent(String.self, forKey: .timezone)\n    }\n\n    /// The type of location approximation. Always approximate\n    public let type = \"approximate\"\n\n    /// Free text input for the city of the user, e.g. 
San Francisco\n    public let city: String?\n\n    /// The two-letter ISO country code of the user, e.g. US\n    public let country: String?\n\n    /// Free text input for the region of the user, e.g. California\n    public let region: String?\n\n    /// The IANA timezone of the user, e.g. America/Los_Angeles\n    public let timezone: String?\n\n    enum CodingKeys: String, CodingKey {\n      case type\n      case city\n      case country\n      case region\n      case timezone\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n\n    switch self {\n    case .fileSearch(let tool):\n      try container.encode(tool)\n    case .function(let tool):\n      try container.encode(tool)\n    case .codeInterpreter(let tool):\n      try container.encode(tool)\n    case .imageGeneration(let tool):\n      try container.encode(tool)\n    case .localShell(let tool):\n      try container.encode(tool)\n    case .computerUse(let tool):\n      try container.encode(tool)\n    case .webSearch(let tool):\n      try container.encode(tool)\n    case .mcp(let tool):\n      try container.encode(tool)\n    case .custom(let tool):\n      try container.encode(tool)\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/ToolCall.swift",
    "content": "//\n//  ToolCall.swift\n//\n//\n//  Created by James Rochabrun on 11/14/23.\n//\n\nimport Foundation\n\n// MARK: - ToolCall\n\npublic struct ToolCall: Codable {\n  public init(\n    index: Int? = nil,\n    id: String?,\n    type: String = \"function\",\n    function: FunctionCall)\n  {\n    self.index = index\n    self.id = id\n    self.type = type\n    self.function = function\n  }\n\n  public let index: Int?\n  /// The ID of the tool call.\n  public let id: String?\n  /// The type of the tool. Currently, only `function` is supported.\n  public let type: String?\n  /// The function that the model called.\n  public let function: FunctionCall\n}\n\n// MARK: - FunctionCall\n\npublic struct FunctionCall: Codable {\n  /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.\n  public let arguments: String\n  /// The name of the function to call.\n  public let name: String?\n\n  public init(\n    arguments: String,\n    name: String)\n  {\n    self.arguments = arguments\n    self.name = name\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/ToolChoice.swift",
    "content": "//\n//  ToolChoice.swift\n//\n//\n//  Created by James Rochabrun on 4/13/24.\n//\n\nimport Foundation\n\n/// string `none` means the model will not call a function and instead generates a message.\n///\n/// `auto` means the model can pick between generating a message or calling a function.\n///\n/// `object` Specifies a tool the model should use. Use to force the model to call a specific function. The type of the tool. Currently, only` function` is supported. `{\"type: \"function\", \"function\": {\"name\": \"my_function\"}}`\n///\n/// `required` To force the model to always call one or more functions, you can set tool_choice: \"required\". The model will then select which function(s) to call.\n///\n/// [Function Calling](https://platform.openai.com/docs/guides/function-calling)\npublic enum ToolChoice: Codable, Equatable {\n  case none\n  case auto\n  case required\n  case function(type: String = \"function\", name: String)\n\n  public init(from decoder: Decoder) throws {\n    // Handle the 'function' case:\n    if\n      let container = try? decoder.container(keyedBy: CodingKeys.self),\n      let functionContainer = try? 
container.nestedContainer(keyedBy: FunctionCodingKeys.self, forKey: .function)\n    {\n      let name = try functionContainer.decode(String.self, forKey: .name)\n      self = .function(type: \"function\", name: name)\n      return\n    }\n\n    // Handle the 'auto' and 'none' cases\n    let container = try decoder.singleValueContainer()\n    switch try container.decode(String.self) {\n    case \"none\":\n      self = .none\n    case \"auto\":\n      self = .auto\n    case \"required\":\n      self = .required\n    default:\n      throw DecodingError.dataCorruptedError(in: container, debugDescription: \"Invalid tool_choice structure\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    switch self {\n    case .none:\n      var container = encoder.singleValueContainer()\n      try container.encode(CodingKeys.none.rawValue)\n\n    case .auto:\n      var container = encoder.singleValueContainer()\n      try container.encode(CodingKeys.auto.rawValue)\n\n    case .required:\n      var container = encoder.singleValueContainer()\n      try container.encode(CodingKeys.required.rawValue)\n\n    case .function(let type, let name):\n      var container = encoder.container(keyedBy: CodingKeys.self)\n      try container.encode(type, forKey: .type)\n      var functionContainer = container.nestedContainer(keyedBy: FunctionCodingKeys.self, forKey: .function)\n      try functionContainer.encode(name, forKey: .name)\n    }\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case none\n    case auto\n    case required\n    case type\n    case function\n  }\n\n  enum FunctionCodingKeys: String, CodingKey {\n    case name\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/ToolChoiceMode.swift",
    "content": "//\n//  ToolChoiceMode.swift\n//  SwiftOpenAI\n//\n//  Created by James Rochabrun on 3/15/25.\n//\n\nimport Foundation\n\n// MARK: - ToolChoiceMode\n\n/// Controls which (if any) tool is called by the model.\npublic enum ToolChoiceMode: Codable {\n  /// Means the model will not call any tool and instead generates a message.\n  case none\n\n  /// Means the model can pick between generating a message or calling one or more tools.\n  case auto\n\n  /// Means the model must call one or more tools.\n  case required\n\n  /// Indicates that the model should use a built-in tool to generate a response.\n  case hostedTool(HostedToolType)\n\n  /// Use this option to force the model to call a specific function.\n  case functionTool(FunctionTool)\n\n  /// Use this option to force the model to call a specific custom tool.\n  case customTool(CustomToolChoice)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.singleValueContainer()\n\n    if let stringValue = try? container.decode(String.self) {\n      switch stringValue {\n      case \"none\":\n        self = .none\n      case \"auto\":\n        self = .auto\n      case \"required\":\n        self = .required\n      default:\n        throw DecodingError.dataCorruptedError(\n          in: container,\n          debugDescription: \"Unknown tool choice string value: \\(stringValue)\")\n      }\n    } else if let hostedTool = try? container.decode(HostedToolType.self) {\n      self = .hostedTool(hostedTool)\n    } else if let functionTool = try? container.decode(FunctionTool.self) {\n      self = .functionTool(functionTool)\n    } else if let customTool = try? 
container.decode(CustomToolChoice.self) {\n      self = .customTool(customTool)\n    } else {\n      throw DecodingError.dataCorruptedError(\n        in: container,\n        debugDescription: \"Invalid tool choice value\")\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.singleValueContainer()\n\n    switch self {\n    case .none:\n      try container.encode(\"none\")\n    case .auto:\n      try container.encode(\"auto\")\n    case .required:\n      try container.encode(\"required\")\n    case .hostedTool(let toolType):\n      try container.encode(toolType)\n    case .functionTool(let tool):\n      try container.encode(tool)\n    case .customTool(let tool):\n      try container.encode(tool)\n    }\n  }\n}\n\n// MARK: - HostedToolType\n\n/// Hosted tool type enum\npublic enum HostedToolType: Codable {\n  /// File search tool\n  case fileSearch\n\n  /// Web search tool\n  case webSearch\n\n  /// Computer use tool\n  case computerUsePreview\n\n  /// Custom tool type for future compatibility\n  case custom(String)\n\n  public init(from decoder: Decoder) throws {\n    let container = try decoder.container(keyedBy: CodingKeys.self)\n    let type = try container.decode(String.self, forKey: .type)\n\n    switch type {\n    case \"file_search\":\n      self = .fileSearch\n    case \"web_search\", \"web_search_2025_08_26\":\n      self = .webSearch\n    case \"computer_use_preview\":\n      self = .computerUsePreview\n    default:\n      self = .custom(type)\n    }\n  }\n\n  public func encode(to encoder: Encoder) throws {\n    var container = encoder.container(keyedBy: CodingKeys.self)\n\n    switch self {\n    case .fileSearch:\n      try container.encode(\"file_search\", forKey: .type)\n    case .webSearch:\n      try container.encode(\"web_search\", forKey: .type)\n    case .computerUsePreview:\n      try container.encode(\"computer_use_preview\", forKey: .type)\n    case .custom(let value):\n      try container.encode(value, 
forKey: .type)\n    }\n  }\n\n  private enum CodingKeys: String, CodingKey {\n    case type\n  }\n}\n\n// MARK: - FunctionTool\n\n/// Function tool specification\npublic struct FunctionTool: Codable {\n  /// The name of the function to call\n  public var name: String\n\n  /// For function calling, the type is always function\n  public var type = \"function\"\n\n  public init(name: String) {\n    self.name = name\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case name\n    case type\n  }\n}\n\n// MARK: - CustomToolChoice\n\n/// Custom tool choice specification\npublic struct CustomToolChoice: Codable {\n  /// The name of the custom tool to call\n  public var name: String\n\n  /// For custom tool calling, the type is always custom\n  public var type = \"custom\"\n\n  public init(name: String) {\n    self.name = name\n  }\n\n  enum CodingKeys: String, CodingKey {\n    case name\n    case type\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/ToolResources.swift",
    "content": "//\n//  ToolResources.swift\n//\n//\n//  Created by James Rochabrun on 4/25/24.\n//\n\nimport Foundation\n\n/// tool_resources\n/// object or null\n///\n/// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.\n///\n/// code_interpreter\n/// properties\n/// file_ids (array)\n/// A list of file IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.\n///\n/// file_search\n/// properties\n/// vector_store_id (array)\n/// The ID of the vector store attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n\n// MARK: ToolResources\n\npublic struct ToolResources: Codable {\n  // MARK: ToolResources+Initializer\n\n  public init(\n    fileSearch: FileSearch? = nil,\n    codeInterpreter: CodeInterpreter? = nil)\n  {\n    self.fileSearch = fileSearch\n    self.codeInterpreter = codeInterpreter\n  }\n\n  // MARK: FileSearch\n\n  public struct FileSearch: Codable {\n    public init(\n      vectorStoreIds: [String]?,\n      vectorStores: [VectorStore]?)\n    {\n      self.vectorStoreIds = vectorStoreIds\n      self.vectorStores = vectorStores\n    }\n\n    public struct VectorStore: Codable {\n      public init(\n        fileIDS: [String]?,\n        chunkingStrategy: ChunkingStrategy?,\n        metadata: [String: String]?)\n      {\n        self.fileIDS = fileIDS\n        self.chunkingStrategy = chunkingStrategy\n        self.metadata = metadata\n      }\n\n      public enum ChunkingStrategy: Codable {\n        case auto\n\n        /// `maxChunkSizeTokens`: The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100 and the maximum value is 4096.\n        /// `chunk_overlap_tokens`: The number of tokens that overlap between chunks. 
The default value is 400.\n        /// Note that the overlap must not exceed half of max_chunk_size_tokens.\n        case `static`(maxChunkSizeTokens: Int, chunkOverlapTokens: Int)\n\n        public init(from decoder: Decoder) throws {\n          let container = try decoder.container(keyedBy: CodingKeys.self)\n          let type = try container.decode(String.self, forKey: .type)\n          switch type {\n          case \"auto\":\n            self = .auto\n\n          case \"static\":\n            let maxChunkSizeTokens = try container.decode(Int.self, forKey: .maxChunkSizeTokens)\n            let chunkOverlapTokens = try container.decode(Int.self, forKey: .chunkOverlapTokens)\n            self = .static(maxChunkSizeTokens: maxChunkSizeTokens, chunkOverlapTokens: chunkOverlapTokens)\n\n          default:\n            throw DecodingError.dataCorruptedError(forKey: CodingKeys.type, in: container, debugDescription: \"Invalid type value\")\n          }\n        }\n\n        public func encode(to encoder: Encoder) throws {\n          var container = encoder.container(keyedBy: CodingKeys.self)\n          switch self {\n          case .auto:\n            try container.encode(\"auto\", forKey: .type)\n          case .static(let maxChunkSizeTokens, let chunkOverlapTokens):\n            try container.encode(\"static\", forKey: .type)\n            try container.encode(maxChunkSizeTokens, forKey: .maxChunkSizeTokens)\n            try container.encode(chunkOverlapTokens, forKey: .chunkOverlapTokens)\n          }\n        }\n\n        enum CodingKeys: String, CodingKey {\n          case type\n          case maxChunkSizeTokens = \"max_chunk_size_tokens\"\n          case chunkOverlapTokens = \"chunk_overlap_tokens\"\n        }\n      }\n\n      /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. 
There can be a maximum of 10000 files in a vector store.\n      public let fileIDS: [String]?\n      /// The chunking strategy used to chunk the file(s). If not set, will use the auto strategy.\n      public let chunkingStrategy: ChunkingStrategy?\n      /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n      public let metadata: [String: String]?\n\n      enum CodingKeys: String, CodingKey {\n        case fileIDS = \"file_ids\"\n        case chunkingStrategy = \"chunking_strategy\"\n        case metadata\n      }\n    }\n\n    /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n    public let vectorStoreIds: [String]?\n\n    /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n    public let vectorStores: [VectorStore]?\n\n    enum CodingKeys: String, CodingKey {\n      case vectorStoreIds = \"vector_store_ids\"\n      case vectorStores = \"vector_stores\"\n    }\n  }\n\n  // MARK: CodeInterpreter\n\n  public struct CodeInterpreter: Codable {\n    public let fileIds: [String]\n\n    enum CodingKeys: String, CodingKey {\n      case fileIds = \"file_ids\"\n    }\n\n    public init(fileIds: [String]) {\n      self.fileIds = fileIds\n    }\n  }\n\n  public let fileSearch: FileSearch?\n  public let codeInterpreter: CodeInterpreter?\n\n  enum CodingKeys: String, CodingKey {\n    case fileSearch = \"file_search\"\n    case codeInterpreter = \"code_interpreter\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/TruncationStrategy.swift",
    "content": "//\n//  TruncationStrategy.swift\n//\n//\n//  Created by James Rochabrun on 4/13/24.\n//\n\nimport Foundation\n\npublic struct TruncationStrategy: Codable {\n  /// The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.\n  let type: String\n  /// The number of most recent messages from the thread when constructing the context for the run.\n  let lastMessage: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case type\n    case lastMessage = \"last_messages\"\n  }\n}\n"
  },
  {
    "path": "Sources/OpenAI/Public/Shared/Usage.swift",
    "content": "//\n//  Usage.swift\n//\n//\n//  Created by James Rochabrun on 4/13/24.\n//\n\nimport Foundation\n\n/// Represents token usage details including input tokens, output tokens, a breakdown of output tokens, and the total tokens used.\npublic struct Usage: Codable {\n  /// Details about input tokens\n  public struct InputTokensDetails: Codable {\n    /// Number of cached tokens\n    public let cachedTokens: Int?\n\n    enum CodingKeys: String, CodingKey {\n      case cachedTokens = \"cached_tokens\"\n    }\n  }\n\n  /// A detailed breakdown of the output tokens.\n  public struct OutputTokensDetails: Codable {\n    /// The number of reasoning tokens.\n    public let reasoningTokens: Int?\n\n    enum CodingKeys: String, CodingKey {\n      case reasoningTokens = \"reasoning_tokens\"\n    }\n  }\n\n  /// Number of completion tokens used over the course of the run step.\n  public let completionTokens: Int?\n\n  /// Number of prompt tokens used over the course of the run step.\n  public let promptTokens: Int?\n\n  /// The number of input tokens.\n  public let inputTokens: Int?\n\n  /// Details about input tokens\n  public let inputTokensDetails: InputTokensDetails?\n\n  /// The number of output tokens.\n  public let outputTokens: Int?\n\n  /// A detailed breakdown of the output tokens.\n  public let outputTokensDetails: OutputTokensDetails?\n\n  /// The total number of tokens used.\n  public let totalTokens: Int?\n\n  enum CodingKeys: String, CodingKey {\n    case completionTokens = \"completion_tokens\"\n    case promptTokens = \"prompt_tokens\"\n    case inputTokens = \"input_tokens\"\n    case inputTokensDetails = \"input_tokens_details\"\n    case outputTokens = \"output_tokens\"\n    case outputTokensDetails = \"output_tokens_details\"\n    case totalTokens = \"total_tokens\"\n  }\n}\n"
  },
  {
    "path": "Tests/OpenAITests/InputTypeTests.swift",
    "content": "import XCTest\n@testable import SwiftOpenAI\n\nfinal class InputTypeTests: XCTestCase {\n  // MARK: - Basic InputType Tests\n\n  func testStringInput() throws {\n    // Test simple string input\n    let json = \"\"\"\n      \"Tell me a story about a unicorn\"\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let input = try decoder.decode(InputType.self, from: json.data(using: .utf8)!)\n\n    switch input {\n    case .string(let text):\n      XCTAssertEqual(text, \"Tell me a story about a unicorn\")\n    case .array:\n      XCTFail(\"Expected string input, got array\")\n    }\n\n    // Test encoding\n    let encoder = JSONEncoder()\n    let encoded = try encoder.encode(input)\n    let decodedString = String(data: encoded, encoding: .utf8)\n    XCTAssertEqual(decodedString, \"\\\"Tell me a story about a unicorn\\\"\")\n  }\n\n  func testArrayInputWithMessage() throws {\n    // Test array input with message\n    let json = \"\"\"\n      [\n        {\n          \"type\": \"message\",\n          \"role\": \"user\",\n          \"content\": \"Hello, how are you?\"\n        }\n      ]\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let input = try decoder.decode(InputType.self, from: json.data(using: .utf8)!)\n\n    switch input {\n    case .string:\n      XCTFail(\"Expected array input, got string\")\n    case .array(let items):\n      XCTAssertEqual(items.count, 1)\n      if case .message(let message) = items[0] {\n        XCTAssertEqual(message.role, \"user\")\n        if case .text(let text) = message.content {\n          XCTAssertEqual(text, \"Hello, how are you?\")\n        } else {\n          XCTFail(\"Expected text content\")\n        }\n      } else {\n        XCTFail(\"Expected message input item\")\n      }\n    }\n  }\n\n  // MARK: - Message Content Tests\n\n  func testMessageWithStringContent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"message\",\n        \"role\": \"user\",\n        \"content\": \"What is the 
weather today?\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let item = try decoder.decode(InputItem.self, from: json.data(using: .utf8)!)\n\n    if case .message(let message) = item {\n      XCTAssertEqual(message.role, \"user\")\n      if case .text(let text) = message.content {\n        XCTAssertEqual(text, \"What is the weather today?\")\n      } else {\n        XCTFail(\"Expected text content\")\n      }\n    } else {\n      XCTFail(\"Expected message item\")\n    }\n  }\n\n  func testMessageWithArrayContent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"message\",\n        \"role\": \"user\",\n        \"content\": [\n          {\n            \"type\": \"input_text\",\n            \"text\": \"Describe this image:\"\n          },\n          {\n            \"type\": \"input_image\",\n            \"detail\": \"high\",\n            \"image_url\": \"https://example.com/image.jpg\"\n          }\n        ]\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let item = try decoder.decode(InputItem.self, from: json.data(using: .utf8)!)\n\n    if case .message(let message) = item {\n      XCTAssertEqual(message.role, \"user\")\n      if case .array(let contentItems) = message.content {\n        XCTAssertEqual(contentItems.count, 2)\n\n        // Check text content\n        if case .text(let textContent) = contentItems[0] {\n          XCTAssertEqual(textContent.text, \"Describe this image:\")\n        } else {\n          XCTFail(\"Expected text content\")\n        }\n\n        // Check image content\n        if case .image(let imageContent) = contentItems[1] {\n          XCTAssertEqual(imageContent.detail, \"high\")\n          XCTAssertEqual(imageContent.imageUrl, \"https://example.com/image.jpg\")\n        } else {\n          XCTFail(\"Expected image content\")\n        }\n      } else {\n        XCTFail(\"Expected array content\")\n      }\n    } else {\n      XCTFail(\"Expected message item\")\n    }\n  }\n\n  // MARK: - 
Content Item Tests\n\n  func testInputTextContent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"input_text\",\n        \"text\": \"Hello, world!\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let content = try decoder.decode(ContentItem.self, from: json.data(using: .utf8)!)\n\n    if case .text(let textContent) = content {\n      XCTAssertEqual(textContent.text, \"Hello, world!\")\n      XCTAssertEqual(textContent.type, \"input_text\")\n    } else {\n      XCTFail(\"Expected text content\")\n    }\n  }\n\n  func testInputImageContent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"input_image\",\n        \"detail\": \"auto\",\n        \"file_id\": \"file-123\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let content = try decoder.decode(ContentItem.self, from: json.data(using: .utf8)!)\n\n    if case .image(let imageContent) = content {\n      XCTAssertEqual(imageContent.detail, \"auto\")\n      XCTAssertEqual(imageContent.fileId, \"file-123\")\n      XCTAssertNil(imageContent.imageUrl)\n    } else {\n      XCTFail(\"Expected image content\")\n    }\n  }\n\n  func testInputFileContent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"input_file\",\n        \"file_id\": \"file-456\",\n        \"filename\": \"document.pdf\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let content = try decoder.decode(ContentItem.self, from: json.data(using: .utf8)!)\n\n    if case .file(let fileContent) = content {\n      XCTAssertEqual(fileContent.fileId, \"file-456\")\n      XCTAssertEqual(fileContent.filename, \"document.pdf\")\n      XCTAssertNil(fileContent.fileData)\n    } else {\n      XCTFail(\"Expected file content\")\n    }\n  }\n\n  func testOutputTextContent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"output_text\",\n        \"text\": \"The weather today is sunny with a high of 75°F.\",\n        \"annotations\": []\n      }\n      \"\"\"\n\n    let 
decoder = JSONDecoder()\n    let content = try decoder.decode(ContentItem.self, from: json.data(using: .utf8)!)\n\n    if case .outputText(let outputContent) = content {\n      XCTAssertEqual(outputContent.text, \"The weather today is sunny with a high of 75°F.\")\n      XCTAssertEqual(outputContent.type, \"output_text\")\n    } else {\n      XCTFail(\"Expected output text content\")\n    }\n  }\n\n  func testRefusalContent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"refusal\",\n        \"refusal\": \"I cannot help with that request.\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let content = try decoder.decode(ContentItem.self, from: json.data(using: .utf8)!)\n\n    if case .refusal(let refusalContent) = content {\n      XCTAssertEqual(refusalContent.refusal, \"I cannot help with that request.\")\n      XCTAssertEqual(refusalContent.type, \"refusal\")\n    } else {\n      XCTFail(\"Expected refusal content\")\n    }\n  }\n\n  // MARK: - Tool Call Tests\n\n  func testFunctionToolCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"function_call\",\n        \"id\": \"fc_123\",\n        \"call_id\": \"call_abc\",\n        \"name\": \"get_weather\",\n        \"arguments\": \"{\\\\\"location\\\\\": \\\\\"San Francisco\\\\\"}\",\n        \"status\": \"completed\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let item = try decoder.decode(InputItem.self, from: json.data(using: .utf8)!)\n\n    if case .functionToolCall(let call) = item {\n      XCTAssertEqual(call.id, \"fc_123\")\n      XCTAssertEqual(call.callId, \"call_abc\")\n      XCTAssertEqual(call.name, \"get_weather\")\n      XCTAssertEqual(call.arguments, \"{\\\"location\\\": \\\"San Francisco\\\"}\")\n      XCTAssertEqual(call.status, \"completed\")\n    } else {\n      XCTFail(\"Expected function tool call\")\n    }\n  }\n\n  func testFunctionToolCallOutput() throws {\n    let json = \"\"\"\n      {\n        \"type\": 
\"function_call_output\",\n        \"call_id\": \"call_abc\",\n        \"output\": \"{\\\\\"temperature\\\\\": \\\\\"72°F\\\\\", \\\\\"condition\\\\\": \\\\\"sunny\\\\\"}\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let item = try decoder.decode(InputItem.self, from: json.data(using: .utf8)!)\n\n    if case .functionToolCallOutput(let output) = item {\n      XCTAssertEqual(output.callId, \"call_abc\")\n      XCTAssertEqual(output.output, \"{\\\"temperature\\\": \\\"72°F\\\", \\\"condition\\\": \\\"sunny\\\"}\")\n    } else {\n      XCTFail(\"Expected function tool call output\")\n    }\n  }\n\n  // MARK: - Complex Conversation Test\n\n  func testComplexConversation() throws {\n    let json = \"\"\"\n      [\n        {\n          \"type\": \"message\",\n          \"role\": \"system\",\n          \"content\": \"You are a helpful assistant.\"\n        },\n        {\n          \"type\": \"message\",\n          \"role\": \"user\",\n          \"content\": \"What's the weather in Boston?\"\n        },\n        {\n          \"type\": \"function_call\",\n          \"id\": \"fc_1\",\n          \"call_id\": \"call_1\",\n          \"name\": \"get_weather\",\n          \"arguments\": \"{\\\\\"location\\\\\": \\\\\"Boston\\\\\"}\",\n          \"status\": \"completed\"\n        },\n        {\n          \"type\": \"function_call_output\",\n          \"call_id\": \"call_1\",\n          \"output\": \"{\\\\\"temperature\\\\\": \\\\\"65°F\\\\\", \\\\\"condition\\\\\": \\\\\"cloudy\\\\\"}\"\n        },\n        {\n          \"type\": \"message\",\n          \"role\": \"assistant\",\n          \"content\": [\n            {\n              \"type\": \"output_text\",\n              \"text\": \"The weather in Boston is currently 65°F and cloudy.\"\n            }\n          ],\n          \"id\": \"msg_1\",\n          \"status\": \"completed\"\n        }\n      ]\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let input = try decoder.decode(InputType.self, from: 
json.data(using: .utf8)!)\n\n    switch input {\n    case .string:\n      XCTFail(\"Expected array input\")\n    case .array(let items):\n      XCTAssertEqual(items.count, 5)\n\n      // Verify system message\n      if case .message(let systemMsg) = items[0] {\n        XCTAssertEqual(systemMsg.role, \"system\")\n      } else {\n        XCTFail(\"Expected system message\")\n      }\n\n      // Verify user message\n      if case .message(let userMsg) = items[1] {\n        XCTAssertEqual(userMsg.role, \"user\")\n      } else {\n        XCTFail(\"Expected user message\")\n      }\n\n      // Verify function call\n      if case .functionToolCall(let call) = items[2] {\n        XCTAssertEqual(call.name, \"get_weather\")\n      } else {\n        XCTFail(\"Expected function call\")\n      }\n\n      // Verify function output\n      if case .functionToolCallOutput = items[3] {\n        // Success\n      } else {\n        XCTFail(\"Expected function output\")\n      }\n      // Note: OutputMessage is not yet supported in InputItem enum\n      // This would need to be added to fully support conversation history\n    }\n  }\n}\n"
  },
  {
    "path": "Tests/OpenAITests/ModelResponseParameterTests.swift",
    "content": "import XCTest\n@testable import SwiftOpenAI\n\nfinal class ModelResponseParameterTests: XCTestCase {\n  // MARK: - Basic Parameter Tests\n\n  func testModelResponseParameterWithStringInput() throws {\n    // Create parameter with string input\n    let parameter = ModelResponseParameter(\n      input: .string(\"Tell me about the weather\"),\n      model: .gpt4)\n\n    // Test encoding\n    let encoder = JSONEncoder()\n    encoder.outputFormatting = .sortedKeys\n    let data = try encoder.encode(parameter)\n    let json = String(data: data, encoding: .utf8)!\n\n    // Verify JSON contains expected fields\n    XCTAssertTrue(json.contains(\"\\\"input\\\":\\\"Tell me about the weather\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"model\\\":\\\"gpt-4\\\"\"))\n  }\n\n  func testModelResponseParameterWithArrayInput() throws {\n    // Create parameter with array input\n    let inputMessage = InputMessage(\n      role: \"user\",\n      content: .text(\"What's the capital of France?\"))\n\n    let parameter = ModelResponseParameter(\n      input: .array([\n        .message(inputMessage),\n      ]),\n      model: .gpt4o)\n\n    // Test encoding\n    let encoder = JSONEncoder()\n    let data = try encoder.encode(parameter)\n    let json = String(data: data, encoding: .utf8)!\n\n    // Verify JSON structure\n    XCTAssertTrue(json.contains(\"\\\"role\\\":\\\"user\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"content\\\":\\\"What's the capital of France?\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"model\\\":\\\"gpt-4o\\\"\"))\n  }\n\n  // MARK: - Complex Input Tests\n\n  func testModelResponseParameterWithMultimodalInput() throws {\n    // Create multimodal input with text and image content\n    let textContent = TextContent(text: \"What's in this image?\")\n    let imageContent = ImageContent(\n      detail: \"high\",\n      imageUrl: \"https://example.com/image.jpg\")\n\n    let inputMessage = InputMessage(\n      role: \"user\",\n      content: .array([\n  
      .text(textContent),\n        .image(imageContent),\n      ]))\n\n    let parameter = ModelResponseParameter(\n      input: .array([\n        .message(inputMessage),\n      ]),\n      model: .gpt4o,\n      maxOutputTokens: 500,\n      temperature: 0.7)\n\n    // Test encoding\n    let encoder = JSONEncoder()\n    let data = try encoder.encode(parameter)\n    let json = String(data: data, encoding: .utf8)!\n\n    // Print JSON for debugging\n    print(\"Generated JSON: \\(json)\")\n\n    // Verify all fields\n    XCTAssertTrue(json.contains(\"\\\"text\\\":\\\"What's in this image?\\\"\"), \"Text not found in JSON\")\n    XCTAssertTrue(\n      json.contains(\"https:\\\\/\\\\/example.com\\\\/image.jpg\") ||\n        json.contains(\"https://example.com/image.jpg\"),\n      \"Image URL not found in JSON\")\n    XCTAssertTrue(json.contains(\"\\\"detail\\\":\\\"high\\\"\"), \"Detail not found in JSON\")\n    XCTAssertTrue(json.contains(\"\\\"max_output_tokens\\\":500\"), \"Max output tokens not found in JSON\")\n    XCTAssertTrue(json.contains(\"\\\"temperature\\\":0.7\"), \"Temperature not found in JSON\")\n  }\n\n  func testModelResponseParameterWithFunctionCalling() throws {\n    // Create function tool\n    let functionTool = Tool.function(\n      Tool.FunctionTool(\n        name: \"get_weather\",\n        parameters: JSONSchema(\n          type: .object,\n          properties: [\n            \"location\": JSONSchema(\n              type: .string,\n              description: \"The city and state\"),\n          ],\n          required: [\"location\"]),\n        strict: true,\n        description: \"Get the weather for a location\"))\n\n    // Create function call in conversation history\n    let functionCall = FunctionToolCall(\n      arguments: \"{\\\"location\\\": \\\"Boston, MA\\\"}\",\n      callId: \"call_123\",\n      name: \"get_weather\",\n      id: \"fc_456\",\n      status: \"completed\")\n\n    let functionOutput = FunctionToolCallOutput(\n      callId: 
\"call_123\",\n      output: \"{\\\"temperature\\\": \\\"72°F\\\", \\\"condition\\\": \\\"sunny\\\"}\")\n\n    let parameter = ModelResponseParameter(\n      input: .array([\n        .message(InputMessage(role: \"user\", content: .text(\"What's the weather in Boston?\"))),\n        .functionToolCall(functionCall),\n        .functionToolCallOutput(functionOutput),\n        .message(InputMessage(role: \"assistant\", content: .text(\"The weather in Boston is 72°F and sunny.\"))),\n      ]),\n      model: .gpt4o,\n      toolChoice: .auto,\n      tools: [functionTool])\n\n    // Test encoding\n    let encoder = JSONEncoder()\n    let data = try encoder.encode(parameter)\n    let json = String(data: data, encoding: .utf8)!\n\n    // Verify function-related fields\n    XCTAssertTrue(json.contains(\"\\\"function_call\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"function_call_output\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"get_weather\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"tool_choice\\\":\\\"auto\\\"\"))\n  }\n\n  // MARK: - Optional Parameters Tests\n\n  func testModelResponseParameterWithAllOptionalFields() throws {\n    let parameter = ModelResponseParameter(\n      input: .string(\"Hello\"),\n      model: .gpt4o,\n      include: [.fileSearchCallResults],\n      instructions: \"You are a helpful assistant\",\n      maxOutputTokens: 1000,\n      metadata: [\"user_id\": \"123\", \"session\": \"abc\"],\n      parallelToolCalls: true,\n      previousResponseId: \"resp_previous\",\n      reasoning: Reasoning(effort: \"high\"),\n      store: true,\n      stream: false,\n      temperature: 0.5,\n      text: TextConfiguration(format: .text),\n      toolChoice: ToolChoiceMode.none,\n      tools: [],\n      topP: 0.9,\n      truncation: .auto,\n      user: \"user_123\")\n\n    // Test encoding\n    let encoder = JSONEncoder()\n    encoder.outputFormatting = .sortedKeys\n    let data = try encoder.encode(parameter)\n    let json = String(data: data, encoding: 
.utf8)!\n\n    // Verify all optional fields are present\n    XCTAssertTrue(json.contains(\"\\\"include\\\":[\\\"file_search_call.results\\\"]\"))\n    XCTAssertTrue(json.contains(\"\\\"instructions\\\":\\\"You are a helpful assistant\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"max_output_tokens\\\":1000\"))\n    XCTAssertTrue(json.contains(\"\\\"metadata\\\":{\\\"session\\\":\\\"abc\\\",\\\"user_id\\\":\\\"123\\\"}\"))\n    XCTAssertTrue(json.contains(\"\\\"parallel_tool_calls\\\":true\"))\n    XCTAssertTrue(json.contains(\"\\\"previous_response_id\\\":\\\"resp_previous\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"reasoning\\\":{\\\"effort\\\":\\\"high\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"store\\\":true\"))\n    XCTAssertTrue(json.contains(\"\\\"stream\\\":false\"))\n    XCTAssertTrue(json.contains(\"\\\"temperature\\\":0.5\"))\n    XCTAssertTrue(json.contains(\"\\\"tool_choice\\\":\\\"none\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"top_p\\\":0.9\"))\n    XCTAssertTrue(json.contains(\"\\\"truncation\\\":\\\"auto\\\"\"))\n    XCTAssertTrue(json.contains(\"\\\"user\\\":\\\"user_123\\\"\"))\n  }\n\n  // MARK: - Decoding Tests\n\n  func testModelResponseParameterDecoding() throws {\n    let json = \"\"\"\n      {\n        \"input\": \"Hello, how can I help?\",\n        \"model\": \"gpt-4o\",\n        \"temperature\": 0.8,\n        \"max_output_tokens\": 500,\n        \"metadata\": {\n          \"request_id\": \"req_123\"\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let parameter = try decoder.decode(ModelResponseParameter.self, from: json.data(using: .utf8)!)\n\n    // Verify decoded values\n    if case .string(let text) = parameter.input {\n      XCTAssertEqual(text, \"Hello, how can I help?\")\n    } else {\n      XCTFail(\"Expected string input\")\n    }\n\n    XCTAssertEqual(parameter.model, \"gpt-4o\")\n    XCTAssertEqual(parameter.temperature, 0.8)\n    XCTAssertEqual(parameter.maxOutputTokens, 500)\n    
XCTAssertEqual(parameter.metadata?[\"request_id\"], \"req_123\")\n  }\n\n  func testModelResponseParameterDecodingWithArrayInput() throws {\n    let json = \"\"\"\n      {\n        \"input\": [\n          {\n            \"type\": \"message\",\n            \"role\": \"system\",\n            \"content\": \"You are a helpful assistant.\"\n          },\n          {\n            \"type\": \"message\", \n            \"role\": \"user\",\n            \"content\": [\n              {\n                \"type\": \"input_text\",\n                \"text\": \"Analyze this data:\"\n              },\n              {\n                \"type\": \"input_file\",\n                \"file_id\": \"file-123\"\n              }\n            ]\n          }\n        ],\n        \"model\": \"gpt-4o\",\n        \"tools\": [\n          {\n            \"type\": \"file_search\",\n            \"vector_store_ids\": [\"vs_123\"]\n          }\n        ]\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let parameter = try decoder.decode(ModelResponseParameter.self, from: json.data(using: .utf8)!)\n\n    // Verify array input\n    if case .array(let items) = parameter.input {\n      XCTAssertEqual(items.count, 2)\n\n      // Check system message\n      if case .message(let systemMsg) = items[0] {\n        XCTAssertEqual(systemMsg.role, \"system\")\n        if case .text(let text) = systemMsg.content {\n          XCTAssertEqual(text, \"You are a helpful assistant.\")\n        }\n      }\n\n      // Check user message with array content\n      if case .message(let userMsg) = items[1] {\n        XCTAssertEqual(userMsg.role, \"user\")\n        if case .array(let contentItems) = userMsg.content {\n          XCTAssertEqual(contentItems.count, 2)\n        }\n      }\n    } else {\n      XCTFail(\"Expected array input\")\n    }\n\n    // Verify tools\n    XCTAssertEqual(parameter.tools?.count, 1)\n  }\n}\n"
  },
  {
    "path": "Tests/OpenAITests/OpenAITests.swift",
    "content": "import XCTest\n@testable import SwiftOpenAI\n\nfinal class OpenAITests: XCTestCase {\n  /// OpenAI is loose with their API contract, unfortunately.\n  /// Here we test that `tool_choice` is decodable from a string OR an object,\n  /// which is required for deserializing responses from assistants:\n  /// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-tool_choice\n  func testToolChoiceIsDecodableFromStringOrObject() throws {\n    let expectedResponseMappings: [(String, ToolChoice)] = [\n      (\"\\\"auto\\\"\", .auto),\n      (\"\\\"none\\\"\", .none),\n      (\"{\\\"type\\\": \\\"function\\\", \\\"function\\\": {\\\"name\\\": \\\"my_function\\\"}}\", .function(type: \"function\", name: \"my_function\")),\n    ]\n    let decoder = JSONDecoder()\n    for (response, expectedToolChoice) in expectedResponseMappings {\n      print(response)\n      guard let jsonData = response.data(using: .utf8) else {\n        XCTFail(\"Could not create json from sample response\")\n        return\n      }\n      let toolChoice = try decoder.decode(ToolChoice.self, from: jsonData)\n      XCTAssertEqual(toolChoice, expectedToolChoice, \"Mapping from \\(response) did not yield expected result\")\n    }\n  }\n\n  /// Here we test that `response_format` is decodable from a string OR an object,\n  /// which is required for deserializing responses from assistants:\n  /// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-response_format\n  func testResponseFormatIsDecodableFromStringOrObject() throws {\n    let expectedResponseMappings: [(String, ResponseFormat)] = [\n      (\"{\\\"type\\\": \\\"json_object\\\"}\", .jsonObject),\n      (\"{\\\"type\\\": \\\"text\\\"}\", .text),\n    ]\n    let decoder = JSONDecoder()\n    for (response, expectedResponseFormat) in expectedResponseMappings {\n      print(response)\n      guard let jsonData = response.data(using: .utf8) else {\n        XCTFail(\"Could not create json from 
sample response\")\n        return\n      }\n      let responseFormat = try decoder.decode(ResponseFormat.self, from: jsonData)\n      XCTAssertEqual(responseFormat, expectedResponseFormat, \"Mapping from \\(response) did not yield expected result\")\n    }\n  }\n\n  /// ResponseFormat is used in other places, and in those places it can *only* be populated with an object.\n  /// OpenAI really suffers in API consistency.\n  /// If a client sets the ResponseFormat to `auto` (which is now a valid case in the codebase), we\n  /// encode to {\"type\": \"text\"} to satisfy when response_format can only be an object, such as:\n  /// https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format\n  func testAutoResponseFormatEncodesToText() throws {\n    let jsonData = try JSONEncoder().encode(ResponseFormat.text)\n    XCTAssertEqual(String(data: jsonData, encoding: .utf8), \"{\\\"type\\\":\\\"text\\\"}\")\n  }\n\n  /// Verifies that our custom encoding of ResponseFormat supports the 'text' type:\n  func testTextResponseFormatIsEncodable() throws {\n    let jsonData = try JSONEncoder().encode(ResponseFormat.text)\n    XCTAssertEqual(String(data: jsonData, encoding: .utf8), \"{\\\"type\\\":\\\"text\\\"}\")\n  }\n\n  /// Verifies that our custom encoding of ResponseFormat supports the 'json_object' type:\n  func testJSONResponseFormatIsEncodable() throws {\n    let jsonData = try JSONEncoder().encode(ResponseFormat.jsonObject)\n    XCTAssertEqual(String(data: jsonData, encoding: .utf8), \"{\\\"type\\\":\\\"json_object\\\"}\")\n  }\n\n  /// Regression test for decoding assistant runs. 
Thank you to Martin Brian for the repro:\n  /// https://gist.github.com/mbrian23/6863ffa705ccbb5097bd07efb2355a30\n  func testThreadRunResponseIsDecodable() throws {\n    let response = \"\"\"\n      {\n        \"id\": \"run_ZWntP0jJr391lwVu3JqFZbKV\",\n        \"object\": \"thread.run\",\n        \"created_at\": 1713979538,\n        \"assistant_id\": \"asst_qxhQxXsecIjqw9cBjFTB6yvd\",\n        \"thread_id\": \"thread_CT4hxsN5N0A5vXg4FeR4pOPD\",\n        \"status\": \"queued\",\n        \"started_at\": null,\n        \"expires_at\": 1713980138,\n        \"cancelled_at\": null,\n        \"failed_at\": null,\n        \"completed_at\": null,\n        \"required_action\": null,\n        \"last_error\": null,\n        \"model\": \"gpt-4-1106-preview\",\n        \"instructions\": \"You answer ever question with ‘hello world’\",\n        \"tools\": [],\n        \"file_ids\": [],\n        \"metadata\": {},\n        \"temperature\": 1.0,\n        \"top_p\": 1.0,\n        \"max_completion_tokens\": null,\n        \"max_prompt_tokens\": null,\n        \"truncation_strategy\": {\n          \"type\": \"auto\",\n          \"last_messages\": null\n        },\n        \"incomplete_details\": null,\n        \"usage\": null,\n        \"response_format\": \"auto\",\n        \"tool_choice\": \"auto\"\n      }\n      \"\"\"\n\n    guard let jsonData = response.data(using: .utf8) else {\n      XCTFail(\"Could not create json from sample response\")\n      return\n    }\n    let decoder = JSONDecoder()\n    let runObject = try decoder.decode(RunObject.self, from: jsonData)\n    XCTAssertEqual(runObject.id, \"run_ZWntP0jJr391lwVu3JqFZbKV\")\n  }\n\n  // MARK: - Text Input Response Test\n\n  func testTextInputResponse() throws {\n    // Text input response JSON\n    let json = \"\"\"\n      {\n        \"id\": \"resp_67ccd2bed1ec8190b14f964abc0542670bb6a6b452d3795b\",\n        \"object\": \"response\",\n        \"created_at\": 1741476542,\n        \"status\": \"completed\",\n        
\"error\": null,\n        \"incomplete_details\": null,\n        \"instructions\": null,\n        \"max_output_tokens\": null,\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_67ccd2bf17f0819081ff3bb2cf6508e60bb6a6b452d3795b\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"In a peaceful grove beneath a silver moon, a unicorn named Lumina discovered a hidden pool that reflected the stars. As she dipped her horn into the water, the pool began to shimmer, revealing a pathway to a magical realm of endless night skies. Filled with wonder, Lumina whispered a wish for all who dream to find their own hidden magic, and as she glanced back, her hoofprints sparkled like stardust.\",\n                \"annotations\": []\n              }\n            ]\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"previous_response_id\": null,\n        \"reasoning\": {\n          \"effort\": null,\n          \"summary\": null\n        },\n        \"store\": true,\n        \"temperature\": 1.0,\n        \"text\": {\n          \"format\": {\n            \"type\": \"text\"\n          }\n        },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"top_p\": 1.0,\n        \"truncation\": \"disabled\",\n        \"usage\": {\n          \"input_tokens\": 36,\n          \"input_tokens_details\": {\n            \"cached_tokens\": 0\n          },\n          \"output_tokens\": 87,\n          \"output_tokens_details\": {\n            \"reasoning_tokens\": 0\n          },\n          \"total_tokens\": 123\n        },\n        \"user\": null,\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n 
   // Test basic properties\n    XCTAssertEqual(responseModel.id, \"resp_67ccd2bed1ec8190b14f964abc0542670bb6a6b452d3795b\")\n    XCTAssertEqual(responseModel.object, \"response\")\n    XCTAssertEqual(responseModel.createdAt, 1_741_476_542)\n    XCTAssertEqual(responseModel.status, .completed)\n    XCTAssertNil(responseModel.error)\n    XCTAssertNil(responseModel.incompleteDetails)\n    XCTAssertNil(responseModel.instructions)\n    XCTAssertNil(responseModel.maxOutputTokens)\n    XCTAssertEqual(responseModel.model, \"gpt-4o-2024-08-06\")\n    XCTAssertEqual(responseModel.parallelToolCalls, true)\n    XCTAssertNil(responseModel.previousResponseId)\n    XCTAssertNotNil(responseModel.reasoning)\n    XCTAssertEqual(responseModel.temperature, 1.0)\n    XCTAssertEqual(responseModel.tools?.count, 0)\n    XCTAssertEqual(responseModel.topP, 1.0)\n    XCTAssertEqual(responseModel.truncation, \"disabled\")\n\n    // Test usage details\n    XCTAssertNotNil(responseModel.usage)\n    XCTAssertEqual(responseModel.usage?.inputTokens, 36)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 87)\n    XCTAssertEqual(responseModel.usage?.totalTokens, 123)\n    XCTAssertEqual(responseModel.usage?.inputTokensDetails?.cachedTokens, 0)\n    XCTAssertEqual(responseModel.usage?.outputTokensDetails?.reasoningTokens, 0)\n\n    // Test new fields\n    XCTAssertNil(responseModel.background)\n    XCTAssertNil(responseModel.serviceTier)\n    XCTAssertEqual(responseModel.store, true)\n\n    // Test output content\n    XCTAssertEqual(responseModel.output.count, 1)\n\n    if case .message(let message) = responseModel.output[0] {\n      XCTAssertEqual(message.id, \"msg_67ccd2bf17f0819081ff3bb2cf6508e60bb6a6b452d3795b\")\n      XCTAssertEqual(message.status, \"completed\")\n      XCTAssertEqual(message.role, \"assistant\")\n      XCTAssertEqual(message.content.count, 1)\n\n      if case .outputText(let outputText) = message.content[0] {\n        XCTAssertEqual(outputText.type, \"output_text\")\n     
   XCTAssertTrue(outputText.text.starts(with: \"In a peaceful grove beneath a silver moon\"))\n        XCTAssertEqual(outputText.annotations.count, 0)\n      } else {\n        XCTFail(\"Expected output text content\")\n      }\n    } else {\n      XCTFail(\"Expected message output type\")\n    }\n\n    // Test outputText convenience property\n    let expectedText =\n      \"In a peaceful grove beneath a silver moon, a unicorn named Lumina discovered a hidden pool that reflected the stars. As she dipped her horn into the water, the pool began to shimmer, revealing a pathway to a magical realm of endless night skies. Filled with wonder, Lumina whispered a wish for all who dream to find their own hidden magic, and as she glanced back, her hoofprints sparkled like stardust.\"\n    XCTAssertEqual(responseModel.outputText, expectedText)\n  }\n\n  // MARK: - Image Input Response Test\n\n  func testImageInputResponse() throws {\n    // Image input response JSON\n    let json = \"\"\"\n      {\n        \"id\": \"resp_67ccd3a9da748190baa7f1570fe91ac604becb25c45c1d41\",\n        \"object\": \"response\",\n        \"created_at\": 1741476777,\n        \"status\": \"completed\",\n        \"error\": null,\n        \"incomplete_details\": null,\n        \"instructions\": null,\n        \"max_output_tokens\": null,\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_67ccd3acc8d48190a77525dc6de64b4104becb25c45c1d41\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"The image depicts a scenic landscape with a wooden boardwalk or pathway leading through lush, green grass under a blue sky with some clouds. The setting suggests a peaceful natural area, possibly a park or nature reserve. 
There are trees and shrubs in the background.\",\n                \"annotations\": []\n              }\n            ]\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"previous_response_id\": null,\n        \"reasoning\": {\n          \"effort\": null,\n          \"summary\": null\n        },\n        \"store\": true,\n        \"temperature\": 1.0,\n        \"text\": {\n          \"format\": {\n            \"type\": \"text\"\n          }\n        },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"top_p\": 1.0,\n        \"truncation\": \"disabled\",\n        \"usage\": {\n          \"input_tokens\": 328,\n          \"input_tokens_details\": {\n            \"cached_tokens\": 0\n          },\n          \"output_tokens\": 52,\n          \"output_tokens_details\": {\n            \"reasoning_tokens\": 0\n          },\n          \"total_tokens\": 380\n        },\n        \"user\": null,\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test basic properties\n    XCTAssertEqual(responseModel.id, \"resp_67ccd3a9da748190baa7f1570fe91ac604becb25c45c1d41\")\n    XCTAssertEqual(responseModel.object, \"response\")\n    XCTAssertEqual(responseModel.createdAt, 1_741_476_777)\n    XCTAssertEqual(responseModel.status, .completed)\n    XCTAssertNil(responseModel.error)\n    XCTAssertNil(responseModel.incompleteDetails)\n    XCTAssertNil(responseModel.instructions)\n    XCTAssertNil(responseModel.maxOutputTokens)\n    XCTAssertEqual(responseModel.model, \"gpt-4o-2024-08-06\")\n    XCTAssertEqual(responseModel.parallelToolCalls, true)\n    XCTAssertNil(responseModel.previousResponseId)\n    XCTAssertNotNil(responseModel.reasoning)\n    XCTAssertEqual(responseModel.temperature, 1.0)\n    XCTAssertEqual(responseModel.tools?.count, 0)\n    XCTAssertEqual(responseModel.topP, 1.0)\n    
XCTAssertEqual(responseModel.truncation, \"disabled\")\n\n    // Test usage details\n    XCTAssertNotNil(responseModel.usage)\n    XCTAssertEqual(responseModel.usage?.inputTokens, 328)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 52)\n    XCTAssertEqual(responseModel.usage?.totalTokens, 380)\n    XCTAssertEqual(responseModel.usage?.inputTokensDetails?.cachedTokens, 0)\n    XCTAssertEqual(responseModel.usage?.outputTokensDetails?.reasoningTokens, 0)\n\n    // Test output content\n    XCTAssertEqual(responseModel.output.count, 1)\n\n    if case .message(let message) = responseModel.output[0] {\n      XCTAssertEqual(message.id, \"msg_67ccd3acc8d48190a77525dc6de64b4104becb25c45c1d41\")\n      XCTAssertEqual(message.status, \"completed\")\n      XCTAssertEqual(message.role, \"assistant\")\n      XCTAssertEqual(message.content.count, 1)\n\n      if case .outputText(let outputText) = message.content[0] {\n        XCTAssertEqual(outputText.type, \"output_text\")\n        XCTAssertTrue(outputText.text.starts(with: \"The image depicts a scenic landscape\"))\n        XCTAssertEqual(outputText.annotations.count, 0)\n      } else {\n        XCTFail(\"Expected output text content\")\n      }\n    } else {\n      XCTFail(\"Expected message output type\")\n    }\n\n    // Test outputText convenience property\n    let expectedText =\n      \"The image depicts a scenic landscape with a wooden boardwalk or pathway leading through lush, green grass under a blue sky with some clouds. The setting suggests a peaceful natural area, possibly a park or nature reserve. 
There are trees and shrubs in the background.\"\n    XCTAssertEqual(responseModel.outputText, expectedText)\n  }\n\n  // MARK: - Web Search Response Test\n\n  func testWebSearchResponse() throws {\n    // Web search response JSON\n    let json = \"\"\"\n      {\n        \"id\": \"resp_67ccf18ef5fc8190b16dbee19bc54e5f087bb177ab789d5c\",\n        \"object\": \"response\",\n        \"created_at\": 1741484430,\n        \"status\": \"completed\",\n        \"error\": null,\n        \"incomplete_details\": null,\n        \"instructions\": null,\n        \"max_output_tokens\": null,\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"web_search_call\",\n            \"id\": \"ws_67ccf18f64008190a39b619f4c8455ef087bb177ab789d5c\",\n            \"status\": \"completed\"\n          },\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_67ccf190ca3881909d433c50b1f6357e087bb177ab789d5c\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"As of today, March 9, 2025, one notable positive news story...\",\n                \"annotations\": [\n                  {\n                    \"type\": \"url_citation\",\n                    \"start_index\": 442,\n                    \"end_index\": 557,\n                    \"url\": \"https://.../?utm_source=chatgpt.com\",\n                    \"title\": \"...\"\n                  },\n                  {\n                    \"type\": \"url_citation\",\n                    \"start_index\": 962,\n                    \"end_index\": 1077,\n                    \"url\": \"https://.../?utm_source=chatgpt.com\",\n                    \"title\": \"...\"\n                  },\n                  {\n                    \"type\": \"url_citation\",\n                    \"start_index\": 1336,\n                    \"end_index\": 1451,\n      
              \"url\": \"https://.../?utm_source=chatgpt.com\",\n                    \"title\": \"...\"\n                  }\n                ]\n              }\n            ]\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"previous_response_id\": null,\n        \"reasoning\": {\n          \"effort\": null,\n          \"summary\": null\n        },\n        \"store\": true,\n        \"temperature\": 1.0,\n        \"text\": {\n          \"format\": {\n            \"type\": \"text\"\n          }\n        },\n        \"tool_choice\": \"auto\",\n        \"tools\": [\n          {\n            \"type\": \"web_search_preview\",\n            \"domains\": [],\n            \"search_context_size\": \"medium\",\n            \"user_location\": {\n              \"type\": \"approximate\",\n              \"city\": null,\n              \"country\": \"US\",\n              \"region\": null,\n              \"timezone\": null\n            }\n          }\n        ],\n        \"top_p\": 1.0,\n        \"truncation\": \"disabled\",\n        \"usage\": {\n          \"input_tokens\": 328,\n          \"input_tokens_details\": {\n            \"cached_tokens\": 0\n          },\n          \"output_tokens\": 356,\n          \"output_tokens_details\": {\n            \"reasoning_tokens\": 0\n          },\n          \"total_tokens\": 684\n        },\n        \"user\": null,\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test basic properties\n    XCTAssertEqual(responseModel.id, \"resp_67ccf18ef5fc8190b16dbee19bc54e5f087bb177ab789d5c\")\n    XCTAssertEqual(responseModel.object, \"response\")\n    XCTAssertEqual(responseModel.createdAt, 1_741_484_430)\n    XCTAssertEqual(responseModel.status, .completed)\n    XCTAssertNil(responseModel.error)\n    XCTAssertNil(responseModel.incompleteDetails)\n    
XCTAssertNil(responseModel.instructions)\n    XCTAssertNil(responseModel.maxOutputTokens)\n    XCTAssertEqual(responseModel.model, \"gpt-4o-2024-08-06\")\n    XCTAssertEqual(responseModel.parallelToolCalls, true)\n    XCTAssertNil(responseModel.previousResponseId)\n    XCTAssertNotNil(responseModel.reasoning)\n    XCTAssertEqual(responseModel.temperature, 1.0)\n    XCTAssertEqual(responseModel.tools?.count, 1)\n    XCTAssertEqual(responseModel.topP, 1.0)\n    XCTAssertEqual(responseModel.truncation, \"disabled\")\n\n    // Test usage details\n    XCTAssertNotNil(responseModel.usage)\n    XCTAssertEqual(responseModel.usage?.inputTokens, 328)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 356)\n    XCTAssertEqual(responseModel.usage?.totalTokens, 684)\n    XCTAssertEqual(responseModel.usage?.inputTokensDetails?.cachedTokens, 0)\n    XCTAssertEqual(responseModel.usage?.outputTokensDetails?.reasoningTokens, 0)\n\n    // Test output array - should have web search call and message\n    XCTAssertEqual(responseModel.output.count, 2)\n\n    // Test web search call\n    if case .webSearchCall(let webSearch) = responseModel.output[0] {\n      XCTAssertEqual(webSearch.id, \"ws_67ccf18f64008190a39b619f4c8455ef087bb177ab789d5c\")\n      XCTAssertEqual(webSearch.status, \"completed\")\n      XCTAssertEqual(webSearch.type, \"web_search_call\")\n    } else {\n      XCTFail(\"Expected web search call output type\")\n    }\n\n    // Test message\n    if case .message(let message) = responseModel.output[1] {\n      XCTAssertEqual(message.id, \"msg_67ccf190ca3881909d433c50b1f6357e087bb177ab789d5c\")\n      XCTAssertEqual(message.status, \"completed\")\n      XCTAssertEqual(message.role, \"assistant\")\n      XCTAssertEqual(message.content.count, 1)\n\n      if case .outputText(let outputText) = message.content[0] {\n        XCTAssertEqual(outputText.type, \"output_text\")\n        XCTAssertTrue(outputText.text.starts(with: \"As of today, March 9, 2025\"))\n        
XCTAssertEqual(outputText.annotations.count, 3)\n      } else {\n        XCTFail(\"Expected output text content\")\n      }\n    } else {\n      XCTFail(\"Expected message output type\")\n    }\n\n    // Test outputText convenience property\n    let expectedText = \"As of today, March 9, 2025, one notable positive news story...\"\n    XCTAssertEqual(responseModel.outputText, expectedText)\n  }\n\n  // MARK: - File Search Response Test\n\n  func testFileSearchResponse() throws {\n    // File search response JSON\n    let json = \"\"\"\n      {\n        \"id\": \"resp_67ccf4c55fc48190b71bd0463ad3306d09504fb6872380d7\",\n        \"object\": \"response\",\n        \"created_at\": 1741485253,\n        \"status\": \"completed\",\n        \"error\": null,\n        \"incomplete_details\": null,\n        \"instructions\": null,\n        \"max_output_tokens\": null,\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"file_search_call\",\n            \"id\": \"fs_67ccf4c63cd08190887ef6464ba5681609504fb6872380d7\",\n            \"status\": \"completed\",\n            \"queries\": [\n              \"attributes of an ancient brown dragon\"\n            ],\n            \"results\": null\n          },\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_67ccf4c93e5c81909d595b369351a9d309504fb6872380d7\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"The attributes of an ancient brown dragon include...\",\n                \"annotations\": [\n                  {\n                    \"type\": \"file_citation\",\n                    \"index\": 320,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  },\n                  {\n                    \"type\": \"file_citation\",\n       
             \"index\": 576,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  },\n                  {\n                    \"type\": \"file_citation\",\n                    \"index\": 815,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  },\n                  {\n                    \"type\": \"file_citation\",\n                    \"index\": 815,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  },\n                  {\n                    \"type\": \"file_citation\",\n                    \"index\": 1030,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  },\n                  {\n                    \"type\": \"file_citation\",\n                    \"index\": 1030,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  },\n                  {\n                    \"type\": \"file_citation\",\n                    \"index\": 1156,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  },\n                  {\n                    \"type\": \"file_citation\",\n                    \"index\": 1225,\n                    \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                    \"filename\": \"dragons.pdf\"\n                  }\n                ]\n              }\n            ]\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"previous_response_id\": null,\n        \"reasoning\": {\n          \"effort\": null,\n          \"summary\": null\n        },\n        \"store\": true,\n        \"temperature\": 1.0,\n        \"text\": {\n        
  \"format\": {\n            \"type\": \"text\"\n          }\n        },\n        \"tool_choice\": \"auto\",\n        \"tools\": [\n          {\n            \"type\": \"file_search\",\n            \"filters\": null,\n            \"max_num_results\": 20,\n            \"ranking_options\": {\n              \"ranker\": \"auto\",\n              \"score_threshold\": 0.0\n            },\n            \"vector_store_ids\": [\n              \"vs_1234567890\"\n            ]\n          }\n        ],\n        \"top_p\": 1.0,\n        \"truncation\": \"disabled\",\n        \"usage\": {\n          \"input_tokens\": 18307,\n          \"input_tokens_details\": {\n            \"cached_tokens\": 0\n          },\n          \"output_tokens\": 348,\n          \"output_tokens_details\": {\n            \"reasoning_tokens\": 0\n          },\n          \"total_tokens\": 18655\n        },\n        \"user\": null,\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test basic properties\n    XCTAssertEqual(responseModel.id, \"resp_67ccf4c55fc48190b71bd0463ad3306d09504fb6872380d7\")\n    XCTAssertEqual(responseModel.object, \"response\")\n    XCTAssertEqual(responseModel.createdAt, 1_741_485_253)\n    XCTAssertEqual(responseModel.status, .completed)\n    XCTAssertNil(responseModel.error)\n    XCTAssertNil(responseModel.incompleteDetails)\n    XCTAssertNil(responseModel.instructions)\n    XCTAssertNil(responseModel.maxOutputTokens)\n    XCTAssertEqual(responseModel.model, \"gpt-4o-2024-08-06\")\n    XCTAssertEqual(responseModel.parallelToolCalls, true)\n    XCTAssertNil(responseModel.previousResponseId)\n    XCTAssertNotNil(responseModel.reasoning)\n    XCTAssertEqual(responseModel.temperature, 1.0)\n    XCTAssertEqual(responseModel.tools?.count, 1)\n    XCTAssertEqual(responseModel.topP, 1.0)\n    XCTAssertEqual(responseModel.truncation, \"disabled\")\n\n    
// Test usage details\n    XCTAssertNotNil(responseModel.usage)\n    XCTAssertEqual(responseModel.usage?.inputTokens, 18307)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 348)\n    XCTAssertEqual(responseModel.usage?.totalTokens, 18655)\n    XCTAssertEqual(responseModel.usage?.inputTokensDetails?.cachedTokens, 0)\n    XCTAssertEqual(responseModel.usage?.outputTokensDetails?.reasoningTokens, 0)\n\n    // Test output array - should have file search call and message\n    XCTAssertEqual(responseModel.output.count, 2)\n\n    // Test file search call\n    if case .fileSearchCall(let fileSearch) = responseModel.output[0] {\n      XCTAssertEqual(fileSearch.id, \"fs_67ccf4c63cd08190887ef6464ba5681609504fb6872380d7\")\n      XCTAssertEqual(fileSearch.status, \"completed\")\n      XCTAssertEqual(fileSearch.type, \"file_search_call\")\n      XCTAssertEqual(fileSearch.queries.count, 1)\n      XCTAssertEqual(fileSearch.queries[0], \"attributes of an ancient brown dragon\")\n      XCTAssertNil(fileSearch.results)\n    } else {\n      XCTFail(\"Expected file search call output type\")\n    }\n\n    // Test message\n    if case .message(let message) = responseModel.output[1] {\n      XCTAssertEqual(message.id, \"msg_67ccf4c93e5c81909d595b369351a9d309504fb6872380d7\")\n      XCTAssertEqual(message.status, \"completed\")\n      XCTAssertEqual(message.role, \"assistant\")\n      XCTAssertEqual(message.content.count, 1)\n\n      if case .outputText(let outputText) = message.content[0] {\n        XCTAssertEqual(outputText.type, \"output_text\")\n        XCTAssertTrue(outputText.text.starts(with: \"The attributes of an ancient brown dragon\"))\n        XCTAssertEqual(outputText.annotations.count, 8)\n\n      } else {\n        XCTFail(\"Expected output text content\")\n      }\n    } else {\n      XCTFail(\"Expected message output type\")\n    }\n\n    // Test outputText convenience property\n    let expectedText = \"The attributes of an ancient brown dragon include...\"\n    
XCTAssertEqual(responseModel.outputText, expectedText)\n  }\n\n  // MARK: - Multi-Message Output Test\n\n  func testMultiMessageOutput() throws {\n    // Create a test case with multiple messages in the output array\n    let json = \"\"\"\n      {\n        \"id\": \"resp_test_multiple_messages\",\n        \"object\": \"response\",\n        \"created_at\": 1741485253,\n        \"status\": \"completed\",\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_1\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"First message text\",\n                \"annotations\": []\n              }\n            ]\n          },\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_2\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"Second message text\",\n                \"annotations\": []\n              }\n            ]\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"text\": { \"format\": { \"type\": \"text\" } },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test that both messages are found and their texts are joined\n    XCTAssertEqual(responseModel.output.count, 2)\n    XCTAssertEqual(responseModel.outputText, \"First message textSecond message text\")\n  }\n\n  // MARK: - Mixed Content Types Test\n\n  func testMixedContentTypes() throws {\n    // Create a test case with mixed content types\n    let json = \"\"\"\n      {\n        \"id\": 
\"resp_test_mixed_content\",\n        \"object\": \"response\",\n        \"created_at\": 1741485253,\n        \"status\": \"completed\",\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"web_search_call\",\n            \"id\": \"ws_test\",\n            \"status\": \"completed\"\n          },\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_test\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"This is the main text\",\n                \"annotations\": []\n              }\n            ]\n          },\n          {\n            \"type\": \"file_search_call\",\n            \"id\": \"fs_test\",\n            \"status\": \"completed\",\n            \"queries\": [\"test query\"],\n            \"results\": null\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"text\": { \"format\": { \"type\": \"text\" } },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test that only message content is included in outputText\n    XCTAssertEqual(responseModel.output.count, 3)\n    XCTAssertEqual(responseModel.outputText, \"This is the main text\")\n  }\n\n  // MARK: - Error Response Test\n\n  func testErrorResponse() throws {\n    // Create a test case with an error\n    let json = \"\"\"\n      {\n        \"id\": \"resp_test_error\",\n        \"object\": \"response\",\n        \"created_at\": 1741485253,\n        \"status\": \"failed\",\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"error\": {\n          \"code\": \"server_error\",\n          \"message\": \"The server encountered an error while processing your request.\"\n        
},\n        \"output\": [],\n        \"parallel_tool_calls\": true,\n        \"text\": { \"format\": { \"type\": \"text\" } },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test error properties\n    XCTAssertEqual(responseModel.status, .failed)\n    XCTAssertNotNil(responseModel.error)\n    XCTAssertEqual(responseModel.error?.code, \"server_error\")\n    XCTAssertEqual(responseModel.error?.message, \"The server encountered an error while processing your request.\")\n\n    // Test empty output\n    XCTAssertEqual(responseModel.output.count, 0)\n    XCTAssertNil(responseModel.outputText)\n  }\n\n  // MARK: - Incomplete Response Test\n\n  func testIncompleteResponse() throws {\n    // Create a test case with incomplete status\n    let json = \"\"\"\n      {\n        \"id\": \"resp_test_incomplete\",\n        \"object\": \"response\",\n        \"created_at\": 1741485253,\n        \"status\": \"incomplete\",\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"incomplete_details\": {\n          \"reason\": \"content_filter\"\n        },\n        \"output\": [\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_incomplete\",\n            \"status\": \"incomplete\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"Partial response...\",\n                \"annotations\": []\n              }\n            ]\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"text\": { \"format\": { \"type\": \"text\" } },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: 
json.data(using: .utf8)!)\n\n    // Test incomplete properties\n    XCTAssertEqual(responseModel.status, .incomplete)\n    XCTAssertNotNil(responseModel.incompleteDetails)\n    XCTAssertEqual(responseModel.incompleteDetails?.reason, \"content_filter\")\n\n    // Test partial output\n    XCTAssertEqual(responseModel.output.count, 1)\n    if case .message(let message) = responseModel.output[0] {\n      XCTAssertEqual(message.status, \"incomplete\")\n    } else {\n      XCTFail(\"Expected message output type\")\n    }\n\n    // Partial text should still be extracted\n    XCTAssertEqual(responseModel.outputText, \"Partial response...\")\n  }\n\n  // MARK: - Function Call Test\n\n  func testFunctionCallResponse() throws {\n    // Create a test case with a function call using real-world OpenAI response\n    let json = \"\"\"\n      {\n        \"id\": \"resp_67ca09c5efe0819096d0511c92b8c890096610f474011cc0\",\n        \"object\": \"response\",\n        \"created_at\": 1741294021,\n        \"status\": \"completed\",\n        \"error\": null,\n        \"incomplete_details\": null,\n        \"instructions\": null,\n        \"max_output_tokens\": null,\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"function_call\",\n            \"id\": \"fc_67ca09c6bedc8190a7abfec07b1a1332096610f474011cc0\",\n            \"call_id\": \"call_unLAR8MvFNptuiZK6K6HCy5k\",\n            \"name\": \"get_current_weather\",\n            \"arguments\": \"{\\\\\\\"location\\\\\\\":\\\\\\\"Boston, MA\\\\\\\",\\\\\\\"unit\\\\\\\":\\\\\\\"celsius\\\\\\\"}\",\n            \"status\": \"completed\"\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"previous_response_id\": null,\n        \"reasoning\": {\n          \"effort\": null,\n          \"summary\": null\n        },\n        \"store\": true,\n        \"temperature\": 1.0,\n        \"text\": {\n          \"format\": {\n            \"type\": \"text\"\n          }\n        
},\n        \"tool_choice\": \"auto\",\n        \"tools\": [\n          {\n            \"type\": \"function\",\n            \"description\": \"Get the current weather in a given location\",\n            \"name\": \"get_current_weather\",\n            \"parameters\": {\n              \"type\": \"object\",\n              \"properties\": {\n                \"location\": {\n                  \"type\": \"string\",\n                  \"description\": \"The city and state, e.g. San Francisco, CA\"\n                },\n                \"unit\": {\n                  \"type\": \"string\",\n                  \"enum\": [\n                    \"celsius\",\n                    \"fahrenheit\"\n                  ]\n                }\n              },\n              \"required\": [\n                \"location\",\n                \"unit\"\n              ]\n            },\n            \"strict\": true\n          }\n        ],\n        \"top_p\": 1.0,\n        \"truncation\": \"disabled\",\n        \"usage\": {\n          \"input_tokens\": 291,\n          \"output_tokens\": 23,\n          \"output_tokens_details\": {\n            \"reasoning_tokens\": 0\n          },\n          \"total_tokens\": 314\n        },\n        \"user\": null,\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test basic properties\n    XCTAssertEqual(responseModel.id, \"resp_67ca09c5efe0819096d0511c92b8c890096610f474011cc0\")\n    XCTAssertEqual(responseModel.object, \"response\")\n    XCTAssertEqual(responseModel.createdAt, 1_741_294_021)\n    XCTAssertEqual(responseModel.status, .completed)\n\n    // Test tool configuration\n    XCTAssertEqual(responseModel.tools?.count, 1)\n    if let tools = responseModel.tools, case .function(let functionTool) = tools[0] {\n      XCTAssertEqual(functionTool.name, \"get_current_weather\")\n      
XCTAssertEqual(functionTool.description, \"Get the current weather in a given location\")\n      XCTAssertEqual(functionTool.type, \"function\")\n      XCTAssertEqual(functionTool.strict, true)\n    } else {\n      XCTFail(\"Expected function tool\")\n    }\n\n    // Test function call properties\n    XCTAssertEqual(responseModel.output.count, 1)\n    if case .functionCall(let functionCall) = responseModel.output[0] {\n      XCTAssertEqual(functionCall.id, \"fc_67ca09c6bedc8190a7abfec07b1a1332096610f474011cc0\")\n      XCTAssertEqual(functionCall.status, \"completed\")\n      XCTAssertEqual(functionCall.callId, \"call_unLAR8MvFNptuiZK6K6HCy5k\")\n      XCTAssertEqual(functionCall.name, \"get_current_weather\")\n      XCTAssertEqual(functionCall.arguments, \"{\\\"location\\\":\\\"Boston, MA\\\",\\\"unit\\\":\\\"celsius\\\"}\")\n    } else {\n      XCTFail(\"Expected function call output type\")\n    }\n\n    // Test outputText - should be nil since there's no message\n    XCTAssertNil(responseModel.outputText)\n  }\n\n  func testReasoningResponse() throws {\n    // Create a test case with reasoning effort\n    let json = \"\"\"\n      {\n        \"id\": \"resp_67ccd7eca01881908ff0b5146584e408072912b2993db808\",\n        \"object\": \"response\",\n        \"created_at\": 1741477868,\n        \"status\": \"completed\",\n        \"error\": null,\n        \"incomplete_details\": null,\n        \"instructions\": null,\n        \"max_output_tokens\": null,\n        \"model\": \"o1-2024-12-17\",\n        \"output\": [\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_67ccd7f7b5848190a6f3e95d809f6b44072912b2993db808\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"The classic tongue twister...\",\n                \"annotations\": []\n              }\n            ]\n          }\n        ],\n        
\"parallel_tool_calls\": true,\n        \"previous_response_id\": null,\n        \"reasoning\": {\n          \"effort\": \"high\",\n          \"summary\": null\n        },\n        \"store\": true,\n        \"temperature\": 1.0,\n        \"text\": {\n          \"format\": {\n            \"type\": \"text\"\n          }\n        },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"top_p\": 1.0,\n        \"truncation\": \"disabled\",\n        \"usage\": {\n          \"input_tokens\": 81,\n          \"input_tokens_details\": {\n            \"cached_tokens\": 0\n          },\n          \"output_tokens\": 1035,\n          \"output_tokens_details\": {\n            \"reasoning_tokens\": 832\n          },\n          \"total_tokens\": 1116\n        },\n        \"user\": null,\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test basic properties\n    XCTAssertEqual(responseModel.id, \"resp_67ccd7eca01881908ff0b5146584e408072912b2993db808\")\n    XCTAssertEqual(responseModel.object, \"response\")\n    XCTAssertEqual(responseModel.createdAt, 1_741_477_868)\n    XCTAssertEqual(responseModel.status, .completed)\n    XCTAssertEqual(responseModel.model, \"o1-2024-12-17\")\n\n    // Test reasoning properties\n    XCTAssertNotNil(responseModel.reasoning)\n    XCTAssertEqual(responseModel.reasoning?.effort, \"high\")\n\n    // Test usage details\n    XCTAssertNotNil(responseModel.usage)\n    XCTAssertEqual(responseModel.usage?.inputTokens, 81)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 1035)\n    XCTAssertEqual(responseModel.usage?.totalTokens, 1116)\n    XCTAssertEqual(responseModel.usage?.outputTokensDetails?.reasoningTokens, 832)\n\n    // Test output content\n    XCTAssertEqual(responseModel.output.count, 1)\n\n    if case .message(let message) = responseModel.output[0] {\n      XCTAssertEqual(message.id, 
\"msg_67ccd7f7b5848190a6f3e95d809f6b44072912b2993db808\")\n      XCTAssertEqual(message.status, \"completed\")\n      XCTAssertEqual(message.role, \"assistant\")\n      XCTAssertEqual(message.content.count, 1)\n\n      if case .outputText(let outputText) = message.content[0] {\n        XCTAssertEqual(outputText.type, \"output_text\")\n        XCTAssertEqual(outputText.text, \"The classic tongue twister...\")\n        XCTAssertEqual(outputText.annotations.count, 0)\n      } else {\n        XCTFail(\"Expected output text content\")\n      }\n    } else {\n      XCTFail(\"Expected message output type\")\n    }\n\n    // Test outputText convenience property\n    XCTAssertEqual(responseModel.outputText, \"The classic tongue twister...\")\n  }\n\n  // MARK: - New Fields Test (background, serviceTier, store)\n\n  func testResponseModelNewFields() throws {\n    // Test response with new fields\n    let json = \"\"\"\n      {\n        \"id\": \"resp_test_new_fields\",\n        \"object\": \"response\",\n        \"created_at\": 1741485253,\n        \"status\": \"completed\",\n        \"background\": true,\n        \"service_tier\": \"flex\",\n        \"store\": false,\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [\n          {\n            \"type\": \"message\",\n            \"id\": \"msg_test\",\n            \"status\": \"completed\",\n            \"role\": \"assistant\",\n            \"content\": [\n              {\n                \"type\": \"output_text\",\n                \"text\": \"Test response with new fields\",\n                \"annotations\": []\n              }\n            ]\n          }\n        ],\n        \"parallel_tool_calls\": true,\n        \"text\": { \"format\": { \"type\": \"text\" } },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // 
Test new fields\n    XCTAssertEqual(responseModel.background, true)\n    XCTAssertEqual(responseModel.serviceTier, \"flex\")\n    XCTAssertEqual(responseModel.store, false)\n    XCTAssertEqual(responseModel.status, .completed)\n  }\n\n  // MARK: - Status Enum Test\n\n  func testResponseModelStatusEnum() throws {\n    let statusValues: [(String, ResponseModel.Status)] = [\n      (\"completed\", .completed),\n      (\"failed\", .failed),\n      (\"in_progress\", .inProgress),\n      (\"cancelled\", .cancelled),\n      (\"queued\", .queued),\n      (\"incomplete\", .incomplete),\n    ]\n\n    for (jsonStatus, expectedStatus) in statusValues {\n      let json = \"\"\"\n        {\n          \"id\": \"resp_status_test\",\n          \"object\": \"response\",\n          \"created_at\": 1741485253,\n          \"status\": \"\\(jsonStatus)\",\n          \"model\": \"gpt-4o-2024-08-06\",\n          \"output\": [],\n          \"parallel_tool_calls\": true,\n          \"text\": { \"format\": { \"type\": \"text\" } },\n          \"tool_choice\": \"auto\",\n          \"tools\": [],\n          \"metadata\": {}\n        }\n        \"\"\"\n\n      let decoder = JSONDecoder()\n      let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n      XCTAssertEqual(responseModel.status, expectedStatus, \"Status '\\(jsonStatus)' should decode to \\(expectedStatus)\")\n    }\n  }\n\n  // MARK: - Service Tier Values Test\n\n  func testServiceTierValues() throws {\n    let serviceTierValues = [\"auto\", \"default\", \"flex\", \"scale\"]\n\n    for tier in serviceTierValues {\n      let json = \"\"\"\n        {\n          \"id\": \"resp_service_tier_test\",\n          \"object\": \"response\",\n          \"created_at\": 1741485253,\n          \"status\": \"completed\",\n          \"service_tier\": \"\\(tier)\",\n          \"model\": \"gpt-4o-2024-08-06\",\n          \"output\": [],\n          \"parallel_tool_calls\": true,\n          \"text\": { \"format\": { 
\"type\": \"text\" } },\n          \"tool_choice\": \"auto\",\n          \"tools\": [],\n          \"metadata\": {}\n        }\n        \"\"\"\n\n      let decoder = JSONDecoder()\n      let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n      XCTAssertEqual(responseModel.serviceTier, tier, \"Service tier '\\(tier)' should be properly decoded\")\n    }\n  }\n\n  // MARK: - Null Fields Test\n\n  func testResponseModelNullFields() throws {\n    // Test that null/missing fields are properly handled\n    let json = \"\"\"\n      {\n        \"id\": \"resp_null_fields_test\",\n        \"object\": \"response\",\n        \"created_at\": 1741485253,\n        \"status\": \"completed\",\n        \"background\": null,\n        \"service_tier\": null,\n        \"store\": null,\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"output\": [],\n        \"parallel_tool_calls\": true,\n        \"text\": { \"format\": { \"type\": \"text\" } },\n        \"tool_choice\": \"auto\",\n        \"tools\": [],\n        \"metadata\": {}\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n    // Test that null values are properly decoded as nil\n    XCTAssertNil(responseModel.background)\n    XCTAssertNil(responseModel.serviceTier)\n    XCTAssertNil(responseModel.store)\n  }\n}\n"
  },
  {
    "path": "Tests/OpenAITests/OutputItemTests.swift",
    "content": "import XCTest\n@testable import SwiftOpenAI\n\nfinal class OutputItemTests: XCTestCase {\n  // MARK: - Message Tests\n\n  func testOutputItemMessage() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"message\",\n        \"id\": \"msg_123\",\n        \"role\": \"assistant\",\n        \"status\": \"completed\",\n        \"content\": [\n          {\n            \"type\": \"output_text\",\n            \"text\": \"Hello, world!\",\n            \"annotations\": []\n          }\n        ]\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .message(let message) = outputItem {\n      XCTAssertEqual(message.id, \"msg_123\")\n      XCTAssertEqual(message.role, \"assistant\")\n      XCTAssertEqual(message.status, \"completed\")\n      XCTAssertEqual(message.type, \"message\")\n      XCTAssertEqual(message.content.count, 1)\n    } else {\n      XCTFail(\"Expected message output item\")\n    }\n  }\n\n  // MARK: - File Search Tests\n\n  func testOutputItemFileSearchCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"file_search_call\",\n        \"id\": \"fs_123\",\n        \"queries\": [\"search term\"],\n        \"status\": \"completed\",\n        \"results\": []\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .fileSearchCall(let fileSearch) = outputItem {\n      XCTAssertEqual(fileSearch.id, \"fs_123\")\n      XCTAssertEqual(fileSearch.queries, [\"search term\"])\n      XCTAssertEqual(fileSearch.status, \"completed\")\n      XCTAssertEqual(fileSearch.type, \"file_search_call\")\n    } else {\n      XCTFail(\"Expected file search call output item\")\n    }\n  }\n\n  // MARK: - Function Call Tests\n\n  func testOutputItemFunctionCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": 
\"function_call\",\n        \"id\": \"func_123\",\n        \"call_id\": \"call_456\",\n        \"name\": \"get_weather\",\n        \"arguments\": \"{\\\\\"location\\\\\": \\\\\"San Francisco\\\\\"}\",\n        \"status\": \"completed\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .functionCall(let functionCall) = outputItem {\n      XCTAssertEqual(functionCall.id, \"func_123\")\n      XCTAssertEqual(functionCall.callId, \"call_456\")\n      XCTAssertEqual(functionCall.name, \"get_weather\")\n      XCTAssertEqual(functionCall.arguments, \"{\\\"location\\\": \\\"San Francisco\\\"}\")\n      XCTAssertEqual(functionCall.status, \"completed\")\n      XCTAssertEqual(functionCall.type, \"function_call\")\n    } else {\n      XCTFail(\"Expected function call output item\")\n    }\n  }\n\n  // MARK: - Web Search Tests\n\n  func testOutputItemWebSearchCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"web_search_call\",\n        \"id\": \"ws_123\",\n        \"status\": \"completed\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .webSearchCall(let webSearch) = outputItem {\n      XCTAssertEqual(webSearch.id, \"ws_123\")\n      XCTAssertEqual(webSearch.status, \"completed\")\n      XCTAssertEqual(webSearch.type, \"web_search_call\")\n    } else {\n      XCTFail(\"Expected web search call output item\")\n    }\n  }\n\n  // MARK: - Computer Call Tests\n\n  func testOutputItemComputerCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"computer_call\",\n        \"id\": \"comp_123\",\n        \"call_id\": \"call_789\",\n        \"action\": {\n          \"type\": \"screenshot\"\n        },\n        \"status\": \"completed\",\n        \"pending_safety_checks\": []\n      }\n      \"\"\"\n\n    let decoder = 
JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .computerCall(let computerCall) = outputItem {\n      XCTAssertEqual(computerCall.id, \"comp_123\")\n      XCTAssertEqual(computerCall.callId, \"call_789\")\n      XCTAssertEqual(computerCall.action.type, \"screenshot\")\n      XCTAssertEqual(computerCall.status, \"completed\")\n      XCTAssertEqual(computerCall.type, \"computer_call\")\n    } else {\n      XCTFail(\"Expected computer call output item\")\n    }\n  }\n\n  // MARK: - Reasoning Tests\n\n  func testOutputItemReasoning() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"reasoning\",\n        \"id\": \"reason_123\",\n        \"status\": \"completed\",\n        \"summary\": [\n          {\n            \"type\": \"summary_text\",\n            \"text\": \"Thinking about the problem...\"\n          }\n        ]\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .reasoning(let reasoning) = outputItem {\n      XCTAssertEqual(reasoning.id, \"reason_123\")\n      XCTAssertEqual(reasoning.status, \"completed\")\n      XCTAssertEqual(reasoning.type, \"reasoning\")\n      XCTAssertEqual(reasoning.summary.count, 1)\n      XCTAssertEqual(reasoning.summary[0].text, \"Thinking about the problem...\")\n    } else {\n      XCTFail(\"Expected reasoning output item\")\n    }\n  }\n\n  // MARK: - Image Generation Tests\n\n  func testOutputItemImageGenerationCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"image_generation_call\",\n        \"id\": \"img_123\",\n        \"status\": \"completed\",\n        \"result\": \"base64_encoded_image_data\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .imageGenerationCall(let imageGen) = outputItem {\n      
XCTAssertEqual(imageGen.id, \"img_123\")\n      XCTAssertEqual(imageGen.status, \"completed\")\n      XCTAssertEqual(imageGen.result, \"base64_encoded_image_data\")\n      XCTAssertEqual(imageGen.type, \"image_generation_call\")\n    } else {\n      XCTFail(\"Expected image generation call output item\")\n    }\n  }\n\n  // MARK: - Code Interpreter Tests\n\n  func testOutputItemCodeInterpreterCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"code_interpreter_call\",\n        \"id\": \"code_123\",\n        \"container_id\": \"container_456\",\n        \"code\": \"print('Hello, world!')\",\n        \"status\": \"completed\",\n        \"outputs\": [\n          {\n            \"type\": \"logs\",\n            \"logs\": \"Hello, world!\"\n          }\n        ]\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .codeInterpreterCall(let codeInterpreter) = outputItem {\n      XCTAssertEqual(codeInterpreter.id, \"code_123\")\n      XCTAssertEqual(codeInterpreter.containerId, \"container_456\")\n      XCTAssertEqual(codeInterpreter.code, \"print('Hello, world!')\")\n      XCTAssertEqual(codeInterpreter.status, \"completed\")\n      XCTAssertEqual(codeInterpreter.type, \"code_interpreter_call\")\n      XCTAssertEqual(codeInterpreter.outputs?.count, 1)\n\n      if case .log(let logOutput) = codeInterpreter.outputs?[0] {\n        XCTAssertEqual(logOutput.logs, \"Hello, world!\")\n      } else {\n        XCTFail(\"Expected log output\")\n      }\n    } else {\n      XCTFail(\"Expected code interpreter call output item\")\n    }\n  }\n\n  func testCodeInterpreterImageOutput() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"code_interpreter_call\",\n        \"id\": \"code_123\",\n        \"container_id\": \"container_456\",\n        \"status\": \"completed\",\n        \"outputs\": [\n          {\n            \"type\": \"image\",\n         
   \"image\": {\n              \"file_id\": \"file_123\"\n            }\n          }\n        ]\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .codeInterpreterCall(let codeInterpreter) = outputItem {\n      XCTAssertEqual(codeInterpreter.outputs?.count, 1)\n\n      if case .image(let imageOutput) = codeInterpreter.outputs?[0] {\n        XCTAssertEqual(imageOutput.image.fileId, \"file_123\")\n      } else {\n        XCTFail(\"Expected image output\")\n      }\n    } else {\n      XCTFail(\"Expected code interpreter call output item\")\n    }\n  }\n\n  // MARK: - Local Shell Tests\n\n  func testOutputItemLocalShellCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"local_shell_call\",\n        \"id\": \"shell_123\",\n        \"call_id\": \"call_789\",\n        \"action\": {\n          \"type\": \"execute\",\n          \"command\": [\"ls\", \"-la\"]\n        },\n        \"status\": \"completed\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .localShellCall(let localShell) = outputItem {\n      XCTAssertEqual(localShell.id, \"shell_123\")\n      XCTAssertEqual(localShell.callId, \"call_789\")\n      XCTAssertEqual(localShell.action.type, \"execute\")\n      XCTAssertEqual(localShell.action.command, [\"ls\", \"-la\"])\n      XCTAssertEqual(localShell.status, \"completed\")\n      XCTAssertEqual(localShell.type, \"local_shell_call\")\n    } else {\n      XCTFail(\"Expected local shell call output item\")\n    }\n  }\n\n  // MARK: - MCP Call Tests\n\n  func testOutputItemMCPCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"mcp_call\",\n        \"id\": \"mcp_123\",\n        \"name\": \"get_data\",\n        \"server_label\": \"my_server\",\n        \"arguments\": \"{\\\\\"key\\\\\": \\\\\"value\\\\\"}\",\n     
   \"output\": \"result data\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .mcpCall(let mcpCall) = outputItem {\n      XCTAssertEqual(mcpCall.id, \"mcp_123\")\n      XCTAssertEqual(mcpCall.name, \"get_data\")\n      XCTAssertEqual(mcpCall.serverLabel, \"my_server\")\n      XCTAssertEqual(mcpCall.arguments, \"{\\\"key\\\": \\\"value\\\"}\")\n      XCTAssertEqual(mcpCall.output, \"result data\")\n      XCTAssertEqual(mcpCall.type, \"mcp_call\")\n      XCTAssertNil(mcpCall.error)\n    } else {\n      XCTFail(\"Expected MCP call output item\")\n    }\n  }\n\n  func testOutputItemMCPCallWithError() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"mcp_call\",\n        \"id\": \"mcp_123\",\n        \"name\": \"get_data\",\n        \"server_label\": \"my_server\",\n        \"arguments\": \"{\\\\\"key\\\\\": \\\\\"value\\\\\"}\",\n        \"error\": \"Tool execution failed\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .mcpCall(let mcpCall) = outputItem {\n      XCTAssertEqual(mcpCall.error, \"Tool execution failed\")\n      XCTAssertNil(mcpCall.output)\n    } else {\n      XCTFail(\"Expected MCP call output item\")\n    }\n  }\n\n  // MARK: - MCP List Tools Tests\n\n  func testOutputItemMCPListTools() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"mcp_list_tools\",\n        \"id\": \"list_123\",\n        \"server_label\": \"my_server\",\n        \"tools\": [\n          {\n            \"name\": \"tool1\",\n            \"description\": \"First tool\"\n          },\n          {\n            \"name\": \"tool2\",\n            \"description\": \"Second tool\",\n            \"input_schema\": {\n              \"type\": \"object\"\n            }\n          }\n        ]\n      }\n      \"\"\"\n\n    let decoder = 
JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .mcpListTools(let mcpListTools) = outputItem {\n      XCTAssertEqual(mcpListTools.id, \"list_123\")\n      XCTAssertEqual(mcpListTools.serverLabel, \"my_server\")\n      XCTAssertEqual(mcpListTools.tools.count, 2)\n      XCTAssertEqual(mcpListTools.tools[0].name, \"tool1\")\n      XCTAssertEqual(mcpListTools.tools[0].description, \"First tool\")\n      XCTAssertEqual(mcpListTools.tools[1].name, \"tool2\")\n      XCTAssertEqual(mcpListTools.type, \"mcp_list_tools\")\n      XCTAssertNil(mcpListTools.error)\n    } else {\n      XCTFail(\"Expected MCP list tools output item\")\n    }\n  }\n\n  // MARK: - MCP Approval Request Tests\n\n  func testOutputItemMCPApprovalRequest() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"mcp_approval_request\",\n        \"id\": \"approval_123\",\n        \"name\": \"sensitive_operation\",\n        \"server_label\": \"my_server\",\n        \"arguments\": \"{\\\\\"action\\\\\": \\\\\"delete_all\\\\\"}\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let outputItem = try decoder.decode(OutputItem.self, from: json.data(using: .utf8)!)\n\n    if case .mcpApprovalRequest(let approval) = outputItem {\n      XCTAssertEqual(approval.id, \"approval_123\")\n      XCTAssertEqual(approval.name, \"sensitive_operation\")\n      XCTAssertEqual(approval.serverLabel, \"my_server\")\n      XCTAssertEqual(approval.arguments, \"{\\\"action\\\": \\\"delete_all\\\"}\")\n      XCTAssertEqual(approval.type, \"mcp_approval_request\")\n    } else {\n      XCTFail(\"Expected MCP approval request output item\")\n    }\n  }\n\n  // MARK: - Error Tests\n\n  func testUnknownOutputItemType() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"unknown_type\",\n        \"data\": \"some data\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n\n    do {\n      _ = try decoder.decode(OutputItem.self, from: 
json.data(using: .utf8)!)\n      XCTFail(\"Should have thrown an error for unknown output item type\")\n    } catch {\n      // Expected error\n      XCTAssertTrue(error is DecodingError)\n    }\n  }\n}\n"
  },
  {
    "path": "Tests/OpenAITests/ResponseModelValidationTests.swift",
    "content": "import XCTest\n@testable import SwiftOpenAI\n\nfinal class ResponseModelValidationTests: XCTestCase {\n  // MARK: - Comprehensive Validation Test\n\n  func testAllResponseSchemasAreValid() throws {\n    // This test validates that all provided response schemas can be decoded\n    let schemas: [(name: String, json: String)] = [\n      (\"Text Input Response\", textInputResponseJSON),\n      (\"Image Input Response\", imageInputResponseJSON),\n      (\"Web Search Response\", webSearchResponseJSON),\n      (\"File Search Response\", fileSearchResponseJSON),\n      (\"Function Call Response\", functionCallResponseJSON),\n      (\"Reasoning Response\", reasoningResponseJSON),\n    ]\n\n    let decoder = JSONDecoder()\n\n    for (name, json) in schemas {\n      do {\n        let responseModel = try decoder.decode(ResponseModel.self, from: json.data(using: .utf8)!)\n\n        // Basic validation that the response was decoded\n        XCTAssertNotNil(responseModel.id, \"\\(name): ID should not be nil\")\n        XCTAssertEqual(responseModel.object, \"response\", \"\\(name): Object type should be 'response'\")\n        XCTAssertNotNil(responseModel.createdAt, \"\\(name): Created at should not be nil\")\n        XCTAssertNotNil(responseModel.status, \"\\(name): Status should not be nil\")\n\n        print(\"✅ \\(name) validated successfully\")\n      } catch {\n        XCTFail(\"\\(name) failed to decode: \\(error)\")\n      }\n    }\n  }\n\n  // MARK: - Individual Schema Tests\n\n  func testTextInputResponseSchemaValidation() throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: textInputResponseJSON.data(using: .utf8)!)\n\n    // Validate all fields are properly decoded\n    XCTAssertEqual(responseModel.id, \"resp_67ccd2bed1ec8190b14f964abc0542670bb6a6b452d3795b\")\n    XCTAssertEqual(responseModel.object, \"response\")\n    XCTAssertEqual(responseModel.createdAt, 1_741_476_542)\n    
XCTAssertEqual(responseModel.status, .completed)\n    XCTAssertNil(responseModel.error)\n    XCTAssertNil(responseModel.incompleteDetails)\n    XCTAssertNil(responseModel.instructions)\n    XCTAssertNil(responseModel.maxOutputTokens)\n    XCTAssertEqual(responseModel.model, \"gpt-4.1-2025-04-14\")\n    XCTAssertEqual(responseModel.parallelToolCalls, true)\n    XCTAssertNil(responseModel.previousResponseId)\n    XCTAssertNotNil(responseModel.reasoning)\n    XCTAssertNil(responseModel.reasoning?.effort)\n    XCTAssertNil(responseModel.reasoning?.summary)\n    XCTAssertEqual(responseModel.store, true)\n    XCTAssertEqual(responseModel.temperature, 1.0)\n    XCTAssertEqual(responseModel.topP, 1.0)\n    XCTAssertEqual(responseModel.truncation, \"disabled\")\n    XCTAssertNil(responseModel.user)\n    XCTAssertTrue(responseModel.metadata.isEmpty)\n\n    // Validate usage\n    XCTAssertNotNil(responseModel.usage)\n    XCTAssertEqual(responseModel.usage?.inputTokens, 36)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 87)\n    XCTAssertEqual(responseModel.usage?.totalTokens, 123)\n    XCTAssertEqual(responseModel.usage?.inputTokensDetails?.cachedTokens, 0)\n    XCTAssertEqual(responseModel.usage?.outputTokensDetails?.reasoningTokens, 0)\n\n    // Validate output\n    XCTAssertEqual(responseModel.output.count, 1)\n    if case .message(let message) = responseModel.output[0] {\n      XCTAssertEqual(message.id, \"msg_67ccd2bf17f0819081ff3bb2cf6508e60bb6a6b452d3795b\")\n      XCTAssertEqual(message.status, \"completed\")\n      XCTAssertEqual(message.role, \"assistant\")\n    } else {\n      XCTFail(\"Expected message output type\")\n    }\n  }\n\n  func testImageInputResponseSchemaValidation() throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: imageInputResponseJSON.data(using: .utf8)!)\n\n    // Specific validations for image response\n    XCTAssertEqual(responseModel.model, \"gpt-4.1-2025-04-14\")\n    
XCTAssertEqual(responseModel.usage?.inputTokens, 328)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 52)\n    XCTAssertEqual(responseModel.usage?.totalTokens, 380)\n  }\n\n  func testWebSearchResponseSchemaValidation() throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: webSearchResponseJSON.data(using: .utf8)!)\n\n    // Validate web search specific features\n    XCTAssertEqual(responseModel.output.count, 2)\n\n    // First output should be web search call\n    if case .webSearchCall(let webSearch) = responseModel.output[0] {\n      XCTAssertEqual(webSearch.id, \"ws_67ccf18f64008190a39b619f4c8455ef087bb177ab789d5c\")\n      XCTAssertEqual(webSearch.status, \"completed\")\n      XCTAssertEqual(webSearch.type, \"web_search_call\")\n    } else {\n      XCTFail(\"Expected web search call as first output\")\n    }\n\n    // Validate tools\n    XCTAssertEqual(responseModel.tools?.count, 1)\n    if let tools = responseModel.tools, case .webSearch(let webSearchTool) = tools[0] {\n      // Check that the type is webSearchPreview\n      if case .webSearchPreview = webSearchTool.type {\n        // Type is correct\n      } else {\n        XCTFail(\"Expected web search preview type\")\n      }\n      XCTAssertNotNil(webSearchTool.searchContextSize)\n      XCTAssertNotNil(webSearchTool.userLocation)\n    } else {\n      XCTFail(\"Expected web search tool\")\n    }\n  }\n\n  func testFileSearchResponseSchemaValidation() throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: fileSearchResponseJSON.data(using: .utf8)!)\n\n    // Validate file search specific features\n    XCTAssertEqual(responseModel.output.count, 2)\n\n    // First output should be file search call\n    if case .fileSearchCall(let fileSearch) = responseModel.output[0] {\n      XCTAssertEqual(fileSearch.id, \"fs_67ccf4c63cd08190887ef6464ba5681609504fb6872380d7\")\n      
XCTAssertEqual(fileSearch.queries.count, 1)\n      XCTAssertEqual(fileSearch.queries[0], \"attributes of an ancient brown dragon\")\n    } else {\n      XCTFail(\"Expected file search call as first output\")\n    }\n\n    // Validate tools\n    XCTAssertEqual(responseModel.tools?.count, 1)\n    if let tools = responseModel.tools, case .fileSearch(let fileSearchTool) = tools[0] {\n      XCTAssertEqual(fileSearchTool.type, \"file_search\")\n      XCTAssertEqual(fileSearchTool.maxNumResults, 20)\n      XCTAssertNotNil(fileSearchTool.rankingOptions)\n      XCTAssertEqual(fileSearchTool.vectorStoreIds.count, 1)\n    } else {\n      XCTFail(\"Expected file search tool\")\n    }\n  }\n\n  func testFunctionCallResponseSchemaValidation() throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: functionCallResponseJSON.data(using: .utf8)!)\n\n    // Validate function call specific features\n    XCTAssertEqual(responseModel.output.count, 1)\n\n    // Output should be function call\n    if case .functionCall(let functionCall) = responseModel.output[0] {\n      XCTAssertEqual(functionCall.id, \"fc_67ca09c6bedc8190a7abfec07b1a1332096610f474011cc0\")\n      XCTAssertEqual(functionCall.callId, \"call_unLAR8MvFNptuiZK6K6HCy5k\")\n      XCTAssertEqual(functionCall.name, \"get_current_weather\")\n      XCTAssertEqual(functionCall.arguments, \"{\\\"location\\\":\\\"Boston, MA\\\",\\\"unit\\\":\\\"celsius\\\"}\")\n      XCTAssertEqual(functionCall.status, \"completed\")\n    } else {\n      XCTFail(\"Expected function call output\")\n    }\n\n    // Validate tools\n    XCTAssertEqual(responseModel.tools?.count, 1)\n    if let tools = responseModel.tools, case .function(let functionTool) = tools[0] {\n      XCTAssertEqual(functionTool.name, \"get_current_weather\")\n      XCTAssertEqual(functionTool.strict, true)\n    } else {\n      XCTFail(\"Expected function tool\")\n    }\n  }\n\n  func testReasoningResponseSchemaValidation() 
throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: reasoningResponseJSON.data(using: .utf8)!)\n\n    // Validate reasoning specific features\n    XCTAssertEqual(responseModel.model, \"o1-2024-12-17\")\n    XCTAssertNotNil(responseModel.reasoning)\n    XCTAssertEqual(responseModel.reasoning?.effort, \"high\")\n\n    // Validate reasoning tokens\n    XCTAssertEqual(responseModel.usage?.outputTokensDetails?.reasoningTokens, 832)\n    XCTAssertEqual(responseModel.usage?.outputTokens, 1035)\n  }\n\n  // MARK: - InstructionsType Tests\n\n  func testInstructionsTypeStringDecoding() throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: instructionsStringJSON.data(using: .utf8)!)\n\n    XCTAssertNotNil(responseModel.instructions)\n    if case .string(let value) = responseModel.instructions {\n      XCTAssertEqual(value, \"You are a helpful assistant.\")\n    } else {\n      XCTFail(\"Expected string instructions type\")\n    }\n  }\n\n  func testInstructionsTypeArrayOfStringsDecoding() throws {\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: instructionsArrayOfStringsJSON.data(using: .utf8)!)\n\n    XCTAssertNotNil(responseModel.instructions)\n    if case .array(let values) = responseModel.instructions {\n      XCTAssertEqual(values.count, 2)\n      XCTAssertEqual(values[0], \"Be helpful.\")\n      XCTAssertEqual(values[1], \"Be concise.\")\n    } else {\n      XCTFail(\"Expected array of strings instructions type\")\n    }\n  }\n\n  func testInstructionsTypeMessagesDecoding() throws {\n    // This tests the fix for issue #187 - reusable prompts return instructions as message objects\n    let decoder = JSONDecoder()\n    let responseModel = try decoder.decode(ResponseModel.self, from: instructionsMessagesJSON.data(using: .utf8)!)\n\n    XCTAssertNotNil(responseModel.instructions)\n    if case 
.messages(let messages) = responseModel.instructions {\n      XCTAssertEqual(messages.count, 2)\n      XCTAssertEqual(messages[0].role, \"developer\")\n      XCTAssertEqual(messages[0].type, \"message\")\n      XCTAssertEqual(messages[1].role, \"assistant\")\n\n      // Validate content of first message\n      if case .array(let contentItems) = messages[0].content {\n        XCTAssertEqual(contentItems.count, 1)\n        if case .text(let textContent) = contentItems[0] {\n          XCTAssertEqual(textContent.text, \"You are a helpful assistant for {{customer_name}}.\")\n        } else {\n          XCTFail(\"Expected text content item\")\n        }\n      } else {\n        XCTFail(\"Expected array content in message\")\n      }\n    } else {\n      XCTFail(\"Expected messages instructions type\")\n    }\n  }\n\n  // MARK: - Test Data\n\n  private let textInputResponseJSON = \"\"\"\n    {\n      \"id\": \"resp_67ccd2bed1ec8190b14f964abc0542670bb6a6b452d3795b\",\n      \"object\": \"response\",\n      \"created_at\": 1741476542,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": null,\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [\n        {\n          \"type\": \"message\",\n          \"id\": \"msg_67ccd2bf17f0819081ff3bb2cf6508e60bb6a6b452d3795b\",\n          \"status\": \"completed\",\n          \"role\": \"assistant\",\n          \"content\": [\n            {\n              \"type\": \"output_text\",\n              \"text\": \"In a peaceful grove beneath a silver moon, a unicorn named Lumina discovered a hidden pool that reflected the stars. As she dipped her horn into the water, the pool began to shimmer, revealing a pathway to a magical realm of endless night skies. 
Filled with wonder, Lumina whispered a wish for all who dream to find their own hidden magic, and as she glanced back, her hoofprints sparkled like stardust.\",\n              \"annotations\": []\n            }\n          ]\n        }\n      ],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": {\n        \"effort\": null,\n        \"summary\": null\n      },\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": {\n        \"format\": {\n          \"type\": \"text\"\n        }\n      },\n      \"tool_choice\": \"auto\",\n      \"tools\": [],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 36,\n        \"input_tokens_details\": {\n          \"cached_tokens\": 0\n        },\n        \"output_tokens\": 87,\n        \"output_tokens_details\": {\n          \"reasoning_tokens\": 0\n        },\n        \"total_tokens\": 123\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  private let imageInputResponseJSON = \"\"\"\n    {\n      \"id\": \"resp_67ccd3a9da748190baa7f1570fe91ac604becb25c45c1d41\",\n      \"object\": \"response\",\n      \"created_at\": 1741476777,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": null,\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [\n        {\n          \"type\": \"message\",\n          \"id\": \"msg_67ccd3acc8d48190a77525dc6de64b4104becb25c45c1d41\",\n          \"status\": \"completed\",\n          \"role\": \"assistant\",\n          \"content\": [\n            {\n              \"type\": \"output_text\",\n              \"text\": \"The image depicts a scenic landscape with a wooden boardwalk or pathway leading through lush, green grass under a blue sky with some clouds. The setting suggests a peaceful natural area, possibly a park or nature reserve. 
There are trees and shrubs in the background.\",\n              \"annotations\": []\n            }\n          ]\n        }\n      ],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": {\n        \"effort\": null,\n        \"summary\": null\n      },\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": {\n        \"format\": {\n          \"type\": \"text\"\n        }\n      },\n      \"tool_choice\": \"auto\",\n      \"tools\": [],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 328,\n        \"input_tokens_details\": {\n          \"cached_tokens\": 0\n        },\n        \"output_tokens\": 52,\n        \"output_tokens_details\": {\n          \"reasoning_tokens\": 0\n        },\n        \"total_tokens\": 380\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  private let webSearchResponseJSON = \"\"\"\n    {\n      \"id\": \"resp_67ccf18ef5fc8190b16dbee19bc54e5f087bb177ab789d5c\",\n      \"object\": \"response\",\n      \"created_at\": 1741484430,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": null,\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [\n        {\n          \"type\": \"web_search_call\",\n          \"id\": \"ws_67ccf18f64008190a39b619f4c8455ef087bb177ab789d5c\",\n          \"status\": \"completed\"\n        },\n        {\n          \"type\": \"message\",\n          \"id\": \"msg_67ccf190ca3881909d433c50b1f6357e087bb177ab789d5c\",\n          \"status\": \"completed\",\n          \"role\": \"assistant\",\n          \"content\": [\n            {\n              \"type\": \"output_text\",\n              \"text\": \"As of today, March 9, 2025, one notable positive news story...\",\n              \"annotations\": [\n                {\n                  \"type\": \"url_citation\",\n          
        \"start_index\": 442,\n                  \"end_index\": 557,\n                  \"url\": \"https://.../?utm_source=chatgpt.com\",\n                  \"title\": \"...\"\n                },\n                {\n                  \"type\": \"url_citation\",\n                  \"start_index\": 962,\n                  \"end_index\": 1077,\n                  \"url\": \"https://.../?utm_source=chatgpt.com\",\n                  \"title\": \"...\"\n                },\n                {\n                  \"type\": \"url_citation\",\n                  \"start_index\": 1336,\n                  \"end_index\": 1451,\n                  \"url\": \"https://.../?utm_source=chatgpt.com\",\n                  \"title\": \"...\"\n                }\n              ]\n            }\n          ]\n        }\n      ],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": {\n        \"effort\": null,\n        \"summary\": null\n      },\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": {\n        \"format\": {\n          \"type\": \"text\"\n        }\n      },\n      \"tool_choice\": \"auto\",\n      \"tools\": [\n        {\n          \"type\": \"web_search_preview\",\n          \"domains\": [],\n          \"search_context_size\": \"medium\",\n          \"user_location\": {\n            \"type\": \"approximate\",\n            \"city\": null,\n            \"country\": \"US\",\n            \"region\": null,\n            \"timezone\": null\n          }\n        }\n      ],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 328,\n        \"input_tokens_details\": {\n          \"cached_tokens\": 0\n        },\n        \"output_tokens\": 356,\n        \"output_tokens_details\": {\n          \"reasoning_tokens\": 0\n        },\n        \"total_tokens\": 684\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  private let fileSearchResponseJSON = 
\"\"\"\n    {\n      \"id\": \"resp_67ccf4c55fc48190b71bd0463ad3306d09504fb6872380d7\",\n      \"object\": \"response\",\n      \"created_at\": 1741485253,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": null,\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [\n        {\n          \"type\": \"file_search_call\",\n          \"id\": \"fs_67ccf4c63cd08190887ef6464ba5681609504fb6872380d7\",\n          \"status\": \"completed\",\n          \"queries\": [\n            \"attributes of an ancient brown dragon\"\n          ],\n          \"results\": null\n        },\n        {\n          \"type\": \"message\",\n          \"id\": \"msg_67ccf4c93e5c81909d595b369351a9d309504fb6872380d7\",\n          \"status\": \"completed\",\n          \"role\": \"assistant\",\n          \"content\": [\n            {\n              \"type\": \"output_text\",\n              \"text\": \"The attributes of an ancient brown dragon include...\",\n              \"annotations\": [\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 320,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": \"dragons.pdf\"\n                },\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 576,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": \"dragons.pdf\"\n                },\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 815,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": \"dragons.pdf\"\n                },\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 815,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": 
\"dragons.pdf\"\n                },\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 1030,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": \"dragons.pdf\"\n                },\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 1030,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": \"dragons.pdf\"\n                },\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 1156,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": \"dragons.pdf\"\n                },\n                {\n                  \"type\": \"file_citation\",\n                  \"index\": 1225,\n                  \"file_id\": \"file-4wDz5b167pAf72nx1h9eiN\",\n                  \"filename\": \"dragons.pdf\"\n                }\n              ]\n            }\n          ]\n        }\n      ],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": {\n        \"effort\": null,\n        \"summary\": null\n      },\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": {\n        \"format\": {\n          \"type\": \"text\"\n        }\n      },\n      \"tool_choice\": \"auto\",\n      \"tools\": [\n        {\n          \"type\": \"file_search\",\n          \"filters\": null,\n          \"max_num_results\": 20,\n          \"ranking_options\": {\n            \"ranker\": \"auto\",\n            \"score_threshold\": 0.0\n          },\n          \"vector_store_ids\": [\n            \"vs_1234567890\"\n          ]\n        }\n      ],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 18307,\n        \"input_tokens_details\": {\n          \"cached_tokens\": 0\n        },\n        \"output_tokens\": 348,\n        
\"output_tokens_details\": {\n          \"reasoning_tokens\": 0\n        },\n        \"total_tokens\": 18655\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  private let functionCallResponseJSON = \"\"\"\n    {\n      \"id\": \"resp_67ca09c5efe0819096d0511c92b8c890096610f474011cc0\",\n      \"object\": \"response\",\n      \"created_at\": 1741294021,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": null,\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [\n        {\n          \"type\": \"function_call\",\n          \"id\": \"fc_67ca09c6bedc8190a7abfec07b1a1332096610f474011cc0\",\n          \"call_id\": \"call_unLAR8MvFNptuiZK6K6HCy5k\",\n          \"name\": \"get_current_weather\",\n          \"arguments\": \"{\\\\\"location\\\\\":\\\\\"Boston, MA\\\\\",\\\\\"unit\\\\\":\\\\\"celsius\\\\\"}\",\n          \"status\": \"completed\"\n        }\n      ],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": {\n        \"effort\": null,\n        \"summary\": null\n      },\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": {\n        \"format\": {\n          \"type\": \"text\"\n        }\n      },\n      \"tool_choice\": \"auto\",\n      \"tools\": [\n        {\n          \"type\": \"function\",\n          \"description\": \"Get the current weather in a given location\",\n          \"name\": \"get_current_weather\",\n          \"parameters\": {\n            \"type\": \"object\",\n            \"properties\": {\n              \"location\": {\n                \"type\": \"string\",\n                \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n              },\n              \"unit\": {\n                \"type\": \"string\",\n                \"enum\": [\n                  \"celsius\",\n                  \"fahrenheit\"\n                ]\n              }\n            },\n            \"required\": [\n              \"location\",\n              \"unit\"\n            ]\n          },\n          \"strict\": true\n        }\n      ],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 291,\n        \"output_tokens\": 23,\n        \"output_tokens_details\": {\n          \"reasoning_tokens\": 0\n        },\n        \"total_tokens\": 314\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  private let reasoningResponseJSON = \"\"\"\n    {\n      \"id\": \"resp_67ccd7eca01881908ff0b5146584e408072912b2993db808\",\n      \"object\": \"response\",\n      \"created_at\": 1741477868,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": null,\n      \"max_output_tokens\": null,\n      \"model\": \"o1-2024-12-17\",\n      \"output\": [\n        {\n          \"type\": \"message\",\n          \"id\": \"msg_67ccd7f7b5848190a6f3e95d809f6b44072912b2993db808\",\n          \"status\": \"completed\",\n          \"role\": \"assistant\",\n          \"content\": [\n            {\n              \"type\": \"output_text\",\n              \"text\": \"The classic tongue twister...\",\n              \"annotations\": []\n            }\n          ]\n        }\n      ],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": {\n        \"effort\": \"high\",\n        \"summary\": null\n      },\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": {\n        \"format\": {\n          \"type\": \"text\"\n        }\n      },\n      \"tool_choice\": \"auto\",\n      \"tools\": [],\n      \"top_p\": 1.0,\n      
\"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 81,\n        \"input_tokens_details\": {\n          \"cached_tokens\": 0\n        },\n        \"output_tokens\": 1035,\n        \"output_tokens_details\": {\n          \"reasoning_tokens\": 832\n        },\n        \"total_tokens\": 1116\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  // MARK: - InstructionsType Test Data\n\n  private let instructionsStringJSON = \"\"\"\n    {\n      \"id\": \"resp_test_string_instructions\",\n      \"object\": \"response\",\n      \"created_at\": 1741476542,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": \"You are a helpful assistant.\",\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": null,\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": null,\n      \"tool_choice\": \"auto\",\n      \"tools\": [],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 10,\n        \"output_tokens\": 10,\n        \"total_tokens\": 20\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  private let instructionsArrayOfStringsJSON = \"\"\"\n    {\n      \"id\": \"resp_test_array_instructions\",\n      \"object\": \"response\",\n      \"created_at\": 1741476542,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": [\"Be helpful.\", \"Be concise.\"],\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": null,\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": null,\n      \"tool_choice\": 
\"auto\",\n      \"tools\": [],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 10,\n        \"output_tokens\": 10,\n        \"total_tokens\": 20\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n\n  /// This JSON represents the response format when using reusable prompts with variables (issue #187)\n  private let instructionsMessagesJSON = \"\"\"\n    {\n      \"id\": \"resp_test_messages_instructions\",\n      \"object\": \"response\",\n      \"created_at\": 1741476542,\n      \"status\": \"completed\",\n      \"error\": null,\n      \"incomplete_details\": null,\n      \"instructions\": [\n        {\n          \"type\": \"message\",\n          \"content\": [{\"type\": \"input_text\", \"text\": \"You are a helpful assistant for {{customer_name}}.\"}],\n          \"role\": \"developer\"\n        },\n        {\n          \"type\": \"message\",\n          \"content\": [{\"type\": \"input_text\", \"text\": \"\"}],\n          \"role\": \"assistant\"\n        }\n      ],\n      \"max_output_tokens\": null,\n      \"model\": \"gpt-4.1-2025-04-14\",\n      \"output\": [],\n      \"parallel_tool_calls\": true,\n      \"previous_response_id\": null,\n      \"reasoning\": null,\n      \"store\": true,\n      \"temperature\": 1.0,\n      \"text\": null,\n      \"tool_choice\": \"auto\",\n      \"tools\": [],\n      \"top_p\": 1.0,\n      \"truncation\": \"disabled\",\n      \"usage\": {\n        \"input_tokens\": 10,\n        \"output_tokens\": 10,\n        \"total_tokens\": 20\n      },\n      \"user\": null,\n      \"metadata\": {}\n    }\n    \"\"\"\n}\n"
  },
  {
    "path": "Tests/OpenAITests/ResponseStreamEventTests.swift",
    "content": "import XCTest\n@testable import SwiftOpenAI\n\nfinal class ResponseStreamEventTests: XCTestCase {\n  // MARK: - Response Events Tests\n\n  func testResponseCreatedEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.created\",\n        \"sequence_number\": 1,\n        \"response\": {\n          \"id\": \"resp_123\",\n          \"object\": \"model_response\",\n          \"created_at\": 1704067200,\n          \"model\": \"gpt-4o\",\n          \"usage\": {\n            \"prompt_tokens\": 10,\n            \"completion_tokens\": 20,\n            \"total_tokens\": 30\n          },\n          \"output\": [],\n          \"status\": \"in_progress\",\n          \"metadata\": {},\n          \"parallel_tool_calls\": true,\n          \"text\": {\n            \"format\": {\n              \"type\": \"text\"\n            }\n          },\n          \"tool_choice\": \"none\",\n          \"tools\": []\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .responseCreated(let createdEvent) = event {\n      XCTAssertEqual(createdEvent.type, \"response.created\")\n      XCTAssertEqual(createdEvent.sequenceNumber, 1)\n      XCTAssertEqual(createdEvent.response.id, \"resp_123\")\n      XCTAssertEqual(createdEvent.response.status, .inProgress)\n    } else {\n      XCTFail(\"Expected responseCreated event\")\n    }\n  }\n\n  func testResponseCompletedEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.completed\",\n        \"sequence_number\": 10,\n        \"response\": {\n          \"id\": \"resp_123\",\n          \"object\": \"model_response\",\n          \"created_at\": 1704067200,\n          \"model\": \"gpt-4o\",\n          \"usage\": {\n            \"prompt_tokens\": 10,\n            \"completion_tokens\": 20,\n            \"total_tokens\": 30\n          },\n          \"output\": [],\n          
\"status\": \"completed\",\n          \"metadata\": {},\n          \"parallel_tool_calls\": true,\n          \"text\": {\n            \"format\": {\n              \"type\": \"text\"\n            }\n          },\n          \"tool_choice\": \"none\",\n          \"tools\": []\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .responseCompleted(let completedEvent) = event {\n      XCTAssertEqual(completedEvent.type, \"response.completed\")\n      XCTAssertEqual(completedEvent.sequenceNumber, 10)\n      XCTAssertEqual(completedEvent.response.status, .completed)\n    } else {\n      XCTFail(\"Expected responseCompleted event\")\n    }\n  }\n\n  // MARK: - Output Item Events Tests\n\n  func testOutputItemAddedEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.added\",\n        \"output_index\": 0,\n        \"sequence_number\": 2,\n        \"item\": {\n          \"id\": \"item_123\",\n          \"type\": \"message\",\n          \"status\": \"in_progress\",\n          \"role\": \"assistant\",\n          \"content\": []\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemAdded(let addedEvent) = event {\n      XCTAssertEqual(addedEvent.type, \"response.output_item.added\")\n      XCTAssertEqual(addedEvent.outputIndex, 0)\n      XCTAssertEqual(addedEvent.sequenceNumber, 2)\n\n      if case .message(let message) = addedEvent.item {\n        XCTAssertEqual(message.id, \"item_123\")\n        XCTAssertEqual(message.type, \"message\")\n        XCTAssertEqual(message.status, \"in_progress\")\n        XCTAssertEqual(message.role, \"assistant\")\n      } else {\n        XCTFail(\"Expected message item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemAdded event\")\n    }\n 
 }\n\n  func testOutputItemAddedWithFunctionCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.added\",\n        \"output_index\": 0,\n        \"sequence_number\": 2,\n        \"item\": {\n          \"id\": \"func_123\",\n          \"type\": \"function_call\",\n          \"call_id\": \"call_456\",\n          \"name\": \"get_weather\",\n          \"arguments\": \"\",\n          \"status\": \"in_progress\"\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemAdded(let addedEvent) = event {\n      if case .functionCall(let functionCall) = addedEvent.item {\n        XCTAssertEqual(functionCall.id, \"func_123\")\n        XCTAssertEqual(functionCall.callId, \"call_456\")\n        XCTAssertEqual(functionCall.name, \"get_weather\")\n        XCTAssertEqual(functionCall.status, \"in_progress\")\n      } else {\n        XCTFail(\"Expected function call item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemAdded event\")\n    }\n  }\n\n  func testOutputItemAddedWithImageGeneration() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.added\",\n        \"output_index\": 0,\n        \"sequence_number\": 3,\n        \"item\": {\n          \"id\": \"img_123\",\n          \"type\": \"image_generation_call\",\n          \"status\": \"in_progress\"\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemAdded(let addedEvent) = event {\n      if case .imageGenerationCall(let imageGen) = addedEvent.item {\n        XCTAssertEqual(imageGen.id, \"img_123\")\n        XCTAssertEqual(imageGen.type, \"image_generation_call\")\n        XCTAssertEqual(imageGen.status, \"in_progress\")\n      } else {\n        XCTFail(\"Expected image generation call 
item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemAdded event\")\n    }\n  }\n\n  func testOutputItemAddedWithCodeInterpreter() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.added\",\n        \"output_index\": 0,\n        \"sequence_number\": 4,\n        \"item\": {\n          \"id\": \"code_123\",\n          \"type\": \"code_interpreter_call\",\n          \"container_id\": \"container_456\",\n          \"status\": \"interpreting\"\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemAdded(let addedEvent) = event {\n      if case .codeInterpreterCall(let codeInterpreter) = addedEvent.item {\n        XCTAssertEqual(codeInterpreter.id, \"code_123\")\n        XCTAssertEqual(codeInterpreter.containerId, \"container_456\")\n        XCTAssertEqual(codeInterpreter.status, \"interpreting\")\n      } else {\n        XCTFail(\"Expected code interpreter call item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemAdded event\")\n    }\n  }\n\n  func testOutputItemDoneWithMCPCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.done\",\n        \"output_index\": 0,\n        \"sequence_number\": 5,\n        \"item\": {\n          \"id\": \"mcp_123\",\n          \"type\": \"mcp_call\",\n          \"name\": \"get_data\",\n          \"server_label\": \"my_server\",\n          \"arguments\": \"{\\\\\"key\\\\\": \\\\\"value\\\\\"}\",\n          \"output\": \"result data\"\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemDone(let doneEvent) = event {\n      if case .mcpCall(let mcpCall) = doneEvent.item {\n        XCTAssertEqual(mcpCall.id, \"mcp_123\")\n        XCTAssertEqual(mcpCall.name, \"get_data\")\n        
XCTAssertEqual(mcpCall.serverLabel, \"my_server\")\n        XCTAssertEqual(mcpCall.output, \"result data\")\n      } else {\n        XCTFail(\"Expected MCP call item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemDone event\")\n    }\n  }\n\n  // MARK: - Text Events Tests\n\n  func testOutputTextDeltaEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_text.delta\",\n        \"item_id\": \"item_123\",\n        \"output_index\": 0,\n        \"content_index\": 0,\n        \"delta\": \"Hello, how can I \",\n        \"sequence_number\": 3\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputTextDelta(let deltaEvent) = event {\n      XCTAssertEqual(deltaEvent.type, \"response.output_text.delta\")\n      XCTAssertEqual(deltaEvent.itemId, \"item_123\")\n      XCTAssertEqual(deltaEvent.outputIndex, 0)\n      XCTAssertEqual(deltaEvent.contentIndex, 0)\n      XCTAssertEqual(deltaEvent.delta, \"Hello, how can I \")\n      XCTAssertEqual(deltaEvent.sequenceNumber, 3)\n    } else {\n      XCTFail(\"Expected outputTextDelta event\")\n    }\n  }\n\n  func testOutputTextDoneEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_text.done\",\n        \"item_id\": \"item_123\",\n        \"output_index\": 0,\n        \"content_index\": 0,\n        \"text\": \"Hello, how can I help you today?\",\n        \"sequence_number\": 5\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputTextDone(let doneEvent) = event {\n      XCTAssertEqual(doneEvent.type, \"response.output_text.done\")\n      XCTAssertEqual(doneEvent.text, \"Hello, how can I help you today?\")\n    } else {\n      XCTFail(\"Expected outputTextDone event\")\n    }\n  }\n\n  // MARK: - Function Call Events 
Tests\n\n  func testFunctionCallArgumentsDeltaEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.function_call_arguments.delta\",\n        \"item_id\": \"item_456\",\n        \"output_index\": 0,\n        \"delta\": \"{\\\\\"location\\\\\": \\\\\"San \",\n        \"sequence_number\": 4\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .functionCallArgumentsDelta(let deltaEvent) = event {\n      XCTAssertEqual(deltaEvent.type, \"response.function_call_arguments.delta\")\n      XCTAssertEqual(deltaEvent.itemId, \"item_456\")\n      XCTAssertEqual(deltaEvent.delta, \"{\\\"location\\\": \\\"San \")\n    } else {\n      XCTFail(\"Expected functionCallArgumentsDelta event\")\n    }\n  }\n\n  func testFunctionCallArgumentsDoneEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.function_call_arguments.done\",\n        \"item_id\": \"item_456\",\n        \"output_index\": 0,\n        \"name\": \"get_current_weather\",\n        \"arguments\": \"{\\\\\"location\\\\\": \\\\\"San Francisco, CA\\\\\"}\",\n        \"sequence_number\": 6\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .functionCallArgumentsDone(let doneEvent) = event {\n      XCTAssertEqual(doneEvent.type, \"response.function_call_arguments.done\")\n      XCTAssertEqual(doneEvent.name, \"get_current_weather\")\n      XCTAssertEqual(doneEvent.arguments, \"{\\\"location\\\": \\\"San Francisco, CA\\\"}\")\n    } else {\n      XCTFail(\"Expected functionCallArgumentsDone event\")\n    }\n  }\n\n  // MARK: - File Search Events Tests\n\n  func testFileSearchCallEvents() throws {\n    // Test in progress event\n    let inProgressJson = \"\"\"\n      {\n        \"type\": \"response.file_search_call.in_progress\",\n        
\"output_index\": 0,\n        \"item_id\": \"fs_123\",\n        \"sequence_number\": 7\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let inProgressEvent = try decoder.decode(ResponseStreamEvent.self, from: inProgressJson.data(using: .utf8)!)\n\n    if case .fileSearchCallInProgress(let event) = inProgressEvent {\n      XCTAssertEqual(event.type, \"response.file_search_call.in_progress\")\n      XCTAssertEqual(event.itemId, \"fs_123\")\n    } else {\n      XCTFail(\"Expected fileSearchCallInProgress event\")\n    }\n\n    // Test searching event\n    let searchingJson = \"\"\"\n      {\n        \"type\": \"response.file_search_call.searching\",\n        \"output_index\": 0,\n        \"item_id\": \"fs_123\",\n        \"sequence_number\": 8\n      }\n      \"\"\"\n\n    let searchingEvent = try decoder.decode(ResponseStreamEvent.self, from: searchingJson.data(using: .utf8)!)\n\n    if case .fileSearchCallSearching(let event) = searchingEvent {\n      XCTAssertEqual(event.type, \"response.file_search_call.searching\")\n    } else {\n      XCTFail(\"Expected fileSearchCallSearching event\")\n    }\n\n    // Test completed event\n    let completedJson = \"\"\"\n      {\n        \"type\": \"response.file_search_call.completed\",\n        \"output_index\": 0,\n        \"item_id\": \"fs_123\",\n        \"sequence_number\": 9\n      }\n      \"\"\"\n\n    let completedEvent = try decoder.decode(ResponseStreamEvent.self, from: completedJson.data(using: .utf8)!)\n\n    if case .fileSearchCallCompleted(let event) = completedEvent {\n      XCTAssertEqual(event.type, \"response.file_search_call.completed\")\n    } else {\n      XCTFail(\"Expected fileSearchCallCompleted event\")\n    }\n  }\n\n  // MARK: - Web Search Events Tests\n\n  func testWebSearchCallEvents() throws {\n    let decoder = JSONDecoder()\n\n    // Test all web search event types\n    let eventTypes = [\n      (\"response.web_search_call.in_progress\", \"webSearchCallInProgress\"),\n      
(\"response.web_search_call.searching\", \"webSearchCallSearching\"),\n      (\"response.web_search_call.completed\", \"webSearchCallCompleted\"),\n    ]\n\n    for (eventType, expectedCase) in eventTypes {\n      let json = \"\"\"\n        {\n          \"type\": \"\\(eventType)\",\n          \"output_index\": 0,\n          \"item_id\": \"ws_123\",\n          \"sequence_number\": 10\n        }\n        \"\"\"\n\n      let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n      switch event {\n      case .webSearchCallInProgress(let e) where expectedCase == \"webSearchCallInProgress\":\n        XCTAssertEqual(e.type, eventType)\n        XCTAssertEqual(e.itemId, \"ws_123\")\n\n      case .webSearchCallSearching(let e) where expectedCase == \"webSearchCallSearching\":\n        XCTAssertEqual(e.type, eventType)\n\n      case .webSearchCallCompleted(let e) where expectedCase == \"webSearchCallCompleted\":\n        XCTAssertEqual(e.type, eventType)\n\n      default:\n        XCTFail(\"Expected \\(expectedCase) event for type \\(eventType)\")\n      }\n    }\n  }\n\n  // MARK: - Reasoning Events Tests\n\n  func testReasoningSummaryTextDeltaEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.reasoning_summary_text.delta\",\n        \"item_id\": \"reason_123\",\n        \"output_index\": 0,\n        \"summary_index\": 0,\n        \"delta\": \"Let me think about this problem step by step\",\n        \"sequence_number\": 11\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .reasoningSummaryTextDelta(let deltaEvent) = event {\n      XCTAssertEqual(deltaEvent.type, \"response.reasoning_summary_text.delta\")\n      XCTAssertEqual(deltaEvent.delta, \"Let me think about this problem step by step\")\n      XCTAssertEqual(deltaEvent.summaryIndex, 0)\n    } else {\n      XCTFail(\"Expected 
reasoningSummaryTextDelta event\")\n    }\n  }\n\n  // MARK: - Error Event Test\n\n  func testErrorEvent() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"error\",\n        \"code\": \"rate_limit_exceeded\",\n        \"message\": \"You have exceeded your rate limit\",\n        \"param\": \"model\",\n        \"sequence_number\": 99\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .error(let errorEvent) = event {\n      XCTAssertEqual(errorEvent.type, \"error\")\n      XCTAssertEqual(errorEvent.code, \"rate_limit_exceeded\")\n      XCTAssertEqual(errorEvent.message, \"You have exceeded your rate limit\")\n      XCTAssertEqual(errorEvent.param, \"model\")\n      XCTAssertEqual(errorEvent.sequenceNumber, 99)\n    } else {\n      XCTFail(\"Expected error event\")\n    }\n  }\n\n  // MARK: - Complex Streaming Sequence Test\n\n  func testCompleteStreamingSequence() throws {\n    // This test simulates a complete streaming sequence with multiple events\n    let events = [\n      \"\"\"\n      {\"type\": \"response.created\", \"sequence_number\": 1, \"response\": {\"id\": \"resp_123\", \"object\": \"model_response\", \"created_at\": 1704067200, \"model\": \"gpt-4o\", \"usage\": {\"prompt_tokens\": 10, \"completion_tokens\": 0, \"total_tokens\": 10}, \"output\": [], \"status\": \"in_progress\", \"metadata\": {}, \"parallel_tool_calls\": true, \"text\": {\"format\": {\"type\": \"text\"}}, \"tool_choice\": \"none\", \"tools\": []}}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.output_item.added\", \"output_index\": 0, \"sequence_number\": 2, \"item\": {\"id\": \"item_123\", \"type\": \"message\", \"status\": \"in_progress\", \"role\": \"assistant\", \"content\": []}}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.content_part.added\", \"item_id\": \"item_123\", \"output_index\": 0, \"content_index\": 0, \"sequence_number\": 
3, \"part\": {\"type\": \"text\", \"text\": \"\"}}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.output_text.delta\", \"item_id\": \"item_123\", \"output_index\": 0, \"content_index\": 0, \"delta\": \"Hello\", \"sequence_number\": 4}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.output_text.delta\", \"item_id\": \"item_123\", \"output_index\": 0, \"content_index\": 0, \"delta\": \", world!\", \"sequence_number\": 5}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.output_text.done\", \"item_id\": \"item_123\", \"output_index\": 0, \"content_index\": 0, \"text\": \"Hello, world!\", \"sequence_number\": 6}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.content_part.done\", \"item_id\": \"item_123\", \"output_index\": 0, \"content_index\": 0, \"sequence_number\": 7, \"part\": {\"type\": \"text\", \"text\": \"Hello, world!\"}}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.output_item.done\", \"output_index\": 0, \"sequence_number\": 8, \"item\": {\"id\": \"item_123\", \"type\": \"message\", \"status\": \"completed\", \"role\": \"assistant\", \"content\": [{\"type\": \"output_text\", \"text\": \"Hello, world!\", \"annotations\": []}]}}\n      \"\"\",\n      \"\"\"\n      {\"type\": \"response.completed\", \"sequence_number\": 9, \"response\": {\"id\": \"resp_123\", \"object\": \"model_response\", \"created_at\": 1704067200, \"model\": \"gpt-4o\", \"usage\": {\"prompt_tokens\": 10, \"completion_tokens\": 5, \"total_tokens\": 15}, \"output\": [], \"status\": \"completed\", \"metadata\": {}, \"parallel_tool_calls\": true, \"text\": {\"format\": {\"type\": \"text\"}}, \"tool_choice\": \"none\", \"tools\": []}}\n      \"\"\",\n    ]\n\n    let decoder = JSONDecoder()\n    var receivedEvents = [ResponseStreamEvent]()\n\n    // Decode all events\n    for eventJson in events {\n      let event = try decoder.decode(ResponseStreamEvent.self, from: eventJson.data(using: .utf8)!)\n      receivedEvents.append(event)\n    }\n\n  
  // Verify we received all events\n    XCTAssertEqual(receivedEvents.count, 9)\n\n    // Verify the sequence\n    if case .responseCreated = receivedEvents[0] {\n      // Success\n    } else {\n      XCTFail(\"First event should be responseCreated\")\n    }\n\n    if case .responseCompleted = receivedEvents[8] {\n      // Success\n    } else {\n      XCTFail(\"Last event should be responseCompleted\")\n    }\n  }\n\n  // MARK: - Image Generation Events Tests\n\n  func testImageGenerationEvents() throws {\n    let decoder = JSONDecoder()\n\n    // Test partial image event\n    let partialImageJson = \"\"\"\n      {\n        \"type\": \"response.image_generation_call.partial_image\",\n        \"output_index\": 0,\n        \"item_id\": \"img_123\",\n        \"sequence_number\": 12,\n        \"partial_image_index\": 0,\n        \"partial_image_b64\": \"iVBORw0KGgoAAAANS...\"\n      }\n      \"\"\"\n\n    let partialImageEvent = try decoder.decode(ResponseStreamEvent.self, from: partialImageJson.data(using: .utf8)!)\n\n    if case .imageGenerationCallPartialImage(let event) = partialImageEvent {\n      XCTAssertEqual(event.type, \"response.image_generation_call.partial_image\")\n      XCTAssertEqual(event.partialImageIndex, 0)\n      XCTAssertEqual(event.partialImageB64, \"iVBORw0KGgoAAAANS...\")\n    } else {\n      XCTFail(\"Expected imageGenerationCallPartialImage event\")\n    }\n  }\n\n  // MARK: - Additional Output Item Type Tests\n\n  func testOutputItemLocalShellCall() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.added\",\n        \"output_index\": 0,\n        \"sequence_number\": 6,\n        \"item\": {\n          \"id\": \"shell_123\",\n          \"type\": \"local_shell_call\",\n          \"call_id\": \"call_789\",\n          \"action\": {\n            \"type\": \"execute\",\n            \"command\": [\"ls\", \"-la\"]\n          },\n          \"status\": \"in_progress\"\n        }\n      }\n      \"\"\"\n\n    let decoder 
= JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemAdded(let addedEvent) = event {\n      if case .localShellCall(let shellCall) = addedEvent.item {\n        XCTAssertEqual(shellCall.id, \"shell_123\")\n        XCTAssertEqual(shellCall.callId, \"call_789\")\n        XCTAssertEqual(shellCall.action.command, [\"ls\", \"-la\"])\n        XCTAssertEqual(shellCall.status, \"in_progress\")\n      } else {\n        XCTFail(\"Expected local shell call item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemAdded event\")\n    }\n  }\n\n  func testOutputItemMCPListTools() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.done\",\n        \"output_index\": 0,\n        \"sequence_number\": 7,\n        \"item\": {\n          \"id\": \"list_123\",\n          \"type\": \"mcp_list_tools\",\n          \"server_label\": \"my_server\",\n          \"tools\": [\n            {\n              \"name\": \"tool1\",\n              \"description\": \"First tool\"\n            }\n          ]\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemDone(let doneEvent) = event {\n      if case .mcpListTools(let listTools) = doneEvent.item {\n        XCTAssertEqual(listTools.id, \"list_123\")\n        XCTAssertEqual(listTools.serverLabel, \"my_server\")\n        XCTAssertEqual(listTools.tools.count, 1)\n        XCTAssertEqual(listTools.tools[0].name, \"tool1\")\n      } else {\n        XCTFail(\"Expected MCP list tools item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemDone event\")\n    }\n  }\n\n  func testOutputItemMCPApprovalRequest() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.output_item.added\",\n        \"output_index\": 0,\n        \"sequence_number\": 8,\n        \"item\": {\n          
\"id\": \"approval_123\",\n          \"type\": \"mcp_approval_request\",\n          \"name\": \"sensitive_operation\",\n          \"server_label\": \"my_server\",\n          \"arguments\": \"{\\\\\"action\\\\\": \\\\\"delete_all\\\\\"}\"\n        }\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n    let event = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n\n    if case .outputItemAdded(let addedEvent) = event {\n      if case .mcpApprovalRequest(let approval) = addedEvent.item {\n        XCTAssertEqual(approval.id, \"approval_123\")\n        XCTAssertEqual(approval.name, \"sensitive_operation\")\n        XCTAssertEqual(approval.serverLabel, \"my_server\")\n        XCTAssertEqual(approval.arguments, \"{\\\"action\\\": \\\"delete_all\\\"}\")\n      } else {\n        XCTFail(\"Expected MCP approval request item\")\n      }\n    } else {\n      XCTFail(\"Expected outputItemAdded event\")\n    }\n  }\n\n  // MARK: - Unknown Event Type Test\n\n  func testUnknownEventType() throws {\n    let json = \"\"\"\n      {\n        \"type\": \"response.unknown_event\",\n        \"data\": \"some data\"\n      }\n      \"\"\"\n\n    let decoder = JSONDecoder()\n\n    do {\n      _ = try decoder.decode(ResponseStreamEvent.self, from: json.data(using: .utf8)!)\n    } catch {\n      XCTFail(\"Should not have thrown an error for unknown event type\")\n    }\n  }\n}\n"
  },
  {
    "path": "rules.swiftformat",
    "content": "# copied from https://github.com/airbnb/swift/blob/master/Sources/AirbnbSwiftFormatTool/airbnb.swiftformat\n\n# Exclude checkout directories for common package managers\n--exclude Carthage,Pods,.build\n\n# options\n--swiftversion 6.0\n--languagemode 5\n--self remove # redundantSelf\n--importgrouping testable-bottom # sortedImports\n--commas always # trailingCommas\n--trimwhitespace always # trailingSpace\n--indent 2 #indent\n--ifdef no-indent #indent\n--indentstrings true #indent\n--wraparguments before-first # wrapArguments\n--wrapparameters before-first # wrapArguments\n--wrapcollections before-first # wrapArguments\n--wrapconditions before-first # wrapArguments\n--wrapreturntype if-multiline #wrapArguments\n--wrapeffects if-multiline #wrapArguments\n--closingparen same-line # wrapArguments\n--wraptypealiases before-first # wrapArguments\n--funcattributes prev-line # wrapAttributes\n--computedvarattrs prev-line # wrapAttributes\n--storedvarattrs same-line # wrapAttributes\n--complexattrs prev-line # wrapAttributes\n--typeattributes prev-line # wrapAttributes\n--wrapternary before-operators # wrap\n--structthreshold 20 # organizeDeclarations\n--enumthreshold 20 # organizeDeclarations\n--organizetypes class,struct,enum,extension,actor # organizeDeclarations\n--visibilityorder beforeMarks,instanceLifecycle,open,public,package,internal,fileprivate,private # organizeDeclarations\n--typeorder nestedType,staticProperty,staticPropertyWithBody,classPropertyWithBody,swiftUIPropertyWrapper,instanceProperty,instancePropertyWithBody,staticMethod,classMethod,instanceMethod # organizeDeclarations\n# --sortswiftuiprops first-appearance-sort #organizeDeclarations\n--extensionacl on-declarations # extensionAccessControl\n--patternlet inline # hoistPatternLet\n--propertytypes inferred # redundantType, propertyTypes\n--typeblanklines preserve # blankLinesAtStartOfScope, blankLinesAtEndOfScope\n--emptybraces spaced # emptyBraces\n--ranges preserve # 
spaceAroundOperators\n--operatorfunc no-space # spaceAroundOperators\n--someAny disabled # opaqueGenericParameters\n--elseposition same-line # elseOnSameLine\n--guardelse next-line # elseOnSameLine\n--onelineforeach convert # preferForLoop\n--shortoptionals always # typeSugar\n--semicolons never # semicolons\n--doccomments preserve # docComments\n--markcategories false\n# --header Licensed under the MIT License. See License.txt in the project root for license information.\n\n# We recommend a max width of 100 but _strictly enforce_ a max width of 130\n--maxwidth 130 # wrap\n\n# rules\n--rules anyObjectProtocol\n--rules blankLinesBetweenScopes\n--rules consecutiveSpaces\n--rules consecutiveBlankLines\n--rules duplicateImports\n--rules extensionAccessControl\n# --rules environmentEntry\n--rules hoistPatternLet\n--rules indent\n--rules markTypes\n--rules organizeDeclarations\n--rules redundantParens\n--rules redundantReturn\n--rules redundantSelf\n--rules redundantType\n--rules redundantPattern\n--rules redundantGet\n--rules redundantFileprivate\n--rules redundantRawValues\n# --rules redundantEquatable\n--rules sortImports\n--rules sortDeclarations\n--rules strongifiedSelf\n--rules trailingCommas\n--rules trailingSpace\n--rules linebreakAtEndOfFile\n--rules typeSugar\n--rules wrap\n--rules wrapMultilineStatementBraces\n--rules wrapArguments\n--rules wrapAttributes\n--rules braces\n--rules redundantClosure\n--rules redundantInit\n--rules redundantVoidReturnType\n--rules redundantOptionalBinding\n--rules redundantInternal\n--rules redundantProperty\n--rules unusedArguments\n--rules spaceInsideBrackets\n--rules spaceInsideBraces\n--rules spaceAroundBraces\n--rules spaceInsideParens\n--rules spaceAroundParens\n--rules spaceAroundOperators\n--rules enumNamespaces\n--rules blockComments\n--rules docComments\n--rules docCommentsBeforeModifiers\n--rules spaceAroundComments\n--rules spaceInsideComments\n--rules blankLinesAtStartOfScope\n--rules blankLinesAtEndOfScope\n--rules 
emptyBraces\n--rules hoistAwait\n--rules hoistTry\n--rules andOperator\n--rules opaqueGenericParameters\n# --rules genericExtensions\n--rules trailingClosures\n--rules elseOnSameLine\n--rules sortTypealiases\n--rules preferForLoop\n--rules conditionalAssignment\n--rules wrapMultilineConditionalAssignment\n--rules void\n--rules blankLineAfterSwitchCase\n--rules consistentSwitchCaseSpacing\n--rules semicolons\n--rules propertyTypes\n--rules blankLinesBetweenChainedFunctions\n--rules leadingDelimiters\n--rules preferKeyPath\n--rules unusedPrivateDeclarations\n# --rules emptyExtensions\n# --rules preferCountWhere\n# --rules fileHeader\n"
  }
]